| Column | Type | Range / values |
|---|---|---|
| modelId | string | lengths 5 to 139 |
| author | string | lengths 2 to 42 |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 to 2025-06-27 12:29:05 |
| downloads | int64 | 0 to 223M |
| likes | int64 | 0 to 11.7k |
| library_name | string | 500 distinct values |
| tags | sequence | lengths 1 to 4.05k |
| pipeline_tag | string | 54 distinct values |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2025-06-27 12:27:55 |
| card | string | lengths 11 to 1.01M |
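The table above describes the columns of this model-card dump. Below is a minimal sketch of inspecting a dataset with this schema using the `datasets` library; the repository id `user/model-cards-dump` is a placeholder, not the actual dataset name.

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the dump you are actually working with.
ds = load_dataset("user/model-cards-dump", split="train")

# Columns follow the schema above: modelId, author, last_modified, downloads,
# likes, library_name, tags, pipeline_tag, createdAt, card.
print(ds.column_names)

# Example query: the most-downloaded transformers models tagged for translation.
translation = ds.filter(
    lambda row: row["library_name"] == "transformers"
    and row["pipeline_tag"] == "translation"
)
top = translation.sort("downloads", reverse=True).select(range(5))
for row in top:
    print(row["modelId"], row["downloads"], row["likes"])
```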
aroot/eng-mya-simcse_nbrs_r
aroot
2023-07-18T22:44:34Z
9
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T20:05:06Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-mya-simcse_nbrs_r results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-mya-simcse_nbrs_r This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9292 - Bleu: 4.3259 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
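The translation cards in this dump list their hyperparameters but not the training code. As a rough illustration only (not the authors' script), the configuration below mirrors the values from the `eng-mya-simcse_nbrs_r` card using `Seq2SeqTrainingArguments`; the Adam betas and epsilon shown in the card are the library defaults.

```python
from transformers import Seq2SeqTrainingArguments

# Mirrors the hyperparameters listed in the card above.
args = Seq2SeqTrainingArguments(
    output_dir="eng-mya-simcse_nbrs_r",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
    fp16=True,                   # "mixed_precision_training: Native AMP" (requires a CUDA device)
    predict_with_generate=True,  # so BLEU can be computed from generated translations
)
print(args.learning_rate, args.num_train_epochs)
```

A `Seq2SeqTrainer` built with these arguments, the mBART checkpoint named in the card, and tokenized parallel data would reproduce the listed setup.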
jordyvl/18-tiny_tobacco3482_kd_NKD_t1.0_g1.5
jordyvl
2023-07-18T22:42:34Z
166
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-07-18T22:07:30Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 18-tiny_tobacco3482_kd_NKD_t1.0_g1.5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 18-tiny_tobacco3482_kd_NKD_t1.0_g1.5 This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.0957 - Accuracy: 0.805 - Brier Loss: 0.2927 - Nll: 1.1753 - F1 Micro: 0.805 - F1 Macro: 0.7833 - Ece: 0.1572 - Aurc: 0.0655 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Brier Loss | Nll | F1 Micro | F1 Macro | Ece | Aurc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:|:-------:|:--------:|:--------:|:------:|:------:| | No log | 1.0 | 7 | 4.7898 | 0.1 | 1.0292 | 9.4902 | 0.1000 | 0.0772 | 0.3220 | 0.9001 | | No log | 2.0 | 14 | 3.9970 | 0.1 | 0.9420 | 10.0981 | 0.1000 | 0.1071 | 0.2441 | 0.8581 | | No log | 3.0 | 21 | 3.6641 | 0.075 | 0.8956 | 9.5324 | 0.075 | 0.0777 | 0.1896 | 0.9137 | | No log | 4.0 | 28 | 3.6014 | 0.18 | 0.8691 | 9.6679 | 0.18 | 0.0781 | 0.2345 | 0.5824 | | No log | 5.0 | 35 | 3.5833 | 0.23 | 0.8347 | 9.6569 | 0.23 | 0.1572 | 0.2618 | 0.5205 | | No log | 6.0 | 42 | 3.5576 | 0.44 | 0.7860 | 5.9410 | 0.44 | 0.2946 | 0.3475 | 0.3232 | | No log | 7.0 | 49 | 3.5400 | 0.575 | 0.7404 | 4.2387 | 0.575 | 0.4638 | 0.4007 | 0.2294 | | No log | 8.0 | 56 | 3.5319 | 0.545 | 0.7181 | 4.5958 | 0.545 | 0.4482 | 0.3502 | 0.2374 | | No log | 9.0 | 63 | 3.5405 | 0.52 | 0.7002 | 3.9862 | 0.52 | 0.4101 | 0.3148 | 0.2506 | | No log | 10.0 | 70 | 3.5341 | 0.61 | 0.6897 | 3.2707 | 0.61 | 0.5118 | 0.3775 | 0.2235 | | No log | 11.0 | 77 | 3.5259 | 0.66 | 0.6771 | 2.6882 | 0.66 | 0.5201 | 0.4365 | 0.1420 | | No log | 12.0 | 84 | 3.5215 | 0.66 | 0.6463 | 2.4544 | 0.66 | 0.5387 | 0.3750 | 0.1664 | | No log | 13.0 | 91 | 3.5363 | 0.58 | 0.6232 | 2.3149 | 0.58 | 0.5090 | 0.3285 | 0.1858 | | No log | 14.0 | 98 | 3.5161 | 0.675 | 0.6008 | 2.6144 | 0.675 | 0.5411 | 0.3690 | 0.1237 | | No log | 15.0 | 105 | 3.5073 | 0.67 | 0.5845 | 2.1229 | 0.67 | 0.5577 | 0.3405 | 0.1350 | | No log | 16.0 | 112 | 3.5272 | 0.67 | 0.5338 | 2.4215 | 0.67 | 0.5603 | 0.3154 | 0.1325 | | No log | 17.0 | 119 | 3.5332 | 0.695 | 0.5367 | 2.1675 | 0.695 | 0.6056 | 0.3140 | 0.1071 | | No log | 18.0 | 126 | 3.5659 | 0.655 | 0.4841 | 1.9565 | 0.655 | 0.5559 | 0.2600 | 0.1365 | | No log | 19.0 | 133 | 3.5438 | 0.69 | 0.4817 | 1.8201 | 0.69 | 0.5735 | 0.2574 | 0.1202 | | No log | 20.0 | 140 | 3.5019 | 0.74 | 0.4725 | 1.6346 | 0.74 | 0.6486 | 0.2939 | 0.0931 | | No log | 21.0 | 147 | 3.5236 | 0.755 | 0.4407 | 1.3134 | 0.755 | 0.6811 | 0.2762 | 0.0820 | | No log | 22.0 | 154 | 3.5303 | 0.755 | 0.4143 | 1.2834 | 0.755 | 0.6843 | 0.2434 | 0.0806 | | No log | 23.0 | 161 | 3.5541 | 0.77 | 0.4034 | 1.4417 | 0.7700 | 0.6891 | 
0.2382 | 0.0842 | | No log | 24.0 | 168 | 3.5675 | 0.765 | 0.3853 | 1.6692 | 0.765 | 0.7072 | 0.2309 | 0.0807 | | No log | 25.0 | 175 | 3.5411 | 0.745 | 0.3914 | 1.2777 | 0.745 | 0.6720 | 0.2271 | 0.0784 | | No log | 26.0 | 182 | 3.5877 | 0.75 | 0.3710 | 1.4838 | 0.75 | 0.6717 | 0.2082 | 0.0789 | | No log | 27.0 | 189 | 3.6026 | 0.77 | 0.3483 | 1.4211 | 0.7700 | 0.7018 | 0.2089 | 0.0694 | | No log | 28.0 | 196 | 3.6374 | 0.78 | 0.3365 | 1.3205 | 0.78 | 0.7181 | 0.1953 | 0.0694 | | No log | 29.0 | 203 | 3.7319 | 0.775 | 0.3538 | 1.2749 | 0.775 | 0.7012 | 0.2149 | 0.0814 | | No log | 30.0 | 210 | 3.6359 | 0.805 | 0.3291 | 1.3272 | 0.805 | 0.7761 | 0.1991 | 0.0637 | | No log | 31.0 | 217 | 3.7160 | 0.785 | 0.3337 | 1.2632 | 0.785 | 0.7445 | 0.1727 | 0.0757 | | No log | 32.0 | 224 | 3.6810 | 0.8 | 0.3234 | 1.3720 | 0.8000 | 0.7636 | 0.1999 | 0.0649 | | No log | 33.0 | 231 | 3.7139 | 0.82 | 0.3221 | 1.2150 | 0.82 | 0.7919 | 0.2051 | 0.0677 | | No log | 34.0 | 238 | 3.7286 | 0.795 | 0.3130 | 1.0622 | 0.795 | 0.7575 | 0.1919 | 0.0639 | | No log | 35.0 | 245 | 3.7807 | 0.795 | 0.3154 | 1.0146 | 0.795 | 0.7672 | 0.1565 | 0.0714 | | No log | 36.0 | 252 | 3.6802 | 0.815 | 0.3131 | 1.0083 | 0.815 | 0.7933 | 0.2051 | 0.0626 | | No log | 37.0 | 259 | 3.7369 | 0.81 | 0.3168 | 1.0017 | 0.81 | 0.7862 | 0.1792 | 0.0690 | | No log | 38.0 | 266 | 3.7638 | 0.82 | 0.2971 | 1.3357 | 0.82 | 0.7977 | 0.1913 | 0.0628 | | No log | 39.0 | 273 | 3.7415 | 0.825 | 0.2954 | 1.0423 | 0.825 | 0.8072 | 0.1893 | 0.0599 | | No log | 40.0 | 280 | 3.8005 | 0.785 | 0.3140 | 1.0817 | 0.785 | 0.7453 | 0.1694 | 0.0684 | | No log | 41.0 | 287 | 3.7901 | 0.82 | 0.3127 | 1.0853 | 0.82 | 0.7993 | 0.1789 | 0.0673 | | No log | 42.0 | 294 | 3.7811 | 0.825 | 0.3019 | 1.2712 | 0.825 | 0.8020 | 0.1644 | 0.0644 | | No log | 43.0 | 301 | 3.7689 | 0.81 | 0.3110 | 0.8553 | 0.81 | 0.7932 | 0.1785 | 0.0645 | | No log | 44.0 | 308 | 3.7796 | 0.82 | 0.2919 | 1.2589 | 0.82 | 0.7972 | 0.1875 | 0.0643 | | No log | 45.0 | 315 | 3.8005 | 0.805 | 0.3036 | 1.1993 | 0.805 | 0.7789 | 0.1840 | 0.0660 | | No log | 46.0 | 322 | 3.7811 | 0.82 | 0.2909 | 1.0962 | 0.82 | 0.8004 | 0.1735 | 0.0618 | | No log | 47.0 | 329 | 3.8145 | 0.8 | 0.3040 | 1.1968 | 0.8000 | 0.7759 | 0.1795 | 0.0671 | | No log | 48.0 | 336 | 3.7969 | 0.835 | 0.2816 | 1.1019 | 0.835 | 0.8118 | 0.1624 | 0.0603 | | No log | 49.0 | 343 | 3.8020 | 0.815 | 0.2855 | 1.0383 | 0.815 | 0.7978 | 0.1556 | 0.0639 | | No log | 50.0 | 350 | 3.8049 | 0.815 | 0.2884 | 1.1121 | 0.815 | 0.7935 | 0.1608 | 0.0616 | | No log | 51.0 | 357 | 3.8048 | 0.81 | 0.2873 | 1.1173 | 0.81 | 0.7898 | 0.1574 | 0.0632 | | No log | 52.0 | 364 | 3.8581 | 0.8 | 0.2923 | 1.1257 | 0.8000 | 0.7767 | 0.1436 | 0.0664 | | No log | 53.0 | 371 | 3.8565 | 0.79 | 0.2984 | 1.0513 | 0.79 | 0.7670 | 0.1622 | 0.0668 | | No log | 54.0 | 378 | 3.8787 | 0.805 | 0.2901 | 1.0619 | 0.805 | 0.7874 | 0.1335 | 0.0655 | | No log | 55.0 | 385 | 3.8777 | 0.805 | 0.2940 | 1.0378 | 0.805 | 0.7883 | 0.1450 | 0.0647 | | No log | 56.0 | 392 | 3.8743 | 0.805 | 0.2906 | 1.1702 | 0.805 | 0.7849 | 0.1610 | 0.0634 | | No log | 57.0 | 399 | 3.9082 | 0.795 | 0.2959 | 1.0951 | 0.795 | 0.7711 | 0.1761 | 0.0662 | | No log | 58.0 | 406 | 3.8894 | 0.8 | 0.2898 | 1.0979 | 0.8000 | 0.7816 | 0.1774 | 0.0638 | | No log | 59.0 | 413 | 3.9005 | 0.825 | 0.2914 | 1.2358 | 0.825 | 0.8088 | 0.1687 | 0.0637 | | No log | 60.0 | 420 | 3.9115 | 0.815 | 0.2863 | 1.0318 | 0.815 | 0.7928 | 0.1672 | 0.0640 | | No log | 61.0 | 427 | 3.9172 | 0.805 | 0.2956 | 1.1397 | 0.805 | 0.7884 | 0.1646 | 
0.0667 | | No log | 62.0 | 434 | 3.8993 | 0.82 | 0.2862 | 1.2349 | 0.82 | 0.8001 | 0.1544 | 0.0645 | | No log | 63.0 | 441 | 3.9334 | 0.825 | 0.2896 | 1.1718 | 0.825 | 0.8061 | 0.1662 | 0.0646 | | No log | 64.0 | 448 | 3.9179 | 0.815 | 0.2861 | 1.1727 | 0.815 | 0.7966 | 0.1592 | 0.0650 | | No log | 65.0 | 455 | 3.9489 | 0.8 | 0.2981 | 1.1681 | 0.8000 | 0.7805 | 0.1522 | 0.0674 | | No log | 66.0 | 462 | 3.9372 | 0.81 | 0.2855 | 1.1041 | 0.81 | 0.7870 | 0.1709 | 0.0647 | | No log | 67.0 | 469 | 3.9651 | 0.8 | 0.2935 | 1.1723 | 0.8000 | 0.7816 | 0.1492 | 0.0667 | | No log | 68.0 | 476 | 3.9600 | 0.815 | 0.2903 | 1.1687 | 0.815 | 0.7950 | 0.1466 | 0.0650 | | No log | 69.0 | 483 | 3.9695 | 0.82 | 0.2908 | 1.1251 | 0.82 | 0.8026 | 0.1532 | 0.0654 | | No log | 70.0 | 490 | 3.9817 | 0.805 | 0.2915 | 1.1879 | 0.805 | 0.7861 | 0.1537 | 0.0657 | | No log | 71.0 | 497 | 3.9838 | 0.81 | 0.2899 | 1.1688 | 0.81 | 0.7892 | 0.1538 | 0.0648 | | 3.4085 | 72.0 | 504 | 3.9960 | 0.805 | 0.2910 | 1.1702 | 0.805 | 0.7904 | 0.1568 | 0.0657 | | 3.4085 | 73.0 | 511 | 4.0046 | 0.8 | 0.2931 | 1.1743 | 0.8000 | 0.7800 | 0.1529 | 0.0658 | | 3.4085 | 74.0 | 518 | 4.0115 | 0.815 | 0.2917 | 1.1718 | 0.815 | 0.7968 | 0.1589 | 0.0647 | | 3.4085 | 75.0 | 525 | 4.0205 | 0.805 | 0.2920 | 1.1719 | 0.805 | 0.7833 | 0.1575 | 0.0654 | | 3.4085 | 76.0 | 532 | 4.0272 | 0.805 | 0.2919 | 1.1725 | 0.805 | 0.7833 | 0.1547 | 0.0659 | | 3.4085 | 77.0 | 539 | 4.0323 | 0.81 | 0.2923 | 1.1720 | 0.81 | 0.7892 | 0.1547 | 0.0653 | | 3.4085 | 78.0 | 546 | 4.0364 | 0.81 | 0.2907 | 1.1715 | 0.81 | 0.7892 | 0.1607 | 0.0650 | | 3.4085 | 79.0 | 553 | 4.0405 | 0.81 | 0.2910 | 1.1716 | 0.81 | 0.7892 | 0.1451 | 0.0650 | | 3.4085 | 80.0 | 560 | 4.0476 | 0.81 | 0.2917 | 1.1743 | 0.81 | 0.7892 | 0.1453 | 0.0650 | | 3.4085 | 81.0 | 567 | 4.0529 | 0.805 | 0.2921 | 1.1736 | 0.805 | 0.7833 | 0.1573 | 0.0654 | | 3.4085 | 82.0 | 574 | 4.0570 | 0.805 | 0.2919 | 1.1741 | 0.805 | 0.7861 | 0.1717 | 0.0655 | | 3.4085 | 83.0 | 581 | 4.0601 | 0.81 | 0.2918 | 1.1727 | 0.81 | 0.7892 | 0.1508 | 0.0650 | | 3.4085 | 84.0 | 588 | 4.0643 | 0.81 | 0.2919 | 1.1743 | 0.81 | 0.7892 | 0.1507 | 0.0652 | | 3.4085 | 85.0 | 595 | 4.0678 | 0.81 | 0.2922 | 1.1744 | 0.81 | 0.7892 | 0.1552 | 0.0651 | | 3.4085 | 86.0 | 602 | 4.0743 | 0.81 | 0.2925 | 1.1746 | 0.81 | 0.7892 | 0.1526 | 0.0651 | | 3.4085 | 87.0 | 609 | 4.0758 | 0.805 | 0.2924 | 1.1753 | 0.805 | 0.7833 | 0.1718 | 0.0653 | | 3.4085 | 88.0 | 616 | 4.0796 | 0.805 | 0.2924 | 1.1758 | 0.805 | 0.7833 | 0.1567 | 0.0654 | | 3.4085 | 89.0 | 623 | 4.0803 | 0.81 | 0.2920 | 1.1742 | 0.81 | 0.7892 | 0.1587 | 0.0650 | | 3.4085 | 90.0 | 630 | 4.0842 | 0.81 | 0.2925 | 1.1744 | 0.81 | 0.7892 | 0.1529 | 0.0651 | | 3.4085 | 91.0 | 637 | 4.0864 | 0.805 | 0.2926 | 1.1752 | 0.805 | 0.7833 | 0.1568 | 0.0654 | | 3.4085 | 92.0 | 644 | 4.0880 | 0.81 | 0.2925 | 1.1757 | 0.81 | 0.7892 | 0.1526 | 0.0651 | | 3.4085 | 93.0 | 651 | 4.0903 | 0.805 | 0.2927 | 1.1752 | 0.805 | 0.7833 | 0.1567 | 0.0654 | | 3.4085 | 94.0 | 658 | 4.0918 | 0.805 | 0.2927 | 1.1750 | 0.805 | 0.7833 | 0.1572 | 0.0655 | | 3.4085 | 95.0 | 665 | 4.0927 | 0.805 | 0.2926 | 1.1750 | 0.805 | 0.7833 | 0.1570 | 0.0655 | | 3.4085 | 96.0 | 672 | 4.0937 | 0.805 | 0.2927 | 1.1751 | 0.805 | 0.7833 | 0.1572 | 0.0655 | | 3.4085 | 97.0 | 679 | 4.0946 | 0.805 | 0.2926 | 1.1750 | 0.805 | 0.7833 | 0.1573 | 0.0655 | | 3.4085 | 98.0 | 686 | 4.0950 | 0.805 | 0.2926 | 1.1752 | 0.805 | 0.7833 | 0.1572 | 0.0655 | | 3.4085 | 99.0 | 693 | 4.0955 | 0.805 | 0.2927 | 1.1753 | 0.805 | 0.7833 | 0.1572 | 0.0655 | | 
3.4085 | 100.0 | 700 | 4.0957 | 0.805 | 0.2927 | 1.1753 | 0.805 | 0.7833 | 0.1572 | 0.0655 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1.post200 - Datasets 2.9.0 - Tokenizers 0.13.2
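The `18-tiny_tobacco3482_kd_NKD_t1.0_g1.5` card above reports accuracy, Brier loss, NLL, ECE, and AURC for a ViT-tiny image classifier distilled on Tobacco3482. A minimal inference sketch follows; the blank placeholder image stands in for a real document scan.

```python
from transformers import pipeline
from PIL import Image

clf = pipeline(
    "image-classification",
    model="jordyvl/18-tiny_tobacco3482_kd_NKD_t1.0_g1.5",
)

# Placeholder input -- in practice this would be a Tobacco3482 document scan.
img = Image.new("RGB", (224, 224), color="white")
for pred in clf(img, top_k=3):
    print(pred["label"], round(pred["score"], 3))
```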
anzeliu/my_billsum_model
anzeliu
2023-07-18T22:41:18Z
103
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "base_model:google-t5/t5-small", "base_model:finetune:google-t5/t5-small", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2023-07-18T22:35:28Z
--- license: apache-2.0 base_model: t5-small tags: - generated_from_trainer metrics: - rouge model-index: - name: my_billsum_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_billsum_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.5348 - Rouge1: 0.1375 - Rouge2: 0.05 - Rougel: 0.1161 - Rougelsum: 0.1161 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 55 | 2.8601 | 0.1257 | 0.0339 | 0.1051 | 0.1051 | 19.0 | | No log | 2.0 | 110 | 2.6251 | 0.1302 | 0.0403 | 0.1095 | 0.1095 | 19.0 | | No log | 3.0 | 165 | 2.5539 | 0.1359 | 0.0476 | 0.1146 | 0.1146 | 19.0 | | No log | 4.0 | 220 | 2.5348 | 0.1375 | 0.05 | 0.1161 | 0.1161 | 19.0 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Tokenizers 0.13.3
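For cards like `anzeliu/my_billsum_model`, the reported ROUGE scores and generation length come from seq2seq generation. A short, hedged inference sketch is shown below; the `summarize:` prefix and the sample text are assumptions, since the card does not document its preprocessing.

```python
from transformers import pipeline

# Load the fine-tuned T5 checkpoint named in the card above.
pl = pipeline("text2text-generation", model="anzeliu/my_billsum_model")

bill_text = (
    "The Act amends the Internal Revenue Code to modify the credit for "
    "increasing research activities and extends it through 2025."
)
# T5 billsum fine-tunes are typically prompted with a "summarize: " prefix
# (an assumption here); max_new_tokens roughly matches the card's Gen Len of 19.
out = pl("summarize: " + bill_text, max_new_tokens=20)
print(out[0]["generated_text"])
```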
Falcinspire/ppo-LunarLander-v2
Falcinspire
2023-07-18T22:39:33Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T22:13:05Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 282.08 +/- 17.86 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
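The stable-baselines3 cards in this dump leave the usage section as a TODO. A sketch of what that section typically looks like is given below; the checkpoint filename inside the repository is an assumption (huggingface_sb3 uploads are commonly named `<algo>-<env>.zip`), and newer `gymnasium` releases may register the environment as `LunarLander-v3`.

```python
import gymnasium as gym  # requires gymnasium[box2d] for LunarLander
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Assumed checkpoint filename -- check the repository file listing.
checkpoint = load_from_hub(
    repo_id="Falcinspire/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```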
coreml-projects/Llama-2-7b-chat-coreml
coreml-projects
2023-07-18T22:34:22Z
4,217
135
transformers
[ "transformers", "coreml", "llama", "text-generation", "meta", "llama-2", "license:other", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T19:20:20Z
--- license: other tags: - meta - coreml - llama - llama-2 --- # **Core ML version of Llama 2** This is a Core ML version of [`meta-llama/Llama-2-7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). For [license](LICENSE.txt) information, model details and acceptable [use policy](USE_POLICY.md), please refer to [the original model card](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). This conversion was performed in `float16` mode with a fixed sequence length of `64`, and is intended for evaluation and test purposes. Please open a conversation in the `Community` tab if you have questions or want to report an issue.
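The card states that the conversion used `float16` and a fixed sequence length of 64 but does not include loading code. The sketch below shows one way to open such a package with `coremltools` on macOS; the `.mlpackage` filename and the `input_ids` input name are assumptions, so inspect the repository files and `mlmodel.get_spec()` for the real ones.

```python
import numpy as np
import coremltools as ct
from huggingface_hub import snapshot_download

# Download the repository locally (Core ML packages are directories).
local_dir = snapshot_download("coreml-projects/Llama-2-7b-chat-coreml")

# Assumed package name -- check the repo file listing for the actual one.
mlmodel = ct.models.MLModel(f"{local_dir}/Llama-2-7b-chat.mlpackage")

# The conversion used a fixed sequence length of 64, so token inputs must be
# padded or truncated to that length. The input name here is hypothetical.
token_ids = np.zeros((1, 64), dtype=np.int32)
outputs = mlmodel.predict({"input_ids": token_ids})  # prediction runs on macOS only
print(list(outputs.keys()))
```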
aroot/eng-ind-tok_budget_longest
aroot
2023-07-18T22:31:15Z
104
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T22:17:54Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-ind-tok_budget_longest results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-ind-tok_budget_longest This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9635 - Bleu: 19.1577 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
aroot/eng-mya-simcse_nbrs_l
aroot
2023-07-18T22:27:01Z
106
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T19:57:14Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-mya-simcse_nbrs_l results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-mya-simcse_nbrs_l This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9966 - Bleu: 3.9919 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
aroot/eng-guj-simcse_nbrs_r
aroot
2023-07-18T22:12:52Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T19:48:02Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-guj-simcse_nbrs_r results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-guj-simcse_nbrs_r This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.3536 - Bleu: 2.6731 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
shamikbose89/mt5-small-finetuned-arxiv-cs
shamikbose89
2023-07-18T22:12:15Z
126
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "mt5", "text2text-generation", "generated_from_trainer", "summarization", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2022-03-02T23:29:05Z
--- license: apache-2.0 tags: - generated_from_trainer - summarization metrics: - rouge model-index: - name: mt5-small-finetuned-arxiv-cs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-arxiv-cs This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on a subset of the arxiv dataset. It achieves the following results on the evaluation set: - Loss: 1.6922 - Rouge1: 0.7734 - Rouge2: 0.2865 - Rougel: 0.6665 - Rougelsum: 0.6743 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 14.0947 | 1.0 | 500 | 2.7666 | 1.2101 | 0.459 | 1.1426 | 1.1385 | | 2.8524 | 2.0 | 1000 | 1.8208 | 0.0 | 0.0 | 0.0 | 0.0 | | 2.2623 | 3.0 | 1500 | 1.6922 | 0.7734 | 0.2865 | 0.6665 | 0.6743 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
NiiAmoo/outputs_test
NiiAmoo
2023-07-18T22:05:51Z
103
0
transformers
[ "transformers", "pytorch", "deberta-v2", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-07-18T22:05:15Z
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: outputs_test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # outputs_test This model is a fine-tuned version of [microsoft/deberta-v3-small](https://huggingface.co/microsoft/deberta-v3-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9518 - Accuracy: 0.7386 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 12 | 0.7810 | 0.5943 | | No log | 2.0 | 24 | 0.5357 | 0.6629 | | No log | 3.0 | 36 | 0.4338 | 0.7129 | | No log | 4.0 | 48 | 0.5672 | 0.6886 | | No log | 5.0 | 60 | 0.7802 | 0.7114 | | No log | 6.0 | 72 | 0.7019 | 0.73 | | No log | 7.0 | 84 | 0.7304 | 0.7514 | | No log | 8.0 | 96 | 1.0413 | 0.72 | | No log | 9.0 | 108 | 0.8902 | 0.7314 | | No log | 10.0 | 120 | 0.8441 | 0.7514 | | No log | 11.0 | 132 | 0.7846 | 0.7643 | | No log | 12.0 | 144 | 0.8730 | 0.7586 | | No log | 13.0 | 156 | 0.9532 | 0.7386 | | No log | 14.0 | 168 | 0.9541 | 0.74 | | No log | 15.0 | 180 | 0.9518 | 0.7386 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
medalpaca/medalpaca-13b
medalpaca
2023-07-18T21:54:56Z
1,679
85
transformers
[ "transformers", "pytorch", "llama", "text-generation", "medical", "en", "arxiv:2303.14070", "license:cc", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-03-30T13:43:05Z
--- license: cc language: - en library_name: transformers pipeline_tag: text-generation tags: - medical --- # MedAlpaca 13b ## Table of Contents [Model Description](#model-description) - [Architecture](#architecture) - [Training Data](#training-data) [Model Usage](#model-usage) [Limitations](#limitations) ## Model Description ### Architecture `medalpaca-13b` is a large language model specifically fine-tuned for medical domain tasks. It is based on LLaMA (Large Language Model Meta AI) and contains 13 billion parameters. The primary goal of this model is to improve question-answering and medical dialogue tasks. ### Training Data The training data for this project was sourced from various resources. Firstly, we used Anki flashcards to automatically generate questions from the front of the cards and answers from the back of the cards. Secondly, we generated medical question-answer pairs from [Wikidoc](https://www.wikidoc.org/index.php/Main_Page). We extracted paragraphs with relevant headings and used ChatGPT 3.5 to generate questions from the headings, using the corresponding paragraphs as answers. This dataset is still under development and we believe that approximately 70% of these question-answer pairs are factually correct. Thirdly, we used StackExchange to extract question-answer pairs, taking the top-rated questions from five categories: Academia, Bioinformatics, Biology, Fitness, and Health. Additionally, we used a dataset from [ChatDoctor](https://arxiv.org/abs/2303.14070) consisting of 200,000 question-answer pairs, available at https://github.com/Kent0n-Li/ChatDoctor. | Source | n items | |------------------------------|--------| | ChatDoc large | 200000 | | wikidoc | 67704 | | Stackexchange academia | 40865 | | Anki flashcards | 33955 | | Stackexchange biology | 27887 | | Stackexchange fitness | 9833 | | Stackexchange health | 7721 | | Wikidoc patient information | 5942 | | Stackexchange bioinformatics | 5407 | ## Model Usage To evaluate the performance of the model on a specific dataset, you can use the Hugging Face Transformers library's built-in evaluation scripts. Please refer to the evaluation guide for more information. Inference You can use the model for inference tasks like question-answering and medical dialogues using the Hugging Face Transformers library. Here's an example of how to use the model for a question-answering task: ```python from transformers import pipeline pl = pipeline("text-generation", model="medalpaca/medalpaca-13b", tokenizer="medalpaca/medalpaca-13b") question = "What are the symptoms of diabetes?" context = "Diabetes is a metabolic disease that causes high blood sugar. The symptoms include increased thirst, frequent urination, and unexplained weight loss." answer = pl(f"Context: {context}\n\nQuestion: {question}\n\nAnswer: ") print(answer) ``` ## Limitations The model may not perform effectively outside the scope of the medical domain. The training data primarily targets the knowledge level of medical students, which may result in limitations when addressing the needs of board-certified physicians. The model has not been tested in real-world applications, so its efficacy and accuracy are currently unknown. It should never be used as a substitute for a doctor's opinion and must be treated as a research tool only.
vishalkm/medalpaca-7b
vishalkm
2023-07-18T21:54:12Z
43
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "medical", "en", "arxiv:2303.14070", "license:cc", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-08-18T06:57:12Z
--- license: cc language: - en library_name: transformers pipeline_tag: text-generation tags: - medical --- # MedAlpaca 7b ## Table of Contents [Model Description](#model-description) - [Architecture](#architecture) - [Training Data](#training-data) [Model Usage](#model-usage) [Limitations](#limitations) ## Model Description ### Architecture `medalpaca-7b` is a large language model specifically fine-tuned for medical domain tasks. It is based on LLaMA (Large Language Model Meta AI) and contains 7 billion parameters. The primary goal of this model is to improve question-answering and medical dialogue tasks. ### Training Data The training data for this project was sourced from various resources. Firstly, we used Anki flashcards to automatically generate questions from the front of the cards and answers from the back of the cards. Secondly, we generated medical question-answer pairs from [Wikidoc](https://www.wikidoc.org/index.php/Main_Page). We extracted paragraphs with relevant headings and used ChatGPT 3.5 to generate questions from the headings, using the corresponding paragraphs as answers. This dataset is still under development and we believe that approximately 70% of these question-answer pairs are factually correct. Thirdly, we used StackExchange to extract question-answer pairs, taking the top-rated questions from five categories: Academia, Bioinformatics, Biology, Fitness, and Health. Additionally, we used a dataset from [ChatDoctor](https://arxiv.org/abs/2303.14070) consisting of 200,000 question-answer pairs, available at https://github.com/Kent0n-Li/ChatDoctor. | Source | n items | |------------------------------|--------| | ChatDoc large | 200000 | | wikidoc | 67704 | | Stackexchange academia | 40865 | | Anki flashcards | 33955 | | Stackexchange biology | 27887 | | Stackexchange fitness | 9833 | | Stackexchange health | 7721 | | Wikidoc patient information | 5942 | | Stackexchange bioinformatics | 5407 | ## Model Usage To evaluate the performance of the model on a specific dataset, you can use the Hugging Face Transformers library's built-in evaluation scripts. Please refer to the evaluation guide for more information. Inference You can use the model for inference tasks like question-answering and medical dialogues using the Hugging Face Transformers library. Here's an example of how to use the model for a question-answering task: ```python from transformers import pipeline pl = pipeline("text-generation", model="medalpaca/medalpaca-7b", tokenizer="medalpaca/medalpaca-7b") question = "What are the symptoms of diabetes?" context = "Diabetes is a metabolic disease that causes high blood sugar. The symptoms include increased thirst, frequent urination, and unexplained weight loss." answer = pl(f"Context: {context}\n\nQuestion: {question}\n\nAnswer: ") print(answer) ``` ## Limitations The model may not perform effectively outside the scope of the medical domain. The training data primarily targets the knowledge level of medical students, which may result in limitations when addressing the needs of board-certified physicians. The model has not been tested in real-world applications, so its efficacy and accuracy are currently unknown. It should never be used as a substitute for a doctor's opinion and must be treated as a research tool only.
aroot/eng-guj-tok_budget_random
aroot
2023-07-18T21:50:16Z
105
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T21:29:09Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-guj-tok_budget_random results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-guj-tok_budget_random This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.3275 - Bleu: 2.7936 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
aroot/eng-guj-tok_budget_longest
aroot
2023-07-18T21:47:13Z
104
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T21:28:39Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-guj-tok_budget_longest results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-guj-tok_budget_longest This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.6616 - Bleu: 1.6642 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
acdg1214/ppo-LunarLander-v2
acdg1214
2023-07-18T21:46:09Z
5
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T21:45:48Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 249.77 +/- 14.25 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
shivaneej/subset_model_t5
shivaneej
2023-07-18T21:34:31Z
104
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "base_model:google-t5/t5-small", "base_model:finetune:google-t5/t5-small", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2023-07-18T21:24:26Z
--- license: apache-2.0 base_model: t5-small tags: - generated_from_trainer metrics: - rouge model-index: - name: subset_model_t5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # subset_model_t5 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7052 - Rouge1: 0.1 - Rouge2: 0.0 - Rougel: 0.1 - Rougelsum: 0.1 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 1 | 1.8253 | 0.1 | 0.0 | 0.1 | 0.1 | 19.0 | | No log | 2.0 | 2 | 1.7629 | 0.1 | 0.0 | 0.1 | 0.1 | 19.0 | | No log | 3.0 | 3 | 1.7243 | 0.1 | 0.0 | 0.1 | 0.1 | 19.0 | | No log | 4.0 | 4 | 1.7052 | 0.1 | 0.0 | 0.1 | 0.1 | 19.0 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
hector981/ppo-LunarLander-v2
hector981
2023-07-18T21:32:44Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T21:32:20Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 236.70 +/- 24.97 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
aroot/eng-fra-simcse_nbrs_r
aroot
2023-07-18T21:30:12Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T19:30:29Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-fra-simcse_nbrs_r results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-fra-simcse_nbrs_r This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1586 - Bleu: 32.5483 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
crcdng/a2c-AntBulletEnv-v0
crcdng
2023-07-18T21:29:32Z
1
0
stable-baselines3
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T21:22:58Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1859.70 +/- 599.36 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
wajdii/model_3
wajdii
2023-07-18T21:28:56Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "fill-mask", "generated_from_trainer", "license:afl-3.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2023-07-18T20:18:39Z
--- license: afl-3.0 tags: - generated_from_trainer model-index: - name: model_3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # model_3 This model is a fine-tuned version of [Davlan/afro-xlmr-large-61L](https://huggingface.co/Davlan/afro-xlmr-large-61L) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.3654 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.9218 | 1.0 | 1453 | 1.6494 | | 1.7438 | 2.0 | 2906 | 1.6273 | | 1.6321 | 3.0 | 4359 | 1.5267 | | 1.5142 | 4.0 | 5812 | 1.4903 | | 1.4759 | 5.0 | 7265 | 1.4603 | | 1.4138 | 6.0 | 8718 | 1.3933 | | 1.3501 | 7.0 | 10171 | 1.4280 | | 1.3373 | 8.0 | 11624 | 1.3632 | | 1.298 | 9.0 | 13077 | 1.4296 | | 1.2869 | 10.0 | 14530 | 1.3654 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu117 - Datasets 2.13.1 - Tokenizers 0.13.3
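`model_3` is a fill-mask fine-tune of an XLM-RoBERTa-style model, so inference differs from the seq2seq cards above. A minimal sketch follows; the example sentence is arbitrary.

```python
from transformers import pipeline

# XLM-RoBERTa-style tokenizers use "<mask>" as the mask token.
fill = pipeline("fill-mask", model="wajdii/model_3")

for pred in fill("The weather today is <mask>.")[:3]:
    print(pred["token_str"], round(pred["score"], 3))
```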
aroot/eng-fra-tok_budget_random
aroot
2023-07-18T21:28:39Z
103
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T21:08:48Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-fra-tok_budget_random results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-fra-tok_budget_random This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1528 - Bleu: 32.1323 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
aroot/eng-fra-tok_budget_longest
aroot
2023-07-18T21:28:11Z
103
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "translation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2023-07-18T21:08:42Z
--- tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: eng-fra-tok_budget_longest results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eng-fra-tok_budget_longest This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.2164 - Bleu: 30.5856 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
Ocean3/SuperMix
Ocean3
2023-07-18T21:26:15Z
0
5
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "stable-diffusion-diffusers", "safetensors", "art", "en", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-07-15T23:40:59Z
--- language: - en thumbnail: "https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img1.png" tags: - text-to-image - stable-diffusion - stable-diffusion-diffusers - diffusers - safetensors - art license: creativeml-openrail-m --- <!--comment--> # 🍍 SuperMix ![Banner](https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img1.png) <div align="center"> <a href="https://huggingface.co/Ocean3/SuperMix/tree/main/1)%20Versions">Models</a> | <a href="./SuperMix#previews">Previews</a> | <a href="https://huggingface.co/Ocean3/SuperMix/tree/main/3)%20Alternate%20Versions">Alt Versions</a> | <a href="https://civitai.com/models/89213?modelVersionId=94946" target="_blank">CivitAI Page</a></div> **SuperMix** is an Anime focused Text-to-Image diffuser model capable of bringing out semi-realistic tones through detailing, lighting, textures, and other aspects of the composition. At the same time, this merged model is very versatile with the amount of styling, forms, and mediums you can choose to generate with outputs through chosen parameters. SuperMix is great with: * Portraits * Anime * Semi-Realism * Scenery * Concept Art * Detailed Textures * Detailed Backgrounds * Vehicles, Architecture, Food * & More! This mix started out as a spontaneous combination of various anime focused models. I took note of some of the details the merge had excelled at - then decided to create a mix highlighting those aspects continuing from there. After some iterations and branch tests, I decided this mix was decent enough to share with others as is without going too far with variations. I still consider myself newer to generated art and such in general, so if you see anything to be corrected or to improve upon, let me know 👌 I would love to see what people create with the outputs of this model, feel free to use the tag **#SuperMix** on various platforms if you decide to post anything! <div align="center"><a href="https://civitai.com/models/89213?modelVersionId=94946" target="_blank">CivitAI Page</a></div> <br><div align="center"><p style="font-size:90%; background-color:#f5f6ff; color:#173978;">Note</p></div> <p style="font-size:90%;">SuperMix1 is an older rough-merged model mixed at the end of 2022 from various models known at the time. As such, this model and merge-components are fairly dated and may be harder to manage at times with current webUI updates etc. There are many great models available now with similar styles and flexibility that may be easier to use depending on your style preference. If this model receives any future updates, any new version will be geared at ironing out any prevalent issues in this version, removing any license limitations, and finetuning to a better standard.</p> --- # Previews <img src="https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img2.png" title=previews> Below are some preview images with various configurations and prompt styles ranging from simple to more complex prompt and parameter range. SuperMix can be a very powerful model capable of many different styles, don't be afraid to use this model(s) the way you find best. You can view more examples over on the <a href="https://civitai.com/models/89213?modelVersionId=94946" target="_blank">CivitAI</a> pages as well. <br>Click to expand each category. 
--- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Anime</summary> <div align="center"> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a1.png" title="Aspiring Heights"> <figcaption><i>Aspiring Heights - hires upscale, img2img upscale, prompt via tarabm246</i></figcaption> <small> ``` furry raccoon girl, 1girl, solo, multicolored eyes, raccoon ears, two-tone hair, (high quality, best quality), body fur, animal nose, sunset, horizon, mountain edge, long hair, gray coat, from behind, tail, upper body, snow, winter, smile ``` ``` (worst quality, low quality:1.4), looking at viewer ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 5.5, Seed: 41866449, Size: 512x512, Denoising strength: 0.58, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a2.png" title="Cosmic Flowers"> <figcaption><i>Cosmic Flowers - hires upscale, img2img upscale, initially sourced prompt</i></figcaption> <small> ``` extreme quality, cg, (bright colors:0.8), ultra-detailed, illustration, impasto, painting, 1girl, large white jacket, long jacket, short legs, short, forest, mystery, mysterious forest, girl investigator, tall boots, red flowers, starry sky, stars, nebula, white hair, walking, walking through the forest, relaxed expression, night, nebula sky, planets, ((red flowers)), solo, anime wallpaper, high quality wallpaper, official wallpaper, masterpiece, best quality, 8k ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), ugly face ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 757843492, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a3.png" title=""> <figcaption><i>Stars Align - hires upscale, img2img upscale, light touch-up, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors:0.9), (light pastel colors:1.4), photo of a curious girl, (ancient), (tan skin), fashion, light dust, patio, (depth of field:0.76), (fog), medium hair, long hair, white hair, masterpiece, 8k, tone mapping, hyper focus, white, blue eyes, upper body:0.8), natural body, limited palette, (detailed hair:1.13), dynamic angle, (pastel drawing:0.7), (black outlines), (pastel background), soft lighting, (fox girl), solo, clarity, (by Antonio Maria Panni:1.6), (raised eyebrows:0.8), hero attire, (plants, modern:1.2), colorful, bold, vivid, (creative), (starry sky), (random:1.4) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.4), (worst quality:1.4), ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, text, error, cropped, low quality, normal quality, jpeg artifacts, watermark, username, blurry, out of focus, watercolor, (nsfw:1.6), (cleavage:1.6) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 8, Seed: 638426066, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" 
src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a4.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors:0.9), a cute girl, (dark skin), colored outlines, curly hair, red hair, orange, masterpiece, 8k, (tone mapping, hyper focus:0.7), aqua, (random:1.4) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.4), (worst quality:1.4), ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, text, error, cropped, low quality, normal quality, jpeg artifacts, watermark, username, blurry, out of focus, watercolor, (nsfw:1.6), (cleavage:1.6) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 8, Seed: 3599973939, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a5.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, initially sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.3), absurdres, highres, best quality, 1girl, victorian, outdoors, bush, foliage, scenery, dusk, colorful clouds, dark, stars, reflection, (iridescent:1.5), meteor, multicolored hair, :3, full body, swirling clouds, arms out stretched, (from behind:1.1), glowing hair, silhouette, arms up, silver dress, conductor ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.4), (worst quality:1.4), ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, text, error, cropped, low quality, normal quality, jpeg artifacts, watermark, username, blurry, out of focus, watercolor, (nsfw:1.4), (cleavage:1.4) ``` ``` Steps: 30, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3071813954, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.4, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a6.png" title=""> <figcaption><i>Untitled - method, hires upscale, self-prompted</i></figcaption> <small> ``` a photo of a cute girl in an utopian city, brown hair, short hair, brown eyes, messy hair, tan skin, (detailed texture), picturesque, day, dappled sunlight, outdoors, masterpiece, 8k, (tone mapping, hyper focus:0.5), limited palette, serious, (varied depth of field:0.8), complimentary colors, (wizard), wizard robes, wizard hat, magic, purple, (cat girl) ``` ``` ugly, old, deformed, amateur drawing, odd, fat, cell shading, cel shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, blurry, out of focus, watercolor, (worst quality, low quality:1.4), blurry, earmuffs, text, lowres, error, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, (cropped:1.2), normal quality, watermark, username, (signature:1.4), (text), (author), deformed, amateur drawing, long neck, extra fingers, by bad-artist, missing fingers, image sample, jpeg artifacts, gif artifacts, wallpaper forced, lossy-lossless, lossless-lossy, corrupted file, duplicate, redrawn, screenshot, game screenshot, bad art, amateur 
drawing, odd, ((merged limbs)), ((conjoined limbs)), (poorly drawn:1.3), poorly drawn hands, poorly drawn face, deformities, conjoined, stretched torso, (heterochromia), (disproportioned), bad face, (bad details), sloppy, sitting, (tanlines), (staff:1.5), (wand:1.5), (weapon:1.5) ``` ``` Steps: 18, Sampler: DPM++ 2M Karras, CFG scale: 9, Seed: 2879140632, Size: 512x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires resize: 832x1024, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a7.png" title=""> <figcaption><i>Untitled - method, hires upscale, initially sourced prompt</i></figcaption> <small> ``` girl in jungle, epic, intricate, smirk, from above, muscular, standing, (thunder rain storm, aura:1.1), blonde hair, tiger ears, messy hair, slit pupils, red eyes, black jacket, high collar ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), ugly face ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 10, Seed: 2044019025, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a8.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, initially sourced prompt</i></figcaption> <small> ``` detailed background, superb, 1girl, long hair light purple hair, curly hair, cute, eyelashes, sitting, white dress, pink ribbon around her waist, pink flats, white thighhighs, beautiful 8k wallpaper, outdoors, nature, tree, bush, flower, rustic, extremely detailed, intricate ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.4), (worst quality:1.4), ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, text, error, cropped, low quality, normal quality, jpeg artifacts, watermark, username, blurry, out of focus, watercolor, (nsfw, cleavage:1.3) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2568503293, Size: 512x704, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.39, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a9.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, self-prompted</i></figcaption> <small> ``` (masterpiece:1.0), (highest quality:1.12), (HDR:1.0), 1girl, solo, flat colors, colorful, animal ear fluff, solo, plants, (coral), gradient background, smooth lighting, (splash art:0.8), portrait, (upper body:0.85), (random:1.4) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (cleavage) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3693442341, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" 
src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a10.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` no metadata ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a11.png" title=""> <figcaption><i>Untitled - hires upscale, majority self-prompted</i></figcaption> <small> ``` photo of cute army girl, detailed face+eyes, tactical clothing, (white fox girl, animal ear fluff, (fluffy hair), medium hair, attractive, yellow eyes, picturesque, sporty, (dark skin:1.2), (tactical mask), upper body, dynamic angle, mad, by Jeremy Lipking, by Antonio J Manzanedo, (by Alphonse Mucha:0.5), masterpiece, (pov), metal, foggy snowy jungle, varied depth of field, captain ``` ``` censorship, ugly, old, deformed, amateur drawing, odd, fat, tall, anime, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, anime, watercolor, (gun:1.5), (rifle:1.5) ``` ``` Steps: 20, Sampler: DDIM, CFG scale: 16, Seed: 1312696386, Size: 512x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires resize: 768x896, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a12.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` (masterpiece:1.0), (highest quality:1.12), (HDR:1.0), 1girl, solo, flat colors, colorful, animal ear fluff, solo, plants, (tan), gradient background, smooth lighting, (splash art:0.8), portrait, (upper body:0.85), (random:1.4) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 149160650, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a13.png" title=""> <figcaption><i>Untitled - hires upscale, initially sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), 1girl, yellow eyes, baseball cap, lue hair, closed mouth, (shoulder armor:1.2), black background, hoop earrings, jewelry, looking at viewer, shirt, long hair, (simple background:0.8), (abstract background:1.2), solo, upper body, purple shirt, gold trim, (luxury:0.8) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 8, Seed: 1100956050, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a14.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), high quality, imp girl, 
special ambience, (flat color:0.8), (limited palette), high contrast, cg unity wallpaper, hyper focus, tone mapping, depth mapping, above clouds, starry sky, plants, tropic, 1girl, golden eyes, looking away, portrait, parted lips, (ethereal), indigo skin, shorts, wave, pretty face, fantasy, (magical:0.7) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3428515668, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a15.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (colorful), (light pastel colors:1.4), photo of a righteous girl, (detailed background), (tan skin:1.2), fashion, light dust, (field), long hair, hair up, silver hair, masterpiece, 8k, tone mapping, hyper focus, yellow, hawk eyes, (upper body:0.8), natural body, limited palette, (detailed hair:1.13), (Ufotable aesthetic:1.3), (pastel drawing:0.7), (black outlines), (pastel background), soft lighting, (cat girl:1.3), solo, clarity, (by Vladimir Makovsky:1.3), (by Sam Haskins:1.3) ``` ``` (hands), (long arms), nsfw, censorship, ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, watercolor, (worst quality, low quality:1.4), heterochromia, asymmetrical eyes, tears, (tanlines:1.3), (denim), (brush), (vibrant), (hdr), (shiny skin), (expressionless:0.76), (bold colors), (ufo:1.5) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 8.5, Seed: 2964409537, Size: 512x704, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.39, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a16.png" title=""> <figcaption><i>Untitled - hires upscale, initially sourced prompt</i></figcaption> <small> ``` 1girl, night city, rain, coat, hands in pockets, white hair, long hair, (fox ears), fluff, (dark skin:1.2), full lips, pretty face, anime illustration, purple eyes ``` ``` (worst quality:1.6), (low quality:1.6), EasyNegative ``` ``` Steps: 29, Sampler: Euler a, CFG scale: 7, Seed: 2865357824, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.4, Clip skip: 2, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a17.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` realistic photo of an anime girl, (outdoors), detailed face+eyes, (detailed texture), picturesque, day, dappled sunlight, attractive, full lips, short hair, wavy hair, parted hair, parted bangs, forehead, hair intakes, blonde hair, hawk eyes, white eyes, masterpiece, varied depth of field, limited palette, (cute), bandana, landscape, (tan skin), 
bracelet, orange shirt, sleeveless, whisker markings, (varied depth of field:0.8), looking, orange, ambient lighting ``` ``` nsfw, censored, ugly, old, deformed, amateur drawing, odd, fat, tall, cel shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, blurry, out of focus, watercolor, (worst quality, low quality:1.4), blurry, earmuffs, text, lowres, error, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, (cropped:1.2), normal quality, watermark, username, (signature:1.4), (text), (author), deformed, amateur drawing, long neck, extra fingers, by bad-artist, missing fingers, image sample, jpeg artifacts, gif artifacts, wallpaper forced, lossy-lossless, lossless-lossy, corrupted file, duplicate, redrawn, screenshot, game screenshot, bad art, amateur drawing, odd, ((merged limbs)), ((conjoined limbs)), (poorly drawn:1.3), poorly drawn hands, poorly drawn face, deformities, conjoined, stretched torso, (heterochromia), (disproportioned), bad face, (bad details), sloppy, anime, facepaint, (wings), (tail), (animal), (cleavage:1.3) ``` ``` Steps: 20, Sampler: DDIM, CFG scale: 13, Seed: 2210442777, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires upscale: 1.8, Hires upscaler: 4x-UltraSharp, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a18.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` (masterpiece:1.0), (highest quality:1.12), (HDR:1.0), a girl, illustration, cover art, (black:1.2), (portrait), coral background, splash, (animal ear fluff:0.7) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 826826098, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Clip skip: 2, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a19.png" title=""> <figcaption><i>Untitled - hires upscale, initially sourced prompt</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors:0.8), (anime girl), 1girl, pink hair, hair bobbles, dark theme, soothing tones, muted colors, elf ears, high contrast, (natural skin texture, hyperrealism, soft light, sharp), exposure blend, medium shot, bokeh, (hdr:1.3), high contrast, (cinematic,teal and red:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), two horns, dress, all white eyes, masterpiece, top tier, extravagant, 8k ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), , (blue eyes:1.2), ugly face ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 9, Seed: 3969600209, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.39, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a20.png" 
title=""> <figcaption><i>Untitled - hires upscale, sourced prompt</i></figcaption> <small> ``` Girl with multicoloured hair, black hair, red hair, heavy rain, bad weather, black clouds, moonlight, sad, rain drops, flower field, (masterpiece:1.4), (highres), wet hair, looking_at_viewer, eye_contact, (extremely detailed background:1.2), hair_flower ``` ``` (worst quality, low quality:1.4), bad anatomy, extra fingers, extra hand, crooked fingers, badly sized fingers, cropped ``` ``` Steps: 35, Sampler: Euler a, CFG scale: 7, Seed: 2939377891, Size: 768x512, Denoising strength: 0.54, Clip skip: 2, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Anime/a21.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors), (anime girl), 1girl, impact, (winter), blizzard, time stop, sci fi, (tribal cat), eskimo, animal ear fluff, fur trim, winter hat, angry, clouds, tan skin, (feather headdress), cloth, masterpiece, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, cinematic, depth of field, octane render, intricate details, elegant, one mapping, hyper focus, parted lips, (violet), dappled sunlight, (snowing), nature, winter coat, upper body, (morning glow), lighthouse, (gold eyes), horizon ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), , (blue eyes:1.2), ugly face ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7.6, Seed: 168479386, Size: 768x512, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> </div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">General</summary> <div align="center"> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g1.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, self-prompted</i></figcaption> <small> ``` no metadata ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g2.png" title=""> <figcaption><i>Untitled - hires upscale, initially sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), abstract 1998 african white hair hiphop girl by sachin teng x supreme, attractive, stylish, designer, coral, asymmetrical, geometric shapes, graffiti, street art ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 1031384908, Size: 512x704, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRA: Contrast_LowRA(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g3.png" title=""> <figcaption><i>Untitled - hires upscale, sourced prompt</i></figcaption> <small> ``` olpntng style, Closeup of a black leopard, ferns, surrealistic, dreamlike, intricate details, pastel colors, dramatic intricate environment, butterfly, 
lumen reflections, highly detailed digital painting, smooth, sharp focus, Esao Andrews – Ernst Haeckel, digital art, oil painting, heavy strokes, paint dripping, 8k, fur texture, oil painting, heavy strokes, paint dripping ``` ``` blurry, (out of frame), (signature), (signatures), watermark, out of focus, poorly drawing, bad drawing, blur haze, cropped, cropping, extra features, extra rows of teeth, deformities, weird eyes ``` ``` Steps: 25, Sampler: DPM++ 2M Karras, CFG scale: 9, Seed: 1900795000, Size: 512x768, Denoising strength: 0.58, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g4.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, initially sourced prompt</i></figcaption> <small> ``` no metadata ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g5.png" title=""> <figcaption><i>Untitled - older hires upscale, img2img upscale, sourced prompt</i></figcaption> <small> ``` an epic fantastic realism comic book style portrait painting of a japanese robotic geisha with USSR tattoos and decals, apex legends, octane render, intricate detail, 4 k hd, unreal engine 5, ex machina, irobot, gerald brom, photorealistic, modelshoot style, kuvshinov, nvinkpunk ``` ``` disfigured, kitsch, ugly, oversaturated, grain, low-res, Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, poorly drawn hands, missing limb, blurry, floating limbs, disconnected limbs, malformed hands, blur, out of focus, long neck, long body, ugly, disgusting, poorly drawn, childish, mutilated, , mangled, old, surreal, text ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3086446090, Size: 640x960, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g6.png" title=""> <figcaption><i>Untitled - hires upscale, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), closeup of a rusted android in a corner of a basement, looking down, desolated, sad, sitting, concept art, character design, Unreal engine, vray, volumetric fog, sunbeam, insanely detailed, weathered, corroded, oxidized, rusted, decayed, flaking paint, vignette ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 619942128, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g7.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.0), (highest quality:1.12), (HDR:1.0), 1boy , (close-up:1.5), look at side, beard, blue, suit jacket, card background, (white background:1.5), [(background:1.4)::5], illustration, colorfantasystyle, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 2919182440, 
Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.25), Color Fantasy(0.4), Add_detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g8.png" title=""> <figcaption><i>Untitled - older hires upscale, initially sourced prompt</i></figcaption> <small> ``` photorealistic, ,best quality,masterpiece,highly detailed,ultra-detailed,a futuristic muscle car in a cyberpunk city at night with neon lights and rain. by josan gonzalez splash art graphic design color scheme minimalism ultra realistic unreal engine 5 hd 8k resolution trending on deviantart pinterest dslr highly rendered 4K imax hyperrealistic full colour cinematic, metal, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, (ray tracing), 8k, depth of field, octane render, intricate details, elegant, tone mapping, hyper focus, shine, reflective surface ``` ``` (tatoo, things on face :1.2),(watermark:1.2),(bored photo:1.2),no color, blurry, ugly, poor quality, deformed hands, deformed face, deformed body, extra limbs, low quality, normal quality, text, errors, bad anatomy, mutation, deformed fingers, missing fingers, low res, bad hands, cropped, deformed hands, (deformed legs:1.2), (deformed arms:1.2), (multiple arms:1.2), (signature:1.2),bad_bad,, (long body :1.3), bad anatomy , liquid body, malformed, mutated,anatomical nonsense ,bad proportions, uncoordinated body, unnatural body, disfigured, ugly, gross proportions ,mutation, disfigured, deformed, (mutation, poorlydrawn :1.2), (nsfw:1.2) ,lowres,bad anatomy, bad hands,text,error,missing fingers,extra digit,fewer digits,cropped,worst quality, low quality,normal quality,jpeg artifacts,signature,watermark,username,blurry,missing arms, long neck,Humpbacked ``` ``` Steps: 40, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 637736715, Size: 960x640, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g9.png" title=""> <figcaption><i>Untitled - older hires upscale, textual inversion, self-prompted</i></figcaption> <small> ``` realistic photo of a (lotus bloom), profile picture, icon, logo, simple background, extreme quality, masterpiece, 8k, depth of field, intricate details, __artist*__ ``` ``` censorship, ugly, old, deformed, amateur drawing, odd, fat, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, ((blurry)), ((out of focus)), watercolor, (worst quality, low quality:1.4), (seeds), grain, (hand) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7.6, Seed: 166478748, Size: 1024x1024, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g10.png" title=""> <figcaption><i>Untitled - older hires upscale, majority sourced prompt</i></figcaption> <small> ``` ((a potion bottle filled with magical elements)), magical, (workshop background with lots of other bottles and tools:1.1), intricate detail, hyper detailed, ultra realistic, sharp focus, octane render, volumetric, ray tracing, artstation trending, cgsociety, sense of awe, mystical, 4k, High Saturation Clarity 
Contrast, deep levels, sharp, retouched, color graded, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, octane render, intricate details, elegant, tone mapping, hyper focus, close up, varied depth of field ``` ``` 3d, digital art, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts,signature, watermark, username, blurry, artist name, veil, scales, comic panels, gore, blood, black and white, nsfw, pattern, patterns ``` ``` Steps: 20, Sampler: DDIM, CFG scale: 10.5, Seed: 3209935207, Size: 640x960, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g11.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` photorealistic, best quality, masterpiece, highly detailed, ultra-detailed, a futuristic sports car in a cyberpunk city at night with neon lights and rain. by josan gonzalez splash art graphic design color scheme minimalism ultra realistic unreal engine 5 hd 8k resolution trending on deviantart pinterest dslr highly rendered 4K imax hyperrealistic full colour cinematic, metal, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, depth of field, octane render, intricate details, elegant, tone mapping, hyper focus, sheen, nijimecha, SMM, fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 4010446135, Size: 640x512, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: NijiMecha(0.5), Cool and Stylish(0.35), Add_detail(0.25), fantasy00d(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g12.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` no metadata ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g13.png" title=""> <figcaption><i>Untitled - hires upscale, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), Cake, tiramisu, flowers, fruit, cream, intricate detail, dark background, HD Photography ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 3909418144, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g14.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), (ultra-detailed), cupcake, rainbow sprinkles, photograph, decorated, cherry-on-top, pink chocolate drizzle, food photography ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry) 
``` ``` Steps: 25, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 279206068, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Clip skip: 2, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: Contrast_LowRA(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g15.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors), anime man, barbarian, fit, short beard, glowing eyes, perpetual, impact, gladiator glory, throne, time stop, space age, (powerful), (holy halo), joyful, shape background, warrior, clouds, (fantasy:0.8), tan skin, helm, cape, aura, glass walkway, upper body, metal, masterpiece, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, cinematic, depth of field, octane render, intricate details, elegant, tone mapping, hyper focus, close up, upper body, (blue), dappled sunlight, small gold particles, short hair ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), censorship, ugly, old, deformed, amateur drawing, odd, fat, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, ((blurry)), ((out of focus)), watercolor, (worst quality, low quality:1.4), blurry, text, (heterochromia:1.3), (feminine), (shirtless:1.3) ``` ``` Steps: 28, Sampler: Euler a, CFG scale: 7.6, Seed: 2144691385, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/General/g16.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` best quality,masterpiece,highly detailed,ultra-detailed, RAW, analog style, ( 1 futuristic sports car no humans:1.2), high detailed skin, skin details, sharp focus, volumetric fog, 8k uhd, dslr, high quality, film grain, Fujifilm XT3 metal, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, depth of field, octane render, intricate details, elegant, tone mapping, hyper focus, shine, 111cine8matic55 ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, signature, copyright, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 40, Sampler: DPM++ SDE Karras, CFG scale: 7, Seed: 694693295, Size: 768x448, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: CinematicStyle(0.5), Add_detail(0.6), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> </div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">LoRa</summary> <div align="center"> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l1.png" title="Unbreakable"> <figcaption><i>Unbreakable - hires upscale, img2img upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` extreme quality, cg, detailed face, (bright colors:1.0), (anime), 1girl, (fox ears, 
animal ear fluff, (fluffy hair), white hair, medium hair, gold lens sunglasses, (sporty:0.8):1.2), (dark skin:1.2), solo, floating hair, looking at viewer, cute serious, smirk, glowing, animated, jacket, glitch, cinematic lighting, strong contrast, high level of detail, (flat color:0.6), masterpiece, best quality, 8k, white background, broken glass, explosion), tactical clothing, wearing sunglasses, bj_Fault art, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), (fire:1.3) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 907543725, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.4), Fault Art(0.55), FilmVelvia2(-0.15), Add_detail(0.25), Niji Default Style_v2(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l2.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` masterpiece, best quality, 1girl, closed eyes, upper body, splashing, abstract, psychedelic, neon, (honeycomb pattern), (creative:1.3), sy3, SMM, fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 2160912965, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: Bubble Drip(0.45), Cool and Stylish(0.45), Add_detail(0.15), fantasy00d(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l3.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), detailed face+eyes, (1girl), solo, wearing tribal headdress, tribal aesthetic, ultra-detailed, highres, absurdres, (hair flaps), (gamma:1.3), (creative:1.3), negative space, starlit path, long hair, (explosion wave:1.2), sound barrier, time stop, extreme quality, cg unity wallpaper, anime, (marroon palette), SMM, bj_fault art ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), center line, split, vertical line, (fire) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 2504153053, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.65), Fault Art(0.5), FilmVelvia2(0.15), Add_detail(0.15), Niji Default Style_v2(0.2), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l4.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), a cute kitten, animal, fluffy, solo, (adorable), natural lighting, teal and yellow, (expressive cartoon), expressive face, (synthwave:1.2) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad 
quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3312163897, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: FilmVelvia2(-0.25), Add_detail(-0.05), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l5.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` extreme quality, cg, detailed face+eyes, (bright colors), (anime), 1girl, impact, (winter), blizzard, time stop, sci fi, (tribal cat), (eskimo), animal ear fluff, fur trim, clouds, tan skin, (feather headdress), masterpiece, top tier, extravagant, 8k, unity wallpaper, unreal engine 5, ray tracing, 8k, cinematic, varied depth of field, octane render, elegant, tone mapping, hyper focus, parted lips, (indigo), dappled sunlight, (snowing), nature, winter coat, upper body, (morning glow), lighthouse, gold eyes, horizon, picturesque scenery, mountain, forest, looking at viewer, (tundra), SMM, bj_fault art ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1921118488, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish_SMM(0.25), Fault Art(0.4), Add_detail(0.15), FilmVelvia2(0.1), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l6.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), 1girl, mecha, robot, armor, bodysuit, mechanical arms, mysterious expression, magical, magical effects like sparkles or energy, flowing robes, mystical background, rim lighting, side lighting, cinematic light, ultra high res, 8k uhd, film grain, best shadow, delicate, RAW, light particles, detailed skin texture, detailed cloth texture, beautiful detailed face, intricate details, ultra detailed, mecha musume, mechanical arms, headgear, bodysuit, (plants:1.3), gold, luxury, (purple), (looking at viewer), nijimecha, SMM, fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3882651620, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Clip skip: 2, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: NijiMecha(0.65), Cool and Stylish_SMM(0.35), Add_detail(0.25), fantasy00d(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l7.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` extreme quality, cg, (bright colors:0.8), high quality, a beautiful girl with tiger ears, flat color, (limited palette), high contrast, golden eyes, looking up at viewer, upper body, portrait, ethereal, (blue skin), crop 
hoodie, pretty face, natural sunlight, masterpiece, best quality, 8k, 111cine8matic55 ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (shirtless:1.3), ugly face ``` ``` Steps: 18, Sampler: Euler a, CFG scale: 7, Seed: 1387472521, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: CinematicStyle(0.65), Add_detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l8.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, img2img upscale, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), extreme quality, cg, (negative space), detailed face+eyes, 1girl, fox ears, animal ear fluff, (plants:1.18), (fractal art), (bright colors), splashes of color background, colors mashing, paint splatter, complimentary colors, neon, (thunder tiger), compassionate, electric, limited palette, synthwave, fine art, tan skin, upper body, (green and orange:1.2), time stop, sy3, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 4079573538, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Bubble Drip(0.45), Add_detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l9.png" title=""> <figcaption><i>Untitled - older hires upscale, textual inversion, sourced prompt</i></figcaption> <small> ``` (nvinkpunk:1.2) (snthwve style:0.8) lion, anthro, lightwave, sunset, intricate, highly detailed ``` ``` cartoon, 3d, ((disfigured)), ((bad art)), ((deformed)), ((poorly drawn)), ((extra limbs)), ((close up)), ((b&w)), weird colors, blurry ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 9, Seed: 890485019, Size: 768x1024, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l10.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, LoRa, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), extreme quality, cg, (negative space), detailed face+eyes, 1girl, fox ears, animal ear fluff, (plants:1.18), (fractal art), (bright colors), splashes of color background, colors mashing, paint splatter, complimentary colors, neon, (thunder tiger), compassionate, electric, limited palette, synthwave, fine art, tan skin, upper body, (teal and white:1.2), time stop, colorfantasystyle, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 3467711840, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Color Fantasy(0.55), Add_detail(0.2), Splash_v1.1(0.3), Discard penultimate sigma: 
True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l11.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, light manual touch-up, LoRa, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), 1girl, mecha, robot, armor, bodysuit, mechanical arms, mysterious expression, magical, magical effects like sparkles or energy, flowing robes, mystical background, rim lighting, side lighting, cinematic light, ultra high res, 8k uhd, film grain, best shadow, delicate, RAW, light particles, detailed skin texture, detailed cloth texture, beautiful detailed face, intricate details, ultra detailed, mecha musume, mechanical arms, headgear, bodysuit, (plants:1.3), gold, luxury, (violet), (looking at viewer), nijimecha, SMM, fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 2446927677, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: NijiMecha(0.65), Cool and Stylish(0.35), Add_detail(0.25), fantasy00d(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l12.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), extreme quality, cg, (negative space), detailed face+eyes, 1girl, fox ears, animal ear fluff, (plants:1.18), (fractal art), (bright colors), splashes of color background, colors mashing, paint splatter, complimentary colors, neon, (thunder tiger), compassionate, electric, limited palette, synthwave, fine art, tan skin, upper body, (green and orange:1.2), time stop, sy3, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 3438019576, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Bubble Drip(0.45), Add_detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l13.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, self-prompted</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), extreme quality, cg, (negative space), detailed face+eyes, 1girl, fox ears, animal ear fluff, (plants:1.18), (fractal art), (bright colors), splashes of color background, colors mashing, paint splatter, complimentary colors, neon, (thunder tiger), compassionate, electric, limited palette, synthwave, fine art, tan skin, upper body, (green and orange:1.2), time stop, sy3, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 4240446306, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, 
Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Bubble Drip(0.45), Add_detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l14.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, sourced prompt</i></figcaption> <small> ``` masterpiece, best quality, 8K, highly detailed, 4k, very long hair, (hair flaps), (shiny hair), flipped hair, grin, ((monochrome)), yellow eyes, close-up, straw hat, (shaded face), white sundress, slit pupils, (anime) by WLOP, trending on ArtStation, bj_Fault art, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3), (fire:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 283841059, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Fault Art(0.55), FilmVelvia2(0.1), Add_detail(0.15), Niji Default Style_v2(0.2), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l15.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), wallpaper, (highly detailed), [street, wall:(1girl), (solo), pale skin, [black eyes|red eyes], (hollow eyes), black hair, long hair, (liquid hair:1.2), floating hair, bangs, expressionless, (black goo:1.4), (white dress:1.2), (white skirt), white, intricated filigree, (stained clothes:1.2):0.25], (black goo:1.4), (black dripping), (black splashing:0.85), (tentacles:0.85), shiny, [:face focus, upper body, (cowboy shot), lateral view, dutch angle, dynamic:0.25], [white background|black goo], volumetric lighting, (high contrast:0.85), (limited palette:0.65), colorfantasystyle, SMM ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (cleavage:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1835800510, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRa: Cool and Stylish(0.45), Color Fantasy(0.55), Add_detail(0.2), Splash_v1.1(0.3), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/LoRa/l16.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, initially sourced prompt</i></figcaption> <small> ``` masterpiece, best quality, 1girl, closed eyes, upper body, splashing, abstract, psychedelic, neon, (honeycomb pattern), (creative:1.3), sy3, SMM, fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry), (nsfw, cleavage:1.3) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 3121169266, Size: 512x640, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRa: Bubble Drip(0.45), Cool and Stylish(0.45), Add_detail(0.35), fantasy00d(0.25), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> </div></details> --- <details><summary 
style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Scenery</summary> <div align="center"> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s1.png" title="Wayward Insight"> <figcaption><i>Wayward Insight - hires upscale, img2img upscale, self-prompted</i></figcaption> <small> ``` no metadata ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s2.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, photorealistic, (bright colors:0.9), light, elemental, water magic, blue, water, magical, righteous, (outdoors), masterpiece, 8k, (tone mapping, hyper focus:0.5), limited palette, (dappled sunlight:0.7), reflective surface, orange, wholesome, (varied depth of field:0.8), complimentary colors, particle dust, green embers, (ancient), (pyramid:0.7), flood, (vegetation), plants, (destiny 2:1.25), (no humans:1.5), (neon cyber technology:1.27), (architecture:0.8), magical, ruins, (catacomb:0.7), ripples, granite, detailed texture, flower, marble, vine ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), censorship, ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (low quality:1.4), asymmetrical eyes, metal, multicolored hair, red eyeliner, (multicolored hair:1.5), off center, dragon horns, bull horns, goat horns, single horn, pointy ears, (tanlines:1.5), lowres, error, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, (cropped:1.2), watermark, username, (signature:1.4), (text), (author), blurry, out of focus, deformed, amateur drawing, long neck, extra fingers, by bad-artist, missing fingers, image sample, jpeg artifacts, gif artifacts, wallpaper forced, lossy-lossless, lossless-lossy, corrupted file, duplicate, redrawn, screenshot, game screenshot, bad art, amateur drawing, odd, ((merged limbs)), ((conjoined limbs)), (poorly drawn:1.3), poorly drawn hands, poorly drawn face, deformities, conjoined, stretched torso, (heterochromia), cel shading, (disproportioned), bad face, (bad details), sloppy, (underwater, gun, weapon, tall, metal, (city), blurry foreground ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 8, Seed: 58025803, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s3.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, (colorful), (a realistic photo of a mountain aesthetic scene:1.2), visually appealing, ,, (varied depth of field:0.76), ,, masterpiece, 8k, tone mapping, hyper focus, indigo, limited palette, Ufotable aesthetic, (clarity), (Diarmuid Byron O'Connor), (smug:0.8), (no humans:1.6), picturesque scenery, landscape, plant, horizon, sky, ,, epic, nature ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, 
normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (humans:1.5), (girl:1.5), (1girl:1.6), (1boy:1.5), (creature:1.5) ``` ``` Steps: 20, Sampler: DDIM, CFG scale: 13, Seed: 2983311593, Size: 960x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Clip skip: 2, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s4.png" title="Zen Garden"> <figcaption><i>Zen Garden - older hires upscale, sourced prompt</i></figcaption> <small> ``` photo of a beautiful zen garden in the moutains, golden ratio, cinematic lighting, intricate details, 8k detail post processing, hyperealistic, professional photograph, soft focus, f2.8, postprocessing ``` ``` 3d, digital art, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts,signature, watermark, username, blurry, artist name, pattern, patterns, black and white ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 10.5, Seed: 1997751605, Size: 640x960, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s5.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` realistic photo of reflective marble flooring, nature, luxury, (anime throne), plants, supreme, pillar, extreme quality, masterpiece, 8k, depth of field, intricate details, mirrorless ``` ``` censorship, ugly, old, deformed, amateur drawing, odd, fat, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, ((blurry)), ((out of focus)), watercolor, (worst quality, low quality:1.4), (seeds), grain, (hand) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7.6, Seed: 854718657, Size: 1024x1024, Denoising strength: 0.58, First pass size: 0x0 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s6.png" title="Cloud"> <figcaption><i>Cloud - method, prompt source</i></figcaption> <small> ``` extreme quality, cg, solo, (colorful:0.7), (a photo of a futuristic aesthetic scene:1.2), visually appealing, depth of field, fog, masterpiece, 8k, tone mapping, hyper focus, white, (limited palette:0.85), (clarity), (by Jarrod Castaing:1.45), curious, picturesque scenery, landscape, above clouds,sand, epic, nature, adorable, (anime wallpaper), wallpaper engine, photorealistic, dappled sunlight, (alone) ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (1boy:1.5), (creature:1.5), (ufo:1.5), (halo:1.5), (engine), stretched image, (book), (reading), deformed body, deformed figure, mutated body, mutated legs ``` ``` Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 7.5, Seed: 97890808, Size: 832x512, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Clip skip: 4, Hires upscale: 2, Hires steps: 18, Hires upscaler: 
R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s7.png" title="Foresight"> <figcaption><i>Foresight - hires upscale, img2img upscale, LoRa, initial sourced prompt</i></figcaption> <small> ``` extreme quality, cg, (bright colors:0.8), A up close photo of of the backside of a woman standing on a cliff overlooking a vast, serene lake. She is looking away from the camera out at the sunset. The mountains in the distance are reflected in the water, and the golden hues of the setting sun paint the sky in breathtaking colors., masterpiece, best quality, 8k, 111cine8matic55 ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.6), (shirtless:1.3) ``` ``` Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1825374680, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, LoRA: CinematicStyle(0.65), Add_Detail(0.15), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s8.png" title=""> <figcaption><i>Untitled - hires upscale, LoRa, initial sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0), (cinematic masterpiece), (cinematic spotlight), ((caustic)), ultra wide shot, super detail, cinematic lighting, HDR, impressive, ultra resolution photo of an imaginative and otherworldly scene of an ocean filled with planets, stars, and nebulas, hyperrealistic surrealism, award winning masterpiece with incredible details, epic stunning, (natural skin texture, hyperrealism, soft light, sharp), fantasy00d ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, text, (blurry) ``` ``` Steps: 25, Sampler: Euler a, CFG scale: 7, Seed: 1074882576, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires upscaler: 4x-UltraSharp, LoRA: Add_Detail(0.35), Fantasy00d(0.3), Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s9.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, solo, (colorful:0.85), (a photo of a jungle aesthetic scene:1.2), visually appealing, depth of field, ,, masterpiece, 8k, tone mapping, hyper focus, black, (limited palette:0.85), (clarity), (by Mala Breuer:1.45), longing, picturesque scenery, landscape, coast,island,sky, epic, nature, adorable, (anime wallpaper), wallpaper engine, photorealistic, dappled sunlight, (alone), mood lighting, best shadow, high fantasy ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (1boy:1.5), (creature:1.5), (ufo:1.5), (halo:1.5), (engine), stretched image, (book), (reading), deformed body, deformed figure, mutated body, mutated legs ``` ``` Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 7.5, Seed: 2919981875, Size: 832x512, Model hash: 1504f30200, 
Model: SuperMix1, Denoising strength: 0.58, Clip skip: 3, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s10.png" title=""> <figcaption><i>Untitled - hires upscale, initially sourced prompt</i></figcaption> <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.3), Architectural Digest photo of a maximalist blue (vaporwave/steampunk/solarpunk) living room with lots of flowers and plants, golden light, hyperrealistic surrealism, award winning masterpiece with incredible details, epic stunning, (bedroom aesthetic) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, ((blurry),) (cropped), ((out of focus)), watercolor, ugly, grain ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 8, Seed: 1756742325, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s11.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, light manual touch-up, initial sourced prompt</i></figcaption> <small> ``` night, scenery, (mountanious_horizon), horizon, sunset, city, river, city faraway, sky, cloudy_sky, night, dark, starry_sky, fantasy, fantasy_city, fantasy world, (((medieval))), ((mountain, mountains)) ``` ``` (worst quality, low quality:1.4), bad anatomy, extra fingers, extra hand, crooked fingers, badly sized fingers, cropped ``` ``` Steps: 21, Sampler: DPM++ SDE Karras, CFG scale: 6.5, Seed: 502209660, Size: 832x512, Denoising strength: 0.58, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s12.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, photorealistic, (bright colors:0.9), desert oasis scene, sand, (green oasis:1.3), (oasis water:1.34), outdoors, masterpiece, 8k, (tone mapping, hyper focus:0.5), limited palette, red, scared, (varied depth of field:0.8), complimentary colors, particle dust, (ancient), (pyramid:0.8), loose desert, (destiny 2:1.25), (no humans:1.5), (neon cyber technology:1.27), (architecture:0.8), magical, ruins, (catacomb:0.7), activation ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), censorship, ugly, old, deformed, amateur drawing, odd, fat, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (low quality:1.4), asymmetrical eyes, metal, multicolored hair, red eyeliner, (multicolored hair:1.5), off center, dragon horns, bull horns, goat horns, single horn, pointy ears, (tanlines:1.5), lowres, error, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, (cropped:1.2), watermark, username, (signature:1.4), (text), (author), blurry, out of focus, deformed, amateur drawing, long neck, extra fingers, by bad-artist, missing fingers, image sample, jpeg artifacts, gif artifacts, wallpaper forced, lossy-lossless, lossless-lossy, corrupted file, duplicate, redrawn, 
screenshot, game screenshot, bad art, amateur drawing, odd, ((merged limbs)), ((conjoined limbs)), (poorly drawn:1.3), poorly drawn hands, poorly drawn face, deformities, conjoined, stretched torso, (heterochromia), cel shading, (disproportioned), bad face, (bad details), sloppy, (underwater, gun, weapon, tall, metal, (city), blurry foreground ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 8, Seed: 1266378264, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s13.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, (colorful), (a realistic photo of a jungle aesthetic scene:1.2), visually appealing, ,, (varied depth of field:0.76), fog, masterpiece, 8k, tone mapping, hyper focus, magenta, limited palette, Ufotable aesthetic, (clarity), (Jef Murray), (loving:0.8), (no humans:1.6), picturesque scenery, landscape, plant, horizon, sky, ,, epic, nature ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (humans:1.5), (girl:1.5), (1girl:1.6), (1boy:1.5), (creature:1.5) ``` ``` Steps: 20, Sampler: DDIM, CFG scale: 13, Seed: 680413774, Size: 960x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.36, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s14.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, solo, (colorful:0.85), (a photo of a ancient chinese aesthetic scene:1.2), visually appealing, depth of field, ,, masterpiece, 8k, tone mapping, hyper focus, violet, (limited palette:0.85), (clarity), (by Otto Mengelberg:1.45), begging, picturesque scenery, landscape, sand,desert, epic, nature, adorable, (anime wallpaper), wallpaper engine, photorealistic, dappled sunlight, (alone), mood lighting, best shadow, high fantasy ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (1boy:1.5), (creature:1.5), (ufo:1.5), (halo:1.5), (engine), stretched image, (book), (reading), deformed body, deformed figure, mutated body, mutated legs ``` ``` Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 7.5, Seed: 2430179145, Size: 832x512, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Clip skip: 4, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s15.png" title=""> <figcaption><i>Untitled - hires upscale, img2img upscale, self-prompted</i></figcaption> <small> ``` no metadata ``` </small> <img 
style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s16.png" title=""> <figcaption><i>Untitled - hires upscale, majority sourced prompt</i></figcaption> <small> ``` (masterpiece:1.0), (highest quality:1.12), (HDR:1.0), (dark shot:1.22), old, (RAW photo), water, trending on ArtStation, alien landscape and vegetation, adhesives, middle ground, (tilt shift photography:1.2) ``` ``` EasyNegative, (badv2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7, Seed: 1498514055, Size: 512x768, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.44, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True, Version: v1.0.0-pre-1578-g394ffa7b ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s17.png" title=""> <figcaption><i>Untitled - hires upscale, sourced prompt</i></figcaption> <small> ``` masterpiece, best quality, wide shot of autumn forest scenery, sunset, sunbeams ``` ``` (worst quality, low quality:1.4), pixelated, film grain ``` ``` Steps: 20, Sampler: DPM++ 2S a Karras, CFG scale: 7, Seed: 1275953479, Size: 768x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Clip skip: 3, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B, Discard penultimate sigma: True ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s18.png" title=""> <figcaption><i>Untitled - no upcscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, rural prairie, tall grass, mountain, horizon, dappled sunlight, bush, 8k, (no humans:1.5) ``` ``` badv2, (worst quality, low quality:1.4) ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7.6, Seed: 3318385742, Size: 768x768 ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s19.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, (colorful:0.7), (a realistic photo of a winter aesthetic scene:1.2), visually appealing, depth of field, ,, masterpiece, 8k, tone mapping, hyper focus, red, (limited palette:0.85), Ufotable aesthetic, (clarity), (by Robin Wood:1.45), serious, (no humans:1.6), picturesque scenery, landscape, plant, horizon, sky, water, epic, nature ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (humans:1.5), (girl:1.5), (1girl:1.6), (1boy:1.5), (creature:1.5), (ufo:1.5) ``` ``` Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 7.6, Seed: 896865056, Size: 960x576, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Clip skip: 2, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s20.png" title=""> <figcaption><i>Untitled - hires upscale, self-prompted</i></figcaption> <small> ``` extreme quality, cg, solo, (colorful:0.85), (a photo of a tribal aesthetic scene:1.2), visually appealing, depth of field, petals, 
masterpiece, 8k, tone mapping, hyper focus, yellow, (limited palette:0.85), (clarity), (by Tinus van Doorn:1.45), crying, picturesque scenery, landscape, desert,plant, epic, nature, adorable, (anime wallpaper), wallpaper engine, photorealistic, dappled sunlight, (alone), mood lighting, best shadow, high fantasy ``` ``` nsfw, censorship, deformed, amateur drawing, odd, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, out of focus, cell shading, watercolor, (worst quality, low quality:1.4), (1boy:1.5), (creature:1.5), (ufo:1.5), (halo:1.5), (engine), stretched image, (book), (reading), deformed body, deformed figure, mutated body, mutated legs ``` ``` Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 7.5, Seed: 680247863, Size: 832x512, Model hash: 1504f30200, Model: SuperMix1, Denoising strength: 0.58, Clip skip: 4, Hires upscale: 2, Hires steps: 18, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` </small> <img style="margin-top: 7%;" src="https://huggingface.co/Ocean3/SuperMix/resolve/main/2)%20Previews/Scenery/s21.png" title=""> <figcaption><i>Untitled - older hires upscale, self-prompted</i></figcaption> <small> ``` realistic photo of a (seedling emerging through dirtL1.3), extreme quality, masterpiece, 8k, depth of field, intricate details, Zhang Kechun ``` ``` censorship, ugly, old, deformed, amateur drawing, odd, fat, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, ((blurry)), ((out of focus)), watercolor, (worst quality, low quality:1.4), (seeds), grain ``` ``` Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7.6, Seed: 2434833613, Size: 1024x1024, Denoising strength: 0.58, First pass size: 0x0 ``` </small> </div></details> --- <div align="center" style="margin-top: -4%;"><a href="https://huggingface.co/Ocean3/SuperMix/tree/main/Previews" target="_blank">View More</a></div> <div align="center"><p style="font-size:90%; background-color:#f5f6ff; color:#173978;">Note</p></div> <p style="font-size:90%;">SuperMix1 was originally merged and tested on a much <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/tree/4b3c5bc24bffdf429c463a465763b3077fe55eb8">older version</a> of Automatic1111 WebUI. Due to this, I suggest enabling -> <i>settings/compatibility/</i><b>use old karras scheduler sigmas (0.1 to 10)</b> compatibility setting when using Karras samplers or when trying to recreate some example images. This is completely optional and shouldn't be needed - I have not personally tested enough with this setting turned off on the newer webUI versions.</p> --- # General Use <img src="https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img3.png" title=general-use> This model is fairly versatile when it comes to a general use configuration and parameters. <br>In short, I would suggest starting simple and experimenting with what works best for you and your prompt at the time. Perhaps try some older prompts and configurations, one of the examples, or start from scratch and go from there. SuperMix1 really shines with a good prompt. You may experience some messy anatomy/hands until you find a good configuration + prompt; you'll know when you do. Keep in mind this model is geared more toward portrait-style generations. 
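If you prefer 🤗 diffusers over the A1111 WebUI, a rough equivalent of the settings discussed in this card might look like the sketch below. This is my own illustration, not part of the original card: the checkpoint filename, the VAE repo id, and the `clip_skip` argument (which needs a fairly recent diffusers release) are assumptions, and A1111-style `(token:1.2)` attention weights are not parsed by the plain pipeline.

```python
# Sketch only - filenames, the VAE repo id, and clip_skip availability are assumptions.
import torch
from diffusers import StableDiffusionPipeline, AutoencoderKL, DPMSolverMultistepScheduler

# Load the single-file checkpoint distributed in this repository
pipe = StableDiffusionPipeline.from_single_file("SuperMix1.safetensors", torch_dtype=torch.float16)

# Roughly "SuperMix_A.vae" (a renamed vae-ft-ema-560000-ema)
pipe.vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema", torch_dtype=torch.float16)

# Approximates the "DPM++ 2M Karras" sampler used in many of the examples
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
pipe = pipe.to("cuda")

image = pipe(
    "extreme quality, cg, detailed face+eyes, masterpiece, 8k, tone mapping, hyper focus",
    negative_prompt="bad quality, worst quality, watermark, blurry",
    num_inference_steps=20,
    guidance_scale=7.0,
    width=512,
    height=768,
    clip_skip=2,  # assumption: requires a diffusers version that supports clip_skip
).images[0]
image.save("supermix_sample.png")
```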
There are many different examples of various configurations used in the <a href="./SuperMix#previews">Previews</a> section and <a href="https://civitai.com/models/89213?modelVersionId=94946" target="_blank">CivitAI</a> pages - feel free to explore your own styles. An additional img2img upscale at lower denoising values can do really well and bring a clean polish to output images. Keep in mind you may lose some very fine detailing depending on your parameters, though you can also merge two upscales together for the best of both ranges. --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Prompts</summary> <div style="margin-top: 7%;"> SuperMix can excel with both simple and complex prompt styles. Start simple and small, then expand from there. 👑 In my opinion, prompts are king - one of the largest factors in a generation. Be keen about what you're using and how you're using it; what may conflict with something else; and how everything plays together with other parameter factors. <br><i>(i.e. sampler, steps, scale, clip skip, seed, lora, etc.)</i> <br><br><b>Note:</b> artist tokens can hold a lot of weight in outputs; use at your own discretion. * **Positive Prompts:** Simple goes a long way as a starting point, but you can really direct the model style-wise with some added structure. Try anything you find that works well with your other parameters. Here are a few starting points. <small> ``` (masterpiece:1.1), (highest quality:1.1), (HDR:1.0) ``` ``` extreme quality, cg, detailed face+eyes, (colorful:0.8), <content>, masterpiece, 8k, tone mapping, hyper focus ``` </small> * **Negative Prompts:** This model can do well with a simple negative prompt or a negative embedding(s), but can also do really well with some structure to the negative prompt as far as styling direction, undesired quality, etc. Keep possible conflicts with your positive prompt in mind, and maybe don't go too complex - but try anything that works! <small> ``` (bad quality:1.3), (worst quality:1.3) ``` ``` EasyNegative, (bad_prompt_version2:0.8), (badhandv4:1.18), (bad quality:1.3), (worst quality:1.3), watermark, (blurry), (cropped), (nsfw:1.3), (cleavage:1.3) ``` </small> You can check the <a href="./SuperMix#previews">Previews</a> for more examples.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Hires Fix</summary> <div style="margin-top: 7%;"> * **Hires Denoising:** I tend to do a range between **~0.3-0.6**; I haven't really tried much else so far though. Experiment to see what works best for your parameters and prompts at the time. * **Hires Upscaler:** Upscalers seem to produce slightly different results between themselves - though I find any of them seem to work. I'm not sure what is typically used, though I mainly use **R-ESRGAN 4x+ Anime6B** or **4x-UltraSharp**. 
Use what you think is best as always.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Sampling Steps</summary> <div style="margin-top: 7%;"> I suggest starting with **~18-30** step values; you can go lower or higher and see what works well with your prompt, sampler and other parameters.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Samplers</summary> <div style="margin-top: 7%;"> Most of my tests with this model were using samplers: * **Euler a** * **DPM++ 2M Karras** * **DPM++ SDE Karras** * **DDIM** I also tried a bit of the **DPM++ 2S a Karras** and **PLMS** samplers. <br>I am unsure on the rest. Each sampler has its own styling and plays differently with your prompt and other parameters at the time. <br><br>I suggest trying out what you typically use, then try out some of the others and see how they play with your other configurations and prompt. <br><br>Do note that some samplers may make use of certain terms/tokens of your prompt and other parameters differently than others. You may find better results with one sampler and "prompt a", then better results with another sampler and "prompt b", etc.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Scale</summary> <div style="margin-top: 7%;"> **CFG Scale** may largely be dependent on your prompt, sampler, etc. That said, I generally suggest starting at the default **7** and adjusting from there -> **~6.5-10** <br><br>I have had good results with higher scales **~13-16** on samplers such as DDIM for example, depending on the prompt and other factors used. This is not to say lower values do not work well too. The same can be said for other samplers and value ranges. <br>Experiment and see what works best for you and your prompt 👍</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Clip Skip</summary> <div style="margin-top: 7%;"> * **Clip Skip 1** - great with most samplers, especially Euler a in my experience. * **Clip Skip 2** - also great with most samplers; it tends to be more 'literal' with various tokens in a prompt depending on sampler and other parameters. <br><br>Both work great and will each produce different styles and results - this is part of the reason I didn't go with some of the other test model variations, due to the imbalance of quality between the two clip skip variations. I suggest trying both, or even together in the same generation with the built-in X/Y/Z plot script. <br><br>You can always try higher as well; I have seen some good results with **Clip Skip 3-6**.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">VAE</summary> <div style="margin-top: 7%;"> Use any VAE you prefer. I typically use **vae-ft-ema-560000-ema**. * **"SuperMix_A.vae"** (renamed SD vae-ft-ema-560000-ema.vae) <br>Recommended - bright vivid/bold colors * **"SuperMix_B.vae"** (renamed kl-f8-anime2.vae) <br>Very Similar - different details at times * **"SuperMix_C.vae"** (renamed Anything_v3.vae) <br>Another option - moderate colors/saturation in comparison <br>**vae-ft-mse-840000-ema** and [ClearVAE_V2.3](https://civitai.com/models/22354/clearvae_) can also be good options. 
<br><br>**Note:** model names containing "-bv" or "-bakedVAE" include a baked-in VAE, making the use of separate VAE files no longer needed.</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Upscaling</summary> <div style="margin-top: 7%;"> A secondary img2img upscaling after generation can really bring out clarity in images and iron out details with this model. Keep in mind this can also soften some texturing detail depending on your settings. This is not needed of course, but can really sharpen up some generations. Use the settings or extension(s) that work best for you. <br><br>I generally use the built-in SD upscale script with: * the same **base model** * the **same or similar** prompt * **DPM++ SDE Karras** sampler * **20** sampling steps * **7** cfg scale * a low denoising strength **~0.08-0.3** * a random seed, **-1** * tile overlap **~176-208** * scale factor **x2** * upscaler **R-ESRGAN 4x+ Anime6B** or **4x-UltraSharp** * loRa usually turned off</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">ENSD & Eta</summary> <div style="margin-top: 7%;"> I've only used the webUI defaults: * **0** Eta noise seed delta * **0** Eta for DDIM (noise multiplier) * **1** Eta for ancestral samplers (noise multiplier)</div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Other Settings</summary> <div style="margin-top: 7%;"> For the example images I used -> <i>settings/compatibility/</i><b>use old karras scheduler sigmas (0.1 to 10)</b> compatibility setting, which affects Karras samplers. This is completely optional and shouldn't be needed. This setting better replicates some of the older webUI versions. I have not personally tested enough with this setting turned off on the newer webUI versions.</div></details> --- <div align="center"><p style="font-size:90%; background-color:#fff0f0; color:#8a0000;">Disclaimer</p></div> <p style="font-size:90%; margin-top: -5px;">This model(s) may output NSFW content unintentionally depending on parameters used. Make sure you tailor your prompts accordingly. For example, "nsfw" in the negative prompt. <br><br>The purpose of sharing this model is not to showcase obscene material in a public forum. The use of this learning model is entirely at the discretion of the user, and they have the freedom to choose whether or not to create SFW or NSFW content. The decision of whether to engage with SFW or NSFW content lies with the user and their own personal preferences. The AI model(s) do not contain explicit visual content that can be accessed easily.</p> --- # Embeddings I initially hadn't used any negative embeddings or otherwise, but I have tried out a few recently, as shown in some of the preview images. Try any you find reasonable or none at all 👍. 
<br><br>Here are a few **negative embeddings**: * <a href="https://civitai.com/models/55700/badprompt-negative-embedding">bad_prompt_version2</a> (aka "badv2" in the example images) * <a href="https://huggingface.co/datasets/gsdf/EasyNegative">EasyNegative</a> * <a href="https://civitai.com/models/16993?modelVersionId=20068">badhandv4</a> --- # Recipes <img src="https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img4.png" title=recipes> ## SuperMix1 | Model | Hash | Weighted Sum | | ----------- | ----------- | - | | [AOM2_hard](https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors) | 0fc198c490 | start | | [DreamLike_Diffusion_v1.0](https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/tree/main) | 0aecbcfa2c | 20% (.2) | | [Protogen_x3.4](https://civitai.com/models/3666/protogen-x34-photorealism-official-release) | 61a37adf76 | 15% (.15)| | [Anything_v3](https://huggingface.co/Linaqruf/anything-v3.0) | 543bcbc212 | 50% (.5) | | [Dawgsmix_v1](https://civitai.com/models/1585/dawgsmix)| 05135646f0 | 20% (.2) | | [Trinart_v2](https://huggingface.co/naclbit/trinart_stable_diffusion_v2/tree/main) | 776af18775 | 20% (.2) | | [EimisAnimeDiffusion_v1](https://huggingface.co/eimiss/EimisAnimeDiffusion_1.0v/tree/main) | 39ee30561f | 20% (.2) | | [Healy's Anime Blend_v1.7](https://civitai.com/models/1400/healys-anime-blend) | 8416edf88c | 20% (.2) | | [8528d-final](https://huggingface.co/ckpt/8528-diffusion/tree/main) | 4a1c4626a9 | 20% (.2) | | [Anything_v3](https://huggingface.co/Linaqruf/anything-v3.0) | 543bcbc212 | 30% (.3) | | [AOM2_hard](https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors) | 0fc198c490 | 70% (.7) | | [HassanBlend_v1.4](https://civitai.com/models/1173/hassanblend-1512-and-previous-versions) | eb172d270d | 2.5% (.025) | | Zeipher-f222 | 9e2c6ceff3 | 2.5% (.025) | | [StableDiffusion_v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) | e1441589a6 | 5% (.05) | | Cleaned and pruned via [Model Toolkit](https://github.com/arenatemp/stable-diffusion-webui-model-toolkit) | 1504f30200 | **SuperMix1** | | | <p style="margin-top:-7%;"><div align="center"><figcaption><i>individual model license(s) listed below</i></figcaption></div></p> --- # Alternate Versions <img src="https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img5.png" title=alternate-versions> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;"><b>SuperMix1-Pre</b> &nbsp; <small><i>2edb971aa8</i></small></summary> <div style="margin-top: 7%;"> The pre-start, or first part, of the SuperMix1 mix. <br>This Model wasn't intended to be a stand alone mix, however acted as a breakpoint while testing further iterations. This model can currently produce some unique 2d illustration/flatter color lineart styles merged with a paint-like photographic scenery feel. Simple, a bit messy, and a bit aesthetic! [Download](https://huggingface.co/Ocean3/SuperMix/resolve/main/3\)%20Alternate%20Versions/SuperMix1-Pre.safetensors) | [CivitAI](https://civitai.com/models/107775/supermix-pre-lineart-style) </div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;"><b>SuperMix1-Alt1</b> &nbsp; <small><i>bf574ab6e8</i></small></summary> <div style="margin-top: 7%;"> A minor change in comparison to SuperMix1. <br>Alt1 uses *Trinart-Derrida* in place of *Trinart2*. 
Depending on generation, this change can bring out some different results that some may find more pleasing. I've included this alternate version as another option and personally find both to function quite well. [Download](https://huggingface.co/Ocean3/SuperMix/resolve/main/3\)%20Alternate%20Versions/SuperMix1-Alt1.safetensors) | [CivitAI](https://civitai.com/models/89213?modelVersionId=115856) </div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;"><b>SuperMix1-Alt2</b> &nbsp; <small><i>aa6c524a48</i></small></summary> <div style="margin-top: 7%;"> Another alternate version to SuperMix1. <br>Alt2 uses AOM3 in place of AOM2_hard and Anything_v4.5 in place of Anything_v3. This mix also adds in (.2) of both Counterfeit_v2.5 and a random version of Chillout Mix_Ni at the end of the (.2) model addition sequence. [Download](https://huggingface.co/Ocean3/SuperMix/resolve/main/3\)%20Alternate%20Versions/SuperMix1-Alt2.safetensors) | [CivitAI](https://civitai.com/models/89213?modelVersionId=115892) </div></details> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;"><b>SuperMix1-RT</b> &nbsp; <small><i>4badc436cb</i></small></summary> <div style="margin-top: 7%;"> Replacement Test <br>This version removes [DreamLike_Diffusion_v1.0](https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/tree/main) and [Protogen_x3.4](https://civitai.com/models/3666/protogen-x34-photorealism-official-release) models from the initial mix. They were replaced with [RevAnimated_v1.2.2](https://civitai.com/models/7371/rev-animated) and [DreamShaper_v6.3](https://civitai.com/models/4384?modelVersionId=94081) using the same weights respectively. Doing so removed any licensing restrictions for this version to my knowledge. <br><br>The overall styling can be very similar to the original model as well as slightly different in some aspects depending on your parameters. This version may still be a good mix in other models to retain the non-modified creativeml-openrail-m licensing. [Download](https://huggingface.co/Ocean3/SuperMix/resolve/main/1\)%20Versions/SuperMix1-RT/SuperMix1-RT.safetensors) | [CivitAI](https://civitai.com/models/89213?modelVersionId=119269) </div></details> --- # Model Comparisons As a whole, these comparisons are not fully indicative of each model and their differences. Please keep this in mind while viewing these small sample pools. Click to expand. <div align="center"><p style="background-color:#fffdf5; color:#363636;"><small>🚧</small></p></div> --- # License & Use This model is open access and available to all, with a [**modified CreativeML OpenRAIL-M**](https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/blob/main/LICENSE.md) license further specifying rights and usage. <small>1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content. <br>2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license. <br>3. You may re-distribute the weights. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the modified CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully). 
<br><br>Please read the full license(s) [Stable Diffusion](https://huggingface.co/spaces/CompVis/stable-diffusion-license) and [Dreamlike Diffusion 1.0](https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/blob/main/LICENSE.md).</small> --- <details><summary style="margin-top: -5%; margin-bottom: -5%; cursor: pointer;">Use Restrictions <small><i>(click to expand)</i></small></summary> <div style="margin-top: 7%;"></div> <small>**You agree not to use the Model or Derivatives of the Model:** <br>- In any way that violates any applicable national, federal, state, local or international law or regulation <br>- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way <br>- To generate or disseminate verifiably false information and/or content with the purpose of harming others <br>- To generate or disseminate personal identifiable information that can be used to harm an individual <br>- To defame, disparage or otherwise harass others <br>- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation <br>- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics <br>- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm <br>- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories <br>- To provide medical advice and medical results interpretation <br>- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). <br>- To generate NFTs </small></details> --- **Terms of use** <small><br>- You are solely responsible for any legal liability resulting from unethical use of this model(s) <br>- If you use any of these models for merging, please state what steps you took to do so and clearly indicate where modifications have been made.</small> <div align="center"><figcaption><i>Note: if you see any conflicts or corrections to be made, please let me know</i></figcaption></div> ![Footer](https://huggingface.co/Ocean3/SuperMix/resolve/main/img/img6.png)
bwilkie/dqn-SpaceInvadersNoFrameskip-v4
bwilkie
2023-07-18T21:24:10Z
0
0
stable-baselines3
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T21:23:39Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 257.00 +/- 38.81 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bwilkie -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bwilkie -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga bwilkie ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.001), ('learning_starts', 100000), ('n_timesteps', 100000.0), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
felipec23/opt-iml-1.3b-finetuned-800
felipec23
2023-07-18T21:23:38Z
0
0
peft
[ "peft", "region:us" ]
null
2023-07-18T21:23:33Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0.dev0
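For anyone wanting to reload the adapter with the same 8-bit setting listed above, a minimal sketch might look like the following; the base model id is an assumption (check `adapter_config.json` for the actual `base_model_name_or_path`), and 8-bit loading requires `bitsandbytes` and `accelerate`.

```python
# Sketch only - the base checkpoint below is an assumption, not taken from this card.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "facebook/opt-iml-1.3b"  # assumed base; verify against adapter_config.json
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Mirrors the load_in_8bit=True setting from the quantization config above
base = AutoModelForCausalLM.from_pretrained(base_id, load_in_8bit=True, device_map="auto")
model = PeftModel.from_pretrained(base, "felipec23/opt-iml-1.3b-finetuned-800")
model.eval()
```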
underactuated/opt-350m_ft_v3
underactuated
2023-07-18T21:17:34Z
144
0
transformers
[ "transformers", "pytorch", "opt", "text-generation", "generated_from_trainer", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T21:16:14Z
--- tags: - generated_from_trainer model-index: - name: opt-350m_ft_v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opt-350m_ft_v3 This model was trained from scratch on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
bobobert4/dqn-SpaceInvadersNoFrameskip-v4
bobobert4
2023-07-18T21:03:48Z
0
0
stable-baselines3
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T21:03:11Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 663.00 +/- 118.96 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bobobert4 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bobobert4 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga bobobert4 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 150000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 2000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
LarryAIDraw/belfastskins
LarryAIDraw
2023-07-18T21:02:38Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:51:37Z
--- license: creativeml-openrail-m --- https://civitai.com/models/111313/belfast-azur-lane-most-skins
LarryAIDraw/ShizukaMikazukiV1
LarryAIDraw
2023-07-18T21:02:17Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:51:16Z
--- license: creativeml-openrail-m --- https://civitai.com/models/111089/shizuka-mikazuki-zom-100-bucket-list-of-the-dead
LarryAIDraw/ShikiV1
LarryAIDraw
2023-07-18T21:01:44Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:50:34Z
--- license: creativeml-openrail-m --- https://civitai.com/models/110763/ryougi-shiki-5-outfits-riuki-lora
LarryAIDraw/Mini_Yaemori_V1
LarryAIDraw
2023-07-18T21:01:22Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:50:12Z
--- license: creativeml-openrail-m --- https://civitai.com/models/110626/mini-yaemori-or-rent-a-girlfriend-or-kanokari-or
LarryAIDraw/StarRail_Qingque_AP_v1
LarryAIDraw
2023-07-18T20:59:43Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:48:08Z
--- license: creativeml-openrail-m --- https://civitai.com/models/73792/qingquehonkai-star-rail
LarryAIDraw/Vita-_H_
LarryAIDraw
2023-07-18T20:59:15Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-07-18T20:47:16Z
--- license: creativeml-openrail-m --- https://civitai.com/models/108280/vita-h-honkai-3rd
robinhad/open_llama_3b_uk
robinhad
2023-07-18T20:57:15Z
9
0
peft
[ "peft", "text-generation", "uk", "dataset:robinhad/databricks-dolly-15k-uk", "license:apache-2.0", "region:us" ]
text-generation
2023-07-18T20:29:33Z
--- license: apache-2.0 datasets: - robinhad/databricks-dolly-15k-uk language: - uk library_name: peft pipeline_tag: text-generation --- This is a release of Open LLama, tuned for the Ukrainian language. Currently it contains adapter weights, possibly subject to change in the future.
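The card does not include a usage snippet yet; a minimal loading sketch could look like the following. The base checkpoint is an assumption (check `adapter_config.json` for the actual base model), and the prompt format used during tuning is not documented here.

```python
# Sketch only - the base model id and prompt format are assumptions.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "openlm-research/open_llama_3b"  # assumed base; verify against adapter_config.json
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the Ukrainian instruction-tuning adapter from this repository
model = PeftModel.from_pretrained(base, "robinhad/open_llama_3b_uk")

inputs = tokenizer("Розкажи коротко про Київ.", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```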
mrbesher/ppo-LunarLander-v2
mrbesher
2023-07-18T20:50:49Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T20:50:29Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 288.73 +/- 10.81 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
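The usage section above is still a TODO; a minimal loading and evaluation sketch might look like this. The checkpoint filename is an assumption based on the usual `{algo}-{env}.zip` naming, so verify it against the repository's file list.

```python
# Sketch only - the filename is assumed, and you may need `gym` instead of `gymnasium`
# depending on your stable-baselines3 version.
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(repo_id="mrbesher/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```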
underactuated/opt-350m_ft_v2
underactuated
2023-07-18T20:48:02Z
144
0
transformers
[ "transformers", "pytorch", "opt", "text-generation", "generated_from_trainer", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T20:46:43Z
--- tags: - generated_from_trainer model-index: - name: opt-350m_ft_v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opt-350m_ft_v2 This model was trained from scratch on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
nolanaatama/lzymx
nolanaatama
2023-07-18T20:44:41Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-05-21T22:12:14Z
--- license: creativeml-openrail-m ---
ailabturkiye/melihkalkan
ailabturkiye
2023-07-18T20:36:55Z
0
0
null
[ "music", "tr", "license:openrail", "region:us" ]
null
2023-07-18T16:54:35Z
--- license: openrail language: - tr tags: - music --- A model of Melih Kalkan, the talented 14-year-old actor who passed away in 2022. It was made by training 250 epochs on a 2-minute dataset. It has been taken private to prevent malicious use.
jakelcoop/ppo-CartPole-v1
jakelcoop
2023-07-18T20:20:29Z
0
0
null
[ "CartPole-v1", "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T20:18:43Z
--- tags: - CartPole-v1 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # PPO Agent Playing CartPole-v1 This is a trained model of a PPO agent playing CartPole-v1. # Hyperparameters ```python {'run_name': 'ppo_250k' 'gym_id': 'CartPole-v1' 'num_envs': 5 'num_steps': 128 'total_timesteps': 250000 'seed': 1 'learning_rate': 0.001 'anneal_lr': True 'torch_deterministic': True 'cuda': True 'capture_video': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'repo_id': 'jakelcoop/ppo-CartPole-v1' 'env_id': 'CartPole-v1' 'batch_size': 640 'minibatch_size': 160} ```
ikaro79/distilbert-base-uncased-finetuned-test
ikaro79
2023-07-18T20:13:02Z
70
0
transformers
[ "transformers", "tf", "distilbert", "fill-mask", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2023-07-18T20:00:44Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ikaro79/distilbert-base-uncased-finetuned-test results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ikaro79/distilbert-base-uncased-finetuned-test This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2768 - Validation Loss: 0.2163 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'transformers.optimization_tf', 'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -999, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}, 'registered_name': 'WarmUp'}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 0.2768 | 0.2163 | 0 | ### Framework versions - Transformers 4.30.2 - TensorFlow 2.13.0 - Datasets 2.13.1 - Tokenizers 0.13.3
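The card stops at the training summary; a quick way to try the checkpoint is the standard fill-mask pipeline (a sketch, assuming TensorFlow is installed since the repository ships TF weights):

```python
from transformers import pipeline

# framework="tf" because the checkpoint was saved with Keras/TensorFlow
unmasker = pipeline(
    "fill-mask",
    model="ikaro79/distilbert-base-uncased-finetuned-test",
    framework="tf",
)
print(unmasker("This movie was really [MASK]."))
```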
zpattdev/q-FrozenLake-v1-4x4-noSlippery
zpattdev
2023-07-18T20:08:11Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T20:08:08Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="zpattdev/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
jliu596/dqn-Atari-SpaceInvadersNoFrameskip-v4
jliu596
2023-07-18T20:05:02Z
0
0
stable-baselines3
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T20:01:32Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 256.00 +/- 169.38 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga jliu596 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga jliu596 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga jliu596 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 128), ('buffer_size', 1500), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.05), ('exploration_fraction', 1), ('frame_stack', 4), ('gradient_steps', 2), ('learning_rate', 0.0001), ('learning_starts', 1000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 100), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
bk6000/Reinforce-CartPole-v1
bk6000
2023-07-18T20:02:21Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T20:02:12Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
jordyvl/39-tiny_tobacco3482_hint_
jordyvl
2023-07-18T20:01:56Z
166
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-07-18T19:09:12Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 39-tiny_tobacco3482_hint_ results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 39-tiny_tobacco3482_hint_ This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 65.8239 - Accuracy: 0.84 - Brier Loss: 0.2807 - Nll: 1.1327 - F1 Micro: 0.8400 - F1 Macro: 0.8280 - Ece: 0.1437 - Aurc: 0.0472 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Brier Loss | Nll | F1 Micro | F1 Macro | Ece | Aurc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:|:------:|:--------:|:--------:|:------:|:------:| | No log | 1.0 | 25 | 69.1264 | 0.26 | 0.8707 | 4.9002 | 0.26 | 0.1920 | 0.3064 | 0.7815 | | No log | 2.0 | 50 | 68.3319 | 0.545 | 0.5960 | 2.8558 | 0.545 | 0.4562 | 0.2850 | 0.2564 | | No log | 3.0 | 75 | 67.8627 | 0.68 | 0.4406 | 1.6064 | 0.68 | 0.6157 | 0.2543 | 0.1333 | | No log | 4.0 | 100 | 67.5797 | 0.75 | 0.3829 | 1.5484 | 0.75 | 0.7343 | 0.2220 | 0.1152 | | No log | 5.0 | 125 | 67.2608 | 0.8 | 0.3072 | 1.7491 | 0.8000 | 0.7573 | 0.1809 | 0.0698 | | No log | 6.0 | 150 | 67.0950 | 0.78 | 0.3169 | 1.7708 | 0.78 | 0.7441 | 0.1576 | 0.0607 | | No log | 7.0 | 175 | 66.9178 | 0.755 | 0.3812 | 1.6929 | 0.755 | 0.6848 | 0.1899 | 0.1043 | | No log | 8.0 | 200 | 66.7335 | 0.75 | 0.3763 | 1.7649 | 0.75 | 0.7399 | 0.1870 | 0.0806 | | No log | 9.0 | 225 | 66.4371 | 0.805 | 0.3036 | 1.4686 | 0.805 | 0.7896 | 0.1378 | 0.0545 | | No log | 10.0 | 250 | 66.6823 | 0.75 | 0.3924 | 1.8808 | 0.75 | 0.6665 | 0.1964 | 0.0829 | | No log | 11.0 | 275 | 66.6079 | 0.775 | 0.3570 | 1.8872 | 0.775 | 0.7442 | 0.1811 | 0.0839 | | No log | 12.0 | 300 | 66.4364 | 0.765 | 0.3689 | 1.6981 | 0.765 | 0.7550 | 0.1909 | 0.0732 | | No log | 13.0 | 325 | 66.1317 | 0.785 | 0.3346 | 1.4062 | 0.785 | 0.7823 | 0.1753 | 0.0572 | | No log | 14.0 | 350 | 66.5182 | 0.73 | 0.4453 | 1.4431 | 0.7300 | 0.7208 | 0.2310 | 0.0985 | | No log | 15.0 | 375 | 66.5154 | 0.775 | 0.3769 | 1.4897 | 0.775 | 0.7398 | 0.1875 | 0.0821 | | No log | 16.0 | 400 | 66.4191 | 0.8 | 0.3315 | 1.5327 | 0.8000 | 0.7771 | 0.1651 | 0.0685 | | No log | 17.0 | 425 | 66.2374 | 0.765 | 0.3520 | 1.5388 | 0.765 | 0.7401 | 0.1767 | 0.0760 | | No log | 18.0 | 450 | 66.2010 | 0.805 | 0.3320 | 1.4280 | 0.805 | 0.7721 | 0.1756 | 0.0684 | | No log | 19.0 | 475 | 66.0335 | 0.85 | 0.2625 | 1.3549 | 0.85 | 0.8352 | 0.1431 | 0.0430 | | 65.4034 | 20.0 | 500 | 66.2213 | 0.815 | 0.3213 | 1.3912 | 0.815 | 0.7955 | 0.1645 | 0.0579 | | 65.4034 | 21.0 | 525 | 66.2647 | 0.77 | 0.3656 | 1.3241 | 0.7700 | 0.7743 | 0.1899 | 0.0755 | | 65.4034 | 22.0 | 550 | 66.1220 | 0.86 | 0.2684 | 1.2459 | 0.8600 | 0.8354 | 0.1327 | 0.0473 | | 65.4034 | 23.0 | 575 | 66.1615 | 0.85 | 0.2623 | 
1.3400 | 0.85 | 0.8231 | 0.1291 | 0.0448 | | 65.4034 | 24.0 | 600 | 66.2114 | 0.825 | 0.3130 | 1.4118 | 0.825 | 0.8122 | 0.1565 | 0.0498 | | 65.4034 | 25.0 | 625 | 66.1048 | 0.835 | 0.2704 | 1.3571 | 0.835 | 0.8196 | 0.1405 | 0.0450 | | 65.4034 | 26.0 | 650 | 65.9832 | 0.825 | 0.2990 | 1.1514 | 0.825 | 0.8253 | 0.1603 | 0.0423 | | 65.4034 | 27.0 | 675 | 66.2567 | 0.805 | 0.3307 | 1.3509 | 0.805 | 0.8022 | 0.1699 | 0.0634 | | 65.4034 | 28.0 | 700 | 66.0668 | 0.82 | 0.3172 | 1.1445 | 0.82 | 0.7973 | 0.1538 | 0.0419 | | 65.4034 | 29.0 | 725 | 66.2254 | 0.81 | 0.3252 | 1.3290 | 0.81 | 0.8011 | 0.1659 | 0.0523 | | 65.4034 | 30.0 | 750 | 65.9643 | 0.84 | 0.2697 | 1.2052 | 0.8400 | 0.8245 | 0.1319 | 0.0425 | | 65.4034 | 31.0 | 775 | 66.3419 | 0.81 | 0.3249 | 1.2772 | 0.81 | 0.7969 | 0.1700 | 0.0612 | | 65.4034 | 32.0 | 800 | 66.0324 | 0.825 | 0.3003 | 1.3138 | 0.825 | 0.8000 | 0.1584 | 0.0445 | | 65.4034 | 33.0 | 825 | 66.3326 | 0.82 | 0.3336 | 1.2983 | 0.82 | 0.7826 | 0.1754 | 0.0590 | | 65.4034 | 34.0 | 850 | 66.1374 | 0.825 | 0.3061 | 1.5645 | 0.825 | 0.8012 | 0.1500 | 0.0459 | | 65.4034 | 35.0 | 875 | 66.2310 | 0.815 | 0.3207 | 1.5607 | 0.815 | 0.7939 | 0.1712 | 0.0646 | | 65.4034 | 36.0 | 900 | 66.0388 | 0.84 | 0.2873 | 1.1966 | 0.8400 | 0.8327 | 0.1456 | 0.0585 | | 65.4034 | 37.0 | 925 | 66.0520 | 0.835 | 0.2958 | 1.2728 | 0.835 | 0.8180 | 0.1508 | 0.0477 | | 65.4034 | 38.0 | 950 | 65.9916 | 0.84 | 0.2783 | 1.1635 | 0.8400 | 0.8233 | 0.1398 | 0.0438 | | 65.4034 | 39.0 | 975 | 65.9391 | 0.845 | 0.2743 | 1.2660 | 0.845 | 0.8289 | 0.1396 | 0.0458 | | 64.0802 | 40.0 | 1000 | 65.9291 | 0.845 | 0.2762 | 1.3335 | 0.845 | 0.8259 | 0.1373 | 0.0430 | | 64.0802 | 41.0 | 1025 | 65.8559 | 0.85 | 0.2686 | 1.3432 | 0.85 | 0.8338 | 0.1345 | 0.0428 | | 64.0802 | 42.0 | 1050 | 65.8612 | 0.845 | 0.2772 | 1.2679 | 0.845 | 0.8255 | 0.1389 | 0.0431 | | 64.0802 | 43.0 | 1075 | 65.8953 | 0.84 | 0.2742 | 1.2614 | 0.8400 | 0.8227 | 0.1408 | 0.0435 | | 64.0802 | 44.0 | 1100 | 65.8569 | 0.835 | 0.2769 | 1.2730 | 0.835 | 0.8199 | 0.1426 | 0.0432 | | 64.0802 | 45.0 | 1125 | 65.8610 | 0.84 | 0.2769 | 1.2622 | 0.8400 | 0.8248 | 0.1485 | 0.0425 | | 64.0802 | 46.0 | 1150 | 65.8237 | 0.845 | 0.2729 | 1.1920 | 0.845 | 0.8334 | 0.1462 | 0.0432 | | 64.0802 | 47.0 | 1175 | 65.8416 | 0.845 | 0.2785 | 1.1826 | 0.845 | 0.8317 | 0.1376 | 0.0431 | | 64.0802 | 48.0 | 1200 | 65.8452 | 0.845 | 0.2817 | 1.1876 | 0.845 | 0.8317 | 0.1417 | 0.0441 | | 64.0802 | 49.0 | 1225 | 65.8394 | 0.845 | 0.2750 | 1.1993 | 0.845 | 0.8309 | 0.1315 | 0.0419 | | 64.0802 | 50.0 | 1250 | 65.8527 | 0.84 | 0.2796 | 1.1860 | 0.8400 | 0.8279 | 0.1410 | 0.0432 | | 64.0802 | 51.0 | 1275 | 65.8286 | 0.845 | 0.2749 | 1.1977 | 0.845 | 0.8333 | 0.1444 | 0.0428 | | 64.0802 | 52.0 | 1300 | 65.8296 | 0.83 | 0.2779 | 1.1926 | 0.83 | 0.8171 | 0.1382 | 0.0435 | | 64.0802 | 53.0 | 1325 | 65.8121 | 0.83 | 0.2779 | 1.1955 | 0.83 | 0.8155 | 0.1387 | 0.0436 | | 64.0802 | 54.0 | 1350 | 65.8361 | 0.825 | 0.2769 | 1.1909 | 0.825 | 0.8095 | 0.1435 | 0.0419 | | 64.0802 | 55.0 | 1375 | 65.8370 | 0.83 | 0.2816 | 1.1925 | 0.83 | 0.8171 | 0.1416 | 0.0435 | | 64.0802 | 56.0 | 1400 | 65.8301 | 0.825 | 0.2763 | 1.1908 | 0.825 | 0.8101 | 0.1393 | 0.0439 | | 64.0802 | 57.0 | 1425 | 65.8301 | 0.82 | 0.2791 | 1.1881 | 0.82 | 0.8040 | 0.1443 | 0.0440 | | 64.0802 | 58.0 | 1450 | 65.8324 | 0.83 | 0.2754 | 1.1938 | 0.83 | 0.8198 | 0.1387 | 0.0460 | | 64.0802 | 59.0 | 1475 | 65.8407 | 0.825 | 0.2818 | 1.1893 | 0.825 | 0.8138 | 0.1393 | 0.0439 | | 63.8765 | 60.0 | 1500 | 65.8236 | 0.84 | 0.2782 | 
1.1871 | 0.8400 | 0.8290 | 0.1512 | 0.0449 | | 63.8765 | 61.0 | 1525 | 65.8198 | 0.825 | 0.2846 | 1.1752 | 0.825 | 0.8138 | 0.1505 | 0.0438 | | 63.8765 | 62.0 | 1550 | 65.8243 | 0.83 | 0.2796 | 1.1753 | 0.83 | 0.8196 | 0.1480 | 0.0445 | | 63.8765 | 63.0 | 1575 | 65.8495 | 0.835 | 0.2781 | 1.1766 | 0.835 | 0.8257 | 0.1353 | 0.0451 | | 63.8765 | 64.0 | 1600 | 65.8204 | 0.835 | 0.2833 | 1.1752 | 0.835 | 0.8239 | 0.1400 | 0.0447 | | 63.8765 | 65.0 | 1625 | 65.8374 | 0.835 | 0.2800 | 1.1829 | 0.835 | 0.8239 | 0.1474 | 0.0441 | | 63.8765 | 66.0 | 1650 | 65.8433 | 0.83 | 0.2855 | 1.1678 | 0.83 | 0.8148 | 0.1498 | 0.0444 | | 63.8765 | 67.0 | 1675 | 65.8259 | 0.835 | 0.2820 | 1.1725 | 0.835 | 0.8257 | 0.1518 | 0.0457 | | 63.8765 | 68.0 | 1700 | 65.8443 | 0.83 | 0.2841 | 1.1652 | 0.83 | 0.8196 | 0.1491 | 0.0457 | | 63.8765 | 69.0 | 1725 | 65.8255 | 0.835 | 0.2849 | 1.1620 | 0.835 | 0.8247 | 0.1499 | 0.0460 | | 63.8765 | 70.0 | 1750 | 65.8421 | 0.83 | 0.2870 | 1.1681 | 0.83 | 0.8196 | 0.1418 | 0.0475 | | 63.8765 | 71.0 | 1775 | 65.8402 | 0.835 | 0.2839 | 1.1614 | 0.835 | 0.8230 | 0.1359 | 0.0466 | | 63.8765 | 72.0 | 1800 | 65.8224 | 0.84 | 0.2831 | 1.1555 | 0.8400 | 0.8280 | 0.1467 | 0.0459 | | 63.8765 | 73.0 | 1825 | 65.8233 | 0.84 | 0.2824 | 1.1578 | 0.8400 | 0.8280 | 0.1428 | 0.0465 | | 63.8765 | 74.0 | 1850 | 65.8299 | 0.84 | 0.2814 | 1.1574 | 0.8400 | 0.8280 | 0.1469 | 0.0465 | | 63.8765 | 75.0 | 1875 | 65.8309 | 0.835 | 0.2790 | 1.1575 | 0.835 | 0.8219 | 0.1407 | 0.0465 | | 63.8765 | 76.0 | 1900 | 65.8199 | 0.84 | 0.2789 | 1.1496 | 0.8400 | 0.8280 | 0.1437 | 0.0460 | | 63.8765 | 77.0 | 1925 | 65.8222 | 0.84 | 0.2828 | 1.1520 | 0.8400 | 0.8280 | 0.1539 | 0.0461 | | 63.8765 | 78.0 | 1950 | 65.8312 | 0.84 | 0.2801 | 1.1459 | 0.8400 | 0.8280 | 0.1354 | 0.0458 | | 63.8765 | 79.0 | 1975 | 65.8253 | 0.84 | 0.2836 | 1.1448 | 0.8400 | 0.8280 | 0.1542 | 0.0465 | | 63.7964 | 80.0 | 2000 | 65.8332 | 0.84 | 0.2839 | 1.1408 | 0.8400 | 0.8280 | 0.1486 | 0.0462 | | 63.7964 | 81.0 | 2025 | 65.8316 | 0.84 | 0.2818 | 1.1419 | 0.8400 | 0.8280 | 0.1430 | 0.0460 | | 63.7964 | 82.0 | 2050 | 65.8238 | 0.84 | 0.2824 | 1.1387 | 0.8400 | 0.8280 | 0.1411 | 0.0452 | | 63.7964 | 83.0 | 2075 | 65.8294 | 0.84 | 0.2786 | 1.1410 | 0.8400 | 0.8280 | 0.1539 | 0.0469 | | 63.7964 | 84.0 | 2100 | 65.8267 | 0.84 | 0.2818 | 1.1391 | 0.8400 | 0.8280 | 0.1463 | 0.0471 | | 63.7964 | 85.0 | 2125 | 65.8222 | 0.84 | 0.2814 | 1.1401 | 0.8400 | 0.8280 | 0.1463 | 0.0470 | | 63.7964 | 86.0 | 2150 | 65.8264 | 0.84 | 0.2776 | 1.1380 | 0.8400 | 0.8280 | 0.1359 | 0.0460 | | 63.7964 | 87.0 | 2175 | 65.8228 | 0.84 | 0.2781 | 1.1366 | 0.8400 | 0.8280 | 0.1468 | 0.0460 | | 63.7964 | 88.0 | 2200 | 65.8229 | 0.84 | 0.2832 | 1.1367 | 0.8400 | 0.8280 | 0.1455 | 0.0476 | | 63.7964 | 89.0 | 2225 | 65.8271 | 0.84 | 0.2792 | 1.1376 | 0.8400 | 0.8280 | 0.1598 | 0.0467 | | 63.7964 | 90.0 | 2250 | 65.8234 | 0.84 | 0.2830 | 1.1352 | 0.8400 | 0.8280 | 0.1427 | 0.0474 | | 63.7964 | 91.0 | 2275 | 65.8309 | 0.84 | 0.2804 | 1.1352 | 0.8400 | 0.8280 | 0.1426 | 0.0467 | | 63.7964 | 92.0 | 2300 | 65.8305 | 0.84 | 0.2796 | 1.1345 | 0.8400 | 0.8280 | 0.1438 | 0.0466 | | 63.7964 | 93.0 | 2325 | 65.8155 | 0.84 | 0.2808 | 1.1347 | 0.8400 | 0.8280 | 0.1499 | 0.0471 | | 63.7964 | 94.0 | 2350 | 65.8218 | 0.84 | 0.2803 | 1.1336 | 0.8400 | 0.8280 | 0.1487 | 0.0473 | | 63.7964 | 95.0 | 2375 | 65.8152 | 0.84 | 0.2812 | 1.1334 | 0.8400 | 0.8280 | 0.1441 | 0.0466 | | 63.7964 | 96.0 | 2400 | 65.8230 | 0.84 | 0.2801 | 1.1344 | 0.8400 | 0.8280 | 0.1488 | 0.0472 | | 63.7964 | 97.0 | 2425 | 
65.8206 | 0.84 | 0.2808 | 1.1328 | 0.8400 | 0.8280 | 0.1490 | 0.0472 | | 63.7964 | 98.0 | 2450 | 65.8221 | 0.84 | 0.2807 | 1.1332 | 0.8400 | 0.8280 | 0.1438 | 0.0474 | | 63.7964 | 99.0 | 2475 | 65.8207 | 0.84 | 0.2809 | 1.1326 | 0.8400 | 0.8280 | 0.1446 | 0.0472 | | 63.7613 | 100.0 | 2500 | 65.8239 | 0.84 | 0.2807 | 1.1327 | 0.8400 | 0.8280 | 0.1437 | 0.0472 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1.post200 - Datasets 2.9.0 - Tokenizers 0.13.2
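A minimal inference sketch for this fine-tuned ViT classifier (the hub repo id is not shown in this card, so the model path below is a placeholder; it also assumes the image processor config was pushed alongside the weights):

```python
from transformers import pipeline

# Placeholder path -- substitute the actual hub id or a local checkpoint directory.
classifier = pipeline(
    "image-classification",
    model="path/to/39-tiny_tobacco3482_hint_",
)

# Tobacco3482 is a document-image dataset, so inputs are scanned page images.
print(classifier("document_page.png"))
```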
addiekline/luolabdemo
addiekline
2023-07-18T20:01:00Z
0
0
peft
[ "peft", "region:us" ]
null
2023-07-18T19:37:40Z
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.5.0.dev0
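A minimal loading sketch for this adapter (assumptions: the base model id is read from the adapter config, and a causal-LM head is assumed since the task type is not stated in the card):

```python
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_id = "addiekline/luolabdemo"

# The adapter config records which base model the adapter was trained against.
config = PeftConfig.from_pretrained(adapter_id)

# Causal LM is an assumption -- swap the Auto class if the task differs.
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

model = PeftModel.from_pretrained(base_model, adapter_id)
```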
Wyzard1004/Reinforce-CartPoleV1
Wyzard1004
2023-07-18T19:59:47Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T03:16:29Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPoleV1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
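The card ships no usage code; as a rough sketch of the kind of categorical policy a Unit 4 REINFORCE agent uses (the layer sizes are assumptions, not values read from this checkpoint):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical

class Policy(nn.Module):
    """Small categorical policy for CartPole-v1 (4 observations, 2 actions)."""

    def __init__(self, state_size=4, action_size=2, hidden_size=16):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, action_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)

    def act(self, state):
        # Sample an action and keep its log-probability for the REINFORCE update.
        state = torch.from_numpy(state).float().unsqueeze(0)
        probs = self.forward(state)
        dist = Categorical(probs)
        action = dist.sample()
        return action.item(), dist.log_prob(action)
```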
drbeane/a2c-AntBulletEnv-v0
drbeane
2023-07-18T19:57:41Z
0
0
stable-baselines3
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T19:55:49Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1617.69 +/- 55.55 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
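A minimal loading sketch for the TODO above, assuming the checkpoint follows the usual `huggingface_sb3` naming (the filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Filename is assumed from the standard naming convention.
checkpoint = load_from_hub(
    repo_id="drbeane/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)
```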
hongrui/chest_mimic_pe_v_1_3
hongrui
2023-07-18T19:55:00Z
1
0
diffusers
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "lora", "base_model:runwayml/stable-diffusion-v1-5", "base_model:adapter:runwayml/stable-diffusion-v1-5", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-07-18T10:01:43Z
---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---

# LoRA text2image fine-tuning - hongrui/chest_mimic_pe_v_1_3

These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were fine-tuned on the hongrui/mimic_chest_xray_v_1 dataset. You can find some example images below.

![img_0](./image_0.png)
![img_1](./image_1.png)
![img_2](./image_2.png)
![img_3](./image_3.png)
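A minimal inference sketch (assumptions: a CUDA GPU and a `diffusers` release recent enough to provide `load_lora_weights`; the prompt is illustrative and not taken from the training data):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Apply the LoRA attention weights from this repository on top of the base model.
pipe.load_lora_weights("hongrui/chest_mimic_pe_v_1_3")

# Illustrative prompt only.
image = pipe("a frontal chest x-ray", num_inference_steps=30).images[0]
image.save("sample.png")
```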
eerichmond33/sourceformer-epoch10
eerichmond33
2023-07-18T19:51:57Z
93
0
transformers
[ "transformers", "pytorch", "gpt_neo", "text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T16:27:34Z
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: v9 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # v9 This model is a fine-tuned version of [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.3262 - Accuracy: 0.3995 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 8 - total_eval_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 70 - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7485 | 1.0 | 72 | 2.7852 | 0.4448 | | 2.6279 | 2.0 | 144 | 2.7832 | 0.4450 | | 2.5097 | 3.0 | 216 | 2.7988 | 0.4425 | | 2.3899 | 4.0 | 288 | 2.8203 | 0.4403 | | 2.2636 | 5.0 | 360 | 2.8594 | 0.4366 | | 2.1351 | 6.0 | 432 | 2.9141 | 0.4307 | | 1.99 | 7.0 | 504 | 2.9844 | 0.4244 | | 1.8299 | 8.0 | 576 | 3.0723 | 0.4173 | | 1.6524 | 9.0 | 648 | 3.1855 | 0.4087 | | 1.4676 | 10.0 | 720 | 3.3262 | 0.3995 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1 - Datasets 2.12.0 - Tokenizers 0.13.3
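As a rough reconstruction of the hyperparameters listed above (per-device values; the two-GPU data-parallel setup was handled by the launcher and is not shown):

```python
from transformers import TrainingArguments

# The output directory name is an assumption taken from the model-index name.
training_args = TrainingArguments(
    output_dir="v9",
    learning_rate=1e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=70,
    num_train_epochs=10.0,
)
```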
jamesdborin/ct2-int8-redpajama-7b-chat
jamesdborin
2023-07-18T19:50:27Z
4
0
transformers
[ "transformers", "gpt_neox", "text-generation", "en", "dataset:togethercomputer/RedPajama-Data-1T", "dataset:OpenAssistant/oasst1", "dataset:databricks/databricks-dolly-15k", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T19:42:45Z
--- license: apache-2.0 language: - en datasets: - togethercomputer/RedPajama-Data-1T - OpenAssistant/oasst1 - databricks/databricks-dolly-15k widget: - text: "<human>: Write an email to my friends inviting them to come to my home on Friday for a dinner party, bring their own food to share.\n<bot>:" example_title: "Email Writing" - text: "<human>: Create a list of things to do in San Francisco\n<bot>:" example_title: "Brainstorming" inference: parameters: temperature: 0.7 top_p: 0.7 top_k: 50 max_new_tokens: 128 --- # RedPajama-INCITE-7B-Chat RedPajama-INCITE-7B-Chat was developed by Together and leaders from the open-source AI community including Ontocord.ai, ETH DS3Lab, AAI CERC, Université de Montréal, MILA - Québec AI Institute, Stanford Center for Research on Foundation Models (CRFM), Stanford Hazy Research research group and LAION. It is fine-tuned on OASST1 and Dolly2 to enhance chatting ability. - Base Model: [RedPajama-INCITE-7B-Base](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Base) - Instruction-tuned Version: [RedPajama-INCITE-7B-Instruct](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Instruct) - Chat Version: [RedPajama-INCITE-7B-Chat](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat) ## Model Details - **Developed by**: Together Computer. - **Model type**: Language Model - **Language(s)**: English - **License**: Apache 2.0 - **Model Description**: A 6.9B parameter pretrained language model. # Quick Start Please note that the model requires `transformers` version >= 4.25.1. To prompt the chat model, use the following format: ``` <human>: [Instruction] <bot>: ``` ## GPU Inference This requires a GPU with 16GB memory. ```python import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' # check transformers version assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' # init tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat") model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat", torch_dtype=torch.float16) model = model.to('cuda:0') # infer prompt = "<human>: Who is Alan Turing?\n<bot>:" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate( **inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True ) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str) """ Alan Mathison Turing (23 June 1912 7 June 1954) was an English computer scientist, mathematician, logician, cryptanalyst, philosopher, mathematician, and theoretical biologist. """ ``` ## GPU Inference in Int8 This requires a GPU with 12GB memory. To run inference with int8, please ensure you have installed accelerate and bitandbytes. You can install them with the following command: ```bash pip install accelerate pip install bitsandbytes ``` Then you can run inference with int8 as follows: ```python import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' # check transformers version assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' 
# init tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat") model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat", device_map='auto', torch_dtype=torch.float16, load_in_8bit=True) # infer prompt = "<human>: Who is Alan Turing?\n<bot>:" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate( **inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True ) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str) """ Alan Mathison Turing (23 June 1912 – 7 June 1954) was an English computer scientist, mathematician, logician, cryptanalyst, philosopher, and theoretical biologist. """ ``` ## CPU Inference ```python import torch import transformers from transformers import AutoTokenizer, AutoModelForCausalLM MIN_TRANSFORMERS_VERSION = '4.25.1' # check transformers version assert transformers.__version__ >= MIN_TRANSFORMERS_VERSION, f'Please upgrade transformers to version {MIN_TRANSFORMERS_VERSION} or higher.' # init tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat") model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-7B-Chat", torch_dtype=torch.bfloat16) # infer prompt = "<human>: Who is Alan Turing?\n<bot>:" inputs = tokenizer(prompt, return_tensors='pt').to(model.device) input_length = inputs.input_ids.shape[1] outputs = model.generate( **inputs, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.7, top_k=50, return_dict_in_generate=True ) token = outputs.sequences[0, input_length:] output_str = tokenizer.decode(token) print(output_str) """ Alan Mathison Turing, OBE, FRS, (23 June 1912 – 7 June 1954) was an English computer scientist, mathematician, logician, cryptanalyst, philosopher, and theoretical biologist. """ ``` Please note that since `LayerNormKernelImpl` is not implemented in fp16 for CPU, we use `bfloat16` for CPU inference. # Uses ## Direct Use Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use It is the responsibility of the end user to ensure that the model is used in a responsible and ethical manner. #### Out-of-Scope Use `RedPajama-INCITE-7B-Chat` is a language model and may not perform well for other use cases outside of its intended scope. For example, it may not be suitable for use in safety-critical applications or for making decisions that have a significant impact on individuals or society. It is important to consider the limitations of the model and to only use it for its intended purpose. #### Misuse and Malicious Use `RedPajama-INCITE-7B-Chat` is designed for language modeling. Misuse of the model, such as using it to engage in illegal or unethical activities, is strictly prohibited and goes against the principles of the project. Using the model to generate content that is cruel to individuals is a misuse of this model. 
This includes, but is not limited to: - Generating fake news, misinformation, or propaganda - Promoting hate speech, discrimination, or violence against individuals or groups - Impersonating individuals or organizations without their consent - Engaging in cyberbullying or harassment - Defamatory content - Spamming or scamming - Sharing confidential or sensitive information without proper authorization - Violating the terms of use of the model or the data used to train it - Creating automated bots for malicious purposes such as spreading malware, phishing scams, or spamming ## Limitations `RedPajama-INCITE-7B-Chat`, like other language models, has limitations that should be taken into consideration. For example, the model may not always provide accurate or relevant answers, particularly for questions that are complex, ambiguous, or outside of its training data. We therefore welcome contributions from individuals and organizations, and encourage collaboration towards creating a more robust and inclusive chatbot. ## Training **Training Data** Please refer to [togethercomputer/RedPajama-Data-1T](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T) **Training Procedure** - **Hardware:** 8 A100 - **Optimizer:** Adam - **Gradient Accumulations**: 1 - **Num of Tokens:** 79M tokens - **Learning rate:** 1e-5 ## Community Join us on [Together Discord](https://discord.gg/6ZVDU8tTD4)
crumbly/gpt2-linear-xl
crumbly
2023-07-18T19:48:55Z
153
1
transformers
[ "transformers", "pytorch", "gpt2l", "text-generation", "gpt2", "exbert", "custom_code", "en", "license:mit", "autotrain_compatible", "region:us" ]
text-generation
2023-07-17T14:19:27Z
--- license: mit language: - en tags: - gpt2 - exbert inference: false --- # GPT2-Linear-XL A conversion of [gpt2-xl](https://hf.co/gpt2-xl) that uses linear layers instead of convolutional layers. This is not an official OpenAI project. > Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and first released at [this page](https://openai.com/blog/better-language-models/). > GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. > More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. > This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. - Main model: [crumbly/gpt2-linear-xl](https://hf.co/crumbly/gpt2-linear-xl) - Sharded model: [crumbly/gpt2-linear-xl-sharded](https://hf.co/crumbly/gpt2-linear-xl-sharded) - Sharded + Brain-float 16bit model: [crumbly/gpt2-linear-xl-sharded-bf16](https://hf.co/crumbly/gpt2-linear-xl-sharded-bf16) Config: ``` { "n_embd": 1600, "n_head": 25, "n_layer": 48, "n_positions": 1024, } ``` ### Usage Inference on GPU with 4-bit quantization: ``` %pip install -qq transformers accelerate bitsandbytes ``` ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers import BitsAndBytesConfig import torch model_id = "crumbly/gpt2-linear-xl-sharded-bf16" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, trust_remote_code=True, device_map={"":0}, quantization_config=bnb_config ) ``` ```python inputs = tokenizer("Once upon a time,", return_tensors='pt') inputs = { k:v.cuda() for k,v in inputs.items() } outputs = model.generate( **inputs, max_new_tokens=32, temperature=0.7, do_sample=True ) tokenizer.decode(outputs[0]) ``` TODO - ~~test to see if model works with .from_pretrained~~ <br> - ~~test fp32, fp16, 8 and 4 bit~~ - ~~shard model to max 1gb for use in even lower vram settings~~ <br> - safetensors <br> - ~~upload bf16 version of model~~ <br> - upload 8bit model and 4bit model <br> - ~~convert other base gpt2 models~~ - open orca QLoRA on XL - ReLoRA continued pretraining on RefinedWeb or RedPajama to reach 1T tokens
Devops-hestabit/otherhalf-2.7b-onnx
Devops-hestabit
2023-07-18T19:48:46Z
4
0
transformers
[ "transformers", "onnx", "gpt_neo", "text-generation", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T18:40:26Z
--- license: creativeml-openrail-m ---
RushTurtle/crnn_vgg16_bn_20230718-140705
RushTurtle
2023-07-18T19:47:17Z
48
0
transformers
[ "transformers", "pytorch", "en", "endpoints_compatible", "region:us" ]
null
2023-07-18T19:47:07Z
--- language: en --- <p align="center"> <img src="https://doctr-static.mindee.com/models?id=v0.3.1/Logo_doctr.gif&src=0" width="60%"> </p> **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch** ## Task: recognition https://github.com/mindee/doctr ### Example usage: ```python >>> from doctr.io import DocumentFile >>> from doctr.models import ocr_predictor, from_hub >>> img = DocumentFile.from_images(['<image_path>']) >>> # Load your model from the hub >>> model = from_hub('mindee/my-model') >>> # Pass it to the predictor >>> # If your model is a recognition model: >>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large', >>> reco_arch=model, >>> pretrained=True) >>> # If your model is a detection model: >>> predictor = ocr_predictor(det_arch=model, >>> reco_arch='crnn_mobilenet_v3_small', >>> pretrained=True) >>> # Get your predictions >>> res = predictor(img) ``` ### Run Configuration { "arch": "crnn_vgg16_bn", "train_path": "/tmp/dataset/train3_2800/", "val_path": "/tmp/dataset/val3_2800/", "train_samples": 1000, "val_samples": 20, "font": "FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", "min_chars": 1, "max_chars": 12, "name": null, "epochs": 1200, "batch_size": 64, "device": 0, "input_size": 32, "lr": 0.001, "weight_decay": 0, "workers": 6, "resume": null, "vocab": "french", "test_only": false, "show_samples": false, "wb": false, "push_to_hub": true, "pretrained": false, "sched": "cosine", "amp": false, "find_lr": false }
anonymous4chan/llama-2-13b
anonymous4chan
2023-07-18T19:40:00Z
12
0
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "facebook", "meta", "llama-2", "en", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-07-18T19:23:00Z
--- extra_gated_heading: Access Llama 2 on Hugging Face extra_gated_description: >- This is a form to enable access to Llama 2 on Hugging Face after you have been granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our license terms and acceptable use policy before submitting this form. Requests will be processed in 1-2 days. extra_gated_button_content: Submit extra_gated_fields: I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox language: - en pipeline_tag: text-generation inference: false tags: - facebook - meta - pytorch - llama - llama-2 --- ## Cuck. # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 13B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. 
Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. 
*World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)| |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
eluzhnica/mpt-7b-8k-instruct-peft-compatible
eluzhnica
2023-07-18T19:38:34Z
7
1
transformers
[ "transformers", "pytorch", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "custom_code", "arxiv:2205.14135", "arxiv:2108.12409", "arxiv:2010.04245", "license:cc-by-sa-3.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-07-18T15:14:58Z
--- license: cc-by-sa-3.0 datasets: - competition_math - conceptofmind/cot_submix_original/cot_gsm8k - knkarthick/dialogsum - mosaicml/dolly_hhrlhf - duorc - tau/scrolls/qasper - emozilla/quality - scrolls/summ_screen_fd - spider tags: - Composer - MosaicML - llm-foundry inference: false --- # MPT-7B-Instruct-8k MPT-7B-Instruct-8K but with gradient checkpointing making it easy to train with LoRA/QLoRA. Not tested yet Original card below: MPT-7B-Instruct-8k is a model for long-form instruction following, especially question-answering on and summarization of longer documents. It is built by finetuning [MPT-7B-8k](https://huggingface.co/mosaicml/mpt-7b-8k) on [Dolly HHRLHF](https://huggingface.co/datasets/mosaicml/dolly_hhrlhf) derived from the [Databricks Dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) and the [Anthropic Helpful and Harmless (HH-RLHF)](https://huggingface.co/datasets/Anthropic/hh-rlhf) datasets. It is also trained on [Competition Math](https://huggingface.co/datasets/competition_math), [Duorc](https://huggingface.co/datasets/duorc), [CoT GSM8k](https://huggingface.co/datasets/conceptofmind/cot_submix_original), [Qasper](https://huggingface.co/datasets/allenai/qasper), [Quality](https://huggingface.co/datasets/emozilla/quality), [Summ Screen FD](https://huggingface.co/datasets/tau/scrolls) and [Spider](https://huggingface.co/datasets/spider). This is the same dataset that [MPT-30B-Instruct](https://huggingface.co/mosaicml/mpt-30b-instruct) was trained on. * License: _CC-By-SA-3.0_ This model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture. ## Model Date July 18, 2023 ## Model License _CC-By-SA-3.0_ ## Documentation * [Blog post: MPT-7B-8k](https://www.mosaicml.com/blog/long-context-mpt-7b-8k) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-instruct-8k', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-instruct-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' # change this to use triton-based FlashAttention config.init_device = 'cuda:0' # For fast initialization directly on GPU! 
model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` The model was trained initially with a sequence length of 2048 with an additional pretraining stage for sequence length adapation up to 8192. However, ALiBi enables users to increase the maximum sequence length even further during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-7b-instruct-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 16384 # (input + output) tokens can now be up to 16384 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the MPT-7B-chat tokenizer which is based on the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer and includes additional ChatML tokens. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('mosaicml/mpt-7b-8k') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). ```python from transformers import pipeline with torch.autocast('cuda', dtype=torch.bfloat16): inputs = tokenizer('Here is a recipe for vegan banana bread:\n', return_tensors="pt").to('cuda') outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # or using the HF pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## Data Mix The model was trained on the following data mix: | Data Source | Number of Tokens in Source | Proportion | |-------------|----------------------------|------------| | competition_math | 1.6 M | 3.66% | | cot_gsm8k | 3.36 M | 7.67% | | dialogsum | 0.1 M | 0.23% | | dolly_hhrlhf | 5.89 M | 13.43% | | duorc | 7.8 M | 17.80% | | qasper | 8.72 M | 19.90% | | quality | 11.29 M | 25.78% | | scrolls/summ_screen_fd | 4.97 M | 11.33% | | spider | 0.089 M | 0.20% | ### Training Configuration This model was trained on 8 80GB A100s for about 6.3 hours using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the AdamW optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-Instruct-8k can produce factually incorrect output, and should not be relied on to produce factually accurate information. 
MPT-7B-Instruct-8k was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## Acknowledgements This model was finetuned by the MosaicML NLP team. ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://www.mosaicml.com/get-started?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b-8k). ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-30B: Raising the bar for open-source foundation models}, year = {2023}, url = {www.mosaicml.com/blog/mpt-30b}, note = {Accessed: 2023-06-22}, urldate = {2023-06-22} } ```
Arikkod/PPO-LunarLander-v2
Arikkod
2023-07-18T19:38:26Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-17T20:10:51Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 281.51 +/- 18.52 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
4bit/Llama-2-7b-Chat-GPTQ
4bit
2023-07-18T19:28:17Z
20
10
transformers
[ "transformers", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-2", "en", "license:other", "autotrain_compatible", "region:us" ]
text-generation
2023-07-18T19:26:31Z
--- extra_gated_button_content: Submit extra_gated_description: This is a form to enable access to Llama 2 on Hugging Face after you have been granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our license terms and acceptable use policy before submitting this form. Requests will be processed in 1-2 days. extra_gated_fields: ? I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website : checkbox extra_gated_heading: Access Llama 2 on Hugging Face inference: false language: - en license: other model_type: llama pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 --- <!-- header start --> <div style="width: 100%;"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p><a href="https://discord.gg/theblokeai">Chat & support: my new Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <!-- header end --> # Meta's Llama 2 7b Chat GPTQ These files are GPTQ model files for [Meta's Llama 2 7b Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. ## Repositories available * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGML) * [Unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) ## Prompt template: Llama-2-Chat ``` System: You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. User: {prompt} Assistant: ``` ## Provided files Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. | Branch | Bits | Group Size | Act Order (desc_act) | File Size | ExLlama Compatible? | Made With | Description | | ------ | ---- | ---------- | -------------------- | --------- | ------------------- | --------- | ----------- | | main | 4 | 128 | False | 3.90 GB | True | AutoGPTQ | Most compatible option. Good inference speed in AutoGPTQ and GPTQ-for-LLaMa. Lower inference quality than other options. | | gptq-4bit-32g-actorder_True | 4 | 32 | True | 4.28 GB | True | AutoGPTQ | 4-bit, with Act Order and group size. 
32g gives highest possible inference quality, with maximum VRAM usage. Poor AutoGPTQ CUDA speed. |
| gptq-4bit-64g-actorder_True | 4 | 64 | True | 4.02 GB | True | AutoGPTQ | 4-bit, with Act Order and group size. 64g uses less VRAM than 32g, but with slightly lower accuracy. Poor AutoGPTQ CUDA speed. |
| gptq-4bit-128g-actorder_True | 4 | 128 | True | 3.90 GB | True | AutoGPTQ | 4-bit, with Act Order and group size. 128g uses even less VRAM, but with slightly lower accuracy. Poor AutoGPTQ CUDA speed. |

## How to download from branches

- In text-generation-webui, you can add `:branch` to the end of the download name, e.g. `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True`
- With Git, you can clone a branch with:
```
git clone --branch gptq-4bit-32g-actorder_True https://huggingface.co/TheBloke/Llama-2-7b-Chat-GPTQ
```
- In Python Transformers code, the branch is the `revision` parameter; see below.

## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui).

Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).

It is strongly recommended to use the text-generation-webui one-click-installers unless you know how to make a manual install.

1. Click the **Model tab**.
2. Under **Download custom model or LoRA**, enter `TheBloke/Llama-2-7b-Chat-GPTQ`.
  - To download from a specific branch, enter for example `TheBloke/Llama-2-7b-Chat-GPTQ:gptq-4bit-32g-actorder_True`
  - see Provided Files above for the list of branches for each option.
3. Click **Download**.
4. The model will start downloading. Once it's finished it will say "Done"
5. In the top left, click the refresh icon next to **Model**.
6. In the **Model** dropdown, choose the model you just downloaded: `Llama-2-7b-Chat-GPTQ`
7. The model will automatically load, and is now ready for use!
8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.
  * Note that you do not need to set GPTQ parameters any more. These are set automatically from the file `quantize_config.json`.
9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!

## How to use this GPTQ model from Python code

First make sure you have [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) installed:

`GITHUB_ACTIONS=true pip install auto-gptq`

Then try the following example code:

```python
from transformers import AutoTokenizer, pipeline, logging
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

model_name_or_path = "TheBloke/Llama-2-7b-Chat-GPTQ"
model_basename = "gptq_model-4bit-128g"

use_triton = False

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
        model_basename=model_basename,
        use_safetensors=True,
        trust_remote_code=True,
        device="cuda:0",
        use_triton=use_triton,
        quantize_config=None)

"""
To download from a specific branch, use the revision parameter, as in this example:

model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
        revision="gptq-4bit-32g-actorder_True",
        model_basename=model_basename,
        use_safetensors=True,
        trust_remote_code=True,
        device="cuda:0",
        quantize_config=None)
"""

prompt = "Tell me about AI"
prompt_template=f'''System: You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.
Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. User: {prompt} Assistant: ''' print("\n\n*** Generate:") input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512) print(tokenizer.decode(output[0])) # Inference can also be done using transformers' pipeline # Prevent printing spurious transformers error when using pipeline with AutoGPTQ logging.set_verbosity(logging.CRITICAL) print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.7, top_p=0.95, repetition_penalty=1.15 ) print(pipe(prompt_template)[0]['generated_text']) ``` ## Compatibility The files provided will work with AutoGPTQ (CUDA and Triton modes), GPTQ-for-LLaMa (only CUDA has been tested), and Occ4m's GPTQ-for-LLaMa fork. ExLlama works with Llama models in 4-bit. Please see the Provided Files table above for per-file compatibility. <!-- footer start --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute. Thanks to the [chirper.ai](https://chirper.ai) team! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Luke from CarbonQuill, Aemon Algiz. **Patreon special mentions**: Space Cruiser, Nikolai Manek, Sam, Chris McCloskey, Rishabh Srivastava, Kalila, Spiking Neurons AB, Khalefa Al-Ahmad, WelcomeToTheClub, Chadd, Lone Striker, Viktor Bowallius, Edmond Seymore, Ai Maven, Chris Smitley, Dave, Alexandros Triantafyllidis, Luke @flexchar, Elle, ya boyyy, Talal Aujan, Alex , Jonathan Leane, Deep Realms, Randy H, subjectnull, Preetika Verma, Joseph William Delisle, Michael Levine, chris gileta, K, Oscar Rangel, LangChain4j, Trenton Dambrowitz, Eugene Pentland, Johann-Peter Hartmann, Femi Adebogun, Illia Dulskyi, senxiiz, Daniel P. Andersen, Sean Connelly, Artur Olbinski, RoA, Mano Prime, Derek Yates, Raven Klaugh, David Flickinger, Willem Michiel, Pieter, Willian Hasse, vamX, Luke Pendergrass, webtim, Ghost , Rainer Wilmers, Nathan LeClaire, Will Dee, Cory Kujawski, John Detwiler, Fred von Graf, biorpg, Iucharbius , Imad Khwaja, Pierre Kircher, terasurfer , Asp the Wyvern, John Villwock, theTransient, zynix , Gabriel Tamborski, Fen Risland, Gabriel Puliatti, Matthew Berman, Pyrater, SuperWojo, Stephen Murray, Karl Bernard, Ajan Kanaga, Greatston Gnanesh, Junyu Yang. Thank you to all my generous patrons and donaters! 
<!-- footer end --> # Original model card: Meta's Llama 2 7b Chat # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). 
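As a rough illustration (the layout is reconstructed from the Llama reference code rather than quoted from this card), a single-turn prompt using the tags described above looks like:

```
<s>[INST] <<SYS>>
{{ system_prompt }}
<</SYS>>

{{ user_message }} [/INST]
```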
**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. 
|||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)| |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
arc-r/fasttext-language-identification
arc-r
2023-07-18T19:22:37Z
3
1
fasttext
[ "fasttext", "license:cc-by-sa-3.0", "region:us" ]
null
2023-07-18T19:12:39Z
--- license: cc-by-sa-3.0 library_name: fasttext --- https://fasttext.cc/docs/en/language-identification.html
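A hedged usage sketch for a fastText language-identification model hosted in a repository like this one (the `model.bin` filename is an assumption; check the repository's file list for the actual name):

```python
# Sketch: download the fastText language-ID model from the Hub and query it.
# The filename "model.bin" is an assumption, not a documented fact.
import fasttext
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(
    repo_id="arc-r/fasttext-language-identification",
    filename="model.bin",  # assumption: replace with the real filename in this repo
)
model = fasttext.load_model(model_path)

# Returns the top-k language labels (e.g. "__label__fr") with confidence scores.
labels, scores = model.predict("Bonjour, comment allez-vous ?", k=3)
print(labels, scores)
```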
jamesdborin/ct2-int8-mpt-7b-8k-chat
jamesdborin
2023-07-18T19:19:38Z
4
0
transformers
[ "transformers", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "custom_code", "arxiv:2205.14135", "arxiv:2108.12409", "arxiv:2010.04245", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "region:us" ]
text-generation
2023-07-18T19:12:22Z
--- license: cc-by-nc-sa-4.0 datasets: - camel-ai/code - ehartford/wizard_vicuna_70k_unfiltered - anon8231489123/ShareGPT_Vicuna_unfiltered - teknium1/GPTeacher/roleplay-instruct-v2-final - teknium1/GPTeacher/codegen-isntruct - timdettmers/openassistant-guanaco - camel-ai/math - project-baize/baize-chatbot/medical_chat_data - project-baize/baize-chatbot/quora_chat_data - project-baize/baize-chatbot/stackoverflow_chat_data - camel-ai/biology - camel-ai/chemistry - camel-ai/ai_society - jondurbin/airoboros-gpt4-1.2 - LongConversations - camel-ai/physics tags: - Composer - MosaicML - llm-foundry inference: false --- # MPT-7B-Chat-8k MPT-7B-Chat-8k is a chatbot-like model for dialogue generation. It was built by finetuning [MPT-7B-8k](https://huggingface.co/mosaicml/mpt-7b-8k) on the [ShareGPT-Vicuna](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered), [Camel-AI](https://huggingface.co/camel-ai), [GPTeacher](https://github.com/teknium1/GPTeacher), [Guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Baize](https://github.com/project-baize/baize-chatbot) and some generated datasets. This is the same dataset that [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat) was trained on. * License: _CC-By-NC-SA-4.0_ (non-commercial use only) This model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture. ## Model Date July 18, 2023 ## Model License _CC-By-NC-SA-4.0_ (non-commercial use only) ## Documentation * [Blog post: MPT-7B-8k](https://www.mosaicml.com/blog/long-context-mpt-7b-8k) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-chat-8k', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-chat-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' # change this to use triton-based FlashAttention config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` The model was trained initially with a sequence length of 2048 with an additional pretraining stage for sequence length adapation up to 8192. However, ALiBi enables users to increase the maximum sequence length even further during finetuning and/or inference. 
For example: ```python import transformers name = 'mosaicml/mpt-7b-chat-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 16384 # (input + output) tokens can now be up to 16384 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the MPT-7B-chat tokenizer which is based on the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer and includes additional ChatML tokens. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('mosaicml/mpt-7b-8k') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). ```python from transformers import pipeline with torch.autocast('cuda', dtype=torch.bfloat16): inputs = tokenizer('Here is a recipe for vegan banana bread:\n', return_tensors="pt").to('cuda') outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # or using the HF pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## Data Mix The model was trained on the following data mix: | Data Source | Number of Tokens in Source | Proportion | |-------------|----------------------------|------------| | Airoboros/GPT4-1.2 | 26.4M | 1.71% | | Baize | 55.0M | 3.57% | | Camel | 301M | 19.54% | | GPTeacher | 7.56M | 0.49% | | Guanaco | 15.6M | 1.02% | | LongCoversations | 18.4M | 1.19% | | ShareGPT | 821M | 53.24% | | WizardLM | 297M | 19.23% | "LongConversations" is a GPT3.5/4-generated dataset, details of which will be released at a later date. ### Training Configuration This model was trained on 192 H100s for about 48 minutes using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the AdamW optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-Chat-8k can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B-Chat-8k was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## Acknowledgements This model was finetuned by the MosaicML NLP team ## Disclaimer The license on this model does not constitute legal advice. 
We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://www.mosaicml.com/get-started?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b-8k). ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-30B: Raising the bar for open-source foundation models}, year = {2023}, url = {www.mosaicml.com/blog/mpt-30b}, note = {Accessed: 2023-06-22}, urldate = {2023-06-22} } ```
usakha/Prophetnet_GovReport_model
usakha
2023-07-18T19:16:47Z
12
0
transformers
[ "transformers", "pytorch", "tensorboard", "prophetnet", "text2text-generation", "summarization", "dataset:ccdv/govreport-summarization", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-06-27T10:58:54Z
--- datasets: - ccdv/govreport-summarization metrics: - bleu - rouge pipeline_tag: summarization --- # Hyperparameters learning_rate=2e-5 per_device_train_batch_size=14 per_device_eval_batch_size=14 weight_decay=0.01 save_total_limit=3 num_train_epochs=3 predict_with_generate=True fp16=True # Training Output global_step=3003, training_loss=2.5178213735600132, metrics={'train_runtime': 8703.174, 'train_samples_per_second': 4.83, 'train_steps_per_second': 0.345, 'total_flos': 9.272950245870797e+16, 'train_loss': 2.5178213735600132, 'epoch': 3.0} # Training Results | Epoch | Training Loss | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Gen Len | |:----- |:------------ |:--------------- |:-------- | :------- |:-------- |:--------- |:-------- |:--------- | 1| 2.661100| 2.469111| 0.451300| 0.185200| 0.279000| 0.278900| 0.553300| 141.720300| 2| 2.434100| 2.403647| 0.456900| 0.192800| 0.284500| 0.284500| 0.556800| 141.763100| 3| 2.313700| 2.393932| 0.459500| 0.194400| 0.286300| 0.286200| 0.559200| 141.571600|
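For reference, the hyperparameters above map roughly onto the following Hugging Face `Seq2SeqTrainingArguments` configuration (a sketch only; `output_dir` and `evaluation_strategy` are assumptions not stated in this card):

```python
# Sketch of the training setup implied by the hyperparameters listed above.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="prophetnet-govreport",    # placeholder name
    learning_rate=2e-5,
    per_device_train_batch_size=14,
    per_device_eval_batch_size=14,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=3,
    predict_with_generate=True,
    fp16=True,
    evaluation_strategy="epoch",          # assumption, consistent with the per-epoch table above
)
```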
usakha/Pegasus_MedPaper_model
usakha
2023-07-18T19:12:02Z
23
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "summarization", "en", "dataset:pszemraj/scientific_lay_summarisation-plos-norm", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-06-21T23:09:42Z
--- datasets: - pszemraj/scientific_lay_summarisation-plos-norm language: - en metrics: - bleu - rouge pipeline_tag: summarization --- # Hyperparameters learning_rate=2e-5 per_device_train_batch_size=14 per_device_eval_batch_size=14 weight_decay=0.01 save_total_limit=3 num_train_epochs=3 predict_with_generate=True fp16=True # Training Output global_step=4248, training_loss=2.4160910424988598, metrics={'train_runtime': 14565.4519, 'train_samples_per_second': 4.082, 'train_steps_per_second': 0.292, 'total_flos': 1.7179021728232243e+17, 'train_loss': 2.4160910424988598, 'epoch': 3.0} # Training Results | Epoch | Training Loss | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Gen Len | |:----- |:------------ |:--------------- |:-------- | :------- |:-------- |:--------- |:-------- |:--------- | |1| 2.467100| 2.303269| 0.410900| 0.136200| 0.235900| 0.235900| 0.465700| 182.332800 |2| 2.386700| 2.281062| 0.426300| 0.142300| 0.246800| 0.246700| 0.525200| 143.990900 |3| 2.362000| 2.274931| 0.428400| 0.143800| 0.248300| 0.248200| 0.532000| 139.585900
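A minimal inference sketch for this checkpoint (the input text and generation lengths are illustrative):

```python
# Sketch: lay summarisation with this fine-tuned Pegasus checkpoint.
from transformers import pipeline

summarizer = pipeline("summarization", model="usakha/Pegasus_MedPaper_model")

article = "Replace this with the scientific text to summarise."  # placeholder input
result = summarizer(article, max_length=150, min_length=40, truncation=True)
print(result[0]["summary_text"])
```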
NasimB/cbt-rarity-all-p8k-new-loop
NasimB
2023-07-18T19:10:10Z
5
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "generated_from_trainer", "dataset:generator", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T17:20:50Z
--- license: mit tags: - generated_from_trainer datasets: - generator model-index: - name: cbt-rarity-all-p8k-new-loop results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cbt-rarity-all-p8k-new-loop This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the generator dataset. It achieves the following results on the evaluation set: - Loss: 4.0987 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 6 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 6.3453 | 0.29 | 500 | 5.3401 | | 5.0275 | 0.58 | 1000 | 4.9263 | | 4.708 | 0.88 | 1500 | 4.6871 | | 4.4417 | 1.17 | 2000 | 4.5488 | | 4.2994 | 1.46 | 2500 | 4.4302 | | 4.1954 | 1.75 | 3000 | 4.3288 | | 4.0767 | 2.04 | 3500 | 4.2527 | | 3.8912 | 2.33 | 4000 | 4.2084 | | 3.8656 | 2.63 | 4500 | 4.1505 | | 3.8241 | 2.92 | 5000 | 4.1019 | | 3.6333 | 3.21 | 5500 | 4.0954 | | 3.5783 | 3.5 | 6000 | 4.0619 | | 3.5677 | 3.79 | 6500 | 4.0357 | | 3.485 | 4.08 | 7000 | 4.0302 | | 3.3115 | 4.38 | 7500 | 4.0268 | | 3.3152 | 4.67 | 8000 | 4.0107 | | 3.2992 | 4.96 | 8500 | 3.9986 | | 3.1572 | 5.25 | 9000 | 4.0111 | | 3.1333 | 5.54 | 9500 | 4.0095 | | 3.1339 | 5.83 | 10000 | 4.0085 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.11.0+cu113 - Datasets 2.13.0 - Tokenizers 0.13.3
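As a usage sketch (prompt and sampling settings are illustrative only):

```python
# Sketch: sample text from this GPT-2 model fine-tuned on the generator dataset.
from transformers import pipeline

generator = pipeline("text-generation", model="NasimB/cbt-rarity-all-p8k-new-loop")
out = generator("Once upon a time", max_new_tokens=50, do_sample=True)
print(out[0]["generated_text"])
```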
ByteExplorer/ppo-SnowballTarget
ByteExplorer
2023-07-18T19:08:56Z
1
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "SnowballTarget", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-SnowballTarget", "region:us" ]
reinforcement-learning
2023-07-17T02:30:03Z
---
library_name: ml-agents
tags:
- SnowballTarget
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SnowballTarget
---

# **ppo** Agent playing **SnowballTarget**
This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/

We wrote a complete tutorial to learn how to train your first agent using ML-Agents and publish it to the Hub:
- A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction
- A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction

### Resume the training
```bash
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity
2. Find your model_id: ByteExplorer/ppo-SnowballTarget
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
usakha/Bart_GovReport_model
usakha
2023-07-18T19:08:39Z
127
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "bart", "text2text-generation", "summarization", "en", "dataset:ccdv/govreport-summarization", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-06-26T10:52:22Z
--- datasets: - ccdv/govreport-summarization language: - en metrics: - bleu - rouge pipeline_tag: summarization --- # Hyperparameters learning_rate=2e-5 per_device_train_batch_size=14 per_device_eval_batch_size=14 weight_decay=0.01 save_total_limit=3 num_train_epochs=3 predict_with_generate=True fp16=True # Training Output global_step=3003, training_loss=1.8524150695953217, metrics={'train_runtime': 2319.7329, 'train_samples_per_second': 18.122, 'train_steps_per_second': 1.295, 'total_flos': 9.110291036818637e+16, 'train_loss': 1.8524150695953217, 'epoch': 3.0} # Training Results | Epoch | Training Loss | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Gen Len | |:----- |:------------ |:--------------- |:-------- | :------- |:-------- |:--------- |:-------- |:--------- | | 1 | 1.969100 | 1.756651 | 0.159100 | 0.088300 | 0.138800 | 0.138900 | 0.001600 | 20.000000 | | 2 | 1.794000 | 1.699691 | 0.158500 | 0.090300 | 0.139500 | 0.139600 | 0.001400 | 20.000000 | | 3 | 1.713700 | 1.687554 | 0.162700 | 0.091900 | 0.141800 | 0.141900 | 0.001660 | 20.000000 |
jordyvl/39-tiny_tobacco3482_kd_NKD_t1.0_g1.5
jordyvl
2023-07-18T19:08:19Z
165
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-07-18T18:33:48Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 39-tiny_tobacco3482_kd_NKD_t1.0_g1.5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 39-tiny_tobacco3482_kd_NKD_t1.0_g1.5 This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.0812 - Accuracy: 0.835 - Brier Loss: 0.2748 - Nll: 1.2215 - F1 Micro: 0.835 - F1 Macro: 0.8213 - Ece: 0.1443 - Aurc: 0.0548 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Brier Loss | Nll | F1 Micro | F1 Macro | Ece | Aurc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:|:------:|:--------:|:--------:|:------:|:------:| | No log | 1.0 | 7 | 5.1938 | 0.095 | 1.0201 | 8.6917 | 0.095 | 0.0778 | 0.3242 | 0.9009 | | No log | 2.0 | 14 | 4.3129 | 0.13 | 0.9109 | 8.2379 | 0.13 | 0.0910 | 0.2544 | 0.8466 | | No log | 3.0 | 21 | 3.9690 | 0.225 | 0.8600 | 6.8547 | 0.225 | 0.1401 | 0.2626 | 0.6398 | | No log | 4.0 | 28 | 3.8651 | 0.375 | 0.7978 | 5.6610 | 0.375 | 0.2964 | 0.3198 | 0.4692 | | No log | 5.0 | 35 | 3.8115 | 0.465 | 0.7222 | 3.4731 | 0.465 | 0.3435 | 0.3007 | 0.3464 | | No log | 6.0 | 42 | 3.7351 | 0.575 | 0.6691 | 2.6672 | 0.575 | 0.4736 | 0.3509 | 0.2284 | | No log | 7.0 | 49 | 3.6913 | 0.62 | 0.6152 | 2.6026 | 0.62 | 0.4700 | 0.3255 | 0.1827 | | No log | 8.0 | 56 | 3.6687 | 0.68 | 0.5820 | 1.9726 | 0.68 | 0.5400 | 0.3735 | 0.1472 | | No log | 9.0 | 63 | 3.6771 | 0.645 | 0.5464 | 1.9938 | 0.645 | 0.5211 | 0.3013 | 0.1595 | | No log | 10.0 | 70 | 3.6759 | 0.685 | 0.4884 | 1.9735 | 0.685 | 0.5678 | 0.2672 | 0.1278 | | No log | 11.0 | 77 | 3.6587 | 0.71 | 0.4696 | 2.0625 | 0.7100 | 0.6080 | 0.2956 | 0.1115 | | No log | 12.0 | 84 | 3.6317 | 0.72 | 0.4121 | 2.2088 | 0.72 | 0.6137 | 0.2372 | 0.0925 | | No log | 13.0 | 91 | 3.6799 | 0.745 | 0.4167 | 2.0639 | 0.745 | 0.6372 | 0.2480 | 0.0978 | | No log | 14.0 | 98 | 3.6191 | 0.745 | 0.3850 | 1.9955 | 0.745 | 0.6384 | 0.2363 | 0.0728 | | No log | 15.0 | 105 | 3.6813 | 0.715 | 0.3814 | 2.0731 | 0.715 | 0.6026 | 0.1995 | 0.0918 | | No log | 16.0 | 112 | 3.6394 | 0.75 | 0.3644 | 1.9093 | 0.75 | 0.6492 | 0.1904 | 0.0777 | | No log | 17.0 | 119 | 3.7661 | 0.735 | 0.3786 | 1.5402 | 0.735 | 0.6352 | 0.2032 | 0.0982 | | No log | 18.0 | 126 | 3.6849 | 0.79 | 0.3369 | 1.8761 | 0.79 | 0.6965 | 0.1954 | 0.0708 | | No log | 19.0 | 133 | 3.6776 | 0.775 | 0.3358 | 1.4981 | 0.775 | 0.7021 | 0.1919 | 0.0744 | | No log | 20.0 | 140 | 3.6814 | 0.755 | 0.3546 | 1.5225 | 0.755 | 0.6873 | 0.1840 | 0.0794 | | No log | 21.0 | 147 | 3.6948 | 0.775 | 0.3267 | 1.4776 | 0.775 | 0.7052 | 0.1630 | 0.0710 | | No log | 22.0 | 154 | 3.7210 | 0.795 | 0.3191 | 1.3634 | 0.795 | 0.7383 | 0.1737 | 0.0705 | | No log | 23.0 | 161 | 3.7231 | 0.805 | 0.3062 | 1.3141 | 0.805 
| 0.7679 | 0.1629 | 0.0665 | | No log | 24.0 | 168 | 3.7322 | 0.815 | 0.2903 | 1.2030 | 0.815 | 0.7771 | 0.1789 | 0.0609 | | No log | 25.0 | 175 | 3.7237 | 0.815 | 0.3020 | 1.1721 | 0.815 | 0.7947 | 0.1759 | 0.0603 | | No log | 26.0 | 182 | 3.8243 | 0.8 | 0.3138 | 1.3356 | 0.8000 | 0.7699 | 0.1735 | 0.0720 | | No log | 27.0 | 189 | 3.7675 | 0.81 | 0.3038 | 1.2662 | 0.81 | 0.7853 | 0.1891 | 0.0699 | | No log | 28.0 | 196 | 3.8006 | 0.81 | 0.2992 | 1.3422 | 0.81 | 0.7805 | 0.1709 | 0.0698 | | No log | 29.0 | 203 | 3.7783 | 0.815 | 0.3009 | 1.3322 | 0.815 | 0.7959 | 0.1729 | 0.0669 | | No log | 30.0 | 210 | 3.7547 | 0.835 | 0.2775 | 0.9761 | 0.835 | 0.8228 | 0.1751 | 0.0566 | | No log | 31.0 | 217 | 3.7810 | 0.82 | 0.2905 | 1.1472 | 0.82 | 0.7953 | 0.1670 | 0.0631 | | No log | 32.0 | 224 | 3.7935 | 0.82 | 0.2732 | 1.2016 | 0.82 | 0.7967 | 0.1429 | 0.0590 | | No log | 33.0 | 231 | 3.7871 | 0.83 | 0.2774 | 1.2459 | 0.83 | 0.8134 | 0.1495 | 0.0562 | | No log | 34.0 | 238 | 3.7689 | 0.815 | 0.2756 | 1.1135 | 0.815 | 0.7825 | 0.1609 | 0.0596 | | No log | 35.0 | 245 | 3.8169 | 0.81 | 0.2801 | 1.2621 | 0.81 | 0.7880 | 0.1570 | 0.0624 | | No log | 36.0 | 252 | 3.7973 | 0.82 | 0.2729 | 1.1310 | 0.82 | 0.7894 | 0.1466 | 0.0585 | | No log | 37.0 | 259 | 3.8560 | 0.835 | 0.2825 | 1.3222 | 0.835 | 0.8114 | 0.1466 | 0.0606 | | No log | 38.0 | 266 | 3.8351 | 0.83 | 0.2892 | 1.2548 | 0.83 | 0.8178 | 0.1489 | 0.0593 | | No log | 39.0 | 273 | 3.8258 | 0.82 | 0.2711 | 1.1900 | 0.82 | 0.8037 | 0.1455 | 0.0589 | | No log | 40.0 | 280 | 3.8288 | 0.815 | 0.2840 | 1.2167 | 0.815 | 0.7913 | 0.1574 | 0.0619 | | No log | 41.0 | 287 | 3.8264 | 0.82 | 0.2790 | 1.1737 | 0.82 | 0.8020 | 0.1394 | 0.0609 | | No log | 42.0 | 294 | 3.8276 | 0.81 | 0.2797 | 1.1603 | 0.81 | 0.7888 | 0.1585 | 0.0580 | | No log | 43.0 | 301 | 3.8554 | 0.815 | 0.2771 | 1.1695 | 0.815 | 0.7943 | 0.1310 | 0.0594 | | No log | 44.0 | 308 | 3.8405 | 0.825 | 0.2768 | 1.1593 | 0.825 | 0.8149 | 0.1413 | 0.0569 | | No log | 45.0 | 315 | 3.8640 | 0.815 | 0.2891 | 1.1752 | 0.815 | 0.7980 | 0.1516 | 0.0590 | | No log | 46.0 | 322 | 3.8624 | 0.825 | 0.2653 | 1.1548 | 0.825 | 0.8024 | 0.1384 | 0.0581 | | No log | 47.0 | 329 | 3.8546 | 0.83 | 0.2766 | 1.1634 | 0.83 | 0.8106 | 0.1411 | 0.0594 | | No log | 48.0 | 336 | 3.8652 | 0.82 | 0.2805 | 1.1651 | 0.82 | 0.8069 | 0.1278 | 0.0581 | | No log | 49.0 | 343 | 3.8716 | 0.83 | 0.2758 | 1.1895 | 0.83 | 0.8065 | 0.1486 | 0.0590 | | No log | 50.0 | 350 | 3.8720 | 0.815 | 0.2737 | 1.1709 | 0.815 | 0.7937 | 0.1375 | 0.0578 | | No log | 51.0 | 357 | 3.8812 | 0.82 | 0.2762 | 1.2348 | 0.82 | 0.7993 | 0.1292 | 0.0600 | | No log | 52.0 | 364 | 3.8844 | 0.805 | 0.2815 | 1.0870 | 0.805 | 0.7843 | 0.1525 | 0.0581 | | No log | 53.0 | 371 | 3.8968 | 0.825 | 0.2704 | 1.2235 | 0.825 | 0.8011 | 0.1452 | 0.0582 | | No log | 54.0 | 378 | 3.8996 | 0.81 | 0.2788 | 1.3264 | 0.81 | 0.7909 | 0.1453 | 0.0573 | | No log | 55.0 | 385 | 3.9037 | 0.81 | 0.2757 | 1.2231 | 0.81 | 0.7928 | 0.1307 | 0.0574 | | No log | 56.0 | 392 | 3.9024 | 0.81 | 0.2775 | 1.2369 | 0.81 | 0.7869 | 0.1493 | 0.0581 | | No log | 57.0 | 399 | 3.8951 | 0.83 | 0.2722 | 1.2151 | 0.83 | 0.8171 | 0.1491 | 0.0556 | | No log | 58.0 | 406 | 3.9224 | 0.82 | 0.2741 | 1.2957 | 0.82 | 0.8001 | 0.1351 | 0.0575 | | No log | 59.0 | 413 | 3.9397 | 0.805 | 0.2782 | 1.3017 | 0.805 | 0.7870 | 0.1342 | 0.0584 | | No log | 60.0 | 420 | 3.9250 | 0.835 | 0.2721 | 1.2251 | 0.835 | 0.8151 | 0.1466 | 0.0570 | | No log | 61.0 | 427 | 3.9381 | 0.825 | 0.2753 | 1.2330 | 0.825 | 0.8044 | 0.1384 | 
0.0577 | | No log | 62.0 | 434 | 3.9475 | 0.82 | 0.2759 | 1.2171 | 0.82 | 0.8054 | 0.1485 | 0.0576 | | No log | 63.0 | 441 | 3.9591 | 0.83 | 0.2761 | 1.2299 | 0.83 | 0.8122 | 0.1551 | 0.0568 | | No log | 64.0 | 448 | 3.9496 | 0.835 | 0.2709 | 1.2282 | 0.835 | 0.8223 | 0.1397 | 0.0559 | | No log | 65.0 | 455 | 3.9360 | 0.83 | 0.2688 | 1.2238 | 0.83 | 0.8171 | 0.1384 | 0.0535 | | No log | 66.0 | 462 | 3.9594 | 0.835 | 0.2733 | 1.2395 | 0.835 | 0.8094 | 0.1540 | 0.0563 | | No log | 67.0 | 469 | 3.9648 | 0.84 | 0.2700 | 1.2154 | 0.8400 | 0.8252 | 0.1673 | 0.0557 | | No log | 68.0 | 476 | 3.9725 | 0.83 | 0.2712 | 1.2297 | 0.83 | 0.8171 | 0.1248 | 0.0552 | | No log | 69.0 | 483 | 3.9844 | 0.835 | 0.2719 | 1.2243 | 0.835 | 0.8151 | 0.1605 | 0.0557 | | No log | 70.0 | 490 | 3.9845 | 0.83 | 0.2699 | 1.2288 | 0.83 | 0.8100 | 0.1223 | 0.0553 | | No log | 71.0 | 497 | 3.9986 | 0.835 | 0.2729 | 1.2206 | 0.835 | 0.8223 | 0.1381 | 0.0556 | | 3.4116 | 72.0 | 504 | 3.9973 | 0.835 | 0.2727 | 1.2242 | 0.835 | 0.8223 | 0.1446 | 0.0553 | | 3.4116 | 73.0 | 511 | 4.0092 | 0.835 | 0.2733 | 1.2226 | 0.835 | 0.8223 | 0.1482 | 0.0554 | | 3.4116 | 74.0 | 518 | 4.0072 | 0.83 | 0.2714 | 1.2248 | 0.83 | 0.8152 | 0.1219 | 0.0549 | | 3.4116 | 75.0 | 525 | 4.0168 | 0.835 | 0.2742 | 1.2200 | 0.835 | 0.8223 | 0.1329 | 0.0551 | | 3.4116 | 76.0 | 532 | 4.0223 | 0.835 | 0.2737 | 1.2248 | 0.835 | 0.8213 | 0.1380 | 0.0552 | | 3.4116 | 77.0 | 539 | 4.0250 | 0.84 | 0.2719 | 1.2208 | 0.8400 | 0.8252 | 0.1405 | 0.0551 | | 3.4116 | 78.0 | 546 | 4.0338 | 0.835 | 0.2745 | 1.2242 | 0.835 | 0.8213 | 0.1536 | 0.0551 | | 3.4116 | 79.0 | 553 | 4.0380 | 0.835 | 0.2740 | 1.2234 | 0.835 | 0.8213 | 0.1494 | 0.0552 | | 3.4116 | 80.0 | 560 | 4.0445 | 0.835 | 0.2744 | 1.2223 | 0.835 | 0.8213 | 0.1500 | 0.0555 | | 3.4116 | 81.0 | 567 | 4.0449 | 0.835 | 0.2735 | 1.2209 | 0.835 | 0.8213 | 0.1504 | 0.0552 | | 3.4116 | 82.0 | 574 | 4.0515 | 0.835 | 0.2747 | 1.2228 | 0.835 | 0.8213 | 0.1526 | 0.0549 | | 3.4116 | 83.0 | 581 | 4.0534 | 0.835 | 0.2743 | 1.2226 | 0.835 | 0.8213 | 0.1501 | 0.0548 | | 3.4116 | 84.0 | 588 | 4.0572 | 0.835 | 0.2740 | 1.2225 | 0.835 | 0.8213 | 0.1447 | 0.0550 | | 3.4116 | 85.0 | 595 | 4.0605 | 0.835 | 0.2743 | 1.2222 | 0.835 | 0.8213 | 0.1466 | 0.0548 | | 3.4116 | 86.0 | 602 | 4.0621 | 0.835 | 0.2744 | 1.2215 | 0.835 | 0.8213 | 0.1427 | 0.0548 | | 3.4116 | 87.0 | 609 | 4.0653 | 0.835 | 0.2745 | 1.2214 | 0.835 | 0.8213 | 0.1439 | 0.0549 | | 3.4116 | 88.0 | 616 | 4.0673 | 0.835 | 0.2746 | 1.2217 | 0.835 | 0.8213 | 0.1410 | 0.0548 | | 3.4116 | 89.0 | 623 | 4.0705 | 0.835 | 0.2748 | 1.2214 | 0.835 | 0.8213 | 0.1440 | 0.0549 | | 3.4116 | 90.0 | 630 | 4.0717 | 0.835 | 0.2744 | 1.2217 | 0.835 | 0.8213 | 0.1426 | 0.0547 | | 3.4116 | 91.0 | 637 | 4.0740 | 0.835 | 0.2747 | 1.2217 | 0.835 | 0.8213 | 0.1432 | 0.0548 | | 3.4116 | 92.0 | 644 | 4.0753 | 0.835 | 0.2748 | 1.2217 | 0.835 | 0.8213 | 0.1442 | 0.0547 | | 3.4116 | 93.0 | 651 | 4.0763 | 0.835 | 0.2746 | 1.2214 | 0.835 | 0.8213 | 0.1434 | 0.0546 | | 3.4116 | 94.0 | 658 | 4.0777 | 0.835 | 0.2746 | 1.2213 | 0.835 | 0.8213 | 0.1433 | 0.0547 | | 3.4116 | 95.0 | 665 | 4.0788 | 0.835 | 0.2747 | 1.2217 | 0.835 | 0.8213 | 0.1442 | 0.0547 | | 3.4116 | 96.0 | 672 | 4.0800 | 0.835 | 0.2748 | 1.2217 | 0.835 | 0.8213 | 0.1466 | 0.0547 | | 3.4116 | 97.0 | 679 | 4.0802 | 0.835 | 0.2747 | 1.2215 | 0.835 | 0.8213 | 0.1435 | 0.0547 | | 3.4116 | 98.0 | 686 | 4.0808 | 0.835 | 0.2747 | 1.2214 | 0.835 | 0.8213 | 0.1435 | 0.0547 | | 3.4116 | 99.0 | 693 | 4.0811 | 0.835 | 0.2748 | 1.2214 | 0.835 | 0.8213 | 
0.1443 | 0.0547 | | 3.4116 | 100.0 | 700 | 4.0812 | 0.835 | 0.2748 | 1.2215 | 0.835 | 0.8213 | 0.1443 | 0.0548 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1.post200 - Datasets 2.9.0 - Tokenizers 0.13.2
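Since the base checkpoint is a ViT image classifier, a minimal inference sketch for this fine-tune could look as follows (the image path is a placeholder):

```python
# Sketch: classify a document image with this fine-tuned ViT-tiny checkpoint.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="jordyvl/39-tiny_tobacco3482_kd_NKD_t1.0_g1.5",
)
predictions = classifier("path/to/document_page.png")  # placeholder image path
for p in predictions:
    print(p["label"], round(p["score"], 3))
```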
usakha/Bart_MedPaper_model
usakha
2023-07-18T19:07:38Z
20
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "bart", "text2text-generation", "summarization", "en", "dataset:pszemraj/scientific_lay_summarisation-plos-norm", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-06-26T10:51:55Z
--- datasets: - pszemraj/scientific_lay_summarisation-plos-norm language: - en metrics: - bleu - rouge pipeline_tag: summarization --- # Hyperparameters learning_rate=2e-5 per_device_train_batch_size=14 per_device_eval_batch_size=14 weight_decay=0.01 save_total_limit=3 num_train_epochs=3 predict_with_generate=True fp16=True # Training Output global_step=4248, training_loss=2.172659089111788, metrics={'train_runtime': 3371.7912, 'train_samples_per_second': 17.633, 'train_steps_per_second': 1.26, 'total_flos': 1.2884303701396685e+17, 'train_loss': 2.172659089111788, 'epoch': 3.0} # Training Results | Epoch | Training Loss | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Gen Len | |:----- |:------------ |:--------------- |:-------- | :------- |:-------- |:--------- |:-------- |:--------- | | 1 | 2.318000 | 2.079500 | 0.128100 | 0.046700 | 0.104200 | 0.104200 | 0.001100 | 20.000000 | | 2 | 2.130000 | 2.043523 | 0.130200 | 0.047400 | 0.105400 | 0.105300 | 0.001300 | 20.000000 | | 3 | 2.047100 | 2.034664 | 0.130700 | 0.047800 | 0.105900 | 0.105900 | 0.001300 | 20.000000 |
usakha/Bart_multiNews_model
usakha
2023-07-18T19:06:08Z
14
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "bart", "text2text-generation", "summarization", "dataset:multi_news", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-06-26T10:51:55Z
--- datasets: - multi_news metrics: - bleu - rouge pipeline_tag: summarization --- # Hyperparameters learning_rate=2e-5 per_device_train_batch_size=14 per_device_eval_batch_size=14 weight_decay=0.01 save_total_limit=3 num_train_epochs=3 predict_with_generate=True fp16=True # Training Output global_step=7710 training_loss=2.1297076629757417 metrics={'train_runtime': 6059.0418, 'train_samples_per_second': 17.813, 'train_steps_per_second': 1.272, 'total_flos': 2.3389776681055027e+17, 'train_loss': 2.1297076629757417, 'epoch': 3.0} # Training Results | Epoch | Training Loss | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Gen Len | |:----- |:------------ |:--------------- |:-------- | :------- |:-------- |:--------- |:-------- |:--------- | | 1 | 2.223100 | 2.038599 | 0.147400 | 0.054800 | 0.113500 | 0.113500 | 0.001400 | 20.000000 | | 2 | 2.078100 | 2.009619 | 0.152900 | 0.057800 | 0.117000 | 0.117000 | 0.001600 | 20.000000 | | 3 | 1.989000 | 2.006006 | 0.152900 | 0.057300 | 0.116700 | 0.116700 | 0.001700 | 20.000000 |
shreeramchandra/ser_wav2vec_indianEnglish_greek_pretrained
shreeramchandra
2023-07-18T19:05:56Z
102
0
transformers
[ "transformers", "pytorch", "wav2vec2", "audio-classification", "en", "license:afl-3.0", "endpoints_compatible", "region:us" ]
audio-classification
2023-07-13T03:19:34Z
--- license: afl-3.0 language: - en pipeline_tag: audio-classification ---
shreeramchandra/ser_wav2vec_indianEnglish_greek_pretrained_balanced_data
shreeramchandra
2023-07-18T19:04:55Z
162
0
transformers
[ "transformers", "pytorch", "wav2vec2", "audio-classification", "en", "endpoints_compatible", "region:us" ]
audio-classification
2023-07-18T17:48:21Z
--- language: - en metrics: - accuracy pipeline_tag: audio-classification ---
bwilkie/taxi
bwilkie
2023-07-18T19:04:22Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T19:03:23Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: taxi
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**
This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep Reinforcement Learning Course notebook
# (it downloads the pickled Q-table dictionary from the Hub).
model = load_from_hub(repo_id="bwilkie/taxi", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
uraskargi/dqn-SpaceInvadersNoFrameskip-v4
uraskargi
2023-07-18T18:54:50Z
0
0
stable-baselines3
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T18:54:08Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 733.50 +/- 298.87 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga uraskargi -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga uraskargi -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga uraskargi ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
chinmaybharti/unit-testing
chinmaybharti
2023-07-18T18:52:38Z
1
0
peft
[ "peft", "region:us" ]
null
2023-07-18T18:52:36Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0.dev0
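A hedged sketch of how an adapter saved with this configuration is typically loaded for inference; the base model id below is a placeholder, since this card does not state which model the adapter was trained from:

```python
# Sketch: load the base model in 8-bit (matching the bitsandbytes config above)
# and attach this PEFT adapter. The base model id is a PLACEHOLDER.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_model_id = "<base-model-the-adapter-was-trained-from>"  # placeholder

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_model_id)

model = PeftModel.from_pretrained(base_model, "chinmaybharti/unit-testing")
model.eval()
```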
anonymous4chan/llama-2-7b
anonymous4chan
2023-07-18T18:52:02Z
347
0
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "facebook", "meta", "llama-2", "en", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-07-18T18:26:13Z
--- extra_gated_heading: Access Llama 2 on Hugging Face extra_gated_description: >- This is a form to enable access to Llama 2 on Hugging Face after you have been granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our license terms and acceptable use policy before submitting this form. Requests will be processed in 1-2 days. extra_gated_button_content: Submit extra_gated_fields: I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox language: - en pipeline_tag: text-generation inference: false tags: - facebook - meta - pytorch - llama - llama-2 --- ## Cuck. # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. 
Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. 
*World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)| |70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
gsaivinay/airoboros-13B-gpt4-1.3-GGML
gsaivinay
2023-07-18T18:50:36Z
5
0
transformers
[ "transformers", "llama", "text-generation", "sft", "en", "dataset:jondurbin/airoboros-gpt4-1.3", "license:other", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T18:39:24Z
---
license: other
language:
- en
tags:
- sft
pipeline_tag: text-generation
widget:
- text: <|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>
- text: <|prompter|>What's the Earth total population<|endoftext|><|assistant|>
- text: <|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>
datasets:
- jondurbin/airoboros-gpt4-1.3
---

# Jon Durbin's Airoboros 13B GPT4 1.3 GGML

These files are GGML format model files for [Jon Durbin's Airoboros 13B GPT4 1.3](https://huggingface.co/jondurbin/airoboros-13b-gpt4-1.3).

**Note from model creator Jon Durbin: This version has problems, use if you dare, or wait for 1.4.**

GGML files are for CPU + GPU inference using [llama.cpp](https://github.com/ggerganov/llama.cpp) and libraries and UIs which support this format, such as:
* [text-generation-webui](https://github.com/oobabooga/text-generation-webui)
* [KoboldCpp](https://github.com/LostRuins/koboldcpp)
* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui)
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
* [ctransformers](https://github.com/marella/ctransformers)

## Repositories available

* [4-bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/airoboros-13B-gpt4-1.3-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference](https://huggingface.co/TheBloke/airoboros-13B-gpt4-1.3-GGML)
* [Unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/jondurbin/airoboros-13b-gpt4-1.3)

## Prompt template

```
A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. USER: prompt ASSISTANT:
```

<!-- compatibility_ggml start -->
## Compatibility

### Original llama.cpp quant methods: `q4_0, q4_1, q5_0, q5_1, q8_0`

I have quantized these 'original' quantisation methods using an older version of llama.cpp so that they remain compatible with llama.cpp as of May 19th, commit `2d5db48`.

These are guaranteed to be compatible with any UIs, tools and libraries released since late May.

### New k-quant methods: `q2_K, q3_K_S, q3_K_M, q3_K_L, q4_K_S, q4_K_M, q5_K_S, q6_K`

These new quantisation methods are compatible with llama.cpp as of June 6th, commit `2d43387`.

They are now also compatible with recent releases of text-generation-webui, KoboldCpp, llama-cpp-python and ctransformers. Other tools and libraries may or may not be compatible - check their documentation if in doubt.

## Explanation of the new k-quant methods

The new methods available are:
* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw
* GGML_TYPE_Q8_K - "type-0" 8-bit quantization.
Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type. Refer to the Provided Files table below to see what files use which methods, and how. <!-- compatibility_ggml end --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | airoboros-13b-gpt4-1.3.ggmlv3.q2_K.bin | q2_K | 2 | 5.51 GB | 8.01 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.vw and feed_forward.w2 tensors, GGML_TYPE_Q2_K for the other tensors. | | airoboros-13b-gpt4-1.3.ggmlv3.q3_K_L.bin | q3_K_L | 3 | 6.93 GB | 9.43 GB | New k-quant method. Uses GGML_TYPE_Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K | | airoboros-13b-gpt4-1.3.ggmlv3.q3_K_M.bin | q3_K_M | 3 | 6.31 GB | 8.81 GB | New k-quant method. Uses GGML_TYPE_Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else GGML_TYPE_Q3_K | | airoboros-13b-gpt4-1.3.ggmlv3.q3_K_S.bin | q3_K_S | 3 | 5.66 GB | 8.16 GB | New k-quant method. Uses GGML_TYPE_Q3_K for all tensors | | airoboros-13b-gpt4-1.3.ggmlv3.q4_0.bin | q4_0 | 4 | 7.32 GB | 9.82 GB | Original llama.cpp quant method, 4-bit. | | airoboros-13b-gpt4-1.3.ggmlv3.q4_1.bin | q4_1 | 4 | 8.14 GB | 10.64 GB | Original llama.cpp quant method, 4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models. | | airoboros-13b-gpt4-1.3.ggmlv3.q4_K_M.bin | q4_K_M | 4 | 7.87 GB | 10.37 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q4_K | | airoboros-13b-gpt4-1.3.ggmlv3.q4_K_S.bin | q4_K_S | 4 | 7.37 GB | 9.87 GB | New k-quant method. Uses GGML_TYPE_Q4_K for all tensors | | airoboros-13b-gpt4-1.3.ggmlv3.q5_0.bin | q5_0 | 5 | 8.95 GB | 11.45 GB | Original llama.cpp quant method, 5-bit. Higher accuracy, higher resource usage and slower inference. | | airoboros-13b-gpt4-1.3.ggmlv3.q5_1.bin | q5_1 | 5 | 9.76 GB | 12.26 GB | Original llama.cpp quant method, 5-bit. Even higher accuracy, resource usage and slower inference. | | airoboros-13b-gpt4-1.3.ggmlv3.q5_K_M.bin | q5_K_M | 5 | 9.23 GB | 11.73 GB | New k-quant method. Uses GGML_TYPE_Q6_K for half of the attention.wv and feed_forward.w2 tensors, else GGML_TYPE_Q5_K | | airoboros-13b-gpt4-1.3.ggmlv3.q5_K_S.bin | q5_K_S | 5 | 8.97 GB | 11.47 GB | New k-quant method. Uses GGML_TYPE_Q5_K for all tensors | | airoboros-13b-gpt4-1.3.ggmlv3.q6_K.bin | q6_K | 6 | 10.68 GB | 13.18 GB | New k-quant method. Uses GGML_TYPE_Q8_K - 6-bit quantization - for all tensors | | airoboros-13b-gpt4-1.3.ggmlv3.q8_0.bin | q8_0 | 8 | 13.83 GB | 16.33 GB | Original llama.cpp quant method, 8-bit. Almost indistinguishable from float16. High resource use and slow. Not recommended for most users. | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. ## How to run in `llama.cpp` I use the following command line; adjust for your tastes and needs: ``` ./main -t 10 -ngl 32 -m airoboros-13b-gpt4-1.3.ggmlv3.q5_0.bin --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "USER: Write a story about llamas\nASSISTANT:" ``` If you're able to use full GPU offloading, you should use `-t 1` to get best performance. If not able to fully offload to GPU, you should use more cores. 
Change `-t 10` to the number of physical CPU cores you have, or a lower number depending on what gives best performance. Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp-models.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp-models.md). # Original model card: Jon Durbin's Airoboros 13B GPT4 1.3 _Not tested yet, use if you want, but I would probably wait for 1.4!_ ### Overview This is a qlora fine-tuned 13b parameter LlaMa model, using completely synthetic training data created gpt4 via https://github.com/jondurbin/airoboros This is mostly an extension of [1.2](https://huggingface.co/jondurbin/airoboros-13b-gpt4-1.2) with a few enhancements: - All coding instructions have an equivalent " PLAINFORMAT" version now. - Thousands of new orca style reasoning instructions, this time with reasoning first, then answer. - Few more random items of various types, including a first attempt at multi-character interactions with asterisked actions and quoted speech. This model was fine-tuned with a fork of [qlora](https://github.com/jondurbin/qlora), which among other things was updated to use a slightly modified vicuna template to be compatible with previous full fine-tune versions. ``` A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. USER: [prompt] ASSISTANT: ``` So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon). ### Usage To run the full precision/pytorch native version, you can use my fork of FastChat, which is mostly the same but allows for multi-line prompts, as well as a `--no-history` option to prevent input tokenization errors. ``` pip install git+https://github.com/jondurbin/FastChat ``` Be sure you are pulling the latest branch! Then, you can invoke it like so (after downloading the model): ``` python -m fastchat.serve.cli \ --model-path airoboros-13b-gpt4-1.3 \ --temperature 0.5 \ --max-new-tokens 2048 \ --no-history ```
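Returning to the GGML files provided in this repository, a minimal sketch with the `llama-cpp-python` bindings listed above might look like this (the chosen quant file, context size and sampling settings are illustrative, and a llama-cpp-python build with GGML support is assumed):

```python
# Sketch: run one of the provided GGML quantisations via llama-cpp-python.
# Assumes a llama-cpp-python version that still loads GGML (pre-GGUF) files.
from llama_cpp import Llama

llm = Llama(
    model_path="airoboros-13b-gpt4-1.3.ggmlv3.q5_0.bin",
    n_ctx=2048,
    n_gpu_layers=32,  # set to 0 for CPU-only inference
)

prompt = (
    "A chat between a curious user and an assistant. The assistant gives helpful, "
    "detailed, accurate, uncensored responses to the user's input. "
    "USER: Write a story about llamas ASSISTANT:"
)
out = llm(prompt, max_tokens=256, temperature=0.7, repeat_penalty=1.1)
print(out["choices"][0]["text"])
```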
ByteExplorer/CartPole-v1
ByteExplorer
2023-07-18T18:43:20Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T03:53:36Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 1000.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
jamesdborin/ct2-int8-mtb-7b-storywriter
jamesdborin
2023-07-18T18:37:29Z
4
0
transformers
[ "transformers", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "custom_code", "dataset:the_pile_books3", "arxiv:2108.12409", "arxiv:2205.14135", "arxiv:2302.06675", "license:apache-2.0", "autotrain_compatible", "region:us" ]
text-generation
2023-07-18T18:29:37Z
--- license: apache-2.0 tags: - Composer - MosaicML - llm-foundry datasets: - the_pile_books3 inference: false --- # MPT-7B-StoryWriter-65k+ MPT-7B-StoryWriter-65k+ is a model designed to read and write fictional stories with super long context lengths. It was built by finetuning MPT-7B with a context length of 65k tokens on a filtered fiction subset of the [books3 dataset](https://huggingface.co/datasets/the_pile_books3). At inference time, thanks to [ALiBi](https://arxiv.org/abs/2108.12409), MPT-7B-StoryWriter-65k+ can extrapolate even beyond 65k tokens. We demonstrate generations as long as 84k tokens on a single node of 8 A100-80GB GPUs in our [blogpost](https://www.mosaicml.com/blog/mpt-7b). * License: Apache 2.0 This model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture. ## Model Date May 5, 2023 ## Model License Apache 2.0 ## Documentation * [Blog post: Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs](https://www.mosaicml.com/blog/mpt-7b) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom model architecture that is not yet part of the `transformers` package. It includes options for many training efficiency features such as [FlashAttention (Dao et al. 2022)](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), QK LayerNorm, and more. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-storywriter', trust_remote_code=True ) ``` To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-storywriter' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` Although the model was trained with a sequence length of 2048 and finetuned with a sequence length of 65536, ALiBi enables users to increase the maximum sequence length during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-7b' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 83968 # (input + output) tokens can now be up to 83968 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). 
```python from transformers import pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Community-Created Resources These were not created by MosaicML, but you may find them useful. These links are not an endorsement of the creators or their content. - [Oobabooga Running MPT-7B-Storywriter](https://youtu.be/QVVb6Md6huA) - [NEW MPT-7B-StoryWriter CRUSHES GPT-4!](https://www.youtube.com/watch?v=O9Y_ZdsuKWQ&t=649s) - Has a long section on running locally using Oobabooga ## Example Epilogue The full text of the _The Great Gatsby_ (67873 tokens) was fed to the model, followed by the text "EPILOGUE" **TheRealFitzgerald233**: > ... > > So we beat on, boats against the current, borne back ceaselessly into the past > > EPILOGUE **MPT-7B-StoryWriter**: >I have lived so long in the shadow of this dream that I may never see the bright sunlight of reality. I may look up some morning and find that I am six feet under, and that Gatsby, who gave me the dream and everything else, will have vanished behind me. > >And so it was. I had hardly crossed the threshold of the old world when my eye caught the headlines of a newspaper, and I knew at once that my dream was over. I knew that Gatsby was dead. > >And I knew that day that I didn't love her. > >That night I took the last train for West Egg, and when I reached home I went directly to my attic and brought down all Gatsby's letters. > >I have sorted them by date and placed them in the order in which they were written. They are very few in number. > >They are dated between April of nineteen eighteen and January of nineteen nineteen, and the last one I have here is the one that told me he was dead. > >There is something in that last letter that frightens me. It is the sense of a finality, of a completeness. I never heard of him after that, and I was glad. > >But a few months ago I read somewhere that he had been seen in New York. He had become a great man. > >And I knew that he had not changed at all. ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | **65536** | ## PreTraining Data For more details on the pretraining process, see [MPT-7B](https://huggingface.co/mosaicml/mpt-7b). The data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ### Training Configuration This model was trained on 8 A100-80GBs for about 2 days using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the [LION](https://arxiv.org/abs/2302.06675) optimizer. 
## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-StoryWriter can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B-StoryWriter was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## Acknowledgements This model was finetuned by Alex Trott and the MosaicML NLP team. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b). ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs}, year = {2023}, url = {www.mosaicml.com/blog/mpt-7b}, note = {Accessed: 2023-03-28}, % change this date urldate = {2023-03-28} % change this date } ```
rami8k/a2c-AntBulletEnv-v0
rami8k
2023-07-18T18:35:43Z
0
0
stable-baselines3
[ "stable-baselines3", "AntBulletEnv-v0", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T18:33:16Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1014.42 +/- 260.15 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
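Since the usage section above is still a TODO, here is a hedged sketch of the usual `huggingface_sb3` loading pattern. The checkpoint filename is an assumption about what the repository contains, and the rollout uses the older Gym reset API; adjust both as needed.

```python
# Illustrative only: the filename below is a guess at the uploaded checkpoint
# name, and the env rollout assumes the older Gym API (reset() returns obs).
import gym
import pybullet_envs  # registers AntBulletEnv-v0
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

checkpoint = load_from_hub(
    repo_id="rami8k/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",  # hypothetical filename
)
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()
action, _states = model.predict(obs, deterministic=True)
```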
tanmoy-in/test_model_v02
tanmoy-in
2023-07-18T18:34:04Z
0
0
peft
[ "peft", "region:us" ]
null
2023-07-18T18:33:50Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.5.0.dev0
jamesdborin/ct2-int8-mtb-7b
jamesdborin
2023-07-18T18:27:10Z
4
0
transformers
[ "transformers", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "StreamingDatasets", "custom_code", "dataset:mc4", "dataset:c4", "dataset:togethercomputer/RedPajama-Data-1T", "dataset:bigcode/the-stack", "dataset:allenai/s2orc", "arxiv:2108.12409", "arxiv:2302.13971", "arxiv:2205.14135", "arxiv:2010.04245", "arxiv:1909.08053", "arxiv:2302.06675", "license:apache-2.0", "autotrain_compatible", "region:us" ]
text-generation
2023-07-18T18:19:55Z
--- license: apache-2.0 tags: - Composer - MosaicML - llm-foundry - StreamingDatasets datasets: - mc4 - c4 - togethercomputer/RedPajama-Data-1T - bigcode/the-stack - allenai/s2orc inference: false --- # MPT-7B MPT-7B is a decoder-style transformer pretrained from scratch on 1T tokens of English text and code. This model was trained by [MosaicML](https://www.mosaicml.com). MPT-7B is part of the family of MosaicPretrainedTransformer (MPT) models, which use a modified transformer architecture optimized for efficient training and inference. These architectural changes include performance-optimized layer implementations and the elimination of context length limits by replacing positional embeddings with Attention with Linear Biases ([ALiBi](https://arxiv.org/abs/2108.12409)). Thanks to these modifications, MPT models can be trained with high throughput efficiency and stable convergence. MPT models can also be served efficiently with both standard HuggingFace pipelines and NVIDIA's [FasterTransformer](https://github.com/NVIDIA/FasterTransformer). This model uses the MosaicML LLM codebase, which can be found in the [llm-foundry repository](https://github.com/mosaicml/llm-foundry). It was trained by MosaicML’s NLP team on the [MosaicML platform](https://www.mosaicml.com/training) for LLM pretraining, finetuning, and inference. ### How is this model different? MPT-7B is * **Licensed for the possibility of commercial use** (unlike [LLaMA](https://arxiv.org/abs/2302.13971)). * **Trained on a large amount of data** (1T tokens like [LLaMA](https://arxiv.org/abs/2302.13971) vs. 300B for [Pythia](https://github.com/EleutherAI/pythia), 300B for [OpenLLaMA](https://github.com/openlm-research/open_llama), and 800B for [StableLM](https://github.com/Stability-AI/StableLM)). * **Prepared to handle extremely long inputs** thanks to [ALiBi](https://arxiv.org/abs/2108.12409) (we finetuned [MPT-7B-StoryWriter-65k+](https://huggingface.co/mosaicml/mpt-7b-storywriter) on up to 65k inputs and can handle up to 84k vs. 2k-4k for other open source models). * **Capable of fast training and inference** (via [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) and [FasterTransformer](https://github.com/NVIDIA/FasterTransformer)) * **Equipped with highly efficient open-source training code** via the [llm-foundry repository](https://github.com/mosaicml/llm-foundry) ### Models finetuned off MPT-7B: The following models are finetuned on MPT-7B: * [MPT-7B-StoryWriter-65k+](https://huggingface.co/mosaicml/mpt-7b-storywriter): a model designed to read and write fictional stories with super long context lengths. Built by finetuning MPT-7B with a context length of 65k tokens on a filtered fiction subset of the [books3 dataset](https://huggingface.co/datasets/the_pile_books3). At inference time, thanks to [ALiBi](https://arxiv.org/abs/2108.12409), MPT-7B-StoryWriter-65k+ can extrapolate even beyond 65k tokens. We demonstrate generations as long as 80k tokens on a single A100-80GB GPU in our [blogpost](www.mosaicml.com/blog/mpt-7b). * License: Apache 2.0 * [MPT-7B-Instruct](https://huggingface.co/mosaicml/mpt-7b-instruct): a model for short-form instruction following. Built by finetuning MPT-7B on a [dataset](https://huggingface.co/datasets/mosaicml/dolly_hhrlhf) we also release, derived from the [Databricks Dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) and the [Anthropic Helpful and Harmless (HH-RLHF)](https://huggingface.co/datasets/Anthropic/hh-rlhf) datasets. 
* License: _CC-By-SA-3.0_ * [Demo on Hugging Face Spaces](https://huggingface.co/spaces/mosaicml/mpt-7b-instruct) * [MPT-7B-Chat](https://huggingface.co/mosaicml/mpt-7b-chat): a chatbot-like model for dialogue generation. Built by finetuning MPT-7B on the [ShareGPT-Vicuna](https://huggingface.co/datasets/jeffwan/sharegpt_vicuna), [HC3](https://huggingface.co/datasets/Hello-SimpleAI/HC3), [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca), [HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf), and [Evol-Instruct](https://huggingface.co/datasets/victor123/evol_instruct_70k) datasets. * License: _CC-By-NC-SA-4.0_ * [Demo on Hugging Face Spaces](https://huggingface.co/spaces/mosaicml/mpt-7b-chat) ## Model Date May 5, 2023 ## Model License Apache-2.0 ## Documentation * [Blog post: Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs](https://www.mosaicml.com/blog/mpt-7b) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` Although the model was trained with a sequence length of 2048, ALiBi enables users to increase the maximum sequence length during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-7b' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 4096 # (input + output) tokens can now be up to 4096 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). 
```python from transformers import pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## Training Data ### Streaming Datasets Data was formatted using the MosaicML [StreamingDataset](https://github.com/mosaicml/streaming) library to host our data in object storage and efficiently stream it to our compute cluster during training. StreamingDataset obviates the need to download the whole dataset before starting training, and allows instant resumption of training from any point in the dataset. ### Data Mix The model was trained for 1T tokens (with batch size 1760 and sequence length 2048). It was trained on the following data mix: | Data Source | Number of Tokens in Source | Proportion | Effective Number of Tokens | Epochs | |-------------|----------------------------|------------|----------------------------|--------| | mC4 3.1.0 - English | 417.99 B | 0.33 | 330 B | 0.14 | | C4 - English - SemDedup 80% | 100.42 B | 0.299 | 299 B | 2.98 | | RedPajama - CommonCrawl | 878.45 B | 0.1 | 100 B | 0.11 | | The Stack - Selected Languages | 463.78 B | 0.1 | 100 B | 0.22 | | RedPajama - Wikipedia - En | 4.87 B | 0.04 | 40 B | 8.21 | | The Stack - Markdown | 107.07 B | 0.035 | 35 B | 0.33 | | S2ORC | 48.85 B | 0.033 | 33 B | 0.68 | | RedPajama - Books | 26.02 B | 0.03 | 30 B | 1.15 | | RedPajama - arXiv | 28.10 B | 0.019 | 19 B | 0.68 | | RedPajama - StackExchange | 20.54 B | 0.014 | 14 B | 0.68 | Samples for each batch were selected from one of the datasets with the probability specified above. The examples were shuffled within each dataset, and each example was constructed from as many sequences from that dataset as were necessary to fill the 2048 sequence length. The data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. This BPE tokenizer has a number of desirable characteristics, most of which are relevant for tokenizing code: (1) It was trained on a diverse mix of data that includes code (The Pile) (2) It applies consistent space delimitation, unlike the GPT2 tokenizer which tokenizes inconsistently depending on the presence of prefix spaces (3) It contains tokens for repeated space characters, which allows superior compression of text with large amounts of repeated space characters. The model vocabulary size of 50432 was set to be a multiple of 128 (as in [MEGATRON-LM](https://arxiv.org/abs/1909.08053)), which increased model flop utilization (MFU) by up to four percentage points. ### Training Configuration This model was trained on 440 A100-40GBs for about 9.5 days using the [MosaicML Platform](https://www.mosaicml.com/platform).
The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the [LION](https://arxiv.org/abs/2302.06675) optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B (Base) is **not** intended for deployment without finetuning. It should not be used for human-facing interactions without further guardrails and user consent. MPT-7B can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b). ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs}, year = {2023}, url = {www.mosaicml.com/blog/mpt-7b}, note = {Accessed: 2023-05-05}, urldate = {2023-05-05} } ```
yahs/yang_xiao_long_rwby
yahs
2023-07-18T18:23:19Z
0
0
null
[ "arxiv:1910.09700", "region:us" ]
null
2023-07-18T18:20:55Z
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards {} --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This model card aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
lrex93497/bert_qa_classifier_pt_3
lrex93497
2023-07-18T18:23:08Z
184
0
transformers
[ "transformers", "pytorch", "bert", "text-classification", "license:apache-2.0", "autotrain_compatible", "region:us" ]
text-classification
2023-07-18T18:00:03Z
--- license: apache-2.0 inference: false --- This is a BERT QA classifier for distinguishing answerable from unanswerable questions in SQuAD 2.0. It was trained on the SQuAD 2.0 training set; this checkpoint is from epoch 4 (epochs counted from 1).<br> For details, please see https://github.com/lrex93497/fine-tune-BERT-base-uncased-QA-SQuAD2.0/ <br>In our system, we achieved Exact Match (EM): 53.0784 and F1: 59.1615. <br>This classifier model scores 72.6899% at distinguishing answerable and unanswerable questions.
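A minimal usage sketch (not from the original card): it assumes the checkpoint loads as a standard sequence-classification head and that question/context pairs are fed the way the linked training repository does; the label-to-class mapping (answerable vs. unanswerable) is not documented here, so check the repository before relying on it.

```python
# Illustrative only: which logit corresponds to "answerable" is an assumption;
# see the linked GitHub repository for the actual label mapping.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

name = "lrex93497/bert_qa_classifier_pt_3"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

question = "Who wrote the play?"
context = "The play was first performed in the 16th century."

# Encode the question/context pair as a single sequence-pair input.
inputs = tokenizer(question, context, truncation=True, return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # probabilities over the two classes
```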
SaffalPoosh/nllb-200-3.3B
SaffalPoosh
2023-07-18T18:21:30Z
2
0
transformers
[ "transformers", "endpoints_compatible", "region:us" ]
null
2023-07-17T14:02:55Z
This is a C++-optimized model for language detection, intended for use with the CTranslate2 APIs.
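Below is a hedged sketch of how a converted checkpoint is typically invoked through the CTranslate2 Python API; the local model directory, tokenizer checkpoint, and language codes are all illustrative assumptions, not values taken from this repository.

```python
# Illustrative sketch of the CTranslate2 Python API. The model directory,
# tokenizer name, and language codes are assumptions, not from this card.
import ctranslate2
import transformers

translator = ctranslate2.Translator("nllb-200-3.3B-ct2", device="cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "facebook/nllb-200-3.3B", src_lang="eng_Latn"
)

source = tokenizer.convert_ids_to_tokens(tokenizer.encode("Hello, world."))
results = translator.translate_batch([source], target_prefix=[["fra_Latn"]])
target_tokens = results[0].hypotheses[0][1:]  # drop the language-code prefix
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target_tokens)))
```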
tanmoy-in/test_model
tanmoy-in
2023-07-18T18:20:00Z
1
0
peft
[ "peft", "tensorboard", "generated_from_trainer", "base_model:facebook/opt-350m", "base_model:adapter:facebook/opt-350m", "license:other", "region:us" ]
null
2023-07-18T18:02:28Z
--- license: other base_model: facebook/opt-350m tags: - generated_from_trainer model-index: - name: test_model results: [] library_name: peft --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test_model This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2 - training_steps: 5 ### Training results ### Framework versions - PEFT 0.5.0.dev0 - Transformers 4.32.0.dev0 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
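For inference, here is a hedged sketch of attaching this adapter to the base model named in the card above; the prompt and generation settings are arbitrary examples.

```python
# Illustrative sketch: loads the base model named in this card and attaches
# the adapter from this repository; prompt and settings are arbitrary.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model = PeftModel.from_pretrained(base, "tanmoy-in/test_model")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

inputs = tokenizer("Hello, my name is", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```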
ByteExplorer/Acrobot-v1
ByteExplorer
2023-07-18T18:19:34Z
0
0
null
[ "Acrobot-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T18:19:21Z
--- tags: - Acrobot-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Acrobot-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Acrobot-v1 type: Acrobot-v1 metrics: - type: mean_reward value: -1000.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **Acrobot-v1** This is a trained model of a **Reinforce** agent playing **Acrobot-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
jamesdborin/ct2-int8-falcon-7b
jamesdborin
2023-07-18T18:19:00Z
12
0
transformers
[ "transformers", "RefinedWebModel", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2205.14135", "arxiv:1911.02150", "arxiv:2101.00027", "arxiv:2005.14165", "arxiv:2104.09864", "arxiv:2306.01116", "license:apache-2.0", "autotrain_compatible", "region:us" ]
text-generation
2023-07-18T18:11:15Z
--- datasets: - tiiuae/falcon-refinedweb language: - en inference: false license: apache-2.0 --- # 🚀 Falcon-7B **Falcon-7B is a 7B-parameter causal decoder-only model built by [TII](https://www.tii.ae) and trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. It is made available under the Apache 2.0 license.** *Paper coming soon* 😊. 🤗 To get started with Falcon (inference, finetuning, quantization, etc.), we recommend reading [this great blogpost from HF](https://huggingface.co/blog/falcon)! ## Why use Falcon-7B? * **It outperforms comparable open-source models** (e.g., [MPT-7B](https://huggingface.co/mosaicml/mpt-7b), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1) etc.), thanks to being trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). * **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)). * **It is made available under a permissive Apache 2.0 license allowing for commercial use**, without any royalties or restrictions. ⚠️ **This is a raw, pretrained model, which should be further finetuned for most usecases.** If you are looking for a version better suited to taking generic instructions in a chat format, we recommend taking a look at [Falcon-7B-Instruct](https://huggingface.co/tiiuae/falcon-7b-instruct). 🔥 **Looking for an even more powerful model?** [Falcon-40B](https://huggingface.co/tiiuae/falcon-40b) is Falcon-7B's big brother! ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` 💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!** For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blogpost](https://huggingface.co/blog/falcon). You will need **at least 16GB of memory** to swiftly run inference with Falcon-7B. # Model Card for Falcon-7B ## Model Details ### Model Description - **Developed by:** [https://www.tii.ae](https://www.tii.ae); - **Model type:** Causal decoder-only; - **Language(s) (NLP):** English and French; - **License:** Apache 2.0. ### Model Source - **Paper:** *coming soon*. ## Uses ### Direct Use Research on large language models; as a foundation for further specialization and finetuning for specific usecases (e.g., summarization, text generation, chatbot, etc.)
### Out-of-Scope Use Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful. ## Bias, Risks, and Limitations Falcon-7B is trained on English and French data only, and will not generalize appropriately to other languages. Furthermore, as it is trained on large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online. ### Recommendations We recommend users of Falcon-7B to consider finetuning it for the specific set of tasks of interest, and for guardrails and appropriate precautions to be taken for any production use. ## How to Get Started with the Model ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Training Details ### Training Data Falcon-7B was trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), a high-quality filtered and deduplicated web dataset which we enhanced with curated corpora. Significant components from our curated corpora were inspired by The Pile ([Gao et al., 2020](https://arxiv.org/abs/2101.00027)). | **Data source** | **Fraction** | **Tokens** | **Sources** | |--------------------|--------------|------------|-----------------------------------| | [RefinedWeb-English](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) | 79% | 1,185B | massive web crawl | | Books | 7% | 110B | | | Conversations | 6% | 85B | Reddit, StackOverflow, HackerNews | | Code | 3% | 45B | | | RefinedWeb-French | 3% | 45B | massive web crawl | | Technical | 2% | 30B | arXiv, PubMed, USPTO, etc. | The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer. ### Training Procedure Falcon-7B was trained on 384 A100 40GB GPUs, using a 2D parallelism strategy (PP=2, DP=192) combined with ZeRO. #### Training Hyperparameters | **Hyperparameter** | **Value** | **Comment** | |--------------------|------------|-------------------------------------------| | Precision | `bfloat16` | | | Optimizer | AdamW | | | Learning rate | 6e-4 | 4B tokens warm-up, cosine decay to 1.2e-5 | | Weight decay | 1e-1 | | | Z-loss | 1e-4 | | | Batch size | 2304 | 30B tokens ramp-up | #### Speeds, Sizes, Times Training happened in early March 2023 and took about two weeks. ## Evaluation *Paper coming soon*. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results. ## Technical Specifications ### Model Architecture and Objective Falcon-7B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token).
The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences: * **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864)); * **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)); * **Decoder-block:** parallel attention/MLP with a single layer norm. | **Hyperparameter** | **Value** | **Comment** | |--------------------|-----------|----------------------------------------| | Layers | 32 | | | `d_model` | 4544 | Increased to compensate for multiquery | | `head_dim` | 64 | Reduced to optimise for FlashAttention | | Vocabulary | 65024 | | | Sequence length | 2048 | | ### Compute Infrastructure #### Hardware Falcon-7B was trained on AWS SageMaker, on 384 A100 40GB GPUs in P4d instances. #### Software Falcon-7B was trained using a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.). ## Citation *Paper coming soon* 😊. In the meantime, you can use the following information to cite: ``` @article{falcon40b, title={{Falcon-40B}: an open large language model with state-of-the-art performance}, author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme}, year={2023} } ``` To learn more about the pretraining dataset, see the 📓 [RefinedWeb paper](https://arxiv.org/abs/2306.01116). ``` @article{refinedweb, title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only}, author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay}, journal={arXiv preprint arXiv:2306.01116}, eprint={2306.01116}, eprinttype = {arXiv}, url={https://arxiv.org/abs/2306.01116}, year={2023} } ``` ## License Falcon-7B is made available under the Apache 2.0 license. ## Contact falconllm@tii.ae
anzeliu/my_awesome_billsum_model
anzeliu
2023-07-18T18:13:14Z
106
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:billsum", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2023-07-18T17:28:15Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - billsum metrics: - rouge model-index: - name: my_awesome_billsum_model results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: billsum type: billsum config: default split: ca_test args: default metrics: - name: Rouge1 type: rouge value: 0.1459 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_billsum_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the billsum dataset. It achieves the following results on the evaluation set: - Loss: 2.5028 - Rouge1: 0.1459 - Rouge2: 0.0561 - Rougel: 0.1211 - Rougelsum: 0.1212 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 55 | 2.8222 | 0.1315 | 0.0408 | 0.1092 | 0.1092 | 19.0 | | No log | 2.0 | 110 | 2.5901 | 0.1393 | 0.0508 | 0.1161 | 0.1161 | 19.0 | | No log | 3.0 | 165 | 2.5214 | 0.1489 | 0.0576 | 0.1237 | 0.1237 | 19.0 | | No log | 4.0 | 220 | 2.5028 | 0.1459 | 0.0561 | 0.1211 | 0.1212 | 19.0 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
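A short inference sketch (not part of the auto-generated card): the `summarize:` prefix follows the usual T5 convention and the input text is a placeholder.

```python
# Illustrative usage; the input text is a placeholder for a real bill.
from transformers import pipeline

summarizer = pipeline("summarization", model="anzeliu/my_awesome_billsum_model")
bill_text = "summarize: The people of the State of California do enact as follows: ..."
print(summarizer(bill_text)[0]["summary_text"])
```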
digiplay/Colorful_v3.1
digiplay
2023-07-18T18:09:01Z
268
2
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-07-18T00:14:40Z
--- license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- Model info : https://civitai.com/models/7279?modelVersionId=90599 Original Author's DEMO image : ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7bd73518-9fc0-4388-8818-8353f55922bd/width=720/9.jpeg)
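A minimal Diffusers sketch (the card itself only links the Civitai page; prompt, dtype, and device below are arbitrary choices):

```python
# Illustrative usage; prompt, dtype, and device are arbitrary choices.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "digiplay/Colorful_v3.1", torch_dtype=torch.float16
).to("cuda")

image = pipe("a colorful landscape, highly detailed").images[0]
image.save("colorful.png")
```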
emilylearning/specification-induced-spurious-correlations
emilylearning
2023-07-18T18:01:35Z
0
0
null
[ "arxiv:2210.00131", "license:cc", "region:us" ]
null
2022-12-07T17:41:47Z
--- license: cc --- This is a model card to link the paper ["Underspecification in Language Modeling Tasks: A Causality-Informed Study of Gendered Pronoun Resolution "](https://arxiv.org/abs/2210.00131) to HF Spaces demos: - [Demo of identifying 'specification-induced' spurious correlations](https://huggingface.co/spaces/emilylearning/spurious_correlation_evaluation) - [Demo of exploiting spurious correlations for detection of task specification](https://huggingface.co/spaces/emilylearning/llm_uncertainty)
jordyvl/39-tiny_tobacco3482_kd_CEKD_t2.5_a0.5
jordyvl
2023-07-18T17:57:18Z
165
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-07-18T17:21:31Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 39-tiny_tobacco3482_kd_CEKD_t2.5_a0.5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 39-tiny_tobacco3482_kd_CEKD_t2.5_a0.5 This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5285 - Accuracy: 0.81 - Brier Loss: 0.3907 - Nll: 0.9159 - F1 Micro: 0.81 - F1 Macro: 0.7907 - Ece: 0.3421 - Aurc: 0.0542 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Brier Loss | Nll | F1 Micro | F1 Macro | Ece | Aurc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:|:------:|:--------:|:--------:|:------:|:------:| | No log | 1.0 | 7 | 1.8936 | 0.11 | 1.0097 | 8.5078 | 0.11 | 0.0902 | 0.3251 | 0.8953 | | No log | 2.0 | 14 | 1.2597 | 0.16 | 0.8753 | 5.5353 | 0.16 | 0.1308 | 0.2539 | 0.7902 | | No log | 3.0 | 21 | 1.0230 | 0.355 | 0.7833 | 5.1396 | 0.3550 | 0.2872 | 0.2810 | 0.4521 | | No log | 4.0 | 28 | 0.8743 | 0.545 | 0.6497 | 3.0412 | 0.545 | 0.4328 | 0.3224 | 0.2775 | | No log | 5.0 | 35 | 0.8020 | 0.625 | 0.5958 | 2.6099 | 0.625 | 0.5465 | 0.3186 | 0.2136 | | No log | 6.0 | 42 | 0.7221 | 0.675 | 0.5300 | 2.3085 | 0.675 | 0.5632 | 0.3257 | 0.1562 | | No log | 7.0 | 49 | 0.6964 | 0.68 | 0.4843 | 1.9033 | 0.68 | 0.5761 | 0.3039 | 0.1453 | | No log | 8.0 | 56 | 0.6729 | 0.72 | 0.4598 | 1.8200 | 0.72 | 0.6195 | 0.3089 | 0.1170 | | No log | 9.0 | 63 | 0.6470 | 0.77 | 0.4318 | 1.5607 | 0.7700 | 0.7058 | 0.3518 | 0.0897 | | No log | 10.0 | 70 | 0.5889 | 0.795 | 0.4019 | 1.1238 | 0.795 | 0.7546 | 0.3324 | 0.0675 | | No log | 11.0 | 77 | 0.5829 | 0.795 | 0.4013 | 1.0267 | 0.795 | 0.7667 | 0.3142 | 0.0728 | | No log | 12.0 | 84 | 0.5763 | 0.785 | 0.3923 | 1.2697 | 0.785 | 0.7655 | 0.3286 | 0.0751 | | No log | 13.0 | 91 | 0.5854 | 0.765 | 0.3934 | 1.4915 | 0.765 | 0.7291 | 0.2936 | 0.0806 | | No log | 14.0 | 98 | 0.5779 | 0.795 | 0.3983 | 1.2207 | 0.795 | 0.7409 | 0.3141 | 0.0681 | | No log | 15.0 | 105 | 0.5564 | 0.795 | 0.3752 | 1.1974 | 0.795 | 0.7687 | 0.3201 | 0.0626 | | No log | 16.0 | 112 | 0.5599 | 0.815 | 0.3945 | 1.0987 | 0.815 | 0.7827 | 0.3233 | 0.0618 | | No log | 17.0 | 119 | 0.5748 | 0.77 | 0.4001 | 1.2395 | 0.7700 | 0.7497 | 0.3136 | 0.0866 | | No log | 18.0 | 126 | 0.5611 | 0.79 | 0.4028 | 1.3279 | 0.79 | 0.7738 | 0.3127 | 0.0680 | | No log | 19.0 | 133 | 0.5514 | 0.805 | 0.4063 | 0.8598 | 0.805 | 0.7873 | 0.3656 | 0.0575 | | No log | 20.0 | 140 | 0.5566 | 0.81 | 0.4028 | 0.9944 | 0.81 | 0.7943 | 0.3449 | 0.0676 | | No log | 21.0 | 147 | 0.5489 | 0.81 | 0.3879 | 1.1351 | 0.81 | 0.7966 | 0.3432 | 0.0682 | | No log | 22.0 | 154 | 0.5586 | 0.82 | 0.4091 | 1.1107 | 0.82 | 0.7894 | 0.3526 | 0.0580 | | No log | 23.0 | 161 | 0.5593 | 0.795 | 0.4131 | 1.1693 | 0.795 | 
0.7765 | 0.3483 | 0.0641 | | No log | 24.0 | 168 | 0.5493 | 0.79 | 0.3962 | 1.2363 | 0.79 | 0.7740 | 0.3494 | 0.0646 | | No log | 25.0 | 175 | 0.5489 | 0.8 | 0.3930 | 1.0310 | 0.8000 | 0.7638 | 0.3342 | 0.0614 | | No log | 26.0 | 182 | 0.5492 | 0.79 | 0.3944 | 1.3201 | 0.79 | 0.7670 | 0.3096 | 0.0667 | | No log | 27.0 | 189 | 0.5441 | 0.805 | 0.4002 | 1.1304 | 0.805 | 0.7886 | 0.3528 | 0.0600 | | No log | 28.0 | 196 | 0.5397 | 0.815 | 0.3960 | 1.1210 | 0.815 | 0.7902 | 0.3630 | 0.0544 | | No log | 29.0 | 203 | 0.5418 | 0.785 | 0.3977 | 0.9580 | 0.785 | 0.7575 | 0.3536 | 0.0646 | | No log | 30.0 | 210 | 0.5374 | 0.815 | 0.3931 | 1.0186 | 0.815 | 0.7855 | 0.3422 | 0.0604 | | No log | 31.0 | 217 | 0.5405 | 0.815 | 0.3983 | 0.8948 | 0.815 | 0.7980 | 0.3671 | 0.0531 | | No log | 32.0 | 224 | 0.5394 | 0.805 | 0.3998 | 1.0680 | 0.805 | 0.7841 | 0.3695 | 0.0568 | | No log | 33.0 | 231 | 0.5296 | 0.81 | 0.3868 | 1.1222 | 0.81 | 0.7891 | 0.3530 | 0.0545 | | No log | 34.0 | 238 | 0.5338 | 0.81 | 0.3952 | 1.1333 | 0.81 | 0.7825 | 0.3453 | 0.0559 | | No log | 35.0 | 245 | 0.5339 | 0.805 | 0.3941 | 0.8600 | 0.805 | 0.7905 | 0.3552 | 0.0554 | | No log | 36.0 | 252 | 0.5332 | 0.81 | 0.3918 | 0.9018 | 0.81 | 0.7996 | 0.3669 | 0.0527 | | No log | 37.0 | 259 | 0.5336 | 0.79 | 0.3907 | 0.7768 | 0.79 | 0.7612 | 0.3374 | 0.0611 | | No log | 38.0 | 266 | 0.5327 | 0.805 | 0.3906 | 0.9987 | 0.805 | 0.7750 | 0.3430 | 0.0564 | | No log | 39.0 | 273 | 0.5342 | 0.805 | 0.3898 | 1.1024 | 0.805 | 0.7837 | 0.3295 | 0.0563 | | No log | 40.0 | 280 | 0.5310 | 0.81 | 0.3906 | 0.8426 | 0.81 | 0.7820 | 0.3513 | 0.0556 | | No log | 41.0 | 287 | 0.5327 | 0.81 | 0.3950 | 1.0952 | 0.81 | 0.7927 | 0.3418 | 0.0570 | | No log | 42.0 | 294 | 0.5305 | 0.82 | 0.3961 | 0.7830 | 0.82 | 0.8011 | 0.3501 | 0.0545 | | No log | 43.0 | 301 | 0.5308 | 0.81 | 0.3926 | 0.9752 | 0.81 | 0.7907 | 0.3534 | 0.0573 | | No log | 44.0 | 308 | 0.5287 | 0.81 | 0.3898 | 0.9838 | 0.81 | 0.7904 | 0.3454 | 0.0570 | | No log | 45.0 | 315 | 0.5270 | 0.815 | 0.3890 | 0.8682 | 0.815 | 0.8004 | 0.3499 | 0.0543 | | No log | 46.0 | 322 | 0.5272 | 0.81 | 0.3884 | 0.9784 | 0.81 | 0.7827 | 0.3415 | 0.0541 | | No log | 47.0 | 329 | 0.5306 | 0.805 | 0.3900 | 1.1153 | 0.805 | 0.7800 | 0.3388 | 0.0571 | | No log | 48.0 | 336 | 0.5288 | 0.82 | 0.3915 | 0.9916 | 0.82 | 0.7912 | 0.3519 | 0.0527 | | No log | 49.0 | 343 | 0.5274 | 0.81 | 0.3886 | 0.8415 | 0.81 | 0.7855 | 0.3524 | 0.0550 | | No log | 50.0 | 350 | 0.5264 | 0.81 | 0.3868 | 0.9713 | 0.81 | 0.7907 | 0.3408 | 0.0559 | | No log | 51.0 | 357 | 0.5295 | 0.815 | 0.3916 | 1.0340 | 0.815 | 0.7933 | 0.3683 | 0.0536 | | No log | 52.0 | 364 | 0.5294 | 0.81 | 0.3920 | 0.9178 | 0.81 | 0.7854 | 0.3499 | 0.0563 | | No log | 53.0 | 371 | 0.5283 | 0.81 | 0.3912 | 0.8517 | 0.81 | 0.7907 | 0.3648 | 0.0540 | | No log | 54.0 | 378 | 0.5301 | 0.815 | 0.3927 | 0.9279 | 0.815 | 0.7933 | 0.3579 | 0.0558 | | No log | 55.0 | 385 | 0.5275 | 0.805 | 0.3888 | 0.9225 | 0.805 | 0.7800 | 0.3406 | 0.0553 | | No log | 56.0 | 392 | 0.5284 | 0.815 | 0.3903 | 0.9064 | 0.815 | 0.7933 | 0.3463 | 0.0551 | | No log | 57.0 | 399 | 0.5261 | 0.81 | 0.3872 | 0.9072 | 0.81 | 0.7907 | 0.3527 | 0.0551 | | No log | 58.0 | 406 | 0.5278 | 0.815 | 0.3900 | 0.8469 | 0.815 | 0.7966 | 0.3622 | 0.0526 | | No log | 59.0 | 413 | 0.5280 | 0.81 | 0.3900 | 0.9220 | 0.81 | 0.7907 | 0.3467 | 0.0551 | | No log | 60.0 | 420 | 0.5296 | 0.81 | 0.3932 | 0.9166 | 0.81 | 0.7907 | 0.3620 | 0.0555 | | No log | 61.0 | 427 | 0.5288 | 0.815 | 0.3925 | 0.8647 | 0.815 | 0.7966 | 0.3491 | 
0.0529 | | No log | 62.0 | 434 | 0.5288 | 0.81 | 0.3909 | 0.9205 | 0.81 | 0.7907 | 0.3482 | 0.0552 | | No log | 63.0 | 441 | 0.5274 | 0.81 | 0.3889 | 0.9143 | 0.81 | 0.7907 | 0.3457 | 0.0541 | | No log | 64.0 | 448 | 0.5283 | 0.81 | 0.3905 | 0.9141 | 0.81 | 0.7907 | 0.3578 | 0.0549 | | No log | 65.0 | 455 | 0.5283 | 0.81 | 0.3907 | 0.9177 | 0.81 | 0.7907 | 0.3536 | 0.0548 | | No log | 66.0 | 462 | 0.5289 | 0.81 | 0.3912 | 0.9179 | 0.81 | 0.7907 | 0.3502 | 0.0550 | | No log | 67.0 | 469 | 0.5282 | 0.81 | 0.3903 | 0.9134 | 0.81 | 0.7907 | 0.3511 | 0.0547 | | No log | 68.0 | 476 | 0.5279 | 0.81 | 0.3901 | 0.9105 | 0.81 | 0.7907 | 0.3473 | 0.0541 | | No log | 69.0 | 483 | 0.5283 | 0.81 | 0.3907 | 0.9128 | 0.81 | 0.7907 | 0.3558 | 0.0539 | | No log | 70.0 | 490 | 0.5283 | 0.81 | 0.3904 | 0.9191 | 0.81 | 0.7907 | 0.3414 | 0.0543 | | No log | 71.0 | 497 | 0.5284 | 0.81 | 0.3905 | 0.9183 | 0.81 | 0.7907 | 0.3478 | 0.0546 | | 0.3962 | 72.0 | 504 | 0.5285 | 0.81 | 0.3909 | 0.9151 | 0.81 | 0.7907 | 0.3415 | 0.0545 | | 0.3962 | 73.0 | 511 | 0.5283 | 0.81 | 0.3906 | 0.9144 | 0.81 | 0.7907 | 0.3499 | 0.0542 | | 0.3962 | 74.0 | 518 | 0.5282 | 0.81 | 0.3903 | 0.9146 | 0.81 | 0.7907 | 0.3411 | 0.0541 | | 0.3962 | 75.0 | 525 | 0.5284 | 0.81 | 0.3909 | 0.9159 | 0.81 | 0.7907 | 0.3571 | 0.0542 | | 0.3962 | 76.0 | 532 | 0.5284 | 0.81 | 0.3906 | 0.9155 | 0.81 | 0.7907 | 0.3361 | 0.0543 | | 0.3962 | 77.0 | 539 | 0.5283 | 0.81 | 0.3906 | 0.9159 | 0.81 | 0.7907 | 0.3480 | 0.0541 | | 0.3962 | 78.0 | 546 | 0.5282 | 0.81 | 0.3905 | 0.9120 | 0.81 | 0.7907 | 0.3413 | 0.0540 | | 0.3962 | 79.0 | 553 | 0.5283 | 0.81 | 0.3905 | 0.9162 | 0.81 | 0.7907 | 0.3412 | 0.0542 | | 0.3962 | 80.0 | 560 | 0.5285 | 0.81 | 0.3907 | 0.9189 | 0.81 | 0.7907 | 0.3361 | 0.0543 | | 0.3962 | 81.0 | 567 | 0.5285 | 0.81 | 0.3907 | 0.9162 | 0.81 | 0.7907 | 0.3470 | 0.0541 | | 0.3962 | 82.0 | 574 | 0.5283 | 0.81 | 0.3904 | 0.9144 | 0.81 | 0.7907 | 0.3411 | 0.0540 | | 0.3962 | 83.0 | 581 | 0.5284 | 0.81 | 0.3906 | 0.9153 | 0.81 | 0.7907 | 0.3361 | 0.0542 | | 0.3962 | 84.0 | 588 | 0.5284 | 0.81 | 0.3907 | 0.9151 | 0.81 | 0.7907 | 0.3419 | 0.0542 | | 0.3962 | 85.0 | 595 | 0.5283 | 0.81 | 0.3905 | 0.9143 | 0.81 | 0.7907 | 0.3362 | 0.0541 | | 0.3962 | 86.0 | 602 | 0.5285 | 0.81 | 0.3908 | 0.9152 | 0.81 | 0.7907 | 0.3418 | 0.0540 | | 0.3962 | 87.0 | 609 | 0.5284 | 0.81 | 0.3907 | 0.9156 | 0.81 | 0.7907 | 0.3365 | 0.0543 | | 0.3962 | 88.0 | 616 | 0.5285 | 0.81 | 0.3907 | 0.9155 | 0.81 | 0.7907 | 0.3419 | 0.0541 | | 0.3962 | 89.0 | 623 | 0.5284 | 0.81 | 0.3906 | 0.9154 | 0.81 | 0.7907 | 0.3360 | 0.0541 | | 0.3962 | 90.0 | 630 | 0.5285 | 0.81 | 0.3907 | 0.9168 | 0.81 | 0.7907 | 0.3418 | 0.0543 | | 0.3962 | 91.0 | 637 | 0.5285 | 0.81 | 0.3907 | 0.9160 | 0.81 | 0.7907 | 0.3420 | 0.0543 | | 0.3962 | 92.0 | 644 | 0.5285 | 0.81 | 0.3908 | 0.9164 | 0.81 | 0.7907 | 0.3421 | 0.0541 | | 0.3962 | 93.0 | 651 | 0.5285 | 0.81 | 0.3907 | 0.9164 | 0.81 | 0.7907 | 0.3473 | 0.0542 | | 0.3962 | 94.0 | 658 | 0.5285 | 0.81 | 0.3907 | 0.9164 | 0.81 | 0.7907 | 0.3420 | 0.0542 | | 0.3962 | 95.0 | 665 | 0.5285 | 0.81 | 0.3907 | 0.9161 | 0.81 | 0.7907 | 0.3473 | 0.0541 | | 0.3962 | 96.0 | 672 | 0.5285 | 0.81 | 0.3907 | 0.9157 | 0.81 | 0.7907 | 0.3421 | 0.0542 | | 0.3962 | 97.0 | 679 | 0.5285 | 0.81 | 0.3907 | 0.9154 | 0.81 | 0.7907 | 0.3363 | 0.0542 | | 0.3962 | 98.0 | 686 | 0.5285 | 0.81 | 0.3907 | 0.9164 | 0.81 | 0.7907 | 0.3420 | 0.0542 | | 0.3962 | 99.0 | 693 | 0.5285 | 0.81 | 0.3907 | 0.9162 | 0.81 | 0.7907 | 0.3420 | 0.0542 | | 0.3962 | 100.0 | 700 | 0.5285 | 0.81 | 0.3907 | 
0.9159 | 0.81 | 0.7907 | 0.3421 | 0.0542 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1.post200 - Datasets 2.9.0 - Tokenizers 0.13.2
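The card above reports only training metrics; a generic inference sketch follows (the image path is a placeholder for a Tobacco3482-style document page):

```python
# Illustrative usage; the image path is a placeholder.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="jordyvl/39-tiny_tobacco3482_kd_CEKD_t2.5_a0.5",
)
print(classifier("document_page.png"))
```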
Benjiccee/xlm-roberta-base-finetuned-panx-en
Benjiccee
2023-07-18T17:56:26Z
105
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-07-18T17:39:45Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.en split: validation args: PAN-X.en metrics: - name: F1 type: f1 value: 0.68561872909699 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.4175 - F1: 0.6856 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.1397 | 1.0 | 50 | 0.5561 | 0.5147 | | 0.5148 | 2.0 | 100 | 0.4851 | 0.6312 | | 0.3772 | 3.0 | 150 | 0.4175 | 0.6856 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cpu - Datasets 2.12.0 - Tokenizers 0.13.3
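## Usage

The fine-tuned checkpoint can be tried out with the standard `token-classification` pipeline. The snippet below is a minimal sketch: the example sentence and the `aggregation_strategy` setting are illustrative choices, not part of the training setup.

```python
from transformers import pipeline

# Load the fine-tuned checkpoint as a token-classification (NER) pipeline.
ner = pipeline(
    "token-classification",
    model="Benjiccee/xlm-roberta-base-finetuned-panx-en",
    aggregation_strategy="simple",  # merge word-piece tokens into whole entity spans
)

print(ner("Jeff Dean works at Google in Mountain View."))
```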
yodi/falcon-7b-chat-oasst1
yodi
2023-07-18T17:45:29Z
1
0
peft
[ "peft", "region:us" ]
null
2023-07-18T17:45:22Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0.dev0
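## Usage (sketch)

The adapter can be loaded on top of its base model with `PeftModel.from_pretrained`. The card does not name the base checkpoint, so `tiiuae/falcon-7b` below is an assumption inferred from the repo name, and the `<human>/<bot>` prompt format is likewise only an illustrative guess.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "tiiuae/falcon-7b"            # assumed base model (not stated in this card)
adapter_id = "yodi/falcon-7b-chat-oasst1"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    load_in_8bit=True,                  # matches the 8-bit quantization config above
    device_map="auto",
    trust_remote_code=True,
)
model = PeftModel.from_pretrained(base, adapter_id)

prompt = "<human>: Give me three tips for writing clear documentation.\n<bot>:"  # assumed chat format
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```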
nic70/distilhubert-finetuned-gtzan
nic70
2023-07-18T17:43:45Z
161
0
transformers
[ "transformers", "pytorch", "tensorboard", "hubert", "audio-classification", "generated_from_trainer", "dataset:marsyas/gtzan", "license:apache-2.0", "endpoints_compatible", "region:us" ]
audio-classification
2023-07-18T15:53:11Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - marsyas/gtzan metrics: - accuracy model-index: - name: distilhubert-finetuned-gtzan-7.5E-5rate results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilhubert-finetuned-gtzan-7.5E-5rate This model is a fine-tuned version of [ntu-spml/distilhubert](https://huggingface.co/ntu-spml/distilhubert) on the GTZAN dataset. It achieves the following results on the evaluation set: - Loss: 0.7037 - Accuracy: 0.83 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.500000000000001e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.6131 | 1.0 | 113 | 1.7407 | 0.43 | | 1.0878 | 2.0 | 226 | 1.1306 | 0.68 | | 0.7836 | 3.0 | 339 | 0.8427 | 0.77 | | 0.5646 | 4.0 | 452 | 0.6842 | 0.8 | | 0.2202 | 5.0 | 565 | 0.5216 | 0.84 | | 0.1047 | 6.0 | 678 | 0.5698 | 0.82 | | 0.0824 | 7.0 | 791 | 0.6976 | 0.83 | | 0.1118 | 8.0 | 904 | 0.6875 | 0.81 | | 0.1161 | 9.0 | 1017 | 0.6779 | 0.84 | | 0.0855 | 10.0 | 1130 | 0.7037 | 0.83 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
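## Usage

The checkpoint can be exercised with the `audio-classification` pipeline; the sketch below assumes an arbitrary local clip (`sample.wav` is a placeholder path, not a file shipped with the model).

```python
from transformers import pipeline

# Load the fine-tuned DistilHuBERT checkpoint as an audio-classification pipeline.
classifier = pipeline(
    "audio-classification",
    model="nic70/distilhubert-finetuned-gtzan",
)

# "sample.wav" is a placeholder; pass any audio clip (the pipeline resamples as needed).
predictions = classifier("sample.wav")
print(predictions)  # predicted genre labels with scores
```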
jamesdborin/ct2-int8-falcon-7b-instruct
jamesdborin
2023-07-18T17:41:01Z
12
0
transformers
[ "transformers", "RefinedWebModel", "text-generation", "custom_code", "en", "dataset:tiiuae/falcon-refinedweb", "arxiv:2205.14135", "arxiv:1911.02150", "arxiv:2005.14165", "arxiv:2104.09864", "arxiv:2306.01116", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2023-07-18T17:30:00Z
--- datasets: - tiiuae/falcon-refinedweb language: - en inference: true widget: - text: "Hey Falcon! Any recommendations for my holidays in Abu Dhabi?" example_title: "Abu Dhabi Trip" - text: "What's the Everett interpretation of quantum mechanics?" example_title: "Q/A: Quantum & Answers" - text: "Give me a list of the top 10 dive sites you would recommend around the world." example_title: "Diving Top 10" - text: "Can you tell me more about deep-water soloing?" example_title: "Extreme sports" - text: "Can you write a short tweet about the Apache 2.0 release of our latest AI model, Falcon LLM?" example_title: "Twitter Helper" - text: "What are the responsibilities of a Chief Llama Officer?" example_title: "Trendy Jobs" license: apache-2.0 --- # ✨ Falcon-7B-Instruct **Falcon-7B-Instruct is a 7B-parameter causal decoder-only model built by [TII](https://www.tii.ae) based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and finetuned on a mixture of chat/instruct datasets. It is made available under the Apache 2.0 license.** *Paper coming soon 😊.* 🤗 To get started with Falcon (inference, finetuning, quantization, etc.), we recommend reading [this great blogpost from HF](https://huggingface.co/blog/falcon)! ## Why use Falcon-7B-Instruct? * **You are looking for a ready-to-use chat/instruct model based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b).** * **Falcon-7B is a strong base model, outperforming comparable open-source models** (e.g., [MPT-7B](https://huggingface.co/mosaicml/mpt-7b), [StableLM](https://github.com/Stability-AI/StableLM), [RedPajama](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-7B-v0.1) etc.), thanks to being trained on 1,500B tokens of [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) enhanced with curated corpora. See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). * **It features an architecture optimized for inference**, with FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)) and multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)). 💬 **This is an instruct model, which may not be ideal for further finetuning.** If you are interested in building your own instruct/chat model, we recommend starting from [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b). 🔥 **Looking for an even more powerful model?** [Falcon-40B-Instruct](https://huggingface.co/tiiuae/falcon-40b-instruct) is Falcon-7B-Instruct's big brother! ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b-instruct" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` 💥 **Falcon LLMs require PyTorch 2.0 for use with `transformers`!** For fast inference with Falcon, check out [Text Generation Inference](https://github.com/huggingface/text-generation-inference)! Read more in this [blogpost](https://huggingface.co/blog/falcon).
You will need **at least 16GB of memory** to swiftly run inference with Falcon-7B-Instruct. # Model Card for Falcon-7B-Instruct ## Model Details ### Model Description - **Developed by:** [https://www.tii.ae](https://www.tii.ae); - **Model type:** Causal decoder-only; - **Language(s) (NLP):** English and French; - **License:** Apache 2.0; - **Finetuned from model:** [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b). ### Model Source - **Paper:** *coming soon*. ## Uses ### Direct Use Falcon-7B-Instruct has been finetuned on a mixture of instruct and chat datasets. ### Out-of-Scope Use Production use without adequate assessment of risks and mitigation; any use cases which may be considered irresponsible or harmful. ## Bias, Risks, and Limitations Falcon-7B-Instruct is mostly trained on English data, and will not generalize appropriately to other languages. Furthermore, as it is trained on a large-scale corpora representative of the web, it will carry the stereotypes and biases commonly encountered online. ### Recommendations We recommend users of Falcon-7B-Instruct to develop guardrails and to take appropriate precautions for any production use. ## How to Get Started with the Model ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model = "tiiuae/falcon-7b-instruct" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", ) sequences = pipeline( "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", max_length=200, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Training Details ### Training Data Falcon-7B-Instruct was finetuned on a 250M tokens mixture of instruct/chat datasets. | **Data source** | **Fraction** | **Tokens** | **Description** | |--------------------|--------------|------------|-----------------------------------| | [Bai ze](https://github.com/project-baize/baize-chatbot) | 65% | 164M | chat | | [GPT4All](https://github.com/nomic-ai/gpt4all) | 25% | 62M | instruct | | [GPTeacher](https://github.com/teknium1/GPTeacher) | 5% | 11M | instruct | | [RefinedWeb-English](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) | 5% | 13M | massive web crawl | The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b) tokenizer. ## Evaluation *Paper coming soon.* See the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for early results. Note that this model variant is not optimized for NLP benchmarks. ## Technical Specifications For more information about pretraining, see [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b). ### Model Architecture and Objective Falcon-7B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token). 
The architecture is broadly adapted from the GPT-3 paper ([Brown et al., 2020](https://arxiv.org/abs/2005.14165)), with the following differences: * **Positional embeddings:** rotary ([Su et al., 2021](https://arxiv.org/abs/2104.09864)); * **Attention:** multiquery ([Shazeer et al., 2019](https://arxiv.org/abs/1911.02150)) and FlashAttention ([Dao et al., 2022](https://arxiv.org/abs/2205.14135)); * **Decoder-block:** parallel attention/MLP with a single layer norm. | **Hyperparameter** | **Value** | **Comment** | |--------------------|-----------|----------------------------------------| | Layers | 32 | | | `d_model` | 4544 | Increased to compensate for multiquery | | `head_dim` | 64 | Reduced to optimise for FlashAttention | | Vocabulary | 65024 | | | Sequence length | 2048 | | ### Compute Infrastructure #### Hardware Falcon-7B-Instruct was trained on AWS SageMaker, on 32 A100 40GB GPUs in P4d instances. #### Software Falcon-7B-Instruct was trained on a custom distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels (FlashAttention, etc.). ## Citation *Paper coming soon* 😊. In the meantime, you can use the following information to cite: ``` @article{falcon40b, title={{Falcon-40B}: an open large language model with state-of-the-art performance}, author={Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme}, year={2023} } ``` To learn more about the pretraining dataset, see the 📓 [RefinedWeb paper](https://arxiv.org/abs/2306.01116). ``` @article{refinedweb, title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only}, author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay}, journal={arXiv preprint arXiv:2306.01116}, eprint={2306.01116}, eprinttype = {arXiv}, url={https://arxiv.org/abs/2306.01116}, year={2023} } ``` ## License Falcon-7B-Instruct is made available under the Apache 2.0 license. ## Contact [email protected]
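## Running the int8 CTranslate2 conversion

Since this repository hosts an int8 CTranslate2 export of Falcon-7B-Instruct rather than the original weights, a `ctranslate2.Generator` is the natural entry point. The sketch below assumes the repo contains a ready-to-use CTranslate2 model directory and that the original `tiiuae/falcon-7b-instruct` tokenizer applies; the prompt and sampling parameters are illustrative.

```python
import ctranslate2
import transformers
from huggingface_hub import snapshot_download

# Assumed to hold the CTranslate2 int8 export of Falcon-7B-Instruct.
model_dir = snapshot_download("jamesdborin/ct2-int8-falcon-7b-instruct")
generator = ctranslate2.Generator(model_dir, device="cuda", compute_type="int8")
tokenizer = transformers.AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")

prompt = "Write a short poem about the desert."
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prompt))

results = generator.generate_batch(
    [tokens],
    max_length=128,
    sampling_topk=10,
    sampling_temperature=0.8,
)
print(tokenizer.decode(results[0].sequences_ids[0], skip_special_tokens=True))
```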
eluzhnica/mpt-7b-8k-peft-compatible
eluzhnica
2023-07-18T17:34:52Z
196
3
transformers
[ "transformers", "pytorch", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "StreamingDatasets", "custom_code", "dataset:mc4", "dataset:c4", "dataset:togethercomputer/RedPajama-Data-1T", "dataset:bigcode/the-stack", "dataset:allenai/s2orc", "arxiv:2108.12409", "arxiv:2302.13971", "arxiv:2205.14135", "arxiv:2010.04245", "arxiv:1909.08053", "arxiv:2302.06675", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-07-18T15:15:13Z
--- license: apache-2.0 tags: - Composer - MosaicML - llm-foundry - StreamingDatasets datasets: - mc4 - c4 - togethercomputer/RedPajama-Data-1T - bigcode/the-stack - allenai/s2orc inference: false --- # MPT-7B-8k Same as MPT-7B-8K but with gradient checkpointing so you can LORA/QLORA. Not tested yet. Original announcement from MosaicML team below: MPT-7B-8k is a decoder-style transformer pretrained starting from MPT-7B, but updating the sequence length to 8k and training for an additional 500B tokens, resulting in a total of 1.5T tokens of text and code. This model was trained by [MosaicML](https://www.mosaicml.com). MPT-7B-8k is part of the family of Mosaic Pretrained Transformer (MPT) models, which use a modified transformer architecture optimized for efficient training and inference. These architectural changes include performance-optimized layer implementations and the elimination of context length limits by replacing positional embeddings with Attention with Linear Biases ([ALiBi](https://arxiv.org/abs/2108.12409)). Thanks to these modifications, MPT models can be trained with high throughput efficiency and stable convergence. MPT models can also be served efficiently with both standard HuggingFace pipelines and NVIDIA's [FasterTransformer](https://github.com/NVIDIA/FasterTransformer). This model uses the MosaicML LLM codebase, which can be found in the [llm-foundry repository](https://github.com/mosaicml/llm-foundry). It was trained by MosaicML’s NLP team on the [MosaicML platform](https://www.mosaicml.com/training) for LLM pretraining, finetuning, and inference. ### How is this model different? MPT-7B-8k is * **Licensed for the possibility of commercial use.** * **Trained on a large amount of data** (1.5T tokens like [XGen](https://huggingface.co/Salesforce/xgen-7b-8k-base) vs. 1T for [LLaMA](https://arxiv.org/abs/2302.13971), 1T for [MPT-7B](https://www.mosaicml.com/blog/mpt-7b), 300B for [Pythia](https://github.com/EleutherAI/pythia), 300B for [OpenLLaMA](https://github.com/openlm-research/open_llama), and 800B for [StableLM](https://github.com/Stability-AI/StableLM)). * **Prepared to handle long inputs** thanks to [ALiBi](https://arxiv.org/abs/2108.12409). With ALiBi, the model can extrapolate beyond the 8k training sequence length to up to 10k, and with a few million tokens it can be finetuned to extrapolate much further. * **Capable of fast training and inference** via [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) and [FasterTransformer](https://github.com/NVIDIA/FasterTransformer) * **Equipped with highly efficient open-source training code** via the [llm-foundry repository](https://github.com/mosaicml/llm-foundry) ### Models finetuned off MPT-7B-8k: The following models are finetuned on MPT-7B-8k: * [MPT-7B-8k-Instruct](https://huggingface.co/mosaicml/mpt-7b-8k-instruct): a model for long-form instruction following (especially summarization and question-answering). Built by finetuning MPT-7B-8k on several carefully curated datasets. * License: _CC-BY-SA-3.0_ * [MPT-7B-8k-Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat): a chatbot-like model for dialogue generation. Built by finetuning MPT-7B-8k on approximately 1.5B tokens of chat data. 
* License: _CC-By-NC-SA-4.0_ ## Model Date July 18, 2023 ## Model License Apache-2.0 ## Documentation * [Blog post: MPT-7B-8k](https://www.mosaicml.com/blog/long-context-mpt-7b-8k) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ## How to Use This model is best used with the MosaicML [llm-foundry repository](https://github.com/mosaicml/llm-foundry) for training and finetuning. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-8k', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` Although the model was trained with a sequence length of 2048, ALiBi enables users to increase the maximum sequence length during finetuning and/or inference. For example: ```python import transformers name = 'mosaicml/mpt-7b-8k' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 10000 # (input + output) tokens can now be up to 10000 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the MPT-7B-8k tokenizer which is identical to the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('mosaicml/mpt-7b-8k') ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). ```python from transformers import pipeline with torch.autocast('cuda', dtype=torch.bfloat16): inputs = tokenizer('Here is a recipe for vegan banana bread:\n', return_tensors="pt").to('cuda') outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) # or using the HF pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ## Model Description The architecture is a modification of a standard decoder-only transformer. 
The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## Training Data ### Streaming Datasets Data was formatted using the MosaicML [StreamingDataset](https://github.com/mosaicml/streaming) library to host our data in object storage and efficiently stream it to our compute cluster during training. StreamingDataset obviates the need to download the whole dataset before starting training, and allows instant resumption of training from any point in the dataset. ### Data Mix The model was trained for ___T tokens. First it was trained for 1T tokens (with batch size 1760 and sequence length 2048) on the following data mix: #### Data Mix for Original 1T Tokens Used to Train MPT-7B | Data Source | Number of Tokens in Source | Proportion | Effective Number of Tokens | Epochs | |-------------|----------------------------|------------|----------------------------|--------| | mC4 3.1.0 - English | 417.99 B | 0.33 | 330 B | 0.14 | | C4 - English - SemDedup 80% | 100.42 B | 0.299 | 299 B | 2.98 | | RedPajama - CommonCrawl | 878.45 B | 0.1 | 100 B | 0.11 | | The Stack - Selected Languages | 463.78 B | 0.1 | 100 B | 0.22 | | RedPajama - Wikipedia - En | 4.87 B | 0.04 | 40 B | 8.21 | | The Stack - Markdown | 107.07 B | 0.035 | 35 B | 0.33 | | S2ORC | 48.85 B | 0.033 | 33 B | 0.68 | | RedPajama - Books | 26.02 B | 0.03 | 30B | 1.15 | | RedPajama - arXiv | 28.10 B | 0.019 | 19 B | 0.68 | | RedPajama - StackExchange | 20.54 B | 0.014 | 14 B |0.68 | #### Data Mix for Additional 500B Tokens Used to Further Train MPT-7B-8k We took 80B tokens from document samples that were longer than 4096 tokens, and 120B tokens with varying document sample lengths that matched the "baseline" length distribution for a total of 200B tokens in a single dataset. We then trained MPT-7B for 500B tokens with a maximum sequence length of 8192, resulting in MPT-7B-8k. Since we trained for 500B tokens using 200B tokens, nearly every subset was trained on for exactly 2.5 epochs. 
| Sequence Length Distribution | Number of Tokens in Source (Billion) | Proportion | Effective Number of Tokens (Billion) | Epochs | |---|---|---|---|---| | mC4 3.1.0 - English (200+ words) - Baseline | 33.60 | 16.80% | 84.00 | 2.50 | | mC4 3.1.0 - English (200+ words) - ≥4096 tokens | 23.04 | 11.52% | 57.60 | 2.50 | | c4 - English - SemDedup 80% - Baseline | 30.12 | 15.06% | 75.30 | 2.50 | | c4 - English - SemDedup 80% - ≥4096 tokens | 0.92 | 0.46% | 2.30 | 2.50 | | RedPajama - CommonCrawl - Baseline | 8.52 | 4.26% | 21.30 | 2.50 | | RedPajama - CommonCrawl - ≥4096 tokens | 12.80 | 6.40% | 32.00 | 2.50 | | The Stack - Selected Languages - Baseline | 30.00 | 15.00% | 75.00 | 2.50 | | The Stack - Selected Languages - ≥4096 tokens | 10.00 | 5.00% | 25.00 | 2.50 | | RedPajama - Wikipedia - Baseline | 3.60 | 1.80% | 9.00 | 2.50 | | RedPajama - Wikipedia - ≥4096 tokens | 1.04 | 0.52% | 2.60 | 2.50 | | The Stack - Markdown - Baseline | 4.50 | 2.25% | 11.25 | 2.50 | | The Stack - Markdown - ≥4096 tokens | 8.00 | 4.00% | 20.00 | 2.50 | | Semantic Scholar ORC - Baseline | 3.30 | 1.65% | 8.25 | 2.50 | | Semantic Scholar ORC - ≥4096 tokens | 8.00 | 4.00% | 20.00 | 2.50 | | RedPajama - Books - Baseline | 3.00 | 1.50% | 7.50 | 2.50 | | RedPajama - Books - ≥4096 tokens | 8.00 | 4.00% | 20.00 | 2.50 | | RedPajama - arXiv - Baseline | 1.92 | 0.96% | 4.80 | 2.50 | | RedPajama - arXiv - ≥4096 tokens | 5.40 | 2.70% | 13.50 | 2.50 | | RedPajama - StackExchange - Baseline | 1.44 | 0.72% | 3.60 | 2.50 | | RedPajama - StackExchange - ≥4096 tokens | 1.52 | 1.40% | 7.00 | 4.60 | | N Training Tokens | 200 | 100.00% | | 2.5 epochs * 200B = 500B tokens | Samples for each batch were selected from one of the datasets with the probability specified above. The examples were shuffled within each dataset, and each example was constructed from as many sequences from that dataset as were necessary to fill the 2048 sequence length. The data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. This BPE tokenizer has a number of desirable characteristics, most of which are relevant for tokenizing code: (1) It was trained on a diverse mix of data that includes code (The Pile) (2) It applies consistent space delimitation, unlike the GPT2 tokenizer which tokenizes inconsistently depending on the presence of prefix spaces (3) It contains tokens for repeated space characters, which allows superior compression of text with large amounts of repeated space characters. The model vocabulary size of 50432 was set to be a multiple of 128 (as in [MEGATRON-LM](https://arxiv.org/abs/1909.08053)), model flop utilization (MFU) increased by up to four percentage points. ### Training Configuration This model was trained on 440 A100-40GBs for about 9.5 days using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the [LION](https://arxiv.org/abs/2302.06675) optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-8k is **not** intended for deployment without finetuning. It should not be used for human-facing interactions without further guardrails and user consent. MPT-7B-8k can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B-8k was trained on various public datasets. 
While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://www.mosaicml.com/get-started?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b-8k). ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs}, year = {2023}, url = {www.mosaicml.com/blog/mpt-7b}, note = {Accessed: 2023-03-28}, % change this date urldate = {2023-03-28} % change this date } ```
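## LoRA/QLoRA sketch

Since the point of this fork is gradient checkpointing for LoRA/QLoRA, a minimal PEFT setup is sketched below. It is untested (as the card itself notes): the `target_modules` choice is an assumption based on MPT's fused `Wqkv` attention projection, the LoRA hyperparameters are illustrative, and 4-bit loading requires `bitsandbytes`.

```python
import torch
import transformers
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

name = "eluzhnica/mpt-7b-8k-peft-compatible"

model = transformers.AutoModelForCausalLM.from_pretrained(
    name,
    load_in_4bit=True,            # QLoRA-style 4-bit base weights
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["Wqkv"],      # assumed module name for MPT's attention projection
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```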
shubhamagarwal92/q-Taxi-v3
shubhamagarwal92
2023-07-18T17:32:46Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T17:32:43Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="shubhamagarwal92/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
adarsha30735/alpaca-heart-status-dataset
adarsha30735
2023-07-18T17:31:27Z
0
0
peft
[ "peft", "region:us" ]
null
2023-07-18T17:31:26Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0.dev0
learn3r/sd-class-butterflies-32
learn3r
2023-07-18T17:29:07Z
30
0
diffusers
[ "diffusers", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
unconditional-image-generation
2023-07-18T10:51:34Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('learn3r/sd-class-butterflies-32') image = pipeline().images[0] image ```
shubhamagarwal92/q-FrozenLake-v1-4x4-noSlippery
shubhamagarwal92
2023-07-18T17:26:44Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-07-18T17:26:42Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="shubhamagarwal92/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
jordyvl/60-tiny_tobacco3482_hint_
jordyvl
2023-07-18T17:20:10Z
165
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-07-18T16:29:15Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 60-tiny_tobacco3482_hint_ results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 60-tiny_tobacco3482_hint_ This model is a fine-tuned version of [WinKawaks/vit-tiny-patch16-224](https://huggingface.co/WinKawaks/vit-tiny-patch16-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 63.5396 - Accuracy: 0.84 - Brier Loss: 0.3043 - Nll: 1.1495 - F1 Micro: 0.8400 - F1 Macro: 0.8244 - Ece: 0.1568 - Aurc: 0.0457 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Brier Loss | Nll | F1 Micro | F1 Macro | Ece | Aurc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:|:------:|:--------:|:--------:|:------:|:------:| | No log | 1.0 | 25 | 66.6267 | 0.26 | 0.8706 | 4.9000 | 0.26 | 0.1920 | 0.2904 | 0.7812 | | No log | 2.0 | 50 | 65.8408 | 0.54 | 0.5951 | 2.8543 | 0.54 | 0.4535 | 0.2542 | 0.2567 | | No log | 3.0 | 75 | 65.3708 | 0.675 | 0.4400 | 1.6094 | 0.675 | 0.6134 | 0.2395 | 0.1333 | | No log | 4.0 | 100 | 65.0889 | 0.76 | 0.3809 | 1.5505 | 0.76 | 0.7422 | 0.2333 | 0.1125 | | No log | 5.0 | 125 | 64.7800 | 0.8 | 0.3080 | 1.7523 | 0.8000 | 0.7663 | 0.1734 | 0.0708 | | No log | 6.0 | 150 | 64.6296 | 0.78 | 0.3286 | 1.7771 | 0.78 | 0.7427 | 0.1752 | 0.0642 | | No log | 7.0 | 175 | 64.3879 | 0.765 | 0.3584 | 1.7400 | 0.765 | 0.6986 | 0.1799 | 0.0937 | | No log | 8.0 | 200 | 64.4361 | 0.72 | 0.4640 | 1.4368 | 0.72 | 0.7385 | 0.2350 | 0.1314 | | No log | 9.0 | 225 | 64.2223 | 0.76 | 0.3846 | 1.6420 | 0.76 | 0.7417 | 0.2006 | 0.0915 | | No log | 10.0 | 250 | 64.2618 | 0.725 | 0.4268 | 1.6667 | 0.7250 | 0.7132 | 0.2131 | 0.1110 | | No log | 11.0 | 275 | 64.2839 | 0.7 | 0.4830 | 1.7975 | 0.7 | 0.6829 | 0.2213 | 0.1207 | | No log | 12.0 | 300 | 64.0218 | 0.785 | 0.3523 | 1.7098 | 0.785 | 0.7363 | 0.1742 | 0.0702 | | No log | 13.0 | 325 | 63.8071 | 0.78 | 0.3218 | 1.4587 | 0.78 | 0.7574 | 0.1640 | 0.0674 | | No log | 14.0 | 350 | 64.7387 | 0.645 | 0.5871 | 2.0188 | 0.645 | 0.6360 | 0.2996 | 0.1765 | | No log | 15.0 | 375 | 64.2173 | 0.765 | 0.3832 | 1.8093 | 0.765 | 0.6909 | 0.1978 | 0.0892 | | No log | 16.0 | 400 | 64.2233 | 0.765 | 0.3897 | 1.4456 | 0.765 | 0.7432 | 0.1983 | 0.0805 | | No log | 17.0 | 425 | 63.7977 | 0.825 | 0.2971 | 1.4248 | 0.825 | 0.8057 | 0.1583 | 0.0546 | | No log | 18.0 | 450 | 63.5818 | 0.82 | 0.2983 | 1.3079 | 0.82 | 0.7936 | 0.1532 | 0.0474 | | No log | 19.0 | 475 | 64.1935 | 0.78 | 0.3764 | 1.6662 | 0.78 | 0.7618 | 0.1911 | 0.0669 | | 63.0313 | 20.0 | 500 | 63.6054 | 0.825 | 0.2871 | 1.4054 | 0.825 | 0.8118 | 0.1605 | 0.0520 | | 63.0313 | 21.0 | 525 | 63.6316 | 0.79 | 0.3258 | 1.3131 | 0.79 | 0.7714 | 0.1632 | 0.0485 | | 63.0313 | 22.0 | 550 | 63.6978 | 0.84 | 0.2935 | 1.2425 | 0.8400 | 0.8236 | 0.1508 | 0.0586 | | 63.0313 | 23.0 | 575 | 63.8266 | 0.825 | 0.3117 | 1.5766 | 
0.825 | 0.8019 | 0.1550 | 0.0554 | | 63.0313 | 24.0 | 600 | 63.6750 | 0.825 | 0.3130 | 1.1848 | 0.825 | 0.8158 | 0.1553 | 0.0462 | | 63.0313 | 25.0 | 625 | 63.8469 | 0.82 | 0.3259 | 1.3997 | 0.82 | 0.8007 | 0.1603 | 0.0564 | | 63.0313 | 26.0 | 650 | 63.7656 | 0.815 | 0.3285 | 1.2752 | 0.815 | 0.7969 | 0.1656 | 0.0535 | | 63.0313 | 27.0 | 675 | 63.8074 | 0.805 | 0.3455 | 1.1282 | 0.805 | 0.7870 | 0.1732 | 0.0542 | | 63.0313 | 28.0 | 700 | 63.8411 | 0.81 | 0.3437 | 1.1501 | 0.81 | 0.7917 | 0.1759 | 0.0529 | | 63.0313 | 29.0 | 725 | 63.8158 | 0.81 | 0.3345 | 1.1519 | 0.81 | 0.7901 | 0.1706 | 0.0544 | | 63.0313 | 30.0 | 750 | 63.7917 | 0.815 | 0.3383 | 1.2013 | 0.815 | 0.8006 | 0.1706 | 0.0557 | | 63.0313 | 31.0 | 775 | 63.7855 | 0.815 | 0.3396 | 1.2088 | 0.815 | 0.7974 | 0.1687 | 0.0551 | | 63.0313 | 32.0 | 800 | 63.8003 | 0.825 | 0.3297 | 1.2233 | 0.825 | 0.8091 | 0.1694 | 0.0547 | | 63.0313 | 33.0 | 825 | 63.8029 | 0.815 | 0.3405 | 1.2628 | 0.815 | 0.8007 | 0.1729 | 0.0547 | | 63.0313 | 34.0 | 850 | 63.7752 | 0.81 | 0.3352 | 1.2587 | 0.81 | 0.7979 | 0.1727 | 0.0574 | | 63.0313 | 35.0 | 875 | 63.7800 | 0.815 | 0.3346 | 1.1948 | 0.815 | 0.7977 | 0.1679 | 0.0560 | | 63.0313 | 36.0 | 900 | 63.7885 | 0.825 | 0.3313 | 1.2728 | 0.825 | 0.8173 | 0.1591 | 0.0569 | | 63.0313 | 37.0 | 925 | 63.7730 | 0.815 | 0.3354 | 1.2726 | 0.815 | 0.8027 | 0.1689 | 0.0555 | | 63.0313 | 38.0 | 950 | 63.8327 | 0.815 | 0.3405 | 1.4350 | 0.815 | 0.8043 | 0.1675 | 0.0632 | | 63.0313 | 39.0 | 975 | 63.7324 | 0.785 | 0.3686 | 1.6439 | 0.785 | 0.7745 | 0.1808 | 0.0666 | | 61.6786 | 40.0 | 1000 | 63.8625 | 0.765 | 0.3946 | 1.6127 | 0.765 | 0.7727 | 0.1961 | 0.0723 | | 61.6786 | 41.0 | 1025 | 64.1254 | 0.765 | 0.3904 | 1.5456 | 0.765 | 0.7570 | 0.2020 | 0.0850 | | 61.6786 | 42.0 | 1050 | 63.6201 | 0.78 | 0.3728 | 1.4198 | 0.78 | 0.7447 | 0.1869 | 0.0647 | | 61.6786 | 43.0 | 1075 | 63.6033 | 0.835 | 0.2968 | 1.5430 | 0.835 | 0.8059 | 0.1574 | 0.0479 | | 61.6786 | 44.0 | 1100 | 63.6777 | 0.795 | 0.3606 | 1.3542 | 0.795 | 0.7638 | 0.1806 | 0.0529 | | 61.6786 | 45.0 | 1125 | 63.5747 | 0.83 | 0.2996 | 1.5403 | 0.83 | 0.8079 | 0.1450 | 0.0504 | | 61.6786 | 46.0 | 1150 | 63.6022 | 0.805 | 0.3389 | 1.3842 | 0.805 | 0.7791 | 0.1794 | 0.0466 | | 61.6786 | 47.0 | 1175 | 63.6342 | 0.81 | 0.3346 | 1.2861 | 0.81 | 0.7811 | 0.1678 | 0.0476 | | 61.6786 | 48.0 | 1200 | 63.6065 | 0.81 | 0.3298 | 1.2911 | 0.81 | 0.7807 | 0.1654 | 0.0465 | | 61.6786 | 49.0 | 1225 | 63.5937 | 0.815 | 0.3260 | 1.3576 | 0.815 | 0.7844 | 0.1613 | 0.0467 | | 61.6786 | 50.0 | 1250 | 63.6029 | 0.815 | 0.3241 | 1.2826 | 0.815 | 0.7844 | 0.1662 | 0.0467 | | 61.6786 | 51.0 | 1275 | 63.5947 | 0.81 | 0.3232 | 1.4156 | 0.81 | 0.7789 | 0.1631 | 0.0471 | | 61.6786 | 52.0 | 1300 | 63.6501 | 0.81 | 0.3268 | 1.4148 | 0.81 | 0.7785 | 0.1703 | 0.0468 | | 61.6786 | 53.0 | 1325 | 63.6207 | 0.81 | 0.3207 | 1.2785 | 0.81 | 0.7785 | 0.1698 | 0.0479 | | 61.6786 | 54.0 | 1350 | 63.6021 | 0.815 | 0.3233 | 1.3519 | 0.815 | 0.7818 | 0.1629 | 0.0456 | | 61.6786 | 55.0 | 1375 | 63.6128 | 0.815 | 0.3207 | 1.2837 | 0.815 | 0.7818 | 0.1641 | 0.0474 | | 61.6786 | 56.0 | 1400 | 63.5974 | 0.81 | 0.3194 | 1.3542 | 0.81 | 0.7789 | 0.1679 | 0.0474 | | 61.6786 | 57.0 | 1425 | 63.6173 | 0.81 | 0.3260 | 1.2907 | 0.81 | 0.7761 | 0.1653 | 0.0486 | | 61.6786 | 58.0 | 1450 | 63.6057 | 0.81 | 0.3163 | 1.2981 | 0.81 | 0.7789 | 0.1651 | 0.0471 | | 61.6786 | 59.0 | 1475 | 63.6052 | 0.81 | 0.3197 | 1.3444 | 0.81 | 0.7789 | 0.1680 | 0.0467 | | 61.52 | 60.0 | 1500 | 63.5865 | 0.82 | 0.3143 | 1.2748 | 0.82 | 0.7920 
| 0.1617 | 0.0465 | | 61.52 | 61.0 | 1525 | 63.5754 | 0.82 | 0.3126 | 1.2677 | 0.82 | 0.7920 | 0.1595 | 0.0468 | | 61.52 | 62.0 | 1550 | 63.5876 | 0.815 | 0.3120 | 1.2691 | 0.815 | 0.7879 | 0.1567 | 0.0478 | | 61.52 | 63.0 | 1575 | 63.6040 | 0.82 | 0.3110 | 1.2632 | 0.82 | 0.7920 | 0.1526 | 0.0472 | | 61.52 | 64.0 | 1600 | 63.5956 | 0.82 | 0.3111 | 1.1976 | 0.82 | 0.7963 | 0.1592 | 0.0468 | | 61.52 | 65.0 | 1625 | 63.5792 | 0.815 | 0.3095 | 1.1928 | 0.815 | 0.7879 | 0.1571 | 0.0469 | | 61.52 | 66.0 | 1650 | 63.5704 | 0.82 | 0.3086 | 1.2509 | 0.82 | 0.7936 | 0.1543 | 0.0467 | | 61.52 | 67.0 | 1675 | 63.5918 | 0.82 | 0.3118 | 1.2536 | 0.82 | 0.7936 | 0.1619 | 0.0471 | | 61.52 | 68.0 | 1700 | 63.5741 | 0.82 | 0.3072 | 1.2491 | 0.82 | 0.7963 | 0.1562 | 0.0465 | | 61.52 | 69.0 | 1725 | 63.5581 | 0.825 | 0.3085 | 1.2490 | 0.825 | 0.8021 | 0.1566 | 0.0460 | | 61.52 | 70.0 | 1750 | 63.5796 | 0.82 | 0.3087 | 1.2456 | 0.82 | 0.7963 | 0.1556 | 0.0471 | | 61.52 | 71.0 | 1775 | 63.5776 | 0.825 | 0.3073 | 1.2530 | 0.825 | 0.8021 | 0.1571 | 0.0474 | | 61.52 | 72.0 | 1800 | 63.5524 | 0.825 | 0.3064 | 1.2402 | 0.825 | 0.8021 | 0.1555 | 0.0465 | | 61.52 | 73.0 | 1825 | 63.5638 | 0.825 | 0.3075 | 1.2465 | 0.825 | 0.8021 | 0.1607 | 0.0466 | | 61.52 | 74.0 | 1850 | 63.5654 | 0.82 | 0.3058 | 1.2425 | 0.82 | 0.7963 | 0.1552 | 0.0468 | | 61.52 | 75.0 | 1875 | 63.5654 | 0.825 | 0.3041 | 1.2439 | 0.825 | 0.8021 | 0.1563 | 0.0466 | | 61.52 | 76.0 | 1900 | 63.5499 | 0.83 | 0.3018 | 1.2432 | 0.83 | 0.8082 | 0.1541 | 0.0463 | | 61.52 | 77.0 | 1925 | 63.5563 | 0.825 | 0.3059 | 1.2385 | 0.825 | 0.8021 | 0.1570 | 0.0466 | | 61.52 | 78.0 | 1950 | 63.5524 | 0.825 | 0.3045 | 1.2364 | 0.825 | 0.8021 | 0.1524 | 0.0464 | | 61.52 | 79.0 | 1975 | 63.5507 | 0.825 | 0.3064 | 1.2344 | 0.825 | 0.8021 | 0.1523 | 0.0463 | | 61.4257 | 80.0 | 2000 | 63.5531 | 0.825 | 0.3062 | 1.2266 | 0.825 | 0.8035 | 0.1625 | 0.0463 | | 61.4257 | 81.0 | 2025 | 63.5486 | 0.825 | 0.3029 | 1.1850 | 0.825 | 0.8024 | 0.1506 | 0.0463 | | 61.4257 | 82.0 | 2050 | 63.5479 | 0.82 | 0.3081 | 1.2269 | 0.82 | 0.7963 | 0.1588 | 0.0458 | | 61.4257 | 83.0 | 2075 | 63.5444 | 0.835 | 0.3029 | 1.1721 | 0.835 | 0.8139 | 0.1475 | 0.0461 | | 61.4257 | 84.0 | 2100 | 63.5435 | 0.835 | 0.3047 | 1.2306 | 0.835 | 0.8171 | 0.1529 | 0.0464 | | 61.4257 | 85.0 | 2125 | 63.5393 | 0.83 | 0.3058 | 1.2255 | 0.83 | 0.8081 | 0.1462 | 0.0464 | | 61.4257 | 86.0 | 2150 | 63.5437 | 0.835 | 0.3048 | 1.2254 | 0.835 | 0.8171 | 0.1481 | 0.0464 | | 61.4257 | 87.0 | 2175 | 63.5463 | 0.83 | 0.3039 | 1.1549 | 0.83 | 0.8115 | 0.1562 | 0.0463 | | 61.4257 | 88.0 | 2200 | 63.5408 | 0.835 | 0.3055 | 1.2211 | 0.835 | 0.8187 | 0.1485 | 0.0462 | | 61.4257 | 89.0 | 2225 | 63.5477 | 0.825 | 0.3054 | 1.1541 | 0.825 | 0.8024 | 0.1521 | 0.0463 | | 61.4257 | 90.0 | 2250 | 63.5383 | 0.83 | 0.3051 | 1.1577 | 0.83 | 0.8095 | 0.1532 | 0.0463 | | 61.4257 | 91.0 | 2275 | 63.5466 | 0.84 | 0.3057 | 1.1583 | 0.8400 | 0.8244 | 0.1516 | 0.0458 | | 61.4257 | 92.0 | 2300 | 63.5447 | 0.835 | 0.3049 | 1.1518 | 0.835 | 0.8188 | 0.1615 | 0.0462 | | 61.4257 | 93.0 | 2325 | 63.5327 | 0.84 | 0.3044 | 1.1540 | 0.8400 | 0.8244 | 0.1508 | 0.0459 | | 61.4257 | 94.0 | 2350 | 63.5392 | 0.84 | 0.3046 | 1.1506 | 0.8400 | 0.8244 | 0.1569 | 0.0459 | | 61.4257 | 95.0 | 2375 | 63.5305 | 0.835 | 0.3050 | 1.1520 | 0.835 | 0.8188 | 0.1571 | 0.0457 | | 61.4257 | 96.0 | 2400 | 63.5413 | 0.835 | 0.3042 | 1.1494 | 0.835 | 0.8188 | 0.1571 | 0.0461 | | 61.4257 | 97.0 | 2425 | 63.5387 | 0.835 | 0.3047 | 1.1489 | 0.835 | 0.8188 | 0.1652 | 0.0461 | | 61.4257 
| 98.0 | 2450 | 63.5383 | 0.84 | 0.3046 | 1.1503 | 0.8400 | 0.8244 | 0.1568 | 0.0458 | | 61.4257 | 99.0 | 2475 | 63.5374 | 0.835 | 0.3045 | 1.1489 | 0.835 | 0.8188 | 0.1570 | 0.0456 | | 61.3919 | 100.0 | 2500 | 63.5396 | 0.84 | 0.3043 | 1.1495 | 0.8400 | 0.8244 | 0.1568 | 0.0457 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1.post200 - Datasets 2.9.0 - Tokenizers 0.13.2
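## Usage

The fine-tuned ViT-tiny checkpoint can be queried through the `image-classification` pipeline; `document.png` below is a placeholder path standing in for any scanned-document image.

```python
from transformers import pipeline

# Load the fine-tuned ViT-tiny document classifier.
classifier = pipeline(
    "image-classification",
    model="jordyvl/60-tiny_tobacco3482_hint_",
)

# "document.png" is a placeholder; a file path, URL, or PIL image all work.
print(classifier("document.png", top_k=3))
```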
digiplay/AbsoluteReality_v1.6_diffusers
digiplay
2023-07-18T17:07:51Z
439
2
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-07-18T11:18:31Z
--- license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- Model info: https://civitai.com/models/81458/absolutereality?modelVersionId=108576
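Sample usage with 🧨 diffusers (a minimal sketch; the prompt and output filename are illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "digiplay/AbsoluteReality_v1.6_diffusers",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# The prompt is only an illustration; any text-to-image prompt works.
image = pipe("a photo of a lighthouse at sunset, highly detailed").images[0]
image.save("lighthouse.png")
```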