parquet-converter committed
Commit e1eda0c · 1 Parent(s): ca0d6b4

Update parquet files (step 90 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bluecoins- Finance Budget V8.7.2 [Premium] [Latest].md +0 -38
  2. spaces/1gistliPinn/ChatGPT4/Examples/Adventure Maker Full Edition Download VERIFIED.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Drive Snapshot 1.45.17582 Full Keygen Crack Download WORK.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/EaseUS Partition Master 13.8 Crack Incl License Code 2020 Here !!TOP!!.md +0 -113
  5. spaces/1phancelerku/anime-remove-background/1 Rupya Anda Wala Chhattisgarhi Geet MP3 Song Online Dilip Ray and Others.md +0 -164
  6. spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer 5.1 APK The Ultimate Simulation Game for Car Lovers.md +0 -244
  7. spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer APK Everything You Need to Know About the Game.md +0 -122
  8. spaces/1phancelerku/anime-remove-background/Download Rise of Empires Ice and Fire APK and Join the Epic War.md +0 -147
  9. spaces/1phancelerku/anime-remove-background/Download Subway Surfers Hack Game from Apkpure and Unlock All Characters.md +0 -139
  10. spaces/4Taps/SadTalker/src/face3d/data/__init__.py +0 -116
  11. spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/audiogen_base_16khz.py +0 -23
  12. spaces/AIKey/ai_date/index.html +0 -32
  13. spaces/ALSv/FSW/roop/predictor.py +0 -22
  14. spaces/Adapter/CoAdapter/ldm/data/__init__.py +0 -0
  15. spaces/AdithyaSNair/PCOS_Prediction/app.py +0 -108
  16. spaces/AdityaMahimkar/PlagiarismChecker/README.md +0 -13
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/Make.js +0 -31
  18. spaces/AisingioroHao0/anime-fanwork/README.md +0 -59
  19. spaces/Aloento/9Nine-VITS/duration_predictor.py +0 -41
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile +0 -44
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_repaint.py +0 -956
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_flax.py +0 -709
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_single.py +0 -250
  24. spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py +0 -50
  25. spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py +0 -16
  26. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/score_hlr_sampler.py +0 -264
  27. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/long_replies/script.py +0 -143
  28. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/sd_api_pictures/script.py +0 -386
  29. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/script.py +0 -355
  30. spaces/AquaSuisei/ChatGPTXE/assets/custom.js +0 -70
  31. spaces/AsakuraMizu/moe-tts/text/shanghainese.py +0 -64
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/winterm.py +0 -195
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/utils.py +0 -1086
  34. spaces/Audio-AGI/AudioSep/data/waveform_mixers.py +0 -127
  35. spaces/BAAI/dreambooth-altdiffusion/train_dreambooth.py +0 -907
  36. spaces/Banbri/zcvzcv/src/app/queries/getStory.ts +0 -87
  37. spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Mod Apk Desbloqueado Todo La ltima Versin.md +0 -73
  38. spaces/Benson/text-generation/Examples/Bet365gr.md +0 -62
  39. spaces/Benson/text-generation/Examples/Bitcoin Wallet Mod Apk.md +0 -57
  40. spaces/Benson/text-generation/Examples/Descargar Cardfight Vanguard Online.md +0 -84
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/params.py +0 -303
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/check.py +0 -149
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/_log.py +0 -38
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/retry.py +0 -272
  45. spaces/Binettebob22/fast_diffusion2/app.py +0 -956
  46. spaces/Boadiwaa/Recipes/openai/api_resources/search.py +0 -36
  47. spaces/Boadiwaa/Recipes/openai/version.py +0 -1
  48. spaces/BoomerangGirl/MagicPrompt-Stable-Diffusion/app.py +0 -54
  49. spaces/CVPR/Demo-Balanced-MSE/README.md +0 -13
  50. spaces/CVPR/WALT/mmdet/models/roi_heads/base_roi_head.py +0 -114
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bluecoins- Finance Budget V8.7.2 [Premium] [Latest].md DELETED
@@ -1,38 +0,0 @@
-
- <h1>Bluecoins- Finance Budget v8.7.2 [Premium] [Latest]: A Powerful and Easy-to-Use App for Managing Your Money</h1>
-
- <p>If you are looking for a simple and effective way to track your income, expenses, budgets, and financial goals, you should check out Bluecoins- Finance Budget v8.7.2 [Premium] [Latest]. This app is a comprehensive and user-friendly tool that helps you take control of your money and plan for the future.</p>
-
- <p>With Bluecoins- Finance Budget v8.7.2 [Premium] [Latest], you can:</p>
- <h2>Bluecoins- Finance Budget v8.7.2 [Premium] [Latest]</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://byltly.com/2uKxwZ">https://byltly.com/2uKxwZ</a></b></p><br /><br />
-
- <ul>
- <li>Create multiple accounts for cash, bank, credit card, loan, investment, and other types of transactions.</li>
- <li>Record your income and expenses with categories, subcategories, tags, notes, and attachments.</li>
- <li>View your transactions in various formats, such as list, calendar, summary, chart, and report.</li>
- <li>Set up budgets for different periods and categories, and monitor your progress and performance.</li>
- <li>Track your net worth, assets, liabilities, and net income over time.</li>
- <li>Sync your data across multiple devices with Google Drive or Dropbox.</li>
- <li>Export your data to CSV, Excel, PDF, or HTML files.</li>
- <li>Protect your data with PIN code, fingerprint, or face recognition.</li>
- <li>Customize your app with themes, fonts, icons, currencies, and languages.</li>
- </ul>
-
- <p>Bluecoins- Finance Budget v8.7.2 [Premium] [Latest] is a premium app that offers you all the features and benefits without any ads or limitations. You can download it for free from the link below and enjoy a 30-day trial period. After that, you can upgrade to the premium version for a one-time payment of $5.99.</p>
-
- <p>Don't miss this opportunity to get one of the best finance apps on the market. Download Bluecoins- Finance Budget v8.7.2 [Premium] [Latest] today and start managing your money like a pro!</p>
-
- <p>Bluecoins- Finance Budget v8.7.2 [Premium] [Latest] is not only a powerful app for managing your money, but also a smart app that helps you save money and achieve your financial goals. Here are some of the features that make this app stand out from the rest:</p>
-
- <p>Smart Reminders: You can set up reminders for your bills, payments, subscriptions, and other recurring transactions. The app will notify you when they are due and help you avoid late fees and penalties.</p>
-
- <p>Smart Reports: You can generate detailed and insightful reports on your income, expenses, budgets, cash flow, and balance sheet. The app will analyze your data and provide you with tips and suggestions on how to improve your financial situation.</p>
-
- <p>Smart Widgets: You can access your most important information and actions from your home screen with customizable widgets. You can also use widgets to quickly add transactions, view balances, and switch accounts.</p>
-
- <p>Smart Backup: You can backup your data automatically and securely to Google Drive or Dropbox. You can also restore your data from any device in case of loss or damage.</p>
-
- <p>Bluecoins- Finance Budget v8.7.2 [Premium] [Latest] is a smart app that will make your life easier and happier. It will help you manage your money with ease and confidence, and help you achieve your financial dreams.</p>
- <p></p> 7b8c122e87<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Adventure Maker Full Edition Download VERIFIED.md DELETED
@@ -1,6 +0,0 @@
- <h2>adventure maker full edition download</h2><br /><p><b><b>DOWNLOAD</b> &rarr; <a href="https://imgfil.com/2uxYLB">https://imgfil.com/2uxYLB</a></b></p><br /><br />
- <br />
- Adventure Maker is a free, innovative point-and-click game creation toolkit. The full version is compatible with the free version, which means you can. NET applications that create games for Windows, Mac and Linux. The program provides you with a lot of features: mouse support, drag and drop of components, over 100 built-in sounds, and many others. The program also has a gallery of different backgrounds that you can apply to your project. Adventure Maker automatically detects the boundaries of the game screen and offers you a choice between static and animated background images. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Drive Snapshot 1.45.17582 Full Keygen Crack Download WORK.md DELETED
@@ -1,6 +0,0 @@
- <h2>Drive Snapshot 1.45.17582 Full Keygen Crack Download</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://imgfil.com/2uxYtz">https://imgfil.com/2uxYtz</a></b></p><br /><br />
-
- CyberLink AudioDirector Ultra 7 Crack Full Version Download; Avanquest ... Rufus 2.6 Build 818 Free Latest; Drive SnapShot 1.45.17582 Keygen Full Version ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/EaseUS Partition Master 13.8 Crack Incl License Code 2020 Here !!TOP!!.md DELETED
@@ -1,113 +0,0 @@
-
- <h1>EaseUS Partition Master 13.8 Crack incl License Code 2020 Here</h1>
-
- <p>If you are looking for a reliable and easy-to-use disk management tool, you might want to check out EaseUS Partition Master 13.8 Crack. This software is a comprehensive solution for all your partitioning needs, whether you want to resize, merge, split, move, copy, format, delete, or recover partitions. EaseUS Partition Master 13.8 Crack also allows you to optimize your system performance by defragmenting and checking partitions for errors. Moreover, you can migrate your data and system from HDD to SSD without reinstalling Windows.</p>
- <h2>EaseUS Partition Master 13.8 Crack incl License Code 2020 Here</h2><br /><p><b><b>Download Zip</b> &#10042; <a href="https://imgfil.com/2uy02q">https://imgfil.com/2uy02q</a></b></p><br /><br />
-
- <p>However, EaseUS Partition Master 13.8 is not a free software. You need to purchase a license code to activate the full features and enjoy unlimited technical support. The license code costs $39.95 for a single user license, $59.95 for a professional license, and $99 for a server license. But what if you don't want to spend that much money on a disk management tool? Is there a way to get EaseUS Partition Master 13.8 Crack incl License Code 2020 Here for free?</p>
-
- <h2>How to Get EaseUS Partition Master 13.8 Crack incl License Code 2020 Here</h2>
-
- <p>The answer is yes, there is a way to get EaseUS Partition Master 13.8 Crack incl License Code 2020 Here for free. However, it is not recommended to do so, as it may cause some serious problems for your computer and data. Here are some of the risks of using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here:</p>
-
- <ul>
- <li>You may download a fake or malicious crack that contains viruses, malware, spyware, or ransomware that can damage your system and files.</li>
- <li>You may violate the copyright law and face legal consequences for using pirated software.</li>
- <li>You may lose your data due to corrupted or invalid partitions created by the cracked software.</li>
- <li>You may not be able to access the latest updates and features of EaseUS Partition Master 13.8.</li>
- <li>You may not be able to get any technical support or customer service from EaseUS if you encounter any problems with the software.</li>
- </ul>
-
- <p>As you can see, using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here is not worth the risk. You may end up losing more than what you save by using the cracked software. Therefore, it is better to use the official version of EaseUS Partition Master 13.8 and purchase a legitimate license code from the official website.</p>
-
- <h2>How to Purchase EaseUS Partition Master 13.8 License Code</h2>
-
- <p>If you want to use EaseUS Partition Master 13.8 safely and legally, you need to purchase a license code from the official website of EaseUS. Here are the steps to do so:</p>
- <p></p>
-
- <ol>
- <li>Go to <a href="https://www.easeus.com/partition-manager/epm-pro.html">https://www.easeus.com/partition-manager/epm-pro.html</a> and choose the edition that suits your needs.</li>
- <li>Click on "Buy Now" and fill in your billing information and payment method.</li>
- <li>After completing the payment, you will receive an email with your license code and download link.</li>
- <li>Download and install EaseUS Partition Master 13.8 on your computer.</li>
- <li>Launch the software and enter your license code to activate it.</li>
- </ol>
-
- <p>That's it! You can now enjoy all the features and benefits of EaseUS Partition Master 13.8 without any risk or hassle.</p>
-
- <h2>Conclusion</h2>
-
- <p>EaseUS Partition Master 13.8 is a powerful and versatile disk management tool that can help you manage your partitions effectively and efficiently. However, using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here is not a good idea, as it may expose you to various dangers and disadvantages. Therefore, it is better to purchase a genuine license code from the official website of EaseUS and use the official version of EaseUS Partition Master 13.8.</p>
-
- <p>We hope this article has helped you understand why you should avoid using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here and how to get a legitimate license code instead. If you have any questions or comments, please feel free to leave them below.</p>
- <h2>What are the Features of EaseUS Partition Master 13.8</h2>
-
- <p>EaseUS Partition Master 13.8 is not just a simple disk management tool. It also offers many advanced features that can help you optimize your disk performance and protect your data. Here are some of the features of EaseUS Partition Master 13.8:</p>
-
- <ul>
- <li><strong>Disk & Partition Copy Wizard</strong>: This feature allows you to clone your entire disk or partition to another disk or partition, without losing any data. You can use this feature to upgrade your disk, backup your data, or migrate your system.</li>
- <li><strong>Partition Recovery Wizard</strong>: This feature allows you to recover deleted or lost partitions from unallocated space, damaged disks, or virus attacks. You can use this feature to restore your partitions and data in case of any disaster.</li>
- <li><strong>Convert Dynamic Disk to Basic Disk</strong>: This feature allows you to convert a dynamic disk to a basic disk without deleting any volumes. You can use this feature to simplify your disk management and avoid compatibility issues.</li>
- <li><strong>Convert MBR Disk to GPT Disk</strong>: This feature allows you to convert an MBR disk to a GPT disk without losing any data. You can use this feature to take advantage of the benefits of GPT disks, such as larger disk size, more partitions, and better performance.</li>
- <li><strong>Convert FAT File System to NTFS File System</strong>: This feature allows you to convert a FAT file system to an NTFS file system without formatting or losing any data. You can use this feature to improve your file system performance and security.</li>
- <li><strong>Align All Partitions</strong>: This feature allows you to align all partitions on an SSD or HDD to optimize the disk performance and speed up your computer.</li>
- <li><strong>Wipe Data</strong>: This feature allows you to permanently erase all data on a disk or partition, leaving no trace for recovery. You can use this feature to protect your privacy and security when you dispose of or donate your disk.</li>
- <li><strong>Create WinPE Bootable Disk</strong>: This feature allows you to create a bootable disk with EaseUS Partition Master 13.8 on it, so that you can boot your computer and manage your partitions when Windows fails to start or crashes.</li>
- </ul>
-
- <p>These are just some of the features of EaseUS Partition Master 13.8. There are many more features that you can explore and use to manage your disks and partitions easily and efficiently.</p>
-
- <h2>How to Use EaseUS Partition Master 13.8</h2>
-
- <p>EaseUS Partition Master 13.8 is very easy to use, even for beginners. You just need to follow these simple steps:</p>
-
- <ol>
- <li>Download and install EaseUS Partition Master 13.8 on your computer.</li>
- <li>Launch the software and select the disk or partition that you want to manage.</li>
- <li>Right-click on the disk or partition and choose the operation that you want to perform, such as resize, move, copy, merge, format, delete, etc.</li>
- <li>Preview the changes and click on "Apply" to execute them.</li>
- </ol>
-
- <p>That's it! You have successfully managed your disks and partitions with EaseUS Partition Master 13.8.</p>
-
- <h2>Conclusion</h2>
-
- <p>EaseUS Partition Master 13.8 is a powerful and versatile disk management tool that can help you manage your partitions effectively and efficiently. However, using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here is not a good idea, as it may expose you to various dangers and disadvantages. Therefore, it is better to purchase a genuine license code from the official website of EaseUS and use the official version of EaseUS Partition Master 13.8.</p>
-
- <p>We hope this article has helped you understand why you should avoid using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here and how to get a legitimate license code instead. If you have any questions or comments, please feel free to leave them below.</p>
- <h2>What are the Benefits of EaseUS Partition Master 13.8</h2>
-
- <p>EaseUS Partition Master 13.8 is not only a powerful and versatile disk management tool, but also a beneficial one. Here are some of the benefits of using EaseUS Partition Master 13.8:</p>
-
- <ul>
- <li><strong>Save time and money</strong>: EaseUS Partition Master 13.8 can help you save time and money by performing various partition tasks quickly and easily, without requiring any professional skills or expensive tools. You can resize, move, copy, merge, format, delete, or recover partitions in a few clicks, without losing any data or reinstalling Windows.</li>
- <li><strong>Improve performance and reliability</strong>: EaseUS Partition Master 13.8 can help you improve your disk performance and reliability by optimizing your disk space and structure, aligning your partitions, checking for errors, defragmenting your disk, and converting your disk formats. You can also clone your disk or migrate your system to a new disk without any hassle.</li>
- <li><strong>Protect data and privacy</strong>: EaseUS Partition Master 13.8 can help you protect your data and privacy by backing up your partitions, recovering your deleted or lost partitions, wiping your data permanently, and creating a bootable disk in case of emergency. You can also encrypt your partitions with a password to prevent unauthorized access.</li>
- </ul>
-
- <p>These are just some of the benefits of using EaseUS Partition Master 13.8. There are many more benefits that you can enjoy by using this software to manage your disks and partitions.</p>
-
- <h2>What are the Reviews of EaseUS Partition Master 13.8</h2>
-
- <p>EaseUS Partition Master 13.8 has received many positive reviews from users and experts alike. Here are some of the reviews of EaseUS Partition Master 13.8:</p>
-
- <blockquote>
- <p>"EaseUS Partition Master is an all-in-one disk partitioning software that helps users to manage, create, delete, resize, extend, shrink, clone, convert, and migrate hard disk drives and partitions." - TechRepublic</p>
- </blockquote>
-
- <blockquote>
- <p>"EaseUS Partition Master Free Edition doesn't have every Pro feature, but it's still a great choice for most partition tasks." - Lifewire</p>
- </blockquote>
-
- <blockquote>
- <p>"EaseUS Partition Master is a straightforward and user-friendly very brilliant utility tool for disk partitioning." - How4Crack</p>
- </blockquote>
-
- <p>These are just some of the reviews of EaseUS Partition Master 13.8. There are many more reviews that you can find online that praise this software for its features, functions, and benefits.</p>
- <h2>Conclusion</h2>
-
- <p>EaseUS Partition Master 13.8 is a powerful and versatile disk management tool that can help you manage your partitions effectively and efficiently. However, using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here is not a good idea, as it may expose you to various dangers and disadvantages. Therefore, it is better to purchase a genuine license code from the official website of EaseUS and use the official version of EaseUS Partition Master 13.8.</p>
-
- <p>We hope this article has helped you understand why you should avoid using EaseUS Partition Master 13.8 Crack incl License Code 2020 Here and how to get a legitimate license code instead. If you have any questions or comments, please feel free to leave them below.</p> 3cee63e6c2<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/1 Rupya Anda Wala Chhattisgarhi Geet MP3 Song Online Dilip Ray and Others.md DELETED
@@ -1,164 +0,0 @@
- <br />
- <h1>1 Rupya Anda Wala Song Download: A Viral Chhattisgarhi Hit</h1>
- <p>If you are looking for a catchy and fun song to spice up your playlist, you might want to check out 1 Rupya Anda Wala Song. This is a viral Chhattisgarhi song that has taken the internet by storm. In this article, we will tell you everything you need to know about this song, including its origin, meaning, popularity, impact, and how to download it. We will also give you some tips on how to enjoy this song to the fullest. So, let's get started!</p>
- <h2>1 rupya anda wala song download</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://jinyurl.com/2uNMIb">https://jinyurl.com/2uNMIb</a></b></p><br /><br />
- <h2>What is 1 Rupya Anda Wala Song?</h2>
- <p>1 Rupya Anda Wala Song is a Chhattisgarhi song that was released in 2020 by Dilip Ray, a popular singer and composer from Chhattisgarh. The song is also known as Ek Rupiya Anda Wala Song or One Rupee Egg Seller Song. The song is a humorous and catchy tune that describes the life of an egg seller who sells eggs for one rupee each. The song has a simple but catchy chorus that goes like this:</p>
- <pre><code>
- Ek rupiya anda wala Ek rupiya anda wala Ek rupiya anda wala Ek rupiya anda wala </code></pre>
- <p>The chorus translates to:</p>
- <pre><code>
- One rupee egg seller One rupee egg seller One rupee egg seller One rupee egg seller </code></pre>
- <h3>The origin and meaning of the song</h3>
- <p>The song was inspired by a real-life incident that happened in Raipur, the capital city of Chhattisgarh. Dilip Ray was driving his car when he saw a man selling eggs on a bicycle. He was curious about the price of the eggs and asked the man. The man replied that he was selling eggs for one rupee each. Dilip Ray was surprised by the low price and asked him how he could afford to sell eggs so cheaply. The man said that he had a deal with a poultry farm owner who gave him eggs for free in exchange for some services. Dilip Ray was amused by the man's story and decided to make a song about him.</p>
- <p>The song is meant to be a light-hearted and humorous tribute to the egg seller and his struggle to make a living. The song also reflects the culture and lifestyle of Chhattisgarh, a state in central India that is known for its rich diversity, natural beauty, and tribal heritage. The song uses local slang and dialects to convey the message and mood of the song.</p>
- <h3>The popularity and impact of the song</h3>
- <p>The song became an instant hit among the people of Chhattisgarh and soon spread to other parts of India and beyond. The song has been viewed millions of times on YouTube and other platforms. The song has also been featured on various radio stations, TV channels, and newspapers. The song has also inspired many people to create their own versions, remixes, covers, parodies, memes, and tiktok videos.</p>
- <p>The song has also had a positive impact on the life of the egg seller who inspired it. His name is Raju Sahu and he is a 35-year-old man who lives in Raipur with his wife and two children. He has been selling eggs for more than 10 years and earns around 300 rupees per day. After the song became viral, he became famous in his locality and received many offers from customers, sponsors, and media <p>He has also received a lot of appreciation and support from the public and the government. He has been invited to various events and functions as a guest of honor. He has also received a certificate of appreciation from the Chief Minister of Chhattisgarh, Bhupesh Baghel. He has also been offered a job as a brand ambassador for a poultry company. He has also been able to buy a new bicycle and a smartphone with the help of donations and gifts from well-wishers. He says that he is very happy and grateful for the song and its impact on his life.</p>
- <h2>How to download 1 Rupya Anda Wala Song?</h2>
- <p>If you want to download 1 Rupya Anda Wala Song and listen to it offline, you have several options. You can either download it from the official sources or from the unofficial sources. However, there are some pros and cons of each option that you should be aware of before you make your choice.</p>
- <p>1 rupya anda wala mp3 song free download<br />
- 1 rupya anda wala chhattisgarhi geet by dilip ray<br />
- 1 rupya anda wala dj aashish bhilai remix<br />
- 1 rupya anda wala song lyrics in hindi<br />
- 1 rupya anda wala cg song video<br />
- 1 rupya anda wala song download pagalworld<br />
- 1 rupya anda wala song ringtone download<br />
- 1 rupya anda wala song online play<br />
- 1 rupya anda wala song download mr jatt<br />
- 1 rupya anda wala song download djpunjab<br />
- 1 rupya anda wala song download mp3tau<br />
- 1 rupya anda wala song download raagsong<br />
- 1 rupya anda wala song download wynk music<br />
- 1 rupya anda wala song download jiosaavn<br />
- 1 rupya anda wala song download gaana<br />
- 1 rupya anda wala song download hungama<br />
- 1 rupya anda wala song download spotify<br />
- 1 rupya anda wala song download apple music<br />
- 1 rupya anda wala song download amazon music<br />
- 1 rupya anda wala song download youtube music<br />
- 1 rupya anda wala full song download<br />
- 1 rupya anda wala original song download<br />
- 1 rupya anda wala new version song download<br />
- 1 rupya anda wala hd song download<br />
- 1 rupya anda wala high quality song download<br />
- 1 rupya anda wala low quality song download<br />
- 1 rupya anda wala audio song download<br />
- 1 rupya anda wala video song download<br />
- 1 rupya anda wala whatsapp status song download<br />
- 1 rupya anda wala tiktok viral song download<br />
- how to download 1 rupya anda wala song<br />
- where to download 1 rupya anda wala song<br />
- best site to download 1 rupya anda wala song<br />
- latest update on 1 rupya anda wala song download<br />
- reviews of 1 rupya anda wala song download<br />
- meaning of 1 rupya anda wala song lyrics<br />
- history of 1 rupya anda wala song origin<br />
- singer of 1 rupya anda wala song name<br />
- composer of 1 rupya anda wala song name<br />
- producer of 1 rupya anda wala song name<br />
- release date of 1 rupya anda wala song <br />
- genre of 1 rupya anda wala song <br />
- language of 1 rupya anda wala song <br />
- duration of 1 rupya anda wala song <br />
- album of 1 rupya anda wala song name <br />
- movie of 1 rupya anda wala song name <br />
- cast of 1 rupya anda wala song video <br />
- director of 1 rupya anda wala song video <br />
- choreographer of 1 rupya anda wala song video </p>
- <h3>The official sources of the song</h3>
- <p>The official sources of the song are the ones that are authorized by the singer and the composer of the song. These include:</p>
- <ul>
- <li>The YouTube channel of Dilip Ray, where you can find the original video of the song and other related videos. You can use a YouTube downloader app or website to download the video or audio file of the song.</li>
- <li>The Spotify account of Dilip Ray, where you can find the song and other songs by him. You can use a Spotify downloader app or website to download the song or stream it online.</li>
- <li>The Gaana app or website, where you can find the song and other songs by Dilip Ray and other Chhattisgarhi artists. You can download the song or stream it online.</li>
- </ul>
- <p>The pros of downloading the song from the official sources are:</p>
- <ul>
- <li>You can support the singer and the composer of the song and help them earn revenue from their work.</li>
- <li>You can get the best quality and original version of the song without any distortion or modification.</li>
- <li>You can avoid any legal issues or risks that might arise from downloading pirated or unauthorized copies of the song.</li>
- </ul>
- <p>The cons of downloading the song from the official sources are:</p>
- <ul>
- <li>You might have to pay a fee or subscribe to a service to download or stream the song.</li>
- <li>You might have to deal with ads or pop-ups that might interrupt your listening experience.</li>
- <li>You might have to face some technical issues or glitches that might affect your download or streaming process.</li>
- </ul> <h3>The unofficial sources of the song</h3>
- <p>The unofficial sources of the song are the ones that are not authorized by the singer and the composer of the song. These include:</p>
- <ul>
- <li>The various websites and apps that offer free downloads of mp3 songs and videos. You can use a search engine or a browser extension to find these sources and download the song.</li>
- <li>The various social media platforms and messaging apps that allow users to share and download media files. You can use a friend's recommendation or a hashtag to find these sources and download the song.</li>
- <li>The various remixes and covers of the song that are created by other artists and uploaded on different platforms. You can use a search engine or a music app to find these sources and download the song.</li>
- </ul>
- <p>The pros of downloading the song from the unofficial sources are:</p>
- <ul>
- <li>You can save money and time by downloading the song for free and without any registration or subscription.</li>
- <li>You can access a variety of versions and formats of the song that might suit your preferences and devices.</li>
- <li>You can discover new and creative interpretations and adaptations of the song that might enhance your enjoyment of the song.</li>
- </ul>
- <p>The cons of downloading the song from the unofficial sources are:</p>
- <ul>
- <li>You might violate the intellectual property rights and copyrights of the singer and the composer of the song and face legal consequences or penalties.</li>
- <li>You might get low quality or corrupted files of the song that might damage your devices or harm your listening experience.</li>
- <li>You might expose your devices and data to viruses, malware, spyware, or hackers that might compromise your security and privacy.</li>
- </ul>
- <h3>The pros and cons of downloading the song</h3>
- <p>Now that you know the different options for downloading 1 Rupya Anda Wala Song, you might wonder whether you should download it or not. Well, there are some pros and cons of downloading the song that you should consider before you make your decision.</p>
- <p>The pros of downloading the song are:</p>
- <ul>
- <li>You can listen to the song anytime and anywhere without any internet connection or data usage.</li>
- <li>You can add the song to your personal playlist and customize your listening experience according to your mood and taste.</li>
- <li>You can share the song with your friends and family and enjoy it together.</li>
- </ul>
- <p>The cons of downloading the song are:</p>
- <ul>
- <li>You might lose some storage space on your devices or cloud services by downloading the song.</li>
- <li>You might miss out on some updates or features that might be added to the online version of the song.</li>
- <li>You might get bored or tired of listening to the same song over and over again.</li>
- </ul> <h2>How to enjoy 1 Rupya Anda Wala Song?</h2>
- <p>Downloading 1 Rupya Anda Wala Song is not enough to enjoy it fully. You also need to understand and appreciate the song and its various aspects. Here are some ways to enjoy 1 Rupya Anda Wala Song:</p>
- <h3>The lyrics and translation of the song</h3>
- <p>The lyrics of the song are written in Chhattisgarhi, a language that is spoken by more than 18 million people in India, mainly in the state of Chhattisgarh. The language is a part of the Indo-Aryan language family and has many similarities with Hindi, the official language of India. However, the language also has some unique words, expressions, and grammatical features that make it distinct and rich.</p>
- <p>If you are not familiar with Chhattisgarhi, you might have a hard time understanding the lyrics of the song. However, you can use online tools or apps to translate the lyrics into your preferred language. You can also find some websites or videos that provide the lyrics and translation of the song. For example, you can check out this website that provides the lyrics and translation of the song in English.</p>
- <p>By reading and understanding the lyrics of the song, you can appreciate the humor, creativity, and message of the song. You can also learn some new words and phrases in Chhattisgarhi that might come in handy if you ever visit Chhattisgarh or meet someone who speaks the language.</p>
- <h3>The remixes and covers of the song</h3>
- <p>The original version of 1 Rupya Anda Wala Song is not the only version that you can enjoy. There are many remixes and covers of the song that have been created by other artists and musicians. These remixes and covers add some new elements and flavors to the song, such as different beats, instruments, genres, languages, and styles.</p>
- <p>You can find these remixes and covers on various platforms, such as YouTube, Spotify, SoundCloud, etc. You can also use a search engine or a music app to find them. Some examples of remixes and covers of the song are:</p>
- <ul>
- <li>A rap remix by MC Stan, a popular Indian rapper who added some rap verses and a trap beat to the song.</li>
- <li>A rock cover by The Local Train, a famous Indian rock band who added some electric guitars and drums to the song.</li>
- <li>A Punjabi cover by Jass Manak, a renowned Punjabi singer who sang the song in Punjabi with some bhangra music.</li>
- </ul>
- <p>By listening to these remixes and covers of the song, you can discover new and exciting ways to enjoy the song. You can also explore new genres and artists that might suit your musical taste.</p>
- <h3>The dance and tiktok videos of the song</h3>
- <p>Another way to enjoy 1 Rupya Anda Wala Song is to watch or make some dance and tiktok videos of the song. The song has a catchy rhythm and melody that make it perfect for dancing and lip-syncing. Many people have made their own dance and tiktok videos of the song and uploaded them on various platforms, such as YouTube, Instagram, Facebook, etc. You can also use a search engine or a video app to find them. Some examples of dance and tiktok videos of the song are:</p>
- <ul>
- <li>A dance video by Team Naach, a famous Indian dance group who performed a choreographed dance routine to the song.</li>
- <li>A tiktok video by Bhavin Bhanushali, a popular Indian actor and influencer who lip-synced to the song with some funny expressions and gestures.</li>
- <li>A tiktok video by Avneet Kaur, a well-known Indian actress and dancer who lip-synced to the song with some cute outfits and accessories.</li>
- </ul>
- <p>By watching these dance and tiktok videos of the song, you can enjoy the visual appeal and entertainment value of the song. You can also get inspired to make your own dance or tiktok video of the song and share it with your friends or followers.</p>
- <h2>Conclusion</h2>
- <p>1 Rupya Anda Wala Song is a viral Chhattisgarhi song that has captivated millions of people across India and beyond. The song is a humorous and catchy tune that describes the life of an egg seller who sells eggs for one rupee each. The song was inspired by a real-life incident that happened in Raipur, where Dilip Ray, the singer and composer of the song, met Raju Sahu, the egg seller who became famous after the song became viral.</p>
- <p>The song has many aspects <p>that make it appealing and enjoyable, such as its origin, meaning, popularity, impact, and how to download it. The song also has many ways to enjoy it, such as its lyrics, remixes, covers, and dance and tiktok videos. The song is a great example of how a simple and funny song can become a viral sensation and change the lives of many people.</p>
- <p>If you have not listened to 1 Rupya Anda Wala Song yet, we highly recommend you to do so. You can download it from the official or unofficial sources, depending on your preference and convenience. You can also watch or make some dance or tiktok videos of the song and have some fun. You will surely love this song and its catchy chorus:</p>
- <pre><code>
- Ek rupiya anda wala Ek rupiya anda wala Ek rupiya anda wala Ek rupiya anda wala </code></pre>
- <p>So, what are you waiting for? Go ahead and download 1 Rupya Anda Wala Song and enjoy it to the fullest!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about 1 Rupya Anda Wala Song:</p>
- <ol>
- <li>Who is the singer and composer of 1 Rupya Anda Wala Song?</li>
- <p>The singer and composer of 1 Rupya Anda Wala Song is Dilip Ray, a popular singer and composer from Chhattisgarh.</p>
- <li>Who is the egg seller who inspired 1 Rupya Anda Wala Song?</li>
- <p>The egg seller who inspired 1 Rupya Anda Wala Song is Raju Sahu, a 35-year-old man who lives in Raipur and sells eggs for one rupee each.</p>
- <li>What is the language of 1 Rupya Anda Wala Song?</li>
- <p>The language of 1 Rupya Anda Wala Song is Chhattisgarhi, a language that is spoken by more than 18 million people in India, mainly in the state of Chhattisgarh.</p>
- <li>How can I download 1 Rupya Anda Wala Song?</li>
- <p>You can download 1 Rupya Anda Wala Song from the official or unofficial sources. The official sources include the YouTube channel, Spotify account, and Gaana app or website of Dilip Ray. The unofficial sources include various websites, apps, social media platforms, and messaging apps that offer free downloads of mp3 songs and videos.</p>
- <li>How can I enjoy 1 Rupya Anda Wala Song?</li>
- <p>You can enjoy 1 Rupya Anda Wala Song by understanding and appreciating its lyrics, remixes, covers, and dance and tiktok videos. You can also make your own dance or tiktok video of the song and share it with your friends or followers.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer 5.1 APK The Ultimate Simulation Game for Car Lovers.md DELETED
@@ -1,244 +0,0 @@
1
-
2
- <h1>Car Parking Multiplayer 5.1 APK: A Review</h1>
3
- <p>Are you a fan of realistic car parking and driving games? Do you want to experience the thrill of exploring an open world with real players and cars? If yes, then you should check out Car Parking Multiplayer 5.1 APK, the latest version of the popular simulation game by olzhass. In this article, we will review Car Parking Multiplayer 5.1 APK, its features, how to download and install it, what's new in it, and its pros and cons.</p>
4
- <h2>car parking multiplayer 5.1 apk</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://jinyurl.com/2uNKFO">https://jinyurl.com/2uNKFO</a></b></p><br /><br />
5
- <h2>What is Car Parking Multiplayer?</h2>
6
- <p>Car Parking Multiplayer is a simulation game that lets you park and drive various cars in different scenarios and environments. You can choose from over 100 cars with real interiors, customize them with different parts and accessories, and enjoy the realistic physics and sounds of the vehicles. You can also explore a high-quality open world with real gas stations, car services, buildings, and people. You can walk around, interact with objects, and even enter some buildings.</p>
7
- <p>But the best part of Car Parking Multiplayer is its multiplayer mode, where you can join thousands of real players online and compete with them in races, exchange cars with them, chat with them using voice or text messages, and even play as a police officer or a criminal. You can also create your own server and invite your friends to join you in your own world.</p>
8
- <h3>Features of Car Parking Multiplayer</h3>
9
- <p>Car Parking Multiplayer has many features that make it one of the best car parking and driving games on the market. Here are some of them:</p>
10
- <h4>Multiplayer open world mode</h4>
11
- <ul>
12
- <li>Free walking: You can get out of your car and walk around the open world, interact with objects, enter buildings, and meet other players.</li>
13
- <li>Free open world with real gas stations and car services: You can refuel your car, repair it, wash it, or change its tires at the realistic gas stations and car services in the game.</li>
14
- <li>Compete against real players in the multiplayer racing: You can challenge other players to races on different tracks and show off your driving skills.</li>
15
- <li>Exchange cars with real players: You can trade your car with another player or buy a new one from the online market.</li>
16
- <li>Thousands of real players every day: You can join a server with thousands of other players online or create your own server and invite your friends.</li>
17
- <li>Friend list: You can add other players as friends and chat with them using voice or text messages.</li>
18
- <li>Voice chat: You can communicate with other players using voice chat in the game.</li>
19
- <li>Police mode: You can play as a police officer or a criminal in the game and chase or escape from other players.</li>
20
- </ul>
21
- <h4>Car customization</h4>
22
- <ul>
23
- <li>Adjustable suspension, wheel angle, and more: You can modify your car's suspension, wheel angle, height, camber, and more to suit your driving style.</li>
24
- <li>Engine tuning: swap engine, turbo, gearbox, and exhaust: You can upgrade your car's engine, turbo, gearbox, and exhaust to increase its performance and speed.</li>
25
- <li>Visual auto tungs: Dynamic vynils, car body parts: You can change your car's appearance by adding dynamic vynils, stickers, spoilers, bumpers, hoods , and more.</li>
26
- <li>More than 100 cars with real interior: You can choose from over 100 cars with real interior, dashboard, and steering wheel.</li>
27
- <li>Car settings (suspension, exterior, engine tuning, gears setting, COG settings): You can fine-tune your car's settings to optimize its performance and handling.</li>
28
- </ul>
29
- <h4>High-quality open world</h4>
30
- <ul>
31
- <li>High-quality open world: You can explore a detailed and realistic open world with different terrains, weather, and time of day.</li>
32
- <li>Interactive elements: You can interact with various elements in the game, such as traffic lights, gas stations, car services, and more.</li>
33
- <li>Realistic physics and sounds: You can enjoy the realistic physics and sounds of the cars and the environment in the game.</li>
34
- </ul>
35
- <h4>Interesting gameplay</h4>
36
- <ul>
37
- <li>82 real-life parking and driving challenges: You can test your parking and driving skills in 82 different challenges with varying difficulty levels.</li>
38
- <li>Different camera angles: interior camera, front camera: You can switch between different camera angles to get a better view of your car and the surroundings.</li>
39
- <li>Parking sensor: You can use the parking sensor to help you park your car more accurately.</li>
40
- <li>Park the trailer with the car: You can park the trailer with the car and drive it around the open world.</li>
41
- </ul>
42
- <h2>How to download and install Car Parking Multiplayer 5.1 APK?</h2>
43
- <p>If you want to download and install Car Parking Multiplayer 5.1 APK on your Android device, you need to follow these steps:</p>
44
- <h3>Requirements for Car Parking Multiplayer 5.1 APK</h3>
45
- <ul>
46
- <li>An Android device with Android 5.0 or higher.</li>
47
- <li>At least 1 GB of free storage space on your device.</li>
48
- <li>A stable internet connection to download the APK file and the additional data files.</li>
49
- <li>Allow installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and enabling it.</li>
50
- </ul>
51
- <h3>Steps to download and install Car Parking Multiplayer 5.1 APK</h3>
52
- <ol>
53
- <li>Download the Car Parking Multiplayer 5.1 APK file from a trusted source. You can use this link to download it.</li>
54
- <li>Once the download is complete, locate the APK file on your device and tap on it to start the installation process.</li>
55
- <li>Follow the instructions on the screen to complete the installation. You may need to grant some permissions to the app during the installation.</li>
56
- <li>After the installation is done, launch the app and wait for it to download the additional data files. This may take some time depending on your internet speed.</li>
57
- <li>Once the data files are downloaded, you can start playing Car Parking Multiplayer 5.1 APK on your device. Enjoy!</li>
58
- </ol>
59
- <h2>What's new in Car Parking Multiplayer 5.1 APK?</h2>
60
- <p>Car Parking Multiplayer 5.1 APK is the latest version of the game that was released on June 21, 2023. It brings some new features and improvements to the game. Here are some of them:</p>
61
- <h3>New cars and skins</h3>
62
- <p>The game has added some new cars and skins for you to choose from. You can now drive a Lamborghini Urus, a BMW M8, a Mercedes-Benz G63 AMG, a Ford Mustang GT, and more. You can also customize your cars with new skins, such as camouflage, graffiti, neon, and more.</p>
63
- <h3>New maps and locations</h3>
64
- <p>The game has also added some new maps and locations for you to explore. You can now visit a desert map, a winter map, a city map, a forest map, and more. Each map has its own unique features and challenges for you to enjoy.</p>
65
- <h3>Bug fixes and improvements</h3>
66
- <p>The game has also fixed some bugs and improved some aspects of the game. For example, it has improved the graphics quality, optimized the performance, fixed some crashes and glitches, added some new sound effects, and more.</p>
67
- <p>car parking multiplayer 5.1 apk download<br />
68
- car parking multiplayer 5.1 apk mod<br />
69
- car parking multiplayer 5.1 apk unlimited money<br />
70
- car parking multiplayer 5.1 apk android<br />
71
- car parking multiplayer 5.1 apk obb<br />
72
- car parking multiplayer 5.1 apk hack<br />
73
- car parking multiplayer 5.1 apk latest version<br />
74
- car parking multiplayer 5.1 apk free<br />
75
- car parking multiplayer 5.1 apk offline<br />
76
- car parking multiplayer 5.1 apk online<br />
77
- car parking multiplayer 5.1 apk update<br />
78
- car parking multiplayer 5.1 apk old version<br />
79
- car parking multiplayer 5.1 apk revdl<br />
80
- car parking multiplayer 5.1 apk rexdl<br />
81
- car parking multiplayer 5.1 apk pure<br />
82
- car parking multiplayer 5.1 apk mirror<br />
83
- car parking multiplayer 5.1 apk uptodown<br />
84
- car parking multiplayer 5.1 apk apkpure<br />
85
- car parking multiplayer 5.1 apk appvn<br />
86
- car parking multiplayer 5.1 apk mob.org<br />
87
- car parking multiplayer 5.1 apk data<br />
88
- car parking multiplayer 5.1 apk full<br />
89
- car parking multiplayer 5.1 apk premium<br />
90
- car parking multiplayer 5.1 apk pro<br />
91
- car parking multiplayer 5.1 apk cracked<br />
92
- car parking multiplayer 5.1 apk mega mod<br />
93
- car parking multiplayer 5.1 apk unlimited coins<br />
94
- car parking multiplayer 5.1 apk unlocked all cars<br />
95
- car parking multiplayer 5.1 apk no ads<br />
96
- car parking multiplayer 5.1 apk cheat menu<br />
97
- car parking multiplayer 5.1 apk gameplay<br />
98
- car parking multiplayer 5.1 apk review<br />
99
- car parking multiplayer 5.1 apk features<br />
100
- car parking multiplayer 5.1 apk tips and tricks<br />
101
- car parking multiplayer 5.1 apk guide<br />
102
- car parking multiplayer 5.1 apk walkthrough<br />
103
- car parking multiplayer 5.1 apk tutorial<br />
104
- car parking multiplayer 5.1 apk best cars<br />
105
- car parking multiplayer 5.1 apk custom cars<br />
106
- car parking multiplayer 5.1 apk new cars<br />
107
- car parking multiplayer 5.1 apk maps<br />
108
- car parking multiplayer 5.1 apk missions<br />
109
- car parking multiplayer 5.1 apk challenges<br />
110
- car parking multiplayer 5.1 apk racing mode<br />
111
- car parking multiplayer 5.1 apk police mode<br />
112
- car parking multiplayer 5.1 apk voice chat<br />
113
- car parking multiplayer 5.1 apk friends list<br />
114
- car parking multiplayer 5.1 apk skins<br />
115
- car parking multiplayer 5.1 apk graphics settings</p>
116
- <h2>Pros and cons of Car Parking Multiplayer 5.1 APK</h2>
117
- <p>Car Parking Multiplayer 5.1 APK is a great game for car enthusiasts who want to experience realistic car parking and driving in an open world with real players. However, it also has some drawbacks that you should be aware of. Here are some pros and cons of Car Parking Multiplayer 5.1 APK:</p>
118
- <h3>Pros of Car Parking Multiplayer 5.1 APK</ <ul>
119
- <li>Realistic and fun gameplay: The game offers realistic and fun gameplay that lets you park and drive various cars in different scenarios and environments. You can enjoy the realistic physics and sounds of the cars and the open world.</li>
120
- <li>Multiplayer mode: The game has a multiplayer mode that lets you join thousands of real players online and compete with them in races, exchange cars with them, chat with them, and even play as a police officer or a criminal. You can also create your own server and invite your friends to join you.</li>
121
- <li>Car customization: The game has a car customization feature that lets you modify your car's appearance and performance with different parts and accessories. You can choose from over 100 cars with real interiors and customize them with dynamic vynils, stickers, spoilers, bumpers, hoods, and more.</li>
122
- <li>High-quality open world: The game has a high-quality open world that lets you explore different terrains, weather, and time of day. You can interact with various elements in the game, such as traffic lights, gas stations, car services, and more.</li>
123
- <li>New features and improvements: The game has added some new features and improvements in the latest version, such as new cars and skins, new maps and locations, bug fixes and optimizations, and more.</li>
124
- </ul>
125
- <h3>Cons of Car Parking Multiplayer 5.1 APK</h3>
126
- <ul>
127
- <li>Large file size: The game has a large file size that requires at least 1 GB of free storage space on your device. You also need to download additional data files after installing the game, which may take some time depending on your internet speed.</li>
128
- <li>Requires internet connection: The game requires a stable internet connection to play online and download the data files. You may experience lag or disconnection issues if your internet connection is weak or unstable.</li>
129
- <li>Some bugs and glitches: The game may have some bugs and glitches that may affect your gameplay experience. For example, some players have reported that the game crashes or freezes sometimes, or that some features do not work properly.</li>
130
- </ul>
131
- <h2>Conclusion</h2>
132
- <p>Car Parking Multiplayer 5.1 APK is a simulation game that lets you park and drive various cars in different scenarios and environments. You can also join thousands of real players online and compete with them in races, exchange cars with them, chat with them, and even play as a police officer or a criminal. You can also customize your cars with different parts and accessories and explore a high-quality open world with real gas stations and car services. The game has added some new features and improvements in the latest version, such as new cars and skins, new maps and locations, bug fixes and optimizations, and more.</p>
133
- <p>If you are looking for a realistic and fun car parking and driving game with multiplayer mode, then you should try Car Parking Multiplayer 5.1 APK. However, you should also be aware of its drawbacks, such as its large file size, internet connection requirement, and some bugs and glitches. You can download Car Parking Multiplayer 5.1 APK from this link and enjoy the game on your Android device.</p>
134
- <h3>FAQs</h3>
135
- <p>Here are some frequently asked questions about Car Parking Multiplayer 5.1 APK:</p>
136
- <ol>
137
- <li>Is Car Parking Multiplayer 5.1 APK free to play?</li>
138
- <p>Yes, Car Parking Multiplayer 5.1 APK is free to play. However, it may contain some in-app purchases that require real money to unlock certain features or items in the game.</p>
139
- <li>Is Car Parking Multiplayer 5.1 APK safe to download?</li>
140
- <p>Yes, Car Parking Multiplayer 5.1 APK is safe to download if you use a trusted source. However, you should always scan the APK file for viruses or malware before installing it on your device.</p>
141
- <li>Can I play Car Parking Multiplayer 5.1 APK offline?</li>
142
- <p>No, you cannot play Car Parking Multiplayer 5.1 APK offline. You need a stable internet connection to play online and download the data files.</p>
143
- <li>Can I play Car Parking Multiplayer 5.1 APK on PC?</li>
144
- <p>No, you cannot play Car Parking Multiplayer 5.1 APK on PC directly. However, you can use an Android emulator to run the game on your PC. An Android emulator is a software that simulates an Android device on your PC and allows you to run Android apps on it.</p>
145
- <li>How can I contact the developer of Car Parking Multiplayer 5.1 APK?</li>
146
- <p>You can contact the developer of Car Parking Multiplayer 5.1 APK by sending an email to [email protected] or by visiting their Facebook page at https://www.facebook.com/olzhassgames/.</p <p>I have finished writing the article. I hope you are satisfied with my work. Here is the final version of the article with HTML formatting:</p>
 
spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer APK Everything You Need to Know About the Game.md DELETED
@@ -1,122 +0,0 @@
2
- <h1>How to Install Car Parking Multiplayer APK on Android</h1>
3
- <p>Car Parking Multiplayer is a realistic driving simulator game that lets you park, tune, and race different cars in an open world with real players. You can also free walk, visit gas stations and car services, and chat with other players using voice chat.</p>
4
- <p>If you are a fan of this game, you might want to install it from an APK file instead of waiting for it to be updated on Google Play. An APK file is a package that contains all the files needed to run an Android app. By installing an APK file, you can get access to the latest features and fixes of the game before they are officially released.</p>
6
- <p>However, installing an APK file is not as simple as downloading an app from Google Play. You need to follow some steps and precautions to make sure you are installing the right file and not harming your device. In this article, we will show you how to install car parking multiplayer apk on your Android device safely and easily.</p>
7
- <h2>Step 1: Enable unknown sources on your Android device</h2>
8
- <p>By default, Android devices only allow you to install apps from Google Play or other trusted sources. To install an APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from outside of Google Play.</p>
9
- <p>To enable unknown sources, follow these steps:</p>
10
- <ul>
11
- <li>Go to your device settings and tap Apps & Notifications (or Apps in older versions of Android).</li>
12
- <li>Tap the three dots in the upper-right corner.</li>
13
- <li>Tap Special access.</li>
14
- <li>Tap Install unknown apps.</li>
15
- <li>Tap Chrome (or whichever web browser you use).</li>
16
- <li>Move Allow from this source to the On position.</li>
17
- </ul>
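- <p>If you want to double-check this setting from a computer, a minimal sketch using adb is shown below. This is an optional cross-check, not part of the official steps; it assumes adb is installed on your computer and USB debugging is enabled on the device. Note that on Android 8.0 and later the global toggle is replaced by the per-app permission described above, so the legacy value may read as null.</p>
- <pre><code># Optional cross-check of the legacy "unknown sources" toggle via adb.
- # Assumes adb is on your PATH and USB debugging is enabled.
- import subprocess
-
- result = subprocess.run(
-     ["adb", "shell", "settings", "get", "secure", "install_non_market_apps"],
-     capture_output=True, text=True, check=True,
- )
- # "1" means the legacy global toggle is on; "0" or "null" means it is off
- # or unused (Android 8.0+ uses a per-app permission instead).
- print("install_non_market_apps =", result.stdout.strip())
- </code></pre>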
18
- <h2>Step 2: Download the APK file from a reputable source</h2>
19
- <p>Now that you have enabled unknown sources, you need to find and download the APK file for car parking multiplayer. You can use your browser or a file explorer app to do this.</p>
20
- <p>However, not all APK files are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your data. Therefore, you should only download APK files from reputable sources that verify and scan the files they host.</p>
70
- <p>One of the most popular and trusted sources for downloading APK files is APK Mirror. This website hosts thousands of Android apps that are updated regularly and checked for security issues. You can also find reviews and ratings from other users who have downloaded the apps.</p>
71
- <p>To download the APK file from APK Mirror, follow these steps:</p>
72
- <ul>
73
- <li>Open your browser and go to the APK Mirror website.</li>
74
- <li>Type car parking multiplayer in the search box and tap Enter.</li>
75
- <li>Select the version of the game you want from the results.</li>
- </ul>
- <p>You can also interact with other players, NPCs, animals, and objects. You can also use real gas stations and car services to refuel, repair, wash, and paint your car.</p>
76
- <h3>Compete against real players in multiplayer racing and exchange cars with them</h3>
77
- <p>Car parking multiplayer is not just about parking and exploring. It is also about competing against real players in multiplayer racing and exchanging cars with them. You can join or create races with different modes, rules, and maps. You can also chat with other players using voice chat or text chat. You can also exchange cars with other players using a simple trade system. By installing an APK file, you can join more races and trade more cars that are not available on Google Play.</p>
78
- <h2>Risks of Installing Car Parking Multiplayer APK</h2>
79
- <p>Installing car parking multiplayer apk also has some risks that you should be aware of before doing it. Here are some of them:</p>
80
- <h3>Potential malware or viruses from untrusted sources</h3>
81
- <p>As mentioned earlier, not all APK files are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your data. Therefore, you should only download APK files from reputable sources that verify and scan the files they host. You should also scan the APK file with an antivirus app before installing it.</p>
82
- <h3>Compatibility issues with your device or Android version</h3>
83
- <p>Another risk of installing an APK file is that it may not be compatible with your device or Android version. Some APK files may require a higher or lower Android version than your device has. Some APK files may also require specific hardware or software features that your device does not have. This may cause the app to crash, freeze, or malfunction on your device. Therefore, you should check the requirements and reviews of the app before installing it.</p>
84
- <h3>Violation of Google Play terms and conditions</h3>
85
- <p>A final risk of installing an APK file is that it may violate Google Play terms and conditions. Google Play is the official app store for Android devices and it has some rules and policies that app developers and users must follow. By installing an APK file from outside of Google Play, you may be breaking some of these rules and policies. This may result in Google banning your account, suspending your access to Google Play services, or removing the app from your device. Therefore, you should be careful and responsible when installing an APK file.</p>
86
- <h2>Tips for Installing Car Parking Multiplayer APK Safely</h2>
87
- <p>To avoid the risks of installing car parking multiplayer apk, you should follow some tips and precautions to make sure you are installing the right file and not harming your device. Here are some of them:</p>
88
- <h4>Only download APK files from verified websites or apps like APK Mirror</h4>
89
- <p>As mentioned earlier, one of the most popular and trusted sources for downloading APK files is APK Mirror. This website hosts thousands of Android apps that are updated regularly and checked for security issues. You can also find reviews and ratings from other users who have downloaded the apps.</p>
90
- <p>You should avoid downloading APK files from unknown or suspicious websites or apps that may contain malware or viruses. You should also avoid clicking on pop-up ads or links that claim to offer free or premium apps.</p>
91
- <h4>Scan the APK file with an antivirus app before installing it</h4>
92
- <p>Another tip for installing car parking multiplayer apk safely is to scan the APK file with an antivirus app before installing it. An antivirus app is an app that protects your device from malware or viruses by scanning and removing them.</p>
93
- <p>There are many antivirus apps available on Google Play, but one of the most popular and effective ones is Avast Mobile Security. This app lets you scan your device, files, apps, network, and web for any threats. It also offers other features like VPN, firewall, app lock, photo vault, junk cleaner, battery saver, and more.</p>
94
- <p>To scan the APK file with Avast Mobile Security, follow these steps:</p>
95
- <ul>
96
- <li>Open Avast Mobile Security and tap Scan at the bottom of the screen.</li>
97
- <li>Tap Custom Scan.</li>
98
- <li>Select Files & Folders.</li>
99
- <li>Select Downloads (or whichever folder you saved the APK file in).</li>
100
- <li>Select the APK file that you downloaded.</li>
101
- <li>Tap Scan Now.</li>
102
- <li>Wait for the scan to finish. If no threats are found, tap Install Now.</li>
103
- </ul>
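- <p>As an extra integrity check alongside the antivirus scan, you can compare the file's SHA-256 hash with a checksum published on the download page, if the site provides one. A minimal Python sketch is shown below; the file name and expected hash are placeholders, not values taken from the game or this article.</p>
- <pre><code># Compute the APK's SHA-256 and compare it with a published checksum.
- # The file name and expected value below are placeholders.
- import hashlib
-
- def sha256_of(path, chunk_size=1024 * 1024):
-     digest = hashlib.sha256()
-     with open(path, "rb") as f:
-         # Read in chunks so large APKs do not have to fit in memory.
-         for chunk in iter(lambda: f.read(chunk_size), b""):
-             digest.update(chunk)
-     return digest.hexdigest()
-
- expected = "paste-the-published-checksum-here"
- actual = sha256_of("car-parking-multiplayer.apk")
- print("match" if actual == expected else "MISMATCH - do not install")
- </code></pre>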
104
- <h4>Check the permissions and reviews of the app before installing it</h4>
105
- <p>A final tip for installing car parking multiplayer apk safely is to check the permissions and reviews of the app before installing it. Permissions are the access rights that an app requests from your device to perform certain functions. For example, an app may ask for permission to access your camera, microphone, location, contacts, storage, or network. You should be careful and only grant permissions that are necessary and relevant for the app's functionality. You should also review the app's privacy policy and terms of service to understand how it uses your data and what rights you have.</p>
- <p>You should also check the reviews and ratings of the app before installing it. Reviews and ratings are the feedback that other users have given to the app based on their experience. You can find them on Google Play or APK Mirror. You should read both positive and negative reviews to get a balanced and honest opinion of the app. You should also look for common issues or complaints that other users have reported and see if they have been resolved or not.</p>
- <p>To check the permissions and reviews of the app before installing it, follow these steps:</p>
- <ol>
- <li>Tap the APK file that you downloaded.</li>
- <li>Tap App Info.</li>
- <li>Tap Permissions.</li>
- <li>Review the permissions that the app requests and tap Deny or Allow as you wish.</li>
- <li>Tap Back.</li>
- <li>Tap Reviews.</li>
- <li>Read the reviews and ratings of the app and decide if you want to install it or not.</li>
- </ol>
- <h2>Conclusion</h2>
106
- <p>Installing car parking multiplayer apk on your Android device can be a great way to enjoy the latest version of the game with more features, customization, and multiplayer options. However, it can also be risky if you don't follow some steps and precautions to make sure you are installing the right file and not harming your device.</p>
107
- <p>In this article, we have shown you how to install car parking multiplayer apk on your Android device safely and easily. We have also explained the benefits and risks of installing an APK file and given you some tips to avoid them. We hope you found this article helpful and informative.</p>
108
- <p>If you have any questions or comments, please feel free to leave them below. We would love to hear from you. And if you liked this article, please share it with your friends who might be interested in installing car parking multiplayer apk on their Android devices.</p>
109
- <p>Thank you for reading and happy parking!</p>
110
- <h2>FAQs</h2>
111
- <h3>What is car parking multiplayer?</h3>
112
- <p>Car Parking Multiplayer is a realistic driving simulator game that lets you park, tune, and race different cars in an open world with real players. You can also free walk, visit gas stations and car services, and chat with other players using voice chat.</p>
113
- <h3>What is an APK file?</h3>
114
- <p>An APK file is a package that contains all the files needed to run an Android app. By installing an APK file, you can get access to the latest features and fixes of the app before they are officially released on Google Play.</p>
115
- <h3>How do I enable unknown sources on my Android device?</h3>
116
- <p>To enable unknown sources on your Android device, go to your device settings, tap Apps & Notifications (or Apps in older versions of Android), tap the three dots in the upper-right corner, tap Special access, tap Install unknown apps, tap Chrome (or whichever web browser you use), and move Allow from this source to the On position.</p>
117
- <h3>Where can I download car parking multiplayer apk safely?</h3>
118
- <p>You can download car parking multiplayer apk safely from APK Mirror. This website hosts thousands of Android apps that are updated regularly and checked for security issues. You can also find reviews and ratings from other users who have downloaded the apps.</p>
119
- <h3>How do I scan an APK file with an antivirus app before installing it?</h3>
120
- <p>To scan an APK file with an antivirus app before installing it, open Avast Mobile Security (or whichever antivirus app you use), tap Scan at the bottom of the screen, tap Custom Scan, select Files & Folders, select Downloads (or whichever folder you saved the APK file in), select the APK file that you downloaded, tap Scan Now, wait for the scan to finish, and tap Install Now if no threats are found.</p>
 
spaces/1phancelerku/anime-remove-background/Download Rise of Empires Ice and Fire APK and Join the Epic War.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>How to Download Rise of Empires: Ice and Fire APK</h1>
3
- <p>If you are a fan of medieval strategy war games, you might want to try Rise of Empires: Ice and Fire, a massive multi-player online game that lets you build your own empire, train your troops, join an alliance, and fight against other players from around the world. In this article, we will show you how to download and install Rise of Empires: Ice and Fire APK on your Android device, as well as some tips and tricks on how to play the game.</p>
5
- <h2>What is Rise of Empires: Ice and Fire?</h2>
6
- <p>Rise of Empires: Ice and Fire is a real-time nation vs. nation medieval strategy war game developed by Long Tech Network Limited. The game is set in a fantasy world where you can choose from eight different nations, each with its own culture, history, and advantages. You can also choose from four different roles: farmer, builder, soldier, or king. Each role has its own responsibilities and benefits for your empire.</p>
7
- <p>The game features a realistic 3D graphics engine that brings the medieval world to life. You can zoom in and out to see the details of your buildings, troops, heroes, dragons, and more. You can also interact with other players through chat, diplomacy, trade, or war. You can join or create an alliance with other players to cooperate or compete for resources, territory, glory, and honor.</p>
8
- <h3>Features of Rise of Empires: Ice and Fire</h3>
9
- <p>Some of the features that make Rise of Empires: Ice and Fire a fun and addictive game are:</p>
10
- <ul>
11
- <li>One World, One Server: You can play with millions of players from around the world on the same server. No matter where you are, you can always find someone to ally with or fight against.</li>
12
- <li>Real-Time Battles: You can command your troops in real-time battles that require strategy, tactics, and coordination. You can use different formations, skills, heroes, dragons, siege weapons, and more to gain an edge over your enemies.</li>
13
- <li>Dynamic World Map: You can explore a vast world map that changes according to the seasons, weather, time of day, events, and player actions. You can discover new lands, resources, monsters, secrets, and more.</li>
14
- <li>Hero System: You can recruit legendary heroes from different nations, each with their own backstory, personality, skills, and equipment. You can level up your heroes, equip them with powerful items, and use them in battles or quests.</li>
15
- <li>Dragon System: You can hatch, raise, train, and ride your own dragons. Each dragon has its own attributes, skills, appearance, and personality. You can use your dragons to assist you in battles or exploration.</li>
16
- <li>Alliance System: You can join or create an alliance with other players who share your vision and goals. You can cooperate with your allies in building projects, researches, wars, events, quests, trade, diplomacy, and more. You can also compete with other alliances for rankings, rewards, territory control.</li>
17
- </ul>
18
- <h3>Requirements for Rise of Empires: Ice and Fire APK</h3>
19
- <p>To download and install Rise of Empires: Ice and Fire APK, you need to have an Android device that meets the following requirements:</p>
20
- <ul>
21
- <li>Android version: 4.1 or higher</li>
22
- <li>Free storage space: at least 2 GB</li>
23
- <li>Internet connection: stable and fast</li>
24
- <li>Permission: allow installation of apps from unknown sources</li>
25
- </ul>
26
- <p>If you are not sure about your device's specifications, you can check them in the settings menu of your device. You can also compare your device with the list of compatible devices on the official website of the game.</p>
75
- <h2>How to Download and Install Rise of Empires: Ice and Fire APK</h2>
76
- <p>Now that you know what you need to play Rise of Empires: Ice and Fire, you can follow these simple steps to download and install the APK file on your device:</p>
77
- <h3>Step 1: Enable Unknown Sources</h3>
78
- <p>Since you are going to download the APK file from a third-party source, you need to enable the option that allows installation of apps from unknown sources. This option is usually disabled by default for security reasons, but you can easily enable it by following these steps:</p>
79
- <ol>
80
- <li>Go to the settings menu of your device.</li>
81
- <li>Find and tap on the security or privacy option.</li>
82
- <li>Look for the option that says unknown sources or install unknown apps.</li>
83
- <li>Toggle the switch or check the box to enable it.</li>
84
- <li>A warning message may pop up, asking you to confirm your action. Tap on OK or Allow to proceed.</li>
85
- </ol>
86
- <h3>Step 2: Download the APK File</h3>
87
- <p>Once you have enabled unknown sources, you can download the APK file of Rise of Empires: Ice and Fire from a reliable source. There are many websites that offer APK files for free, but some of them may contain malware or viruses that can harm your device. Therefore, you should be careful and choose a trusted source. One of the sources that we recommend is APKPure, which is a popular and safe platform for downloading APK files.</p>
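- <p>Beyond picking a reputable site, you can also check who actually signed the APK before installing it. The sketch below uses apksigner, a tool that ships with the Android SDK build-tools; it assumes the tool is on your PATH, and the file name is a placeholder. If the printed certificate does not match the official developer's, treat the file as suspect.</p>
- <pre><code># Print an APK's signing certificates with apksigner (Android SDK
- # build-tools). The file name is a placeholder.
- import subprocess
-
- completed = subprocess.run(
-     ["apksigner", "verify", "--print-certs", "rise-of-empires.apk"],
-     capture_output=True, text=True,
- )
- print(completed.stdout or completed.stderr)
- </code></pre>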
88
- <p>To download the APK file from APKPure, follow these steps:</p>
89
- <ol>
90
- <li>Open your browser and go to the APKPure website.</li>
91
- <li>In the search box, type Rise of Empires: Ice and Fire and hit enter.</li>
92
- <li>Select the game from the list of results and tap on the download button.</li>
93
- <li>A new page will open, showing you the details and reviews of the game. Scroll down and tap on the download APK button.</li>
94
- <li>A pop-up window will appear, asking you to choose a download location. Select a folder where you want to save the file and tap on OK.</li>
95
- <li>The download will start automatically. You can see the progress in the notification bar of your device.</li>
96
- </ol>
97
- <h3>Step 3: Install the APK File</h3>
98
- <p>After downloading the APK file, you need to install it on your device. To do so, follow these steps:</p>
99
- <ol>
100
- <li>Go to the folder where you saved the APK file and tap on it.</li>
101
- <li>A pop-up window will appear, asking you to confirm your action. Tap on Install to proceed.</li>
102
- <li>The installation will take a few minutes. You can see the progress in the notification bar of your device.</li>
103
- <li>When the installation is complete, a message will appear, saying App installed. Tap on Open to launch the game or Done to exit.</li>
104
- </ol>
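- <p>As an alternative to tapping through the installer on the phone, you can sideload the same APK from a computer with adb, as in the minimal sketch below. This assumes adb is installed, USB debugging is enabled, and the device is connected; the file path is a placeholder.</p>
- <pre><code># Sideload the downloaded APK from a computer with adb.
- # "-r" replaces the app if an older version is already installed.
- import subprocess
-
- subprocess.run(["adb", "install", "-r", "rise-of-empires.apk"], check=True)
- </code></pre>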
105
- <h3>Step 4: Launch the Game and Enjoy</h3>
106
- <p>Congratulations! You have successfully downloaded and installed Rise of Empires: Ice and Fire APK on your device. Now you can launch the game and enjoy its features. You may need to create an account or log in with your existing account to play the game. You may also need to download some additional data or updates before playing. Follow the instructions on the screen and wait for the game to load.</p>
107
- <h2>How to Play Rise of Empires: Ice and Fire</h2>
- <p>Now that you have downloaded and installed Rise of Empires: Ice and Fire APK, you might be wondering how to play the game. Here are some basic tips and tricks to help you get started:</p>
108
- <h3>Choose Your Nation and Role</h3>
109
- <p>The first thing you need to do when you start the game is to choose your nation and role. There are eight nations to choose from: Rome, China, Britain, France, Germany, Spain, Russia, and Arabia. Each nation has its own culture, history, and advantages. For example, Rome has strong infantry and siege weapons, China has fast research and resource production, Britain has powerful archers and cavalry, and so on. You can read the description of each nation before making your choice.</p>
110
- <p>After choosing your nation, you need to choose your role. There are four roles to choose from: farmer, builder, soldier, or king. Each role has its own responsibilities and benefits for your empire. For example, farmers can produce more food and wood, builders can construct more buildings and roads, soldiers can train more troops and fight better, and kings can rule over other players and get more rewards. You can change your role every 24 hours if you want to try something different.</p>
111
- <h3>Build Your Empire and Train Your Troops</h3>
112
- <p>The next thing you need to do is to build your empire and train your troops. You will start with a small city that you need to upgrade and expand. You can build various buildings that serve different purposes, such as farms, sawmills, barracks, stables, workshops, academies, markets, embassies, walls, towers, and more. You can also build roads that connect your buildings and increase their efficiency.</p>
113
- <p>You also need to train your troops that will help you defend your city and attack your enemies. You can train different types of troops, such as infantry, cavalry, archers, siege weapons, heroes, and dragons. Each type of troop has its own strengths and weaknesses. For example, infantry are good at defending against cavalry but weak against archers, cavalry are good at attacking archers but weak against infantry, archers are good at attacking infantry but weak against cavalry, and so on. You can also upgrade your troops to make them stronger and faster.</p>
114
- <h3>Join an Alliance and Fight for Glory</h3>
115
- <p>The most important thing you need to do is to join an alliance and fight for glory. You can join or create an alliance with other players who share your vision and goals. You can cooperate with your allies in building projects, researches, wars, events, quests, trade, diplomacy, and more. You can also compete with other alliances for rankings, rewards, territory control, and honor. You can also chat with your allies and make new friends.</p>
116
- <p>Joining an alliance will give you many benefits, such as protection, support, resources, information, and more. You can also participate in alliance wars, where you can attack or defend against other alliances. You can also join the world war, where you can fight for your nation against other nations. You can also join the ice and fire war, where you can choose to be either ice or fire and fight for the ultimate power.</p>
117
- <h3>Explore the World and Conquer New Lands</h3>
118
- <p>The last thing you need to do is to explore the world and conquer new lands. You can explore a vast world map that changes according to the seasons, weather, time of day, events, and player actions. You can discover new lands, resources, monsters, secrets, and more. You can also conquer new lands by attacking other players or neutral cities. You can also relocate your city to a different location if you want to change your scenery or strategy.</p>
119
- <p>Exploring the world and conquering new lands will give you many rewards, such as experience, items, resources, medals, titles, and more. You can also encounter different events and quests that will challenge you and test your skills. You can also witness the rise and fall of empires, the clash of ice and fire, and the dawn of a new era.</p>
120
- <h2>Tips and Tricks for Rise of Empires: Ice and Fire</h2>
121
- <p>To help you play Rise of Empires: Ice and Fire better, here are some tips and tricks that you should know:</p>
122
- <h3>Upgrade Your Buildings and Research New Technologies</h3>
123
- <p>One of the keys to success in Rise of Empires: Ice and Fire is to upgrade your buildings and research new technologies. Upgrading your buildings will increase their functions, capacity, production, defense, and more. Researching new technologies will unlock new features, units, skills, items, and more. You should always keep your buildings and researches busy and prioritize the ones that are most important for your role and strategy.</p>
124
- <h3>Collect Resources and Manage Your Economy</h3>
125
- <p>Another key to success in Rise of Empires: Ice and Fire is to collect resources and manage your economy. Resources are essential for building, training, researching, trading, and more. There are four main types of resources: food, wood, iron, and silver. You can collect resources by building farms, sawmills, mines, and mints. You can also collect resources by gathering them from the world map, trading them with other players or the market, looting them from other players or monsters, or receiving them from events or quests. You should always keep your resources balanced and avoid wasting them or letting them overflow.</p>
126
- <h3>Use Your Heroes and Dragons Wisely</h3>
127
- <p>Another key to success in Rise of Empires: Ice and Fire is to use your heroes and dragons wisely. Heroes and dragons are powerful units that can make a big difference in battles and exploration. You can recruit heroes from different nations, each with their own skills and equipment. You can also hatch, raise, train, and ride your own dragons, each with their own attributes and skills. You should always level up your heroes and dragons, equip them with the best items, and use them according to their strengths and weaknesses.</p>
128
- <h3>Participate in Events and Quests</h3>
129
- <p>Another key to success in Rise of Empires: Ice and Fire is to participate in events and quests. Events and quests are special activities that offer you various challenges and rewards. You can participate in daily, weekly, monthly, seasonal, or special events and quests that suit your level and interest. You can also participate in alliance events and quests that require cooperation and coordination with your allies. You should always check the event and quest menu for the latest updates and opportunities.</p>
130
- <h2>Conclusion</h2>
131
- <p>Rise of Empires: Ice and Fire is a fun and addictive game that lets you build your own empire, train your troops, join an alliance, and fight against other players from around the world. In this article, we have shown you how to download and install Rise of Empires: Ice and Fire APK on your Android device, as well as some tips and tricks on how to play the game. We hope you have enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
132
- <h2>FAQs</h2>
133
- <p>Here are some frequently asked questions about Rise of Empires: Ice and Fire:</p>
134
- <ul>
135
- <li><b>Q: Is Rise of Empires: Ice and Fire free to play?</b></li>
136
- <li>A: Yes, Rise of Empires: Ice and Fire is free to download and play. However, the game also offers some optional in-app purchases that can enhance your gaming experience.</li>
137
- <li><b>Q: How can I change my nation or role?</b></li>
138
- <li>A: You can change your nation or role by using a special item called Nation Change Card or Role Change Card. You can get these cards from events, quests, rewards, or the shop.</li>
139
- <li><b>Q: How can I get more heroes or dragons?</b></li>
140
- <li>A: You can get more heroes or dragons by using a special item called Hero Recruitment Card or Dragon Egg. You can get these cards from events, quests, rewards, or the shop.</li>
141
- <li><b>Q: How can I contact the customer service or report a bug?</b></li>
142
- <li>A: You can contact the customer service or report a bug by tapping on the settings icon on the top right corner of the screen, then tapping on the help center icon on the bottom left corner of the screen. You can also send an email to [email protected].</li>
143
- <li><b>Q: How can I join the official community or follow the latest news?</b></li>
144
- <li>A: You can join the official community or follow the latest news by visiting the official website, Facebook page, Twitter account, YouTube channel, Discord server, Reddit forum, or Instagram account of the game.</li>
145
- </ul>
 
spaces/1phancelerku/anime-remove-background/Download Subway Surfers Hack Game from Apkpure and Unlock All Characters.md DELETED
@@ -1,139 +0,0 @@
2
- <h1>Subway Surfers Hack Game Download Apkpure: Everything You Need to Know</h1>
3
- <p>Subway Surfers is one of the most popular endless runner games in the world. It has been downloaded over 1 billion times on Google Play Store and has millions of fans across different platforms. The game is fun, colorful, and addictive, as you run, jump, slide, and surf through various subway stations, dodging trains, obstacles, and the grumpy inspector and his dog.</p>
5
- <p>But what if you want to get more out of the game? What if you want to unlock all the characters, hoverboards, power-ups, and coins without spending any money or time? What if you want to enjoy the game without any ads or interruptions? Well, that's where Subway Surfers hack game comes in.</p>
6
- <p>Subway Surfers hack game is a modified version of the original game that gives you unlimited access to everything in the game. You can download it from apkpure, a website that offers free and safe APK files for Android devices. In this article, we will tell you everything you need to know about Subway Surfers hack game download apkpure, including how to download it, what are its features and benefits, tips and tricks to play it, reviews and ratings, and FAQs.</p>
7
- <h2>How to Download Subway Surfers Hack Game from Apkpure</h2>
8
- <p>Downloading Subway Surfers hack game from apkpure is very easy and fast. Here are the steps you need to follow:</p>
9
- <ol>
10
- <li>Go to apkpure.com on your browser and search for "Subway Surfers hack".</li>
11
- <li>Select the latest version of the game from the results and tap on "Download APK".</li>
12
- <li>Wait for the download to finish and then open the APK file on your device.</li>
13
- <li>Allow the installation of unknown sources if prompted by your device settings.</li>
14
- <li>Follow the instructions on the screen and install the game on your device.</li>
15
- <li>Launch the game and enjoy!</li>
16
- </ol>
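- <p>If the installer rejects the file as invalid, a quick way to tell whether the download itself is intact is to remember that an APK is just a ZIP archive. The minimal sketch below opens it with Python's zipfile module; the file name is a placeholder. A truncated or corrupted download will fail one of these checks.</p>
- <pre><code># Sanity-check a downloaded APK: it must be a readable ZIP archive
- # containing AndroidManifest.xml. The file name is a placeholder.
- import zipfile
-
- with zipfile.ZipFile("subway-surfers-hack.apk") as apk:
-     bad = apk.testzip()  # returns the first corrupt member, or None
-     assert bad is None, f"corrupt entry: {bad}"
-     assert "AndroidManifest.xml" in apk.namelist(), "not an APK?"
- print("archive looks intact")
- </code></pre>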
17
- <h2>What are the Features and Benefits of Subway Surfers Hack Game</h2>
18
- <p>Subway Surfers hack game has many features and benefits that make it better than the original game. Here are some of them:</p>
19
- <ul>
20
- <li>You get unlimited coins, keys, score multipliers, super sneakers, jetpacks, magnets, mystery boxes, power jumpers, hoverboards, characters, outfits, and more.</li>
21
- <li>You can play any world tour location without having to wait for updates or complete missions.</li>
22
- <li>You can customize your character and hoverboard with any style and color you want.</li>
23
- <li>You can use any power-up at any time without any cooldown or limit.</li>
24
- <li>You can skip any obstacle or train by using a hoverboard or a jetpack.</li>
25
- <li>You can play offline without any ads or interruptions.</li>
26
- <li>You can challenge your friends and other players online with your high scores and achievements.</li>
27
- </ul>
28
- <h2>Tips and Tricks to Play Subway Surfers Hack Game</h2>
29
- <p>Even though Subway Surfers hack game gives you unlimited advantages, you still need some skills and strategies to play it well. Here are some tips and tricks to help you:</p>
30
- <ul>
31
- <li>Use move combos to change direction or cancel jumps in mid-air. For example, you can swipe up + right + right to dash to the right in mid-air or swipe up + down to cancel your jump and roll.</li>
32
- <li>Stay high as much as possible by jumping on top of trains or using hoverboards or jetpacks. This will help you avoid collisions with obstacles on the ground and collect more coins.</li>
33
- <li>Use power-ups wisely by activating them at the right time and place. For example, use a coin magnet when there are many coins around or use a score multiplier when you have a long run without crashing.</li>
34
- <li>Explore different routes and shortcuts by switching tracks or taking alternative paths. This will help you find more items, avoid crowded areas, and discover hidden secrets.</li>
35
- <li>Complete the daily challenges and missions to earn more rewards and achievements. You can also watch videos or complete offers to get more coins and keys.</li>
36
- </ul>
37
- <h2>Reviews and Ratings of Subway Surfers Hack Game</h2>
38
- <p>Subway Surfers hack game has received many positive reviews and ratings from users who have downloaded it from apkpure. Here are some of them:</p>
39
- <table>
40
- <tr>
41
- <th>User</th>
42
- <th>Rating</th>
43
- <th>Review</th>
44
- </tr>
45
- <tr>
46
- <td>Ashley</td>
47
- <td>5 stars</td>
48
- <td>This game is awesome! I love how I can play any location and unlock everything without spending any money. It's so fun and addictive. I recommend it to anyone who loves Subway Surfers.</td>
49
- </tr>
50
- <tr>
51
- <td>Brandon</td>
52
- <td>4 stars</td>
53
- <td>I like this game a lot. It has all the features of the original game plus more. The only thing I don't like is that sometimes it crashes or freezes on my device. I hope they fix that soon.</td>
54
- </tr>
55
- <tr>
56
- <td>Cindy</td>
57
- <td>5 stars</td>
58
- <td>This game is amazing! It's like having a cheat code for Subway Surfers. I can do whatever I want and never get bored. It's the best hack game ever!</td>
59
- </tr>
60
- <tr>
61
- <td>Daniel</td>
62
- <td>3 stars</td>
63
- <td>This game is good but not great. It's too easy and not challenging enough. I prefer the original game where I have to work hard to earn coins and keys. This game is for casual players who just want to have fun.</td>
64
- </tr>
65
- <tr>
66
- <td>Ella</td>
67
- <td>5 stars</td>
68
- <td>This game is fantastic! It's so much better than the original game. It has everything I need and more. It's the ultimate Subway Surfers experience!</td>
69
- </tr>
70
- </table>
71
- <h2>Conclusion: Is Subway Surfers Hack Game Worth It?</h2>
72
- <p>Subway Surfers hack game is definitely worth it if you are a fan of Subway Surfers and want to enjoy the game without any limitations or restrictions. It gives you unlimited access to everything in the game, from coins and keys to characters and hoverboards. You can also play any world tour location without waiting for updates or completing missions. You can customize your character and hoverboard with any style and color you want. You can use any power-up at any time without any cooldown or limit. You can skip any obstacle or train by using a hoverboard or a jetpack. You can play offline without any ads or interruptions. You can challenge your friends and other players online with your high scores and achievements.</p>
73
- <p>However, Subway Surfers hack game is not for everyone. Some people may find it too easy and not challenging enough. Some people may prefer the original game where they have to work hard to earn coins and keys and unlock items. Some people may be concerned about the safety and legality of downloading a hacked game from an unknown source.</p>
74
- <p>Ultimately, the choice is yours. If you want to try Subway Surfers hack game, you can download it from apkpure for free and see for yourself how it works. If you don't like it, you can always uninstall it and go back to the original game.</p>
124
- <h2>FAQs: Common Questions and Answers about Subway Surfers Hack Game</h2>
- <p>Here are some of the most common questions and answers about the Subway Surfers hack game:</p>
- <ol>
- <li><b>Is Subway Surfers hack game safe to download?</b></li>
- <p>The Subway Surfers hack game is safe to download from apkpure, as they scan all their APK files for viruses and malware before uploading them to their website. However, you should always be careful when downloading anything from the internet, especially from unknown sources. You should also check the permissions and reviews of the app before installing it on your device.</p>
- <li><b>Is Subway Surfers hack game legal to use?</b></li>
- <p>The Subway Surfers hack game is not legal to use, as it violates the terms of service and intellectual property rights of the original game developers, Kiloo and SYBO Games. By using a hacked game, you are also cheating and being unfair to other players who play the game legitimately. You may also risk getting banned or suspended from the game if you are caught using a hacked version.</p>
- <li><b>Will Subway Surfers hack game work on my device?</b></li>
- <p>The Subway Surfers hack game will work on most Android devices that support APK files and have enough storage space and RAM to run the game smoothly. However, some devices may not be compatible with the hacked game or may experience glitches or errors while playing it. You should always check the compatibility and requirements of the game before downloading it from apkpure.</p>
- <li><b>How can I update Subway Surfers hack game?</b></li>
- <p>The Subway Surfers hack game is not updated automatically like the original game. You have to manually download and install the latest version of the hacked game from apkpure whenever a new update is available. You should also back up your game data before updating, as you may lose your progress or settings if you overwrite the existing game.</p>
- <li><b>Can I play Subway Surfers hack game with my friends?</b></li>
- <p>Yes, you can play the Subway Surfers hack game with your friends online, as long as they also have the hacked game installed on their devices. You can connect with them through Facebook or Google Play Games and compete with them on the leaderboards and achievements. However, be aware that playing with a hacked game may give you an unfair advantage over your friends and other players who play the game legitimately.</p>
- </ol>

spaces/4Taps/SadTalker/src/face3d/data/__init__.py DELETED
@@ -1,116 +0,0 @@
- """This package includes all the modules related to data loading and preprocessing
-
-     To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
-     You need to implement four functions:
-         -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-         -- <__len__>: return the size of dataset.
-         -- <__getitem__>: get a data point from data loader.
-         -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
-
-     Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
-     See our template dataset class 'template_dataset.py' for more details.
- """
- import numpy as np
- import importlib
- import torch.utils.data
- from face3d.data.base_dataset import BaseDataset
-
-
- def find_dataset_using_name(dataset_name):
-     """Import the module "data/[dataset_name]_dataset.py".
-
-     In the file, the class called DatasetNameDataset() will
-     be instantiated. It has to be a subclass of BaseDataset,
-     and it is case-insensitive.
-     """
-     dataset_filename = "data." + dataset_name + "_dataset"
-     datasetlib = importlib.import_module(dataset_filename)
-
-     dataset = None
-     target_dataset_name = dataset_name.replace('_', '') + 'dataset'
-     for name, cls in datasetlib.__dict__.items():
-         if name.lower() == target_dataset_name.lower() \
-            and issubclass(cls, BaseDataset):
-             dataset = cls
-
-     if dataset is None:
-         raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
-
-     return dataset
-
-
- def get_option_setter(dataset_name):
-     """Return the static method <modify_commandline_options> of the dataset class."""
-     dataset_class = find_dataset_using_name(dataset_name)
-     return dataset_class.modify_commandline_options
-
-
- def create_dataset(opt, rank=0):
-     """Create a dataset given the option.
-
-     This function wraps the class CustomDatasetDataLoader.
-     This is the main interface between this package and 'train.py'/'test.py'
-
-     Example:
-         >>> from data import create_dataset
-         >>> dataset = create_dataset(opt)
-     """
-     data_loader = CustomDatasetDataLoader(opt, rank=rank)
-     dataset = data_loader.load_data()
-     return dataset
-
- class CustomDatasetDataLoader():
-     """Wrapper class of Dataset class that performs multi-threaded data loading"""
-
-     def __init__(self, opt, rank=0):
-         """Initialize this class
-
-         Step 1: create a dataset instance given the name [dataset_mode]
-         Step 2: create a multi-threaded data loader.
-         """
-         self.opt = opt
-         dataset_class = find_dataset_using_name(opt.dataset_mode)
-         self.dataset = dataset_class(opt)
-         self.sampler = None
-         print("rank %d %s dataset [%s] was created" % (rank, self.dataset.name, type(self.dataset).__name__))
-         if opt.use_ddp and opt.isTrain:
-             world_size = opt.world_size
-             self.sampler = torch.utils.data.distributed.DistributedSampler(
-                 self.dataset,
-                 num_replicas=world_size,
-                 rank=rank,
-                 shuffle=not opt.serial_batches
-             )
-             self.dataloader = torch.utils.data.DataLoader(
-                 self.dataset,
-                 sampler=self.sampler,
-                 num_workers=int(opt.num_threads / world_size),
-                 batch_size=int(opt.batch_size / world_size),
-                 drop_last=True)
-         else:
-             self.dataloader = torch.utils.data.DataLoader(
-                 self.dataset,
-                 batch_size=opt.batch_size,
-                 shuffle=(not opt.serial_batches) and opt.isTrain,
-                 num_workers=int(opt.num_threads),
-                 drop_last=True
-             )
-
-     def set_epoch(self, epoch):
-         self.dataset.current_epoch = epoch
-         if self.sampler is not None:
-             self.sampler.set_epoch(epoch)
-
-     def load_data(self):
-         return self
-
-     def __len__(self):
-         """Return the number of data in the dataset"""
-         return min(len(self.dataset), self.opt.max_dataset_size)
-
-     def __iter__(self):
-         """Return a batch of data"""
-         for i, data in enumerate(self.dataloader):
-             if i * self.opt.batch_size >= self.opt.max_dataset_size:
-                 break
-             yield data

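For reference, this is the by-name discovery contract the docstring above describes: a module `dummy_dataset.py` containing a `DummyDataset` subclass is found purely from the `--dataset_mode dummy` flag. A minimal sketch of a conforming dataset class, assuming a hypothetical `face3d/data/dummy_dataset.py` module and the `BaseDataset` interface imported above (this example is not part of the original repo):

# face3d/data/dummy_dataset.py -- hypothetical example module
from face3d.data.base_dataset import BaseDataset


class DummyDataset(BaseDataset):
    """Minimal dataset satisfying the four-function contract above."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # (optional) add dataset-specific flags and defaults
        parser.add_argument('--dummy_size', type=int, default=8)
        return parser

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)  # first call the base initializer
        self.size = getattr(opt, 'dummy_size', 8)

    def __len__(self):
        return self.size                 # size of the dataset

    def __getitem__(self, index):
        return {'index': index}          # one data point

With such a file in place, `find_dataset_using_name('dummy')` resolves the class via `importlib` (the lowercase match makes the lookup case-insensitive), and `create_dataset(opt)` routes through it.
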
spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/audiogen_base_16khz.py DELETED
@@ -1,23 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from ..musicgen._explorers import LMExplorer
- from ...environment import AudioCraftEnvironment
-
-
- @LMExplorer
- def explorer(launcher):
-     partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
-     launcher.slurm_(gpus=64, partition=partitions)
-     launcher.bind_(solver='audiogen/audiogen_base_16khz')
-     # replace this with the desired environmental sound dataset
-     launcher.bind_(dset='internal/sounds_16khz')
-
-     fsdp = {'autocast': False, 'fsdp.use': True}
-     medium = {'model/lm/model_scale': 'medium'}
-
-     launcher.bind_(fsdp)
-     launcher(medium)

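The grid above composes experiment overrides by binding persistent settings (`bind_`) and then launching per-run variants (calling the launcher). A rough sketch of that bind-then-launch pattern with a stand-in launcher; the real audiocraft `LMExplorer`/launcher API differs, so this only illustrates how bound and per-call override dicts merge:

# Stand-in illustration of bind-then-launch override composition (hypothetical).
class ToyLauncher:
    def __init__(self):
        self.bound = {}          # overrides shared by every subsequent launch

    def bind_(self, overrides):
        self.bound.update(overrides)

    def __call__(self, overrides=None):
        cfg = {**self.bound, **(overrides or {})}  # per-launch dict wins on conflict
        print('launching with', cfg)

launcher = ToyLauncher()
launcher.bind_({'solver': 'audiogen/audiogen_base_16khz', 'fsdp.use': True})
launcher({'model/lm/model_scale': 'medium'})
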
spaces/AIKey/ai_date/index.html DELETED
@@ -1,32 +0,0 @@
- <!DOCTYPE html>
- <html>
-   <head>
-     <meta charset="utf-8" />
-     <meta name="viewport" content="width=device-width" />
-     <title>My static Space</title>
-     <style>
-       .space {
-         max-width: 100%;
-         max-height: 100%;
-         width: 100vw;
-         height: 100vh;
-         overflow: hidden;
-       }
-       .iframe {
-         min-width: 100%;
-         min-height: 100%;
-         background: black;
-       }
-     </style>
-   </head>
-   <body>
-     <div class="space">
-       <iframe
-         class="iframe"
-         allowfullscreen="true"
-         frameborder="0"
-         src="https://dates.ai/?utm_source=aitoolnavigation&utm_medium=marketplace&utm_campaign=aitoolnavigation">
-       </iframe>
-     </div>
-   </body>
- </html>

spaces/ALSv/FSW/roop/predictor.py DELETED
@@ -1,22 +0,0 @@
- import threading
- import numpy
- from PIL import Image
-
- from roop.typing import Frame
-
- # Define any other necessary variables or constants here
-
- def predict_frame(target_frame: Frame) -> bool:
-     # Modify this function as needed for your specific use case, without NSFW prediction
-     # For example, you can implement custom image analysis or processing here
-     return False
-
- def predict_image(target_path: str) -> bool:
-     # Modify this function as needed for your specific use case, without NSFW prediction
-     # For example, you can check the image based on your application's requirements
-     return False
-
- def predict_video(target_path: str) -> bool:
-     # Modify this function as needed for your specific use case, without NSFW prediction
-     # For example, you can analyze video frames for other purposes
-     return False

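These predicates act as gates in the processing pipeline: a frame or file is skipped when a predictor returns True. A minimal sketch of how such a gate is typically wired in; the `process_frames` loop and its calling convention are assumptions here, not roop's actual code:

# Hypothetical caller showing how the boolean predictors gate processing.
from roop.predictor import predict_frame

def process_frames(frames):
    processed = []
    for frame in frames:
        if predict_frame(frame):   # True means "flagged" -> skip this frame
            continue
        processed.append(frame)    # otherwise, hand the frame onward
    return processed
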
spaces/Adapter/CoAdapter/ldm/data/__init__.py DELETED
File without changes
spaces/AdithyaSNair/PCOS_Prediction/app.py DELETED
@@ -1,108 +0,0 @@
- # -*- coding: utf-8 -*-
- """Gradio PCOS Prediction.ipynb
-
- Automatically generated by Colaboratory.
-
- The original file is located at
-     https://colab.research.google.com/drive/1W2dPPr1tHmTDgMgZML6C1Ch-4p_tOeZR
- """
-
- import pandas as pd
- import gradio as gr
- import numpy as np
- import seaborn as sns
- import matplotlib.pyplot as plt
- import sklearn
- from sklearn import tree
- from sklearn.linear_model import LinearRegression
- from sklearn.linear_model import LogisticRegression
- from sklearn.tree import DecisionTreeClassifier
- from sklearn.preprocessing import scale
- from sklearn.model_selection import train_test_split
- from sklearn.metrics import confusion_matrix
- from sklearn.preprocessing import StandardScaler
-
- def main(Follicle_No_Right, Follicle_No_Left, Skin_darkening, Hair_growth, Weight_gain, Cycle):
-     url = "https://raw.githubusercontent.com/Athulg19/datasets/main/PCOS_clean_data_without_infertility.csv"
-     data = pd.read_csv(url)
-     data = pd.DataFrame(data)
-     data = data.astype(np.float64)
-     correlation = data.corrwith(data['PCOS (Y/N)']).abs().sort_values(ascending=False)
-     correlation = correlation[correlation > 0.4].index
-     data = data[correlation]
-     arr = data.values
-     X = arr[:, 1:6]
-     Y = arr[:, 0]
-     scaler = StandardScaler().fit(X)
-     rescaledX = scaler.transform(X)
-     np.set_printoptions(precision=3)
-     y = data['PCOS (Y/N)']
-     x = data.drop(['PCOS (Y/N)'], axis=1)
-     X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.25)
-     logistic = LogisticRegression()
-     logistic.fit(X_train, y_train)
-     data = {'Follicle No. (R)': Follicle_No_Right, 'Follicle No. (L)': Follicle_No_Left, 'Skin darkening (Y/N)': Skin_darkening, 'hair growth(Y/N)': Hair_growth, 'Weight gain(Y/N)': Weight_gain, 'Cycle(R/I)': Cycle}
-     index = [0]
-     cust_df = pd.DataFrame(data, index)
-     costpredLog = logistic.predict(cust_df)
-     if costpredLog == 0:
-         Prediction = "There is less chance for the patient to have PCOS."
-     else:
-         Prediction = "There is more chance for the patient to have PCOS."
-     return Prediction
-
- iface = gr.Interface(
-     fn=main,
-     inputs=['number', 'number', 'number', 'number', 'number', 'number'],
-     outputs=['text'],
-     title="Onset of PCOS prediction",
-     description='''Description
-
- Polycystic ovary syndrome (PCOS) is a problem with hormones that happens during the reproductive years. If you have PCOS, you may not have periods very often,
- or you may have periods that last many days. You may also have too much of a hormone called androgen in your body.
- With PCOS, many small sacs of fluid develop along the outer edge of the ovary. These are called cysts. The small fluid-filled cysts contain immature eggs,
- which are called follicles. The follicles fail to regularly release eggs.
- The exact cause of PCOS is unknown. Early diagnosis and treatment, along with weight loss, may lower the risk of long-term complications such as type 2 diabetes and heart disease.
-
- Output0 - Describes the prediction made
-
- More details about the inputs and how they should be entered are given below:
-
- Follicle_No_Right = Number of follicles in the right ovary
- Follicle_No_Left = Number of follicles in the left ovary
- Skin_darkening = Yes (1) / No (0)
- Hair_growth = Yes (1) / No (0)
- Weight_gain = Yes (1) / No (0)
- Cycle = Regular (0) or Irregular (1)
- ''',
-     article='''
- Complications of PCOS can include:
-
- * Infertility
-
- * Gestational diabetes or pregnancy-induced high blood pressure
-
- * Miscarriage or premature birth
-
- * Nonalcoholic steatohepatitis — a severe liver inflammation caused by fat accumulation in the liver
-
- * Metabolic syndrome — a cluster of conditions including high blood pressure,
-   high blood sugar, and abnormal cholesterol or triglyceride levels that significantly increase your risk of cardiovascular disease
-
- * Type 2 diabetes or prediabetes
-
- * Sleep apnea
-
- * Depression, anxiety and eating disorders
-
- * Abnormal uterine bleeding
-
- * Cancer of the uterine lining (endometrial cancer)''')
-
- iface.launch(debug=True)

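The app above selects its model columns by keeping only features whose absolute correlation with the target exceeds 0.4, then fits a logistic regression. A self-contained sketch of that selection step on toy data (the column names here are made up for illustration):

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

# Toy frame standing in for the PCOS CSV; feature names are hypothetical.
rng = np.random.default_rng(0)
df = pd.DataFrame({'target': rng.integers(0, 2, 200).astype(float)})
df['informative'] = df['target'] + rng.normal(0, 0.3, 200)   # correlated feature
df['noise'] = rng.normal(0, 1, 200)                          # uncorrelated feature

# Keep columns whose |corr| with the target exceeds the threshold,
# exactly as the deleted app does with 'PCOS (Y/N)'.
corr = df.corrwith(df['target']).abs().sort_values(ascending=False)
selected = corr[corr > 0.4].index                            # includes 'target' itself
X = df[selected].drop(columns=['target'])
y = df['target']

model = LogisticRegression().fit(X, y)
print(list(selected), model.score(X, y))

Note that in the original app the `StandardScaler` output (`rescaledX`) is computed but never used; the model is fit on the unscaled features.
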
spaces/AdityaMahimkar/PlagiarismChecker/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: PlagiarismChecker
- emoji: 🚀
- colorFrom: gray
- colorTo: pink
- sdk: gradio
- sdk_version: 2.9.1
- app_file: app.py
- pinned: false
- license: afl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/Make.js DELETED
@@ -1,31 +0,0 @@
- import GetTypeName from './builders/utils/GetTypeName.js';
- import Builders from './builders/Builders.js';
-
- var Make = function (scene, data, view, styles, customBuilders) {
-     var type = GetTypeName(data, styles);
-     if (!type) {
-         console.warn(`rexUI.Make: Can't get type name in ${JSON.stringify(data)}`);
-         return undefined;
-     }
-
-     var callback;
-     if (customBuilders) {
-         callback = customBuilders[type];
-     }
-     if (!callback) {
-         callback = Builders[type];
-     }
-     if (!callback) {
-         console.warn(`rexUI.Make: Can't create ${type} game object.`);
-         return undefined;
-     }
-
-     var gameObject = callback(scene, data, view, styles, customBuilders);
-     if (data.name) {
-         gameObject.setName(data.name);
-     }
-
-     return gameObject;
- };
-
- export default Make;

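Make.js resolves a type name to a builder function, letting user-supplied builders shadow the built-in registry. The same lookup-with-override pattern, sketched in Python for comparison (all names here are illustrative, not part of the plugin):

# Illustrative registry-with-override lookup, mirroring Make.js.
BUILDERS = {
    'label': lambda scene, data: f'<label for {data!r} in {scene}>',
}

def make(scene, data, custom_builders=None):
    type_name = data.get('type')
    if not type_name:
        return None                               # can't determine what to build
    builder = None
    if custom_builders:
        builder = custom_builders.get(type_name)  # user override wins
    if builder is None:
        builder = BUILDERS.get(type_name)         # fall back to built-ins
    if builder is None:
        return None                               # unknown type
    return builder(scene, data)

print(make('scene0', {'type': 'label', 'text': 'hi'}))
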
spaces/AisingioroHao0/anime-fanwork/README.md DELETED
@@ -1,59 +0,0 @@
- ---
- title: Anime Fanwork
- emoji: ✏️
- colorFrom: yellow
- colorTo: gray
- sdk: gradio
- sdk_version: 3.48.0
- app_file: app.py
- pinned: true
- license: apache-2.0
- models:
- - AisingioroHao0/stable-diffusion-reference-only-automatic-coloring-0.1.2
- ---
-
- # StableDiffusionReferenceOnly
-
- A general model for secondary creation.
-
- No training is needed to achieve style transfer for any anime character or to color line drawings.
-
- Code: https://github.com/aihao2000/StableDiffusionReferenceOnly
-
- Model: https://huggingface.co/AisingioroHao0/stable-diffusion-reference-only-automatic-coloring-0.1.2
-
- Paper: https://arxiv.org/abs/2311.02343
-
- | prompt | blueprint | result |
- | :---------------------------------: | :------------------------------------: | :---------------------------------: |
- | ![](./README.assets/3x9_prompt.png) | ![](./README.assets/3x9_blueprint.png) | ![](./README.assets/3x9_result.png) |
-
- ### Instructions
-
- Secondary creation requires two images.
-
- One is the prompt image: a reference image whose character you wish to migrate to the new image. We provide the ```character segment``` function to clear the background, which often gives better results.
-
- The other is the blueprint image, which controls the structure of the new picture. Using ```character segment``` on it is also recommended. There are two further buttons: if the blueprint you input is a hand-drawn line drawing, you only need to click the ```color inversion``` button to ensure a black background with white lines. If you are entering a color image of another character, you need to click the ```get line art``` button and then the ```color inversion``` button. Then click the inference button to get the result.
-
- You can also simply upload a reference image and a line-art image and click ```automatic coloring``` to get the result without the steps above.
-
- You can also upload two color character pictures directly to try ```style transfer```.
-
- ## Introduction
-
- Secondary creation requires two images.
-
- One is the prompt image: the reference image you wish to migrate to the new image. We provide the ```character segment``` function to clear the background, which often brings better results.
-
- The other is the blueprint image, which controls the structure of the new picture. Using ```character segment``` is also recommended to enhance the effect. There are two further buttons: if the input blueprint is a hand-drawn line drawing, simply click the ```color inversion``` button to ensure a black background with white lines. If you are entering a color image of another character, click the ```get line art``` button and then the ```color inversion``` button. Then click the ```inference``` button to get the result.
-
- You can also directly upload a reference image and a line-art image and click ```automatic coloring``` to get the result without the above operations.
-
- You can also directly upload two color character pictures to try ```style transfer```.

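The line-art preprocessing described above (a white-on-black blueprint) can be approximated outside the UI with plain PIL. This sketch only mirrors the two-step flow with a crude edge-based stand-in for ```get line art``` followed by ```color inversion```; it is not the Space's actual implementation:

from PIL import Image, ImageFilter, ImageOps

def to_line_art(img):
    # Crude stand-in for "get line art": grayscale edge detection,
    # inverted so it looks like a sketch (black lines on white paper).
    edges = img.convert('L').filter(ImageFilter.FIND_EDGES)
    return ImageOps.invert(edges)

def color_inversion(img):
    # "color inversion": dark lines on white become white lines on black.
    return ImageOps.invert(img.convert('L'))

blueprint = Image.open('character.png')            # any color character image
line_art = color_inversion(to_line_art(blueprint)) # white lines, black background
line_art.save('blueprint_white_on_black.png')
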
spaces/Aloento/9Nine-VITS/duration_predictor.py DELETED
@@ -1,41 +0,0 @@
- import torch
- from torch import nn
-
- from modules import LayerNorm
-
-
- class DurationPredictor(nn.Module):
-     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
-         super().__init__()
-
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.gin_channels = gin_channels
-
-         self.drop = nn.Dropout(p_dropout)
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-         self.norm_1 = LayerNorm(filter_channels)
-         self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-         self.norm_2 = LayerNorm(filter_channels)
-         self.proj = nn.Conv1d(filter_channels, 1, 1)
-
-         if gin_channels != 0:
-             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
-     def forward(self, x, x_mask, g=None):
-         x = torch.detach(x)
-         if g is not None:
-             g = torch.detach(g)
-             x = x + self.cond(g)
-         x = self.conv_1(x * x_mask)
-         x = torch.relu(x)
-         x = self.norm_1(x)
-         x = self.drop(x)
-         x = self.conv_2(x * x_mask)
-         x = torch.relu(x)
-         x = self.norm_2(x)
-         x = self.drop(x)
-         x = self.proj(x * x_mask)
-         return x * x_mask

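The predictor maps a `(batch, channels, time)` text-encoder output to one duration logit per frame, masking padded positions at every stage. A quick shape check of the same conv → norm → dropout → projection stack; a plain channelwise normalization stands in for the repo's custom `LayerNorm` so the sketch is self-contained (that substitution is an assumption):

import torch
from torch import nn

class ChannelLayerNorm(nn.Module):
    # Stand-in for the repo's LayerNorm over the channel dimension.
    def __init__(self, channels):
        super().__init__()
        self.norm = nn.LayerNorm(channels)

    def forward(self, x):                    # x: (B, C, T)
        return self.norm(x.transpose(1, -1)).transpose(1, -1)

B, C, H, T = 2, 192, 256, 37                 # batch, in_channels, filter_channels, frames
conv_1 = nn.Conv1d(C, H, 3, padding=1)
norm_1 = ChannelLayerNorm(H)
proj = nn.Conv1d(H, 1, 1)

x = torch.randn(B, C, T)
x_mask = torch.ones(B, 1, T)                 # 1 = real frame, 0 = padding
h = norm_1(torch.relu(conv_1(x * x_mask)))
durations = proj(h * x_mask) * x_mask        # (B, 1, T): one logit per frame
print(durations.shape)                       # torch.Size([2, 1, 37])
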
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile DELETED
@@ -1,44 +0,0 @@
- FROM ubuntu:20.04
- LABEL maintainer="Hugging Face"
- LABEL repository="diffusers"
-
- ENV DEBIAN_FRONTEND=noninteractive
-
- RUN apt update && \
-     apt install -y bash \
-         build-essential \
-         git \
-         git-lfs \
-         curl \
-         ca-certificates \
-         libsndfile1-dev \
-         python3.8 \
-         python3-pip \
-         python3.8-venv && \
-     rm -rf /var/lib/apt/lists
-
- # make sure to use venv
- RUN python3 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
-
- # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3 -m pip install --no-cache-dir --upgrade pip && \
-     python3 -m pip install --no-cache-dir \
-         torch \
-         torchvision \
-         torchaudio \
-         onnxruntime \
-         --extra-index-url https://download.pytorch.org/whl/cpu && \
-     python3 -m pip install --no-cache-dir \
-         accelerate \
-         datasets \
-         hf-doc-builder \
-         huggingface-hub \
-         Jinja2 \
-         librosa \
-         numpy \
-         scipy \
-         tensorboard \
-         transformers
-
- CMD ["/bin/bash"]

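A quick way to confirm the CPU-only stack inside the image's `/opt/venv` is to import the preinstalled packages and check that CUDA is absent; this check is a hypothetical convenience, not part of the repo:

# Sanity check for the CPU-only image: run with the venv's python3.
import onnxruntime as ort
import torch

assert not torch.cuda.is_available()            # CPU wheel: no CUDA expected
print(torch.__version__, ort.__version__)
print(ort.get_available_providers())            # e.g. ['CPUExecutionProvider']
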
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_repaint.py DELETED
@@ -1,956 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import Callable, List, Optional, Union
-
- import numpy as np
- import PIL
- import torch
- from packaging import version
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
- from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
- from diffusers.configuration_utils import FrozenDict, deprecate
- from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.pipelines.stable_diffusion.safety_checker import (
-     StableDiffusionSafetyChecker,
- )
- from diffusers.schedulers import KarrasDiffusionSchedulers
- from diffusers.utils import (
-     is_accelerate_available,
-     is_accelerate_version,
-     logging,
-     randn_tensor,
- )
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- def prepare_mask_and_masked_image(image, mask):
-     """
-     Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
-     converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
-     ``image`` and ``1`` for the ``mask``.
-     The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
-     binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-     Args:
-         image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
-             It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
-             ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
-         mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
-             It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
-             ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-     Raises:
-         ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
-             should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
-         TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
-             (or the other way around).
-     Returns:
-         tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
-             dimensions: ``batch x channels x height x width``.
-     """
-     if isinstance(image, torch.Tensor):
-         if not isinstance(mask, torch.Tensor):
-             raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")
-
-         # Batch single image
-         if image.ndim == 3:
-             assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
-             image = image.unsqueeze(0)
-
-         # Batch and add channel dim for single mask
-         if mask.ndim == 2:
-             mask = mask.unsqueeze(0).unsqueeze(0)
-
-         # Batch single mask or add channel dim
-         if mask.ndim == 3:
-             # Single batched mask, no channel dim or single mask not batched but channel dim
-             if mask.shape[0] == 1:
-                 mask = mask.unsqueeze(0)
-
-             # Batched masks no channel dim
-             else:
-                 mask = mask.unsqueeze(1)
-
-         assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
-         assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
-         assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
-         # Check image is in [-1, 1]
-         if image.min() < -1 or image.max() > 1:
-             raise ValueError("Image should be in [-1, 1] range")
-
-         # Check mask is in [0, 1]
-         if mask.min() < 0 or mask.max() > 1:
-             raise ValueError("Mask should be in [0, 1] range")
-
-         # Binarize mask
-         mask[mask < 0.5] = 0
-         mask[mask >= 0.5] = 1
-
-         # Image as float32
-         image = image.to(dtype=torch.float32)
-     elif isinstance(mask, torch.Tensor):
-         raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
-     else:
-         # preprocess image
-         if isinstance(image, (PIL.Image.Image, np.ndarray)):
-             image = [image]
-
-         if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
-             image = [np.array(i.convert("RGB"))[None, :] for i in image]
-             image = np.concatenate(image, axis=0)
-         elif isinstance(image, list) and isinstance(image[0], np.ndarray):
-             image = np.concatenate([i[None, :] for i in image], axis=0)
-
-         image = image.transpose(0, 3, 1, 2)
-         image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
-         # preprocess mask
-         if isinstance(mask, (PIL.Image.Image, np.ndarray)):
-             mask = [mask]
-
-         if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
-             mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
-             mask = mask.astype(np.float32) / 255.0
-         elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
-             mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
-
-         mask[mask < 0.5] = 0
-         mask[mask >= 0.5] = 1
-         mask = torch.from_numpy(mask)
-
-         # masked_image = image * (mask >= 0.5)
-         masked_image = image
-
-     return mask, masked_image
-
-
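A small usage sketch for the helper above: a grayscale PIL mask is binarized at 0.5 and the image is mapped to [-1, 1]. The shapes and values below follow from the code as written (illustrative inputs only):

# Illustrative check of prepare_mask_and_masked_image with PIL inputs.
import numpy as np
import PIL.Image

image = PIL.Image.new("RGB", (64, 64), (255, 0, 0))            # solid red
mask = PIL.Image.fromarray((np.random.rand(64, 64) * 255).astype("uint8"))

m, masked = prepare_mask_and_masked_image(image, mask)
print(m.shape, masked.shape)        # torch.Size([1, 1, 64, 64]) torch.Size([1, 3, 64, 64])
print(m.unique())                   # tensor([0., 1.]) -- binarized at 0.5
print(masked.min(), masked.max())   # within [-1, 1]
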
- class StableDiffusionRepaintPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
-     r"""
-     Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-     In addition the pipeline inherits the following loading methods:
-         - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
-         - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
-     as well as the following saving methods:
-         - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPImageProcessor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: KarrasDiffusionSchedulers,
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-         requires_safety_checker: bool = True,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} has not set the configuration"
-                 " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
-                 " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
-                 " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
-                 " Hub, it would be very nice if you could open a Pull request for the"
-                 " `scheduler/scheduler_config.json` file"
-             )
-             deprecate(
-                 "skip_prk_steps not set",
-                 "1.0.0",
-                 deprecation_message,
-                 standard_warn=False,
-             )
-             new_config = dict(scheduler.config)
-             new_config["skip_prk_steps"] = True
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None and requires_safety_checker:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         if safety_checker is not None and feature_extractor is None:
-             raise ValueError(
-                 f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-                 " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-             )
-
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-             version.parse(unet.config._diffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-         # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
-         if unet.config.in_channels != 4:
-             logger.warning(
-                 f"You have loaded a UNet with {unet.config.in_channels} input channels, whereas by default,"
-                 f" {self.__class__} assumes that `pipeline.unet` has 4 input channels: 4 for `num_channels_latents`."
-                 " If you did not intend to modify"
-                 " this behavior, please check whether you have loaded the right checkpoint."
-             )
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-         self.register_to_config(requires_safety_checker=requires_safety_checker)
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
-     def enable_sequential_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
-         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-         `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
-         Note that offloading happens on a submodule basis. Memory savings are higher than with
-         `enable_model_cpu_offload`, but performance is lower.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
-             from accelerate import cpu_offload
-         else:
-             raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-             cpu_offload(cpu_offloaded_model, device)
-
-         if self.safety_checker is not None:
-             cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
-     def enable_model_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
-         to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-         method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
-         `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-             from accelerate import cpu_offload_with_hook
-         else:
-             raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         hook = None
-         for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
-             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-         if self.safety_checker is not None:
-             _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
-         # We'll offload the last model manually.
-         self.final_offload_hook = hook
-
-     @property
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
-     def _execution_device(self):
-         r"""
-         Returns the device on which the pipeline's models will be executed. After calling
-         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-         hooks.
-         """
-         if not hasattr(self.unet, "_hf_hook"):
-             return self.device
-         for module in self.unet.modules():
-             if (
-                 hasattr(module, "_hf_hook")
-                 and hasattr(module._hf_hook, "execution_device")
-                 and module._hf_hook.execution_device is not None
-             ):
-                 return torch.device(module._hf_hook.execution_device)
-         return self.device
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
-     def _encode_prompt(
-         self,
-         prompt,
-         device,
-         num_images_per_prompt,
-         do_classifier_free_guidance,
-         negative_prompt=None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-     ):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 prompt to be encoded
-             device: (`torch.device`):
-                 torch device
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier free guidance or not
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
-                 less than `1`).
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                 provided, text embeddings will be generated from `prompt` input argument.
-             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-                 argument.
-         """
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             batch_size = prompt_embeds.shape[0]
-
-         if prompt_embeds is None:
-             # textual inversion: process multi-vector tokens if necessary
-             if isinstance(self, TextualInversionLoaderMixin):
-                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
-             text_inputs = self.tokenizer(
-                 prompt,
-                 padding="max_length",
-                 max_length=self.tokenizer.model_max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-             text_input_ids = text_inputs.input_ids
-             untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-             if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                 text_input_ids, untruncated_ids
-             ):
-                 removed_text = self.tokenizer.batch_decode(
-                     untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
-                 )
-                 logger.warning(
-                     "The following part of your input was truncated because CLIP can only handle sequences up to"
-                     f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-                 )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = text_inputs.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             prompt_embeds = self.text_encoder(
-                 text_input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             prompt_embeds = prompt_embeds[0]
-
-         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-         bs_embed, seq_len, _ = prompt_embeds.shape
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-         prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance and negative_prompt_embeds is None:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             # textual inversion: process multi-vector tokens if necessary
-             if isinstance(self, TextualInversionLoaderMixin):
-                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
-             max_length = prompt_embeds.shape[1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             negative_prompt_embeds = self.text_encoder(
-                 uncond_input.input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             negative_prompt_embeds = negative_prompt_embeds[0]
-
-         if do_classifier_free_guidance:
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = negative_prompt_embeds.shape[1]
-
-             negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-             negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-             negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
-         return prompt_embeds
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
-     def run_safety_checker(self, image, device, dtype):
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
-             )
-         else:
-             has_nsfw_concept = None
-         return image, has_nsfw_concept
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
-     def prepare_extra_step_kwargs(self, generator, eta):
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         # check if the scheduler accepts generator
-         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         if accepts_generator:
-             extra_step_kwargs["generator"] = generator
-         return extra_step_kwargs
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
-     def decode_latents(self, latents):
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.decode(latents).sample
-         image = (image / 2 + 0.5).clamp(0, 1)
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-         return image
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
-     def check_inputs(
-         self,
-         prompt,
-         height,
-         width,
-         callback_steps,
-         negative_prompt=None,
-         prompt_embeds=None,
-         negative_prompt_embeds=None,
-     ):
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-         if prompt is not None and prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-                 " only forward one of the two."
-             )
-         elif prompt is None and prompt_embeds is None:
-             raise ValueError(
-                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-             )
-         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if negative_prompt is not None and negative_prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-             )
-
-         if prompt_embeds is not None and negative_prompt_embeds is not None:
-             if prompt_embeds.shape != negative_prompt_embeds.shape:
-                 raise ValueError(
-                     "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                     f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                     f" {negative_prompt_embeds.shape}."
-                 )
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
-     def prepare_latents(
-         self,
-         batch_size,
-         num_channels_latents,
-         height,
-         width,
-         dtype,
-         device,
-         generator,
-         latents=None,
-     ):
-         shape = (
-             batch_size,
-             num_channels_latents,
-             height // self.vae_scale_factor,
-             width // self.vae_scale_factor,
-         )
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if latents is None:
-             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             latents = latents.to(device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-         return latents
-
-     def prepare_mask_latents(
-         self,
-         mask,
-         masked_image,
-         batch_size,
-         height,
-         width,
-         dtype,
-         device,
-         generator,
-         do_classifier_free_guidance,
-     ):
-         # resize the mask to latents shape as we concatenate the mask to the latents
-         # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
-         # and half precision
-         mask = torch.nn.functional.interpolate(
-             mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
-         )
-         mask = mask.to(device=device, dtype=dtype)
-
-         masked_image = masked_image.to(device=device, dtype=dtype)
-
-         # encode the mask image into latents space so we can concatenate it to the latents
-         if isinstance(generator, list):
-             masked_image_latents = [
-                 self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
-                 for i in range(batch_size)
-             ]
-             masked_image_latents = torch.cat(masked_image_latents, dim=0)
-         else:
-             masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
-         masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
-
-         # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
-         if mask.shape[0] < batch_size:
-             if not batch_size % mask.shape[0] == 0:
-                 raise ValueError(
-                     "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
-                     f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
-                     " of masks that you pass is divisible by the total requested batch size."
-                 )
-             mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
-         if masked_image_latents.shape[0] < batch_size:
-             if not batch_size % masked_image_latents.shape[0] == 0:
-                 raise ValueError(
-                     "The passed images and the required batch size don't match. Images are supposed to be duplicated"
-                     f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
-                     " Make sure the number of images that you pass is divisible by the total requested batch size."
-                 )
-             masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
-
-         mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
-         masked_image_latents = (
-             torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
-         )
-
-         # aligning device to prevent device errors when concatenating it with the latent model input
-         masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
-         return mask, masked_image_latents
-
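`prepare_mask_latents` above tiles a single mask (and the VAE-encoded masked image) across the batch and then doubles both for classifier-free guidance, so the unconditional and conditional halves share the same conditioning. A toy sketch of just that batching arithmetic:

import torch

batch_size, do_cfg = 4, True
mask = torch.rand(1, 1, 8, 8)                            # one mask for the whole batch

mask = (mask >= 0.5).float()
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)  # -> (4, 1, 8, 8)
if do_cfg:
    mask = torch.cat([mask] * 2)                          # uncond + cond halves
print(mask.shape)                                         # torch.Size([8, 1, 8, 8])
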
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]] = None,
-         image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-         mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-         height: Optional[int] = None,
-         width: Optional[int] = None,
-         num_inference_steps: int = 50,
-         jump_length: Optional[int] = 10,
-         jump_n_sample: Optional[int] = 10,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
-                 instead.
-             image (`PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
-                 be masked out with `mask_image` and repainted according to `prompt`.
-             mask_image (`PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
-                 repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
-                 to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
-                 instead of 3, so the expected shape would be `(B, H, W, 1)`.
-             height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             jump_length (`int`, *optional*, defaults to 10):
-                 The number of steps taken forward in time before going backward in time for a single jump ("j" in the
-                 RePaint paper). Take a look at Figures 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
-             jump_n_sample (`int`, *optional*, defaults to 10):
-                 The number of times we will make a forward time jump for a given chosen time sample. Take a look at
-                 Figures 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
-                 is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`], will be ignored for others.
-             generator (`torch.Generator`, *optional*):
-                 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                 to make generation deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                 provided, text embeddings will be generated from `prompt` input argument.
752
- provided, text embeddings will be generated from `prompt` input argument.
753
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
754
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
755
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
756
- argument.
757
- output_type (`str`, *optional*, defaults to `"pil"`):
758
- The output format of the generate image. Choose between
759
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
760
- return_dict (`bool`, *optional*, defaults to `True`):
761
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
762
- plain tuple.
763
- callback (`Callable`, *optional*):
764
- A function that will be called every `callback_steps` steps during inference. The function will be
765
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
766
- callback_steps (`int`, *optional*, defaults to 1):
767
- The frequency at which the `callback` function will be called. If not specified, the callback will be
768
- called at every step.
769
- Examples:
770
- ```py
771
- >>> import PIL
772
- >>> import requests
773
- >>> import torch
774
- >>> from io import BytesIO
775
- >>> from diffusers import StableDiffusionPipeline, RePaintScheduler
776
- >>> def download_image(url):
777
- ... response = requests.get(url)
778
- ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
779
- >>> base_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/"
780
- >>> img_url = base_url + "overture-creations-5sI6fQgYIuo.png"
781
- >>> mask_url = base_url + "overture-creations-5sI6fQgYIuo_mask.png "
782
- >>> init_image = download_image(img_url).resize((512, 512))
783
- >>> mask_image = download_image(mask_url).resize((512, 512))
784
- >>> pipe = DiffusionPipeline.from_pretrained(
785
- ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
786
- ... )
787
- >>> pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
788
- >>> pipe = pipe.to("cuda")
789
- >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
790
- >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
791
- ```
792
- Returns:
793
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
794
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
795
- When returning a tuple, the first element is a list with the generated images, and the second element is a
796
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
797
- (nsfw) content, according to the `safety_checker`.
798
- """
799
- # 0. Default height and width to unet
800
- height = height or self.unet.config.sample_size * self.vae_scale_factor
801
- width = width or self.unet.config.sample_size * self.vae_scale_factor
802
-
803
- # 1. Check inputs
804
- self.check_inputs(
805
- prompt,
806
- height,
807
- width,
808
- callback_steps,
809
- negative_prompt,
810
- prompt_embeds,
811
- negative_prompt_embeds,
812
- )
813
-
814
- if image is None:
815
- raise ValueError("`image` input cannot be undefined.")
816
-
817
- if mask_image is None:
818
- raise ValueError("`mask_image` input cannot be undefined.")
819
-
820
- # 2. Define call parameters
821
- if prompt is not None and isinstance(prompt, str):
822
- batch_size = 1
823
- elif prompt is not None and isinstance(prompt, list):
824
- batch_size = len(prompt)
825
- else:
826
- batch_size = prompt_embeds.shape[0]
827
-
828
- device = self._execution_device
829
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
830
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
831
- # corresponds to doing no classifier free guidance.
832
- do_classifier_free_guidance = guidance_scale > 1.0
833
-
834
- # 3. Encode input prompt
835
- prompt_embeds = self._encode_prompt(
836
- prompt,
837
- device,
838
- num_images_per_prompt,
839
- do_classifier_free_guidance,
840
- negative_prompt,
841
- prompt_embeds=prompt_embeds,
842
- negative_prompt_embeds=negative_prompt_embeds,
843
- )
844
-
845
- # 4. Preprocess mask and image
846
- mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
847
-
848
- # 5. set timesteps
849
- self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, device)
850
- self.scheduler.eta = eta
851
-
852
- timesteps = self.scheduler.timesteps
853
- # latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
854
-
855
- # 6. Prepare latent variables
856
- num_channels_latents = self.vae.config.latent_channels
857
- latents = self.prepare_latents(
858
- batch_size * num_images_per_prompt,
859
- num_channels_latents,
860
- height,
861
- width,
862
- prompt_embeds.dtype,
863
- device,
864
- generator,
865
- latents,
866
- )
867
-
868
- # 7. Prepare mask latent variables
869
- mask, masked_image_latents = self.prepare_mask_latents(
870
- mask,
871
- masked_image,
872
- batch_size * num_images_per_prompt,
873
- height,
874
- width,
875
- prompt_embeds.dtype,
876
- device,
877
- generator,
878
- do_classifier_free_guidance=False, # We do not need duplicate mask and image
879
- )
880
-
881
- # 8. Check that sizes of mask, masked image and latents match
882
- # num_channels_mask = mask.shape[1]
883
- # num_channels_masked_image = masked_image_latents.shape[1]
884
- if num_channels_latents != self.unet.config.in_channels:
885
- raise ValueError(
886
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
887
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} "
888
- f" = Please verify the config of"
889
- " `pipeline.unet` or your `mask_image` or `image` input."
890
- )
891
-
892
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
893
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
894
-
895
- t_last = timesteps[0] + 1
896
-
897
- # 10. Denoising loop
898
- with self.progress_bar(total=len(timesteps)) as progress_bar:
899
- for i, t in enumerate(timesteps):
900
- if t >= t_last:
901
- # compute the reverse: x_t-1 -> x_t
902
- latents = self.scheduler.undo_step(latents, t_last, generator)
903
- progress_bar.update()
904
- t_last = t
905
- continue
906
-
907
- # expand the latents if we are doing classifier free guidance
908
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
909
-
910
- # concat latents, mask, masked_image_latents in the channel dimension
911
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
912
- # latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
913
-
914
- # predict the noise residual
915
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
916
-
917
- # perform guidance
918
- if do_classifier_free_guidance:
919
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
920
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
921
-
922
- # compute the previous noisy sample x_t -> x_t-1
923
- latents = self.scheduler.step(
924
- noise_pred,
925
- t,
926
- latents,
927
- masked_image_latents,
928
- mask,
929
- **extra_step_kwargs,
930
- ).prev_sample
931
-
932
- # call the callback, if provided
933
- progress_bar.update()
934
- if callback is not None and i % callback_steps == 0:
935
- callback(i, t, latents)
936
-
937
- t_last = t
938
-
939
- # 11. Post-processing
940
- image = self.decode_latents(latents)
941
-
942
- # 12. Run safety checker
943
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
944
-
945
- # 13. Convert to PIL
946
- if output_type == "pil":
947
- image = self.numpy_to_pil(image)
948
-
949
- # Offload last model to CPU
950
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
951
- self.final_offload_hook.offload()
952
-
953
- if not return_dict:
954
- return (image, has_nsfw_concept)
955
-
956
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
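
Note how step 10 above walks a non-monotonic timestep sequence: whenever `t` jumps back up relative to `t_last`, the loop re-noises with `scheduler.undo_step` instead of denoising. As a sketch of where that sequence comes from, the following mirrors the jump schedule described in the RePaint paper, using the same `num_inference_steps`, `jump_length`, and `jump_n_sample` semantics as the `__call__` arguments; the actual `RePaintScheduler.set_timesteps` additionally rescales these indices to training timesteps, so treat this as illustrative rather than the scheduler's exact code.

```py
# Illustrative sketch: build a RePaint-style timestep sequence that walks
# backward (denoising) and periodically jumps forward (re-noising).
def repaint_timesteps(num_inference_steps, jump_length=10, jump_n_sample=10):
    # every `jump_length` steps, allow `jump_n_sample - 1` forward jumps
    jumps = {j: jump_n_sample - 1 for j in range(0, num_inference_steps - jump_length, jump_length)}
    timesteps = []
    t = num_inference_steps
    while t >= 1:
        t -= 1
        timesteps.append(t)  # one reverse (denoising) step
        if jumps.get(t, 0) > 0:
            jumps[t] -= 1
            for _ in range(jump_length):
                t += 1
                timesteps.append(t)  # forward jump, handled by undo_step above
    return timesteps
```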
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth_flax.py DELETED
@@ -1,709 +0,0 @@
-import argparse
-import hashlib
-import logging
-import math
-import os
-from pathlib import Path
-from typing import Optional
-
-import jax
-import jax.numpy as jnp
-import numpy as np
-import optax
-import torch
-import torch.utils.checkpoint
-import transformers
-from flax import jax_utils
-from flax.training import train_state
-from flax.training.common_utils import shard
-from huggingface_hub import HfFolder, Repository, create_repo, whoami
-from jax.experimental.compilation_cache import compilation_cache as cc
-from PIL import Image
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
-
-from diffusers import (
-    FlaxAutoencoderKL,
-    FlaxDDPMScheduler,
-    FlaxPNDMScheduler,
-    FlaxStableDiffusionPipeline,
-    FlaxUNet2DConditionModel,
-)
-from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
-from diffusers.utils import check_min_version
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.19.0")
-
-# Cache compiled models across invocations of this script.
-cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
-
-logger = logging.getLogger(__name__)
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Simple example of a training script.")
-    parser.add_argument(
-        "--pretrained_model_name_or_path",
-        type=str,
-        default=None,
-        required=True,
-        help="Path to pretrained model or model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--pretrained_vae_name_or_path",
-        type=str,
-        default=None,
-        help="Path to pretrained vae or vae identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--revision",
-        type=str,
-        default=None,
-        required=False,
-        help="Revision of pretrained model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--tokenizer_name",
-        type=str,
-        default=None,
-        help="Pretrained tokenizer name or path if not the same as model_name",
-    )
-    parser.add_argument(
-        "--instance_data_dir",
-        type=str,
-        default=None,
-        required=True,
-        help="A folder containing the training data of instance images.",
-    )
-    parser.add_argument(
-        "--class_data_dir",
-        type=str,
-        default=None,
-        required=False,
-        help="A folder containing the training data of class images.",
-    )
-    parser.add_argument(
-        "--instance_prompt",
-        type=str,
-        default=None,
-        help="The prompt with identifier specifying the instance",
-    )
-    parser.add_argument(
-        "--class_prompt",
-        type=str,
-        default=None,
-        help="The prompt to specify images in the same class as provided instance images.",
-    )
-    parser.add_argument(
-        "--with_prior_preservation",
-        default=False,
-        action="store_true",
-        help="Flag to add prior preservation loss.",
-    )
-    parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
-    parser.add_argument(
-        "--num_class_images",
-        type=int,
-        default=100,
-        help=(
-            "Minimal class images for prior preservation loss. If there are not enough images already present in"
-            " class_data_dir, additional images will be sampled with class_prompt."
-        ),
-    )
-    parser.add_argument(
-        "--output_dir",
-        type=str,
-        default="text-inversion-model",
-        help="The output directory where the model predictions and checkpoints will be written.",
-    )
-    parser.add_argument("--save_steps", type=int, default=None, help="Save a checkpoint every X steps.")
-    parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.")
-    parser.add_argument(
-        "--resolution",
-        type=int,
-        default=512,
-        help=(
-            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-            " resolution"
-        ),
-    )
-    parser.add_argument(
-        "--center_crop",
-        default=False,
-        action="store_true",
-        help=(
-            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
-            " cropped. The images will be resized to the resolution first before cropping."
-        ),
-    )
-    parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
-    parser.add_argument(
-        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
-    )
-    parser.add_argument(
-        "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
-    )
-    parser.add_argument("--num_train_epochs", type=int, default=1)
-    parser.add_argument(
-        "--max_train_steps",
-        type=int,
-        default=None,
-        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-    )
-    parser.add_argument(
-        "--learning_rate",
-        type=float,
-        default=5e-6,
-        help="Initial learning rate (after the potential warmup period) to use.",
-    )
-    parser.add_argument(
-        "--scale_lr",
-        action="store_true",
-        default=False,
-        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-    )
-    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
-    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-    parser.add_argument(
-        "--hub_model_id",
-        type=str,
-        default=None,
-        help="The name of the repository to keep in sync with the local `output_dir`.",
-    )
-    parser.add_argument(
-        "--logging_dir",
-        type=str,
-        default="logs",
-        help=(
-            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-        ),
-    )
-    parser.add_argument(
-        "--mixed_precision",
-        type=str,
-        default="no",
-        choices=["no", "fp16", "bf16"],
-        help=(
-            "Whether to use mixed precision. Choose"
-            " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
-            " and an Nvidia Ampere GPU."
-        ),
-    )
-    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
-    args = parser.parse_args()
-    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-    if env_local_rank != -1 and env_local_rank != args.local_rank:
-        args.local_rank = env_local_rank
-
-    if args.instance_data_dir is None:
-        raise ValueError("You must specify a train data directory.")
-
-    if args.with_prior_preservation:
-        if args.class_data_dir is None:
-            raise ValueError("You must specify a data directory for class images.")
-        if args.class_prompt is None:
-            raise ValueError("You must specify a prompt for class images.")
-
-    return args
-
-
-class DreamBoothDataset(Dataset):
-    """
-    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
-    It pre-processes the images and tokenizes the prompts.
-    """
-
-    def __init__(
-        self,
-        instance_data_root,
-        instance_prompt,
-        tokenizer,
-        class_data_root=None,
-        class_prompt=None,
-        class_num=None,
-        size=512,
-        center_crop=False,
-    ):
-        self.size = size
-        self.center_crop = center_crop
-        self.tokenizer = tokenizer
-
-        self.instance_data_root = Path(instance_data_root)
-        if not self.instance_data_root.exists():
-            raise ValueError("Instance images root doesn't exist.")
-
-        self.instance_images_path = list(Path(instance_data_root).iterdir())
-        self.num_instance_images = len(self.instance_images_path)
-        self.instance_prompt = instance_prompt
-        self._length = self.num_instance_images
-
-        if class_data_root is not None:
-            self.class_data_root = Path(class_data_root)
-            self.class_data_root.mkdir(parents=True, exist_ok=True)
-            self.class_images_path = list(self.class_data_root.iterdir())
-            if class_num is not None:
-                self.num_class_images = min(len(self.class_images_path), class_num)
-            else:
-                self.num_class_images = len(self.class_images_path)
-            self._length = max(self.num_class_images, self.num_instance_images)
-            self.class_prompt = class_prompt
-        else:
-            self.class_data_root = None
-
-        self.image_transforms = transforms.Compose(
-            [
-                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
-                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5], [0.5]),
-            ]
-        )
-
-    def __len__(self):
-        return self._length
-
-    def __getitem__(self, index):
-        example = {}
-        instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
-        if not instance_image.mode == "RGB":
-            instance_image = instance_image.convert("RGB")
-        example["instance_images"] = self.image_transforms(instance_image)
-        example["instance_prompt_ids"] = self.tokenizer(
-            self.instance_prompt,
-            padding="do_not_pad",
-            truncation=True,
-            max_length=self.tokenizer.model_max_length,
-        ).input_ids
-
-        if self.class_data_root:
-            class_image = Image.open(self.class_images_path[index % self.num_class_images])
-            if not class_image.mode == "RGB":
-                class_image = class_image.convert("RGB")
-            example["class_images"] = self.image_transforms(class_image)
-            example["class_prompt_ids"] = self.tokenizer(
-                self.class_prompt,
-                padding="do_not_pad",
-                truncation=True,
-                max_length=self.tokenizer.model_max_length,
-            ).input_ids
-
-        return example
-
-
-class PromptDataset(Dataset):
-    "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
-    def __init__(self, prompt, num_samples):
-        self.prompt = prompt
-        self.num_samples = num_samples
-
-    def __len__(self):
-        return self.num_samples
-
-    def __getitem__(self, index):
-        example = {}
-        example["prompt"] = self.prompt
-        example["index"] = index
-        return example
-
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
-    if token is None:
-        token = HfFolder.get_token()
-    if organization is None:
-        username = whoami(token)["name"]
-        return f"{username}/{model_id}"
-    else:
-        return f"{organization}/{model_id}"
-
-
-def get_params_to_save(params):
-    return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
-
-
-def main():
-    args = parse_args()
-
-    logging.basicConfig(
-        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-        datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO,
-    )
-    # Setup logging, we only want one process per machine to log things on the screen.
-    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
-    if jax.process_index() == 0:
-        transformers.utils.logging.set_verbosity_info()
-    else:
-        transformers.utils.logging.set_verbosity_error()
-
-    if args.seed is not None:
-        set_seed(args.seed)
-
-    rng = jax.random.PRNGKey(args.seed)
-
-    if args.with_prior_preservation:
-        class_images_dir = Path(args.class_data_dir)
-        if not class_images_dir.exists():
-            class_images_dir.mkdir(parents=True)
-        cur_class_images = len(list(class_images_dir.iterdir()))
-
-        if cur_class_images < args.num_class_images:
-            pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-                args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision
-            )
-            pipeline.set_progress_bar_config(disable=True)
-
-            num_new_images = args.num_class_images - cur_class_images
-            logger.info(f"Number of class images to sample: {num_new_images}.")
-
-            sample_dataset = PromptDataset(args.class_prompt, num_new_images)
-            total_sample_batch_size = args.sample_batch_size * jax.local_device_count()
-            sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=total_sample_batch_size)
-
-            for example in tqdm(
-                sample_dataloader, desc="Generating class images", disable=not jax.process_index() == 0
-            ):
-                prompt_ids = pipeline.prepare_inputs(example["prompt"])
-                prompt_ids = shard(prompt_ids)
-                p_params = jax_utils.replicate(params)
-                rng = jax.random.split(rng)[0]
-                sample_rng = jax.random.split(rng, jax.device_count())
-                images = pipeline(prompt_ids, p_params, sample_rng, jit=True).images
-                images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
-                images = pipeline.numpy_to_pil(np.array(images))
-
-                for i, image in enumerate(images):
-                    hash_image = hashlib.sha1(image.tobytes()).hexdigest()
-                    image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
-                    image.save(image_filename)
-
-            del pipeline
-
-    # Handle the repository creation
-    if jax.process_index() == 0:
-        if args.push_to_hub:
-            if args.hub_model_id is None:
-                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
-            else:
-                repo_name = args.hub_model_id
-            create_repo(repo_name, exist_ok=True, token=args.hub_token)
-            repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
-
-            with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
-                if "step_*" not in gitignore:
-                    gitignore.write("step_*\n")
-                if "epoch_*" not in gitignore:
-                    gitignore.write("epoch_*\n")
-        elif args.output_dir is not None:
-            os.makedirs(args.output_dir, exist_ok=True)
-
-    # Load the tokenizer and add the placeholder token as an additional special token
-    if args.tokenizer_name:
-        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
-    elif args.pretrained_model_name_or_path:
-        tokenizer = CLIPTokenizer.from_pretrained(
-            args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
-        )
-    else:
-        raise NotImplementedError("No tokenizer specified!")
-
-    train_dataset = DreamBoothDataset(
-        instance_data_root=args.instance_data_dir,
-        instance_prompt=args.instance_prompt,
-        class_data_root=args.class_data_dir if args.with_prior_preservation else None,
-        class_prompt=args.class_prompt,
-        class_num=args.num_class_images,
-        tokenizer=tokenizer,
-        size=args.resolution,
-        center_crop=args.center_crop,
-    )
-
-    def collate_fn(examples):
-        input_ids = [example["instance_prompt_ids"] for example in examples]
-        pixel_values = [example["instance_images"] for example in examples]
-
-        # Concat class and instance examples for prior preservation.
-        # We do this to avoid doing two forward passes.
-        if args.with_prior_preservation:
-            input_ids += [example["class_prompt_ids"] for example in examples]
-            pixel_values += [example["class_images"] for example in examples]
-
-        pixel_values = torch.stack(pixel_values)
-        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
-        input_ids = tokenizer.pad(
-            {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt"
-        ).input_ids
-
-        batch = {
-            "input_ids": input_ids,
-            "pixel_values": pixel_values,
-        }
-        batch = {k: v.numpy() for k, v in batch.items()}
-        return batch
-
-    total_train_batch_size = args.train_batch_size * jax.local_device_count()
-    if len(train_dataset) < total_train_batch_size:
-        raise ValueError(
-            f"Training batch size is {total_train_batch_size}, but your dataset only contains"
-            f" {len(train_dataset)} images. Please, use a larger dataset or reduce the effective batch size. Note that"
-            f" there are {jax.local_device_count()} parallel devices, so your batch size can't be smaller than that."
-        )
-
-    train_dataloader = torch.utils.data.DataLoader(
-        train_dataset, batch_size=total_train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True
-    )
-
-    weight_dtype = jnp.float32
-    if args.mixed_precision == "fp16":
-        weight_dtype = jnp.float16
-    elif args.mixed_precision == "bf16":
-        weight_dtype = jnp.bfloat16
-
-    if args.pretrained_vae_name_or_path:
-        # TODO(patil-suraj): Upload flax weights for the VAE
-        vae_arg, vae_kwargs = (args.pretrained_vae_name_or_path, {"from_pt": True})
-    else:
-        vae_arg, vae_kwargs = (args.pretrained_model_name_or_path, {"subfolder": "vae", "revision": args.revision})
-
-    # Load models and create wrapper for stable diffusion
-    text_encoder = FlaxCLIPTextModel.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="text_encoder", dtype=weight_dtype, revision=args.revision
-    )
-    vae, vae_params = FlaxAutoencoderKL.from_pretrained(
-        vae_arg,
-        dtype=weight_dtype,
-        **vae_kwargs,
-    )
-    unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="unet", dtype=weight_dtype, revision=args.revision
-    )
-
-    # Optimization
-    if args.scale_lr:
-        args.learning_rate = args.learning_rate * total_train_batch_size
-
-    constant_scheduler = optax.constant_schedule(args.learning_rate)
-
-    adamw = optax.adamw(
-        learning_rate=constant_scheduler,
-        b1=args.adam_beta1,
-        b2=args.adam_beta2,
-        eps=args.adam_epsilon,
-        weight_decay=args.adam_weight_decay,
-    )
-
-    optimizer = optax.chain(
-        optax.clip_by_global_norm(args.max_grad_norm),
-        adamw,
-    )
-
-    unet_state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer)
-    text_encoder_state = train_state.TrainState.create(
-        apply_fn=text_encoder.__call__, params=text_encoder.params, tx=optimizer
-    )
-
-    noise_scheduler = FlaxDDPMScheduler(
-        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
-    )
-    noise_scheduler_state = noise_scheduler.create_state()
-
-    # Initialize our training
-    train_rngs = jax.random.split(rng, jax.local_device_count())
-
-    def train_step(unet_state, text_encoder_state, vae_params, batch, train_rng):
-        dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
-
-        if args.train_text_encoder:
-            params = {"text_encoder": text_encoder_state.params, "unet": unet_state.params}
-        else:
-            params = {"unet": unet_state.params}
-
-        def compute_loss(params):
-            # Convert images to latent space
-            vae_outputs = vae.apply(
-                {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
-            )
-            latents = vae_outputs.latent_dist.sample(sample_rng)
-            # (NHWC) -> (NCHW)
-            latents = jnp.transpose(latents, (0, 3, 1, 2))
-            latents = latents * vae.config.scaling_factor
-
-            # Sample noise that we'll add to the latents
-            noise_rng, timestep_rng = jax.random.split(sample_rng)
-            noise = jax.random.normal(noise_rng, latents.shape)
-            # Sample a random timestep for each image
-            bsz = latents.shape[0]
-            timesteps = jax.random.randint(
-                timestep_rng,
-                (bsz,),
-                0,
-                noise_scheduler.config.num_train_timesteps,
-            )
-
-            # Add noise to the latents according to the noise magnitude at each timestep
-            # (this is the forward diffusion process)
-            noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
-
-            # Get the text embedding for conditioning
-            if args.train_text_encoder:
-                encoder_hidden_states = text_encoder_state.apply_fn(
-                    batch["input_ids"], params=params["text_encoder"], dropout_rng=dropout_rng, train=True
-                )[0]
-            else:
-                encoder_hidden_states = text_encoder(
-                    batch["input_ids"], params=text_encoder_state.params, train=False
-                )[0]
-
-            # Predict the noise residual
-            model_pred = unet.apply(
-                {"params": params["unet"]}, noisy_latents, timesteps, encoder_hidden_states, train=True
-            ).sample
-
-            # Get the target for loss depending on the prediction type
-            if noise_scheduler.config.prediction_type == "epsilon":
-                target = noise
-            elif noise_scheduler.config.prediction_type == "v_prediction":
-                target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
-            else:
-                raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-            if args.with_prior_preservation:
-                # Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
-                model_pred, model_pred_prior = jnp.split(model_pred, 2, axis=0)
-                target, target_prior = jnp.split(target, 2, axis=0)
-
-                # Compute instance loss
-                loss = (target - model_pred) ** 2
-                loss = loss.mean()
-
-                # Compute prior loss
-                prior_loss = (target_prior - model_pred_prior) ** 2
-                prior_loss = prior_loss.mean()
-
-                # Add the prior loss to the instance loss.
-                loss = loss + args.prior_loss_weight * prior_loss
-            else:
-                loss = (target - model_pred) ** 2
-                loss = loss.mean()
-
-            return loss
-
-        grad_fn = jax.value_and_grad(compute_loss)
-        loss, grad = grad_fn(params)
-        grad = jax.lax.pmean(grad, "batch")
-
-        new_unet_state = unet_state.apply_gradients(grads=grad["unet"])
-        if args.train_text_encoder:
-            new_text_encoder_state = text_encoder_state.apply_gradients(grads=grad["text_encoder"])
-        else:
-            new_text_encoder_state = text_encoder_state
-
-        metrics = {"loss": loss}
-        metrics = jax.lax.pmean(metrics, axis_name="batch")
-
-        return new_unet_state, new_text_encoder_state, metrics, new_train_rng
-
-    # Create parallel version of the train step
-    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0, 1))
-
-    # Replicate the train state on each device
-    unet_state = jax_utils.replicate(unet_state)
-    text_encoder_state = jax_utils.replicate(text_encoder_state)
-    vae_params = jax_utils.replicate(vae_params)
-
-    # Train!
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader))
-
-    # Scheduler and math around the number of training steps.
-    if args.max_train_steps is None:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-
-    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-    logger.info("***** Running training *****")
-    logger.info(f"  Num examples = {len(train_dataset)}")
-    logger.info(f"  Num Epochs = {args.num_train_epochs}")
-    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-    logger.info(f"  Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
-    logger.info(f"  Total optimization steps = {args.max_train_steps}")
-
-    def checkpoint(step=None):
-        # Create the pipeline using the trained modules and save it.
-        scheduler, _ = FlaxPNDMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
-        safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker", from_pt=True
-        )
-        pipeline = FlaxStableDiffusionPipeline(
-            text_encoder=text_encoder,
-            vae=vae,
-            unet=unet,
-            tokenizer=tokenizer,
-            scheduler=scheduler,
-            safety_checker=safety_checker,
-            feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
-        )
-
-        outdir = os.path.join(args.output_dir, str(step)) if step else args.output_dir
-        pipeline.save_pretrained(
-            outdir,
-            params={
-                "text_encoder": get_params_to_save(text_encoder_state.params),
-                "vae": get_params_to_save(vae_params),
-                "unet": get_params_to_save(unet_state.params),
-                "safety_checker": safety_checker.params,
-            },
-        )
-
-        if args.push_to_hub:
-            message = f"checkpoint-{step}" if step is not None else "End of training"
-            repo.push_to_hub(commit_message=message, blocking=False, auto_lfs_prune=True)
-
-    global_step = 0
-
-    epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... ", position=0)
-    for epoch in epochs:
-        # ======================== Training ================================
-
-        train_metrics = []
-
-        steps_per_epoch = len(train_dataset) // total_train_batch_size
-        train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
-        # train
-        for batch in train_dataloader:
-            batch = shard(batch)
-            unet_state, text_encoder_state, train_metric, train_rngs = p_train_step(
-                unet_state, text_encoder_state, vae_params, batch, train_rngs
-            )
-            train_metrics.append(train_metric)
-
-            train_step_progress_bar.update(jax.local_device_count())
-
-            global_step += 1
-            if jax.process_index() == 0 and args.save_steps and global_step % args.save_steps == 0:
-                checkpoint(global_step)
-            if global_step >= args.max_train_steps:
-                break
-
-        train_metric = jax_utils.unreplicate(train_metric)
-
-        train_step_progress_bar.close()
-        epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
-
-    if jax.process_index() == 0:
-        checkpoint()
-
-
-if __name__ == "__main__":
-    main()
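
For context on what the script produces: `checkpoint()` saves a complete Flax pipeline to `args.output_dir`. A minimal inference sketch using the same APIs the script itself relies on (`prepare_inputs`, `shard`, `replicate`, `jit=True`, `numpy_to_pil`); the checkpoint path and prompt are placeholders:

```py
# Load a checkpoint written by the training script above and sample from it.
import jax
import numpy as np
from flax import jax_utils
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/output_dir")
prompts = ["a photo of sks dog"] * jax.device_count()  # one prompt per device
prompt_ids = shard(pipeline.prepare_inputs(prompts))
prng_seed = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
images = pipeline(prompt_ids, jax_utils.replicate(params), prng_seed, jit=True).images
images = pipeline.numpy_to_pil(np.array(images.reshape((-1,) + images.shape[-3:])))
```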
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_single.py DELETED
@@ -1,250 +0,0 @@
-import tempfile
-
-import torch
-
-from diffusers import (
-    DEISMultistepScheduler,
-    DPMSolverMultistepScheduler,
-    DPMSolverSinglestepScheduler,
-    UniPCMultistepScheduler,
-)
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
-    scheduler_classes = (DPMSolverSinglestepScheduler,)
-    forward_default_kwargs = (("num_inference_steps", 25),)
-
-    def get_scheduler_config(self, **kwargs):
-        config = {
-            "num_train_timesteps": 1000,
-            "beta_start": 0.0001,
-            "beta_end": 0.02,
-            "beta_schedule": "linear",
-            "solver_order": 2,
-            "prediction_type": "epsilon",
-            "thresholding": False,
-            "sample_max_value": 1.0,
-            "algorithm_type": "dpmsolver++",
-            "solver_type": "midpoint",
-            "lambda_min_clipped": -float("inf"),
-            "variance_type": None,
-        }
-
-        config.update(**kwargs)
-        return config
-
-    def check_over_configs(self, time_step=0, **config):
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-        sample = self.dummy_sample
-        residual = 0.1 * sample
-        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config(**config)
-            scheduler = scheduler_class(**scheduler_config)
-            scheduler.set_timesteps(num_inference_steps)
-            # copy over dummy past residuals
-            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                scheduler.save_config(tmpdirname)
-                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                new_scheduler.set_timesteps(num_inference_steps)
-                # copy over dummy past residuals
-                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
-
-            output, new_output = sample, sample
-            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
-                output = scheduler.step(residual, t, output, **kwargs).prev_sample
-                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
-
-                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    def test_from_save_pretrained(self):
-        pass
-
-    def check_over_forward(self, time_step=0, **forward_kwargs):
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-        sample = self.dummy_sample
-        residual = 0.1 * sample
-        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            scheduler.set_timesteps(num_inference_steps)
-
-            # copy over dummy past residuals (must be after setting timesteps)
-            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                scheduler.save_config(tmpdirname)
-                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                # copy over dummy past residuals
-                new_scheduler.set_timesteps(num_inference_steps)
-
-                # copy over dummy past residual (must be after setting timesteps)
-                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
-
-            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
-            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
-
-            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    def full_loop(self, scheduler=None, **config):
-        # Only build a default scheduler when none is supplied; the original
-        # version unconditionally re-created it, silently discarding the
-        # `scheduler` argument.
-        if scheduler is None:
-            scheduler_class = self.scheduler_classes[0]
-            scheduler_config = self.get_scheduler_config(**config)
-            scheduler = scheduler_class(**scheduler_config)
-
-        num_inference_steps = 10
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        scheduler.set_timesteps(num_inference_steps)
-
-        for i, t in enumerate(scheduler.timesteps):
-            residual = model(sample, t)
-            sample = scheduler.step(residual, t, sample).prev_sample
-
-        return sample
-
-    def test_full_uneven_loop(self):
-        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
-        num_inference_steps = 50
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        scheduler.set_timesteps(num_inference_steps)
-
-        # make sure that the first t is uneven
-        for i, t in enumerate(scheduler.timesteps[3:]):
-            residual = model(sample, t)
-            sample = scheduler.step(residual, t, sample).prev_sample
-
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2574) < 1e-3
-
-    def test_timesteps(self):
-        for timesteps in [25, 50, 100, 999, 1000]:
-            self.check_over_configs(num_train_timesteps=timesteps)
-
-    def test_switch(self):
-        # make sure that iterating over schedulers with the same config names gives the same results
-        # for defaults
-        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
-        sample = self.full_loop(scheduler=scheduler)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2791) < 1e-3
-
-        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
-        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
-        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
-        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
-
-        sample = self.full_loop(scheduler=scheduler)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2791) < 1e-3
-
-    def test_thresholding(self):
-        self.check_over_configs(thresholding=False)
-        for order in [1, 2, 3]:
-            for solver_type in ["midpoint", "heun"]:
-                for threshold in [0.5, 1.0, 2.0]:
-                    for prediction_type in ["epsilon", "sample"]:
-                        self.check_over_configs(
-                            thresholding=True,
-                            prediction_type=prediction_type,
-                            sample_max_value=threshold,
-                            algorithm_type="dpmsolver++",
-                            solver_order=order,
-                            solver_type=solver_type,
-                        )
-
-    def test_prediction_type(self):
-        for prediction_type in ["epsilon", "v_prediction"]:
-            self.check_over_configs(prediction_type=prediction_type)
-
-    def test_solver_order_and_type(self):
-        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
-            for solver_type in ["midpoint", "heun"]:
-                for order in [1, 2, 3]:
-                    for prediction_type in ["epsilon", "sample"]:
-                        self.check_over_configs(
-                            solver_order=order,
-                            solver_type=solver_type,
-                            prediction_type=prediction_type,
-                            algorithm_type=algorithm_type,
-                        )
-                        sample = self.full_loop(
-                            solver_order=order,
-                            solver_type=solver_type,
-                            prediction_type=prediction_type,
-                            algorithm_type=algorithm_type,
-                        )
-                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
-
-    def test_lower_order_final(self):
-        self.check_over_configs(lower_order_final=True)
-        self.check_over_configs(lower_order_final=False)
-
-    def test_lambda_min_clipped(self):
-        self.check_over_configs(lambda_min_clipped=-float("inf"))
-        self.check_over_configs(lambda_min_clipped=-5.1)
-
-    def test_variance_type(self):
-        self.check_over_configs(variance_type=None)
-        self.check_over_configs(variance_type="learned_range")
-
-    def test_inference_steps(self):
-        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
-            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
-
-    def test_full_loop_no_noise(self):
-        sample = self.full_loop()
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2791) < 1e-3
-
-    def test_full_loop_with_karras(self):
-        sample = self.full_loop(use_karras_sigmas=True)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2248) < 1e-3
-
-    def test_full_loop_with_v_prediction(self):
-        sample = self.full_loop(prediction_type="v_prediction")
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.1453) < 1e-3
-
-    def test_full_loop_with_karras_and_v_prediction(self):
-        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.0649) < 1e-3
-
-    def test_fp16_support(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
-        scheduler = scheduler_class(**scheduler_config)
-
-        num_inference_steps = 10
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter.half()
-        scheduler.set_timesteps(num_inference_steps)
-
-        for i, t in enumerate(scheduler.timesteps):
-            residual = model(sample, t)
-            sample = scheduler.step(residual, t, sample).prev_sample
-
-        assert sample.dtype == torch.float16
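
The `test_switch` case above encodes a practical guarantee: DEIS, the two DPM-Solver variants, and UniPC accept each other's configs, so any of them can be rebuilt from another's `scheduler.config`. In user code the same interchange typically looks like this sketch (the model id is illustrative):

```py
# Swap a pipeline's default scheduler for DPMSolverSinglestepScheduler,
# reusing the existing scheduler config exactly as test_switch does.
import torch
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
```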
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py DELETED
@@ -1,50 +0,0 @@
-_base_ = [
-    '../_base_/models/retinanet_r50_fpn.py',
-    '../_base_/datasets/coco_detection.py',
-    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-# model settings
-model = dict(
-    bbox_head=dict(
-        _delete_=True,
-        type='SABLRetinaHead',
-        num_classes=80,
-        in_channels=256,
-        stacked_convs=4,
-        feat_channels=256,
-        approx_anchor_generator=dict(
-            type='AnchorGenerator',
-            octave_base_scale=4,
-            scales_per_octave=3,
-            ratios=[0.5, 1.0, 2.0],
-            strides=[8, 16, 32, 64, 128]),
-        square_anchor_generator=dict(
-            type='AnchorGenerator',
-            ratios=[1.0],
-            scales=[4],
-            strides=[8, 16, 32, 64, 128]),
-        bbox_coder=dict(
-            type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
-        loss_cls=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        loss_bbox_cls=dict(
-            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
-        loss_bbox_reg=dict(
-            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
-    # training and testing settings
-    train_cfg=dict(
-        assigner=dict(
-            type='ApproxMaxIoUAssigner',
-            pos_iou_thr=0.5,
-            neg_iou_thr=0.4,
-            min_pos_iou=0.0,
-            ignore_iof_thr=-1),
-        allowed_border=-1,
-        pos_weight=-1,
-        debug=False))
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
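
Because the config above is assembled from the `_base_` files it lists, the merged result is easiest to check programmatically. A small sketch, assuming an mmdetection checkout with this file at the usual path (`mmcv.Config.fromfile` resolves the `_base_` inheritance):

```py
# Inspect the fully merged mmdetection config.
from mmcv import Config

cfg = Config.fromfile('configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py')
print(cfg.model.bbox_head.type)  # 'SABLRetinaHead', as set above
print(cfg.optimizer)             # the SGD settings defined at the bottom
```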
spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py DELETED
@@ -1,16 +0,0 @@
-_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
-model = dict(
-    pretrained='open-mmlab://res2net101_v1d_26w_4s',
-    backbone=dict(
-        type='Res2Net',
-        depth=101,
-        scales=4,
-        base_width=26,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        norm_eval=True,
-        style='pytorch',
-        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
-        stage_with_dcn=(False, True, True, True)))
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/score_hlr_sampler.py DELETED
@@ -1,264 +0,0 @@
- import torch
- from mmcv.ops import nms_match
-
- from ..builder import BBOX_SAMPLERS
- from ..transforms import bbox2roi
- from .base_sampler import BaseSampler
- from .sampling_result import SamplingResult
-
-
- @BBOX_SAMPLERS.register_module()
- class ScoreHLRSampler(BaseSampler):
-     r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
-     Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
-
-     Score hierarchical local rank (HLR) differs from RandomSampler in the
-     negative part. It first computes Score-HLR in a two-step way,
-     then linearly maps the Score-HLR to the loss weights.
-
-     Args:
-         num (int): Total number of sampled RoIs.
-         pos_fraction (float): Fraction of positive samples.
-         context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
-         neg_pos_ub (int): Upper bound of the ratio of num negative to num
-             positive, -1 means no upper bound.
-         add_gt_as_proposals (bool): Whether to add ground truth as proposals.
-         k (float): Power of the non-linear mapping.
-         bias (float): Shift of the non-linear mapping.
-         score_thr (float): Minimum score that a negative sample is to be
-             considered as a valid bbox.
-     """
-
-     def __init__(self,
-                  num,
-                  pos_fraction,
-                  context,
-                  neg_pos_ub=-1,
-                  add_gt_as_proposals=True,
-                  k=0.5,
-                  bias=0,
-                  score_thr=0.05,
-                  iou_thr=0.5,
-                  **kwargs):
-         super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
-         self.k = k
-         self.bias = bias
-         self.score_thr = score_thr
-         self.iou_thr = iou_thr
-         self.context = context
-         # context of cascade detectors is a list, so distinguish them here.
-         if not hasattr(context, 'num_stages'):
-             self.bbox_roi_extractor = context.bbox_roi_extractor
-             self.bbox_head = context.bbox_head
-             self.with_shared_head = context.with_shared_head
-             if self.with_shared_head:
-                 self.shared_head = context.shared_head
-         else:
-             self.bbox_roi_extractor = context.bbox_roi_extractor[
-                 context.current_stage]
-             self.bbox_head = context.bbox_head[context.current_stage]
-
-     @staticmethod
-     def random_choice(gallery, num):
-         """Randomly select some elements from the gallery.
-
-         If `gallery` is a Tensor, the returned indices will be a Tensor;
-         if `gallery` is an ndarray or list, the returned indices will be an
-         ndarray.
-
-         Args:
-             gallery (Tensor | ndarray | list): indices pool.
-             num (int): expected sample num.
-
-         Returns:
-             Tensor or ndarray: sampled indices.
-         """
-         assert len(gallery) >= num
-
-         is_tensor = isinstance(gallery, torch.Tensor)
-         if not is_tensor:
-             if torch.cuda.is_available():
-                 device = torch.cuda.current_device()
-             else:
-                 device = 'cpu'
-             gallery = torch.tensor(gallery, dtype=torch.long, device=device)
-         perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
-         rand_inds = gallery[perm]
-         if not is_tensor:
-             rand_inds = rand_inds.cpu().numpy()
-         return rand_inds
-
-     def _sample_pos(self, assign_result, num_expected, **kwargs):
-         """Randomly sample some positive samples."""
-         pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
-         if pos_inds.numel() <= num_expected:
-             return pos_inds
-         else:
-             return self.random_choice(pos_inds, num_expected)
-
-     def _sample_neg(self,
-                     assign_result,
-                     num_expected,
-                     bboxes,
-                     feats=None,
-                     img_meta=None,
-                     **kwargs):
-         """Sample negative samples.
-
-         Score-HLR sampling is done in the following steps:
-         1. Take the maximum positive score prediction of each negative sample
-            as s_i.
-         2. Filter out negative samples whose s_i <= score_thr; the remaining
-            samples are called valid samples.
-         3. Use NMS-Match to divide valid samples into different groups;
-            samples in the same group greatly overlap with each other.
-         4. Rank the matched samples in two steps to get the Score-HLR:
-            (1) In the same group, rank samples by their scores.
-            (2) In the same score rank across different groups,
-                rank samples by their scores again.
-         5. Linearly map the Score-HLR to the final label weights.
-
-         Args:
-             assign_result (:obj:`AssignResult`): result of assigner.
-             num_expected (int): Expected number of samples.
-             bboxes (Tensor): bbox to be sampled.
-             feats (Tensor): Features from the FPN.
-             img_meta (dict): Meta information dictionary.
-         """
-         neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
-         num_neg = neg_inds.size(0)
-         if num_neg == 0:
-             return neg_inds, None
-         with torch.no_grad():
-             neg_bboxes = bboxes[neg_inds]
-             neg_rois = bbox2roi([neg_bboxes])
-             bbox_result = self.context._bbox_forward(feats, neg_rois)
-             cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
-                 'bbox_pred']
-
-             ori_loss = self.bbox_head.loss(
-                 cls_score=cls_score,
-                 bbox_pred=None,
-                 rois=None,
-                 labels=neg_inds.new_full((num_neg, ),
-                                          self.bbox_head.num_classes),
-                 label_weights=cls_score.new_ones(num_neg),
-                 bbox_targets=None,
-                 bbox_weights=None,
-                 reduction_override='none')['loss_cls']
-
-             # filter out samples with the max score lower than score_thr
-             max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
-             valid_inds = (max_score > self.score_thr).nonzero().view(-1)
-             invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
-             num_valid = valid_inds.size(0)
-             num_invalid = invalid_inds.size(0)
-
-             num_expected = min(num_neg, num_expected)
-             num_hlr = min(num_valid, num_expected)
-             num_rand = num_expected - num_hlr
-             if num_valid > 0:
-                 valid_rois = neg_rois[valid_inds]
-                 valid_max_score = max_score[valid_inds]
-                 valid_argmax_score = argmax_score[valid_inds]
-                 valid_bbox_pred = bbox_pred[valid_inds]
-
-                 # valid_bbox_pred shape: [num_valid, #num_classes, 4]
-                 valid_bbox_pred = valid_bbox_pred.view(
-                     valid_bbox_pred.size(0), -1, 4)
-                 selected_bbox_pred = valid_bbox_pred[range(num_valid),
-                                                      valid_argmax_score]
-                 pred_bboxes = self.bbox_head.bbox_coder.decode(
-                     valid_rois[:, 1:], selected_bbox_pred)
-                 pred_bboxes_with_score = torch.cat(
-                     [pred_bboxes, valid_max_score[:, None]], -1)
-                 group = nms_match(pred_bboxes_with_score, self.iou_thr)
-
-                 # imp: importance
-                 imp = cls_score.new_zeros(num_valid)
-                 for g in group:
-                     g_score = valid_max_score[g]
-                     # g_score is already sorted
-                     rank = g_score.new_tensor(range(g_score.size(0)))
-                     imp[g] = num_valid - rank + g_score
-                 _, imp_rank_inds = imp.sort(descending=True)
-                 _, imp_rank = imp_rank_inds.sort()
-                 hlr_inds = imp_rank_inds[:num_expected]
-
-                 if num_rand > 0:
-                     rand_inds = torch.randperm(num_invalid)[:num_rand]
-                     select_inds = torch.cat(
-                         [valid_inds[hlr_inds], invalid_inds[rand_inds]])
-                 else:
-                     select_inds = valid_inds[hlr_inds]
-
-                 neg_label_weights = cls_score.new_ones(num_expected)
-
-                 up_bound = max(num_expected, num_valid)
-                 imp_weights = (up_bound -
-                                imp_rank[hlr_inds].float()) / up_bound
-                 neg_label_weights[:num_hlr] = imp_weights
-                 neg_label_weights[num_hlr:] = imp_weights.min()
-                 neg_label_weights = (self.bias +
-                                      (1 - self.bias) * neg_label_weights).pow(
-                                          self.k)
-                 ori_selected_loss = ori_loss[select_inds]
-                 new_loss = ori_selected_loss * neg_label_weights
-                 norm_ratio = ori_selected_loss.sum() / new_loss.sum()
-                 neg_label_weights *= norm_ratio
-             else:
-                 neg_label_weights = cls_score.new_ones(num_expected)
-                 select_inds = torch.randperm(num_neg)[:num_expected]
-
-             return neg_inds[select_inds], neg_label_weights
-
-     def sample(self,
-                assign_result,
-                bboxes,
-                gt_bboxes,
-                gt_labels=None,
-                img_meta=None,
-                **kwargs):
-         """Sample positive and negative bboxes.
-
-         This is a simple implementation of bbox sampling given candidates,
-         assigning results and ground truth bboxes.
-
-         Args:
-             assign_result (:obj:`AssignResult`): Bbox assigning results.
-             bboxes (Tensor): Boxes to be sampled from.
-             gt_bboxes (Tensor): Ground truth bboxes.
-             gt_labels (Tensor, optional): Class labels of ground truth bboxes.
-
-         Returns:
-             tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
-                 label weights.
-         """
-         bboxes = bboxes[:, :4]
-
-         gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
-         if self.add_gt_as_proposals:
-             bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
-             assign_result.add_gt_(gt_labels)
-             gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
-             gt_flags = torch.cat([gt_ones, gt_flags])
-
-         num_expected_pos = int(self.num * self.pos_fraction)
-         pos_inds = self.pos_sampler._sample_pos(
-             assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
-         num_sampled_pos = pos_inds.numel()
-         num_expected_neg = self.num - num_sampled_pos
-         if self.neg_pos_ub >= 0:
-             _pos = max(1, num_sampled_pos)
-             neg_upper_bound = int(self.neg_pos_ub * _pos)
-             if num_expected_neg > neg_upper_bound:
-                 num_expected_neg = neg_upper_bound
-         neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
-             assign_result,
-             num_expected_neg,
-             bboxes,
-             img_meta=img_meta,
-             **kwargs)
-
-         return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
-                               assign_result, gt_flags), neg_label_weights
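To make step 5 of `_sample_neg` concrete, here is a small, self-contained sketch of how a Score-HLR rank is linearly mapped to a label weight and then reshaped by `bias` and `k`; the ranks below are toy values, not output from the sampler:

import torch

imp_rank = torch.tensor([0., 1., 2., 3., 4., 5.])  # 0 = highest Score-HLR (hardest negative)
num_valid, k, bias = 6, 0.5, 0.0                   # defaults from ScoreHLRSampler

weights = (num_valid - imp_rank) / num_valid       # linear map of rank to (0, 1]
weights = (bias + (1 - bias) * weights).pow(k)     # non-linear reshaping, as in the sampler
print(weights)                                     # hardest negatives get the largest weights

In the sampler itself the weights are additionally rescaled by `norm_ratio` so the total negative loss is unchanged; only its distribution across samples shifts.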
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/long_replies/script.py DELETED
@@ -1,143 +0,0 @@
- import torch
- from modules import chat, shared
- from modules.text_generation import (
-     decode,
-     encode,
-     generate_reply,
- )
- from transformers import LogitsProcessor
- import gradio as gr
-
- params = {
-     "display_name": "Long replies",
-     "is_tab": False,
-     "min_length": 120,
- }
-
- initial_size = 0
-
- class MyLogits(LogitsProcessor):
-     """
-     Manipulates the probabilities for the next token before it gets sampled.
-     Used in the logits_processor_modifier function below.
-     """
-     def __init__(self):
-         self.newline_id = shared.tokenizer.encode('\n')[-1]
-
-     def __call__(self, input_ids, scores):
-         if input_ids.shape[-1] - initial_size < params["min_length"]:
-             scores[..., self.newline_id] = -1000
-             # scores[..., shared.tokenizer.eos_token_id] = -1000
-
-         # probs = torch.softmax(scores, dim=-1, dtype=torch.float)
-         # probs[0] /= probs[0].sum()
-         # scores = torch.log(probs / (1 - probs))
-         return scores
-
- def history_modifier(history):
-     """
-     Modifies the chat history.
-     Only used in chat mode.
-     """
-     return history
-
- def state_modifier(state):
-     """
-     Modifies the state variable, which is a dictionary containing the input
-     values in the UI like sliders and checkboxes.
-     """
-     return state
-
- def chat_input_modifier(text, visible_text, state):
-     """
-     Modifies the user input string in chat mode (visible_text).
-     You can also modify the internal representation of the user
-     input (text) to change how it will appear in the prompt.
-     """
-     return text, visible_text
-
- def input_modifier(string, state):
-     """
-     In default/notebook modes, modifies the whole prompt.
-
-     In chat mode, it is the same as chat_input_modifier but only applied
-     to "text", here called "string", and not to "visible_text".
-     """
-     return string
-
- def bot_prefix_modifier(string, state):
-     """
-     Modifies the prefix for the next bot reply in chat mode.
-     By default, the prefix will be something like "Bot Name:".
-     """
-     return string
-
- def tokenizer_modifier(state, prompt, input_ids, input_embeds):
-     """
-     Modifies the input ids and embeds.
-     Used by the multimodal extension to put image embeddings in the prompt.
-     Only used by loaders that use the transformers library for sampling.
-     """
-
-     global initial_size
-     initial_size = input_ids.shape[-1]
-
-     return prompt, input_ids, input_embeds
-
- def logits_processor_modifier(processor_list, input_ids):
-     """
-     Adds logits processors to the list, allowing you to access and modify
-     the next token probabilities.
-     Only used by loaders that use the transformers library for sampling.
-     """
-     processor_list.append(MyLogits())
-     return processor_list
-
- def output_modifier(string, state):
-     """
-     Modifies the LLM output before it gets presented.
-
-     In chat mode, the modified version goes into history['visible'],
-     and the original version goes into history['internal'].
-     """
-     return string
-
- def custom_generate_chat_prompt(user_input, state, **kwargs):
-     """
-     Replaces the function that generates the prompt from the chat history.
-     Only used in chat mode.
-     """
-     result = chat.generate_chat_prompt(user_input, state, **kwargs)
-     return result
-
- def custom_css():
-     """
-     Returns a CSS string that gets appended to the CSS for the webui.
-     """
-     return ''
-
- def custom_js():
-     """
-     Returns a JavaScript string that gets appended to the JavaScript
-     for the webui.
-     """
-     return ''
-
- def setup():
-     """
-     Gets executed only once, when the extension is imported.
-     """
-     pass
-
- def ui():
-     """
-     Gets executed when the UI is drawn. Custom gradio elements and
-     their corresponding event handlers should be defined here.
-
-     To learn about gradio components, check out the docs:
-     https://gradio.app/docs/
-     """
-
-     min_length = gr.Slider(0, 800, step=10, value=params['min_length'], label='Minimum reply length')
-     min_length.change(lambda x: params.update({'min_length': x}), min_length, None)
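The extension works by registering `MyLogits` through `logits_processor_modifier`. A standalone sketch of the same mechanism with plain `transformers` (the gpt2 checkpoint, prompt, and token counts are illustrative choices, not part of the extension):

from transformers import (AutoTokenizer, AutoModelForCausalLM,
                          LogitsProcessor, LogitsProcessorList)

class MinLengthNewlineBlocker(LogitsProcessor):
    """Suppress a banned token until `min_new_tokens` have been generated."""
    def __init__(self, banned_id, prompt_len, min_new_tokens):
        self.banned_id = banned_id
        self.prompt_len = prompt_len
        self.min_new_tokens = min_new_tokens

    def __call__(self, input_ids, scores):
        if input_ids.shape[-1] - self.prompt_len < self.min_new_tokens:
            scores[..., self.banned_id] = -float('inf')  # effectively banned
        return scores

tok = AutoTokenizer.from_pretrained('gpt2')            # illustrative model choice
model = AutoModelForCausalLM.from_pretrained('gpt2')
inputs = tok('The quick brown fox', return_tensors='pt')
newline_id = tok.encode('\n')[-1]
out = model.generate(
    **inputs,
    max_new_tokens=40,
    logits_processor=LogitsProcessorList(
        [MinLengthNewlineBlocker(newline_id, inputs['input_ids'].shape[-1], 20)]))
print(tok.decode(out[0]))

Setting the logit to -inf makes the ban absolute, whereas the extension's -1000 leaves a vanishingly small escape probability.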
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/sd_api_pictures/script.py DELETED
@@ -1,386 +0,0 @@
- import base64
- import io
- import re
- import time
- from datetime import date
- from pathlib import Path
-
- import gradio as gr
- import requests
- import torch
- from PIL import Image
-
- from modules import shared
- from modules.models import reload_model, unload_model
- from modules.ui import create_refresh_button
-
- torch._C._jit_set_profiling_mode(False)
-
- # parameters which can be customized in settings.json of webui
- params = {
-     'address': 'http://127.0.0.1:7860',
-     'mode': 0,  # modes of operation: 0 (Manual only), 1 (Immersive/Interactive - looks for words to trigger), 2 (Picturebook Adventure - Always on)
-     'manage_VRAM': False,
-     'save_img': False,
-     'SD_model': 'NeverEndingDream',  # not used right now
-     'prompt_prefix': '(Masterpiece:1.1), detailed, intricate, colorful',
-     'negative_prompt': '(worst quality, low quality:1.3)',
-     'width': 512,
-     'height': 512,
-     'denoising_strength': 0.61,
-     'restore_faces': False,
-     'enable_hr': False,
-     'hr_upscaler': 'ESRGAN_4x',
-     'hr_scale': '1.0',
-     'seed': -1,
-     'sampler_name': 'DPM++ 2M Karras',
-     'steps': 32,
-     'cfg_scale': 7,
-     'textgen_prefix': 'Please provide a detailed and vivid description of [subject]',
-     'sd_checkpoint': ' ',
-     'checkpoint_list': [" "]
- }
-
-
- def give_VRAM_priority(actor):
-     global shared, params
-
-     if actor == 'SD':
-         unload_model()
-         print("Requesting Auto1111 to re-load last checkpoint used...")
-         response = requests.post(url=f'{params["address"]}/sdapi/v1/reload-checkpoint', json='')
-         response.raise_for_status()
-
-     elif actor == 'LLM':
-         print("Requesting Auto1111 to vacate VRAM...")
-         response = requests.post(url=f'{params["address"]}/sdapi/v1/unload-checkpoint', json='')
-         response.raise_for_status()
-         reload_model()
-
-     elif actor == 'set':
-         print("VRAM management activated -- requesting Auto1111 to vacate VRAM...")
-         response = requests.post(url=f'{params["address"]}/sdapi/v1/unload-checkpoint', json='')
-         response.raise_for_status()
-
-     elif actor == 'reset':
-         print("VRAM management deactivated -- requesting Auto1111 to reload checkpoint")
-         response = requests.post(url=f'{params["address"]}/sdapi/v1/reload-checkpoint', json='')
-         response.raise_for_status()
-
-     else:
-         raise RuntimeError(f'Managing VRAM: "{actor}" is not a known state!')
-
-     response.raise_for_status()
-     del response
-
-
- if params['manage_VRAM']:
-     give_VRAM_priority('set')
-
- SD_models = ['NeverEndingDream']  # TODO: get with http://{address}/sdapi/v1/sd-models and allow user to select
-
- picture_response = False  # specifies if the next model response should appear as a picture
-
-
- def remove_surrounded_chars(string):
-     # this expression matches 'as few symbols as possible (0 upwards) between any asterisks' OR
-     # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string'
-     return re.sub(r'\*[^\*]*?(\*|$)', '', string)
-
-
- def triggers_are_in(string):
-     string = remove_surrounded_chars(string)
-     # regex searches for send|mail|message|me (at the end of the word) followed by
-     # a whole word of image|pic|picture|photo|snap|snapshot|selfie|meme(s),
-     # (?aims) are regex parser flags
-     return bool(re.search('(?aims)(send|mail|message|me)\\b.+?\\b(image|pic(ture)?|photo|snap(shot)?|selfie|meme)s?\\b', string))
-
-
- def state_modifier(state):
-     if picture_response:
-         state['stream'] = False
-
-     return state
-
-
- def input_modifier(string):
-     """
-     This function is applied to your text inputs before
-     they are fed into the model.
-     """
-
-     global params
-
-     if not params['mode'] == 1:  # if not in immersive/interactive mode, do nothing
-         return string
-
-     if triggers_are_in(string):  # if we're in it, check for trigger words
-         toggle_generation(True)
-         string = string.lower()
-         if "of" in string:
-             subject = string.split('of', 1)[1]  # subdivide the string once by the first 'of' instance and get what's coming after it
-             string = params['textgen_prefix'].replace("[subject]", subject)
-         else:
-             string = params['textgen_prefix'].replace("[subject]", "your appearance, your surroundings and what you are doing right now")
-
-     return string
-
- # Get and save the Stable Diffusion-generated picture
- def get_SD_pictures(description, character):
-
-     global params
-
-     if params['manage_VRAM']:
-         give_VRAM_priority('SD')
-
-     description = re.sub('<audio.*?</audio>', ' ', description)
-     description = f"({description}:1)"
-
-     payload = {
-         "prompt": params['prompt_prefix'] + description,
-         "seed": params['seed'],
-         "sampler_name": params['sampler_name'],
-         "enable_hr": params['enable_hr'],
-         "hr_scale": params['hr_scale'],
-         "hr_upscaler": params['hr_upscaler'],
-         "denoising_strength": params['denoising_strength'],
-         "steps": params['steps'],
-         "cfg_scale": params['cfg_scale'],
-         "width": params['width'],
-         "height": params['height'],
-         "restore_faces": params['restore_faces'],
-         "override_settings_restore_afterwards": True,
-         "negative_prompt": params['negative_prompt']
-     }
-
-     print(f'Prompting the image generator via the API on {params["address"]}...')
-     response = requests.post(url=f'{params["address"]}/sdapi/v1/txt2img', json=payload)
-     response.raise_for_status()
-     r = response.json()
-
-     visible_result = ""
-     for img_str in r['images']:
-         if params['save_img']:
-             img_data = base64.b64decode(img_str)
-
-             variadic = f'{date.today().strftime("%Y_%m_%d")}/{character}_{int(time.time())}'
-             output_file = Path(f'extensions/sd_api_pictures/outputs/{variadic}.png')
-             output_file.parent.mkdir(parents=True, exist_ok=True)
-
-             with open(output_file.as_posix(), 'wb') as f:
-                 f.write(img_data)
-
-             visible_result = visible_result + f'<img src="/file/extensions/sd_api_pictures/outputs/{variadic}.png" alt="{description}" style="max-width: unset; max-height: unset;">\n'
-         else:
-             image = Image.open(io.BytesIO(base64.b64decode(img_str.split(",", 1)[0])))
-             # lower the resolution of received images for the chat, otherwise the log size gets out of control quickly with all the base64 values in visible history
-             image.thumbnail((300, 300))
-             buffered = io.BytesIO()
-             image.save(buffered, format="JPEG")
-             buffered.seek(0)
-             image_bytes = buffered.getvalue()
-             img_str = "data:image/jpeg;base64," + base64.b64encode(image_bytes).decode()
-             visible_result = visible_result + f'<img src="{img_str}" alt="{description}">\n'
-
-     if params['manage_VRAM']:
-         give_VRAM_priority('LLM')
-
-     return visible_result
-
- # TODO: how do I make the UI history ignore the resulting pictures (I don't want HTML to appear in history)
- # and replace it with 'text' for the purposes of logging?
- def output_modifier(string, state):
-     """
-     This function is applied to the model outputs.
-     """
-
-     global picture_response, params
-
-     if not picture_response:
-         return string
-
-     string = remove_surrounded_chars(string)
-     string = string.replace('"', '')
-     string = string.replace('“', '')
-     string = string.replace('\n', ' ')
-     string = string.strip()
-
-     if string == '':
-         string = 'no viable description in reply, try regenerating'
-         return string
-
-     text = ""
-     if (params['mode'] < 2):
-         toggle_generation(False)
-         text = f'*Sends a picture which portrays: “{string}”*'
-     else:
-         text = string
-
-     string = get_SD_pictures(string, state['character_menu']) + "\n" + text
-
-     return string
-
-
- def bot_prefix_modifier(string):
-     """
-     This function is only applied in chat mode. It modifies
-     the prefix text for the Bot and can be used to bias its
-     behavior.
-     """
-
-     return string
-
-
- def toggle_generation(*args):
-     global picture_response, shared
-
-     if not args:
-         picture_response = not picture_response
-     else:
-         picture_response = args[0]
-
-     shared.processing_message = "*Is sending a picture...*" if picture_response else "*Is typing...*"
-
-
- def filter_address(address):
-     address = address.strip()
-     # address = re.sub('http(s)?:\/\/|\/$', '', address)  # remove starting http:// OR https:// OR trailing slash
-     address = re.sub(r'/$', '', address)  # remove trailing /s
-     if not address.startswith('http'):
-         address = 'http://' + address
-     return address
-
-
- def SD_api_address_update(address):
-     global params
-
-     msg = "✔️ SD API is found on:"
-     address = filter_address(address)
-     params.update({"address": address})
-     try:
-         response = requests.get(url=f'{params["address"]}/sdapi/v1/sd-models')
-         response.raise_for_status()
-         # r = response.json()
-     except Exception:
-         msg = "❌ No SD API endpoint on:"
-
-     return gr.Textbox.update(label=msg)
-
-
- def custom_css():
-     path_to_css = Path(__file__).parent.resolve() / 'style.css'
-     return open(path_to_css, 'r').read()
-
-
- def get_checkpoints():
-     global params
-
-     try:
-         models = requests.get(url=f'{params["address"]}/sdapi/v1/sd-models')
-         options = requests.get(url=f'{params["address"]}/sdapi/v1/options')
-         options_json = options.json()
-         params['sd_checkpoint'] = options_json['sd_model_checkpoint']
-         params['checkpoint_list'] = [result["title"] for result in models.json()]
-     except Exception:
-         params['sd_checkpoint'] = ""
-         params['checkpoint_list'] = []
-
-     return gr.update(choices=params['checkpoint_list'], value=params['sd_checkpoint'])
-
-
- def load_checkpoint(checkpoint):
-     payload = {
-         "sd_model_checkpoint": checkpoint
-     }
-
-     try:
-         requests.post(url=f'{params["address"]}/sdapi/v1/options', json=payload)
-     except Exception:
-         pass
-
-
- def get_samplers():
-     try:
-         response = requests.get(url=f'{params["address"]}/sdapi/v1/samplers')
-         response.raise_for_status()
-         samplers = [x["name"] for x in response.json()]
-     except Exception:
-         samplers = []
-
-     return samplers
-
-
- def ui():
-
-     # Gradio elements
-     # gr.Markdown('### Stable Diffusion API Pictures')  # Currently the name of extension is shown as the title
-     with gr.Accordion("Parameters", open=True, elem_classes="SDAP"):
-         with gr.Row():
-             address = gr.Textbox(placeholder=params['address'], value=params['address'], label='Auto1111\'s WebUI address')
-             modes_list = ["Manual", "Immersive/Interactive", "Picturebook/Adventure"]
-             mode = gr.Dropdown(modes_list, value=modes_list[params['mode']], label="Mode of operation", type="index")
-             with gr.Column(scale=1, min_width=300):
-                 manage_VRAM = gr.Checkbox(value=params['manage_VRAM'], label='Manage VRAM')
-                 save_img = gr.Checkbox(value=params['save_img'], label='Keep original images and use them in chat')
-
-             force_pic = gr.Button("Force the picture response")
-             suppr_pic = gr.Button("Suppress the picture response")
-         with gr.Row():
-             checkpoint = gr.Dropdown(params['checkpoint_list'], value=params['sd_checkpoint'], label="Checkpoint", type="value")
-             update_checkpoints = gr.Button("Get list of checkpoints")
-
-         with gr.Accordion("Generation parameters", open=False):
-             prompt_prefix = gr.Textbox(placeholder=params['prompt_prefix'], value=params['prompt_prefix'], label='Prompt Prefix (best used to describe the look of the character)')
-             textgen_prefix = gr.Textbox(placeholder=params['textgen_prefix'], value=params['textgen_prefix'], label='textgen prefix (type [subject] where the subject should be placed)')
-             negative_prompt = gr.Textbox(placeholder=params['negative_prompt'], value=params['negative_prompt'], label='Negative Prompt')
-             with gr.Row():
-                 with gr.Column():
-                     width = gr.Slider(64, 2048, value=params['width'], step=64, label='Width')
-                     height = gr.Slider(64, 2048, value=params['height'], step=64, label='Height')
-                 with gr.Column(variant="compact", elem_id="sampler_col"):
-                     with gr.Row(elem_id="sampler_row"):
-                         sampler_name = gr.Dropdown(value=params['sampler_name'], label='Sampling method', elem_id="sampler_box")
-                         create_refresh_button(sampler_name, lambda: None, lambda: {'choices': get_samplers()}, 'refresh-button')
-                     steps = gr.Slider(1, 150, value=params['steps'], step=1, label="Sampling steps", elem_id="steps_box")
-             with gr.Row():
-                 seed = gr.Number(label="Seed", value=params['seed'], elem_id="seed_box")
-                 cfg_scale = gr.Number(label="CFG Scale", value=params['cfg_scale'], elem_id="cfg_box")
-                 with gr.Column() as hr_options:
-                     restore_faces = gr.Checkbox(value=params['restore_faces'], label='Restore faces')
-                     enable_hr = gr.Checkbox(value=params['enable_hr'], label='Hires. fix')
-             with gr.Row(visible=params['enable_hr'], elem_classes="hires_opts") as hr_options:
-                 hr_scale = gr.Slider(1, 4, value=params['hr_scale'], step=0.1, label='Upscale by')
-                 denoising_strength = gr.Slider(0, 1, value=params['denoising_strength'], step=0.01, label='Denoising strength')
-                 hr_upscaler = gr.Textbox(placeholder=params['hr_upscaler'], value=params['hr_upscaler'], label='Upscaler')
-
-     # Event functions to update the parameters in the backend
-     address.change(lambda x: params.update({"address": filter_address(x)}), address, None)
-     mode.select(lambda x: params.update({"mode": x}), mode, None)
-     mode.select(lambda x: toggle_generation(x > 1), inputs=mode, outputs=None)
-     manage_VRAM.change(lambda x: params.update({"manage_VRAM": x}), manage_VRAM, None)
-     manage_VRAM.change(lambda x: give_VRAM_priority('set' if x else 'reset'), inputs=manage_VRAM, outputs=None)
-     save_img.change(lambda x: params.update({"save_img": x}), save_img, None)
-
-     address.submit(fn=SD_api_address_update, inputs=address, outputs=address)
-     prompt_prefix.change(lambda x: params.update({"prompt_prefix": x}), prompt_prefix, None)
-     textgen_prefix.change(lambda x: params.update({"textgen_prefix": x}), textgen_prefix, None)
-     negative_prompt.change(lambda x: params.update({"negative_prompt": x}), negative_prompt, None)
-     width.change(lambda x: params.update({"width": x}), width, None)
-     height.change(lambda x: params.update({"height": x}), height, None)
-     hr_scale.change(lambda x: params.update({"hr_scale": x}), hr_scale, None)
-     denoising_strength.change(lambda x: params.update({"denoising_strength": x}), denoising_strength, None)
-     restore_faces.change(lambda x: params.update({"restore_faces": x}), restore_faces, None)
-     hr_upscaler.change(lambda x: params.update({"hr_upscaler": x}), hr_upscaler, None)
-     enable_hr.change(lambda x: params.update({"enable_hr": x}), enable_hr, None)
-     enable_hr.change(lambda x: hr_options.update(visible=params["enable_hr"]), enable_hr, hr_options)
-     update_checkpoints.click(get_checkpoints, None, checkpoint)
-     checkpoint.change(lambda x: params.update({"sd_checkpoint": x}), checkpoint, None)
-     checkpoint.change(load_checkpoint, checkpoint, None)
-
-     sampler_name.change(lambda x: params.update({"sampler_name": x}), sampler_name, None)
-     steps.change(lambda x: params.update({"steps": x}), steps, None)
-     seed.change(lambda x: params.update({"seed": x}), seed, None)
-     cfg_scale.change(lambda x: params.update({"cfg_scale": x}), cfg_scale, None)
-
-     force_pic.click(lambda x: toggle_generation(True), inputs=force_pic, outputs=None)
-     suppr_pic.click(lambda x: toggle_generation(False), inputs=suppr_pic, outputs=None)
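All image generation in the script above goes through the AUTOMATIC1111 WebUI REST API. A minimal round trip against the same `/sdapi/v1/txt2img` endpoint, assuming the WebUI is running locally with the `--api` flag; the address and prompt are placeholders:

import base64
import requests

payload = {
    'prompt': 'a watercolor lighthouse at dusk',  # placeholder prompt
    'steps': 32,
    'width': 512,
    'height': 512,
}
resp = requests.post('http://127.0.0.1:7860/sdapi/v1/txt2img', json=payload)
resp.raise_for_status()
# The API returns base64-encoded PNGs in the 'images' list.
with open('out.png', 'wb') as f:
    f.write(base64.b64decode(resp.json()['images'][0]))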
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/script.py DELETED
@@ -1,355 +0,0 @@
- """
- This file is responsible for the UI and how the application interacts with the rest of the system.
- """
- import os
- from pathlib import Path
-
- # Point to where nltk will find the required data.
- os.environ['NLTK_DATA'] = str(Path("extensions/superboogav2/nltk_data").resolve())
-
- import textwrap
- import codecs
- import gradio as gr
-
- import extensions.superboogav2.parameters as parameters
-
- from modules.logging_colors import logger
- from modules import shared
-
- from .utils import create_metadata_source
- from .chromadb import make_collector
- from .download_urls import feed_url_into_collector
- from .data_processor import process_and_add_to_collector
- from .benchmark import benchmark
- from .optimize import optimize
- from .notebook_handler import input_modifier_internal
- from .chat_handler import custom_generate_chat_prompt_internal
- from .api import APIManager
-
- collector = None
- api_manager = None
-
- def setup():
-     global collector
-     global api_manager
-     collector = make_collector()
-     api_manager = APIManager(collector)
-
-     if parameters.get_api_on():
-         api_manager.start_server(parameters.get_api_port())
-
- def _feed_data_into_collector(corpus):
-     yield '### Processing data...'
-     process_and_add_to_collector(corpus, collector, False, create_metadata_source('direct-text'))
-     yield '### Done.'
-
-
- def _feed_file_into_collector(file):
-     yield '### Reading and processing the input dataset...'
-     text = file.decode('utf-8')
-     process_and_add_to_collector(text, collector, False, create_metadata_source('file'))
-     yield '### Done.'
-
-
- def _feed_url_into_collector(urls):
-     for i in feed_url_into_collector(urls, collector):
-         yield i
-     yield '### Done.'
-
-
- def _begin_benchmark():
-     score, max_score = benchmark(Path("extensions/superboogav2/benchmark_texts/questions.json"), collector)
-     return f'**Score**: {score}/{max_score}'
-
-
- def _begin_optimization(progress=gr.Progress()):
-     return optimize(collector, progress), *_get_optimizable_settings()
-
-
- def _clear_data():
-     collector.clear()
-     return "### Data Cleared!"
-
-
- def _get_optimizable_settings() -> list:
-     preprocess_pipeline = []
-     if parameters.should_to_lower():
-         preprocess_pipeline.append('Lower Cases')
-     if parameters.should_remove_punctuation():
-         preprocess_pipeline.append('Remove Punctuation')
-     if parameters.should_remove_specific_pos():
-         preprocess_pipeline.append('Remove Adverbs')
-     if parameters.should_remove_stopwords():
-         preprocess_pipeline.append('Remove Stop Words')
-     if parameters.should_lemmatize():
-         preprocess_pipeline.append('Lemmatize')
-     if parameters.should_merge_spaces():
-         preprocess_pipeline.append('Merge Spaces')
-     if parameters.should_strip():
-         preprocess_pipeline.append('Strip Edges')
-
-     return [
-         parameters.get_time_power(),
-         parameters.get_time_steepness(),
-         parameters.get_significant_level(),
-         parameters.get_min_num_sentences(),
-         parameters.get_new_dist_strategy(),
-         parameters.get_delta_start(),
-         parameters.get_min_num_length(),
-         parameters.get_num_conversion_strategy(),
-         preprocess_pipeline,
-         parameters.get_chunk_count(),
-         parameters.get_context_len(),
-         parameters.get_chunk_len()
-     ]
-
-
- def _apply_settings(optimization_steps, time_power, time_steepness, significant_level, min_sentences, new_dist_strat, delta_start, min_number_length, num_conversion,
-                     preprocess_pipeline, api_port, api_on, injection_strategy, add_chat_to_data, manual, postfix, data_separator, prefix, max_token_count,
-                     chunk_count, chunk_sep, context_len, chunk_regex, chunk_len, threads, strong_cleanup):
-     logger.debug('Applying settings.')
-
-     try:
-         parameters.set_optimization_steps(optimization_steps)
-         parameters.set_significant_level(significant_level)
-         parameters.set_min_num_sentences(min_sentences)
-         parameters.set_new_dist_strategy(new_dist_strat)
-         parameters.set_delta_start(delta_start)
-         parameters.set_min_num_length(min_number_length)
-         parameters.set_num_conversion_strategy(num_conversion)
-         parameters.set_api_port(api_port)
-         parameters.set_api_on(api_on)
-         parameters.set_injection_strategy(injection_strategy)
-         parameters.set_add_chat_to_data(add_chat_to_data)
-         parameters.set_manual(manual)
-         parameters.set_postfix(codecs.decode(postfix, 'unicode_escape'))
-         parameters.set_data_separator(codecs.decode(data_separator, 'unicode_escape'))
-         parameters.set_prefix(codecs.decode(prefix, 'unicode_escape'))
-         parameters.set_max_token_count(max_token_count)
-         parameters.set_time_power(time_power)
-         parameters.set_time_steepness(time_steepness)
-         parameters.set_chunk_count(chunk_count)
-         parameters.set_chunk_separator(codecs.decode(chunk_sep, 'unicode_escape'))
-         parameters.set_context_len(context_len)
-         parameters.set_chunk_regex(chunk_regex)
-         parameters.set_chunk_len(chunk_len)
-         parameters.set_num_threads(threads)
-         parameters.set_strong_cleanup(strong_cleanup)
-
-         preprocess_choices = ['Lower Cases', 'Remove Punctuation', 'Remove Adverbs', 'Remove Stop Words', 'Lemmatize', 'Merge Spaces', 'Strip Edges']
-         for preprocess_method in preprocess_choices:
-             if preprocess_method == 'Lower Cases':
-                 parameters.set_to_lower(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Remove Punctuation':
-                 parameters.set_remove_punctuation(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Remove Adverbs':
-                 parameters.set_remove_specific_pos(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Remove Stop Words':
-                 parameters.set_remove_stopwords(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Lemmatize':
-                 parameters.set_lemmatize(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Merge Spaces':
-                 parameters.set_merge_spaces(preprocess_method in preprocess_pipeline)
-             elif preprocess_method == 'Strip Edges':
-                 parameters.set_strip(preprocess_method in preprocess_pipeline)
-
-         # Based on API on/off, start or stop the server
-         if api_manager is not None:
-             if parameters.get_api_on() and (not api_manager.is_server_running()):
-                 api_manager.start_server(parameters.get_api_port())
-             elif (not parameters.get_api_on()) and api_manager.is_server_running():
-                 api_manager.stop_server()
-     except Exception as e:
-         logger.warn(f'Could not properly apply settings: {str(e)}')
-
-
- def custom_generate_chat_prompt(user_input, state, **kwargs):
-     return custom_generate_chat_prompt_internal(user_input, state, collector, **kwargs)
-
-
- def input_modifier(string):
-     return input_modifier_internal(string, collector)
-
-
- def ui():
-     with gr.Accordion("Click for more information...", open=False):
-         gr.Markdown(textwrap.dedent("""
-
-         ## About
-
-         This extension takes a dataset as input, breaks it into chunks, and adds the result to a local/offline Chroma database.
-
-         The database is then queried during inference time to get the excerpts that are closest to your input. The idea is to create an arbitrarily large pseudo context.
-
-         The core methodology was developed and contributed by kaiokendev, who is working on improvements to the method in this repository: https://github.com/kaiokendev/superbig
-
-         ## Data input
-
-         Start by entering some data in the interface below and then clicking on "Load data".
-
-         Each time you load some new data, the old chunks are discarded.
-
-         ## Chat mode
-
-         #### Instruct
-
-         On each turn, the chunks will be compared to your current input and the most relevant matches will be appended to the input in the following format:
-
-         ```
-         Consider the excerpts below as additional context:
-         ...
-         ```
-
-         The injection doesn't make it into the chat history. It is only used in the current generation.
-
-         #### Regular chat
-
-         The chunks from the external data sources are ignored, and the chroma database is built based on the chat history instead. The most relevant past exchanges relative to the present input are added to the context string. This way, the extension acts as a long term memory.
-
-         ## Notebook/default modes
-
-         Your question must be manually specified between `<|begin-user-input|>` and `<|end-user-input|>` tags, and the injection point must be specified with `<|injection-point|>`.
-
-         The special tokens mentioned above (`<|begin-user-input|>`, `<|end-user-input|>`, and `<|injection-point|>`) are removed in the background before the text generation begins.
-
-         Here is an example in Vicuna 1.1 format:
-
-         ```
-         A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
-
-         USER:
-         <|injection-point|>
-
-         <|begin-user-input|>What datasets are mentioned in the text above?<|end-user-input|>
-         ASSISTANT:
-         ```
-         """))
-
-     with gr.Row():
-         with gr.Column(min_width=600):
-             with gr.Tab("Text input"):
-                 data_input = gr.Textbox(lines=20, label='Input data')
-                 update_data = gr.Button('Load data')
-
-             with gr.Tab("URL input"):
-                 url_input = gr.Textbox(lines=10, label='Input URLs', info='Enter one or more URLs separated by newline characters.')
-                 strong_cleanup = gr.Checkbox(value=parameters.get_is_strong_cleanup(), label='Strong cleanup', info='Only keeps html elements that look like long-form text.')
-                 threads = gr.Number(value=parameters.get_num_threads(), label='Threads', info='The number of threads to use while downloading the URLs.', precision=0)
-                 update_url = gr.Button('Load data')
-
-             with gr.Tab("File input"):
-                 file_input = gr.File(label='Input file', type='binary')
-                 update_file = gr.Button('Load data')
-
-             with gr.Tab("Settings"):
-                 with gr.Accordion("Processing settings", open=True):
-                     chunk_len = gr.Textbox(value=parameters.get_chunk_len(), label='Chunk length', info='In characters, not tokens. This value is used when you click on "Load data".')
-                     chunk_regex = gr.Textbox(value=parameters.get_chunk_regex(), label='Chunk regex', info='Will specifically add the captured text to the embeddings.')
-                     context_len = gr.Textbox(value=parameters.get_context_len(), label='Context length', info='In characters, not tokens. How much context to load around each chunk.')
-                     chunk_sep = gr.Textbox(value=codecs.encode(parameters.get_chunk_separator(), 'unicode_escape').decode(), label='Chunk separator', info='Used to manually split chunks. Manually split chunks longer than chunk length are split again. This value is used when you click on "Load data".')
-
-                 with gr.Accordion("Generation settings", open=False):
-                     chunk_count = gr.Number(value=parameters.get_chunk_count(), label='Chunk count', info='The number of closest-matching chunks to include in the prompt.')
-                     max_token_count = gr.Number(value=parameters.get_max_token_count(), label='Max Context Tokens', info='The context length in tokens will not exceed this value.')
-                     prefix = gr.Textbox(value=codecs.encode(parameters.get_prefix(), 'unicode_escape').decode(), label='Prefix', info='What to put before the injection point.')
-                     data_separator = gr.Textbox(value=codecs.encode(parameters.get_data_separator(), 'unicode_escape').decode(), label='Data separator', info='When multiple pieces of distant data are added, they might be unrelated. It\'s important to separate them.')
-                     postfix = gr.Textbox(value=codecs.encode(parameters.get_postfix(), 'unicode_escape').decode(), label='Postfix', info='What to put after the injection point.')
-                     with gr.Row():
-                         manual = gr.Checkbox(value=parameters.get_is_manual(), label="Is Manual", info="Manually specify when to use ChromaDB. Insert `!c` at the start or end of the message to trigger a query.", visible=shared.is_chat())
-                         add_chat_to_data = gr.Checkbox(value=parameters.get_add_chat_to_data(), label="Add Chat to Data", info="Automatically feed the chat history as you chat.", visible=shared.is_chat())
-                     injection_strategy = gr.Radio(choices=[parameters.PREPEND_TO_LAST, parameters.APPEND_TO_LAST, parameters.HIJACK_LAST_IN_CONTEXT], value=parameters.get_injection_strategy(), label='Injection Strategy', info='Where to inject the messages in chat or instruct mode.', visible=shared.is_chat())
-                     with gr.Row():
-                         api_on = gr.Checkbox(value=parameters.get_api_on(), label="Turn on API", info="Check this to turn on the API service.")
-                         api_port = gr.Number(value=parameters.get_api_port(), label="API Port", info="The port on which the API service will run.")
-
-                 with gr.Accordion("Advanced settings", open=False):
-                     preprocess_set_choices = []
-                     if parameters.should_to_lower():
-                         preprocess_set_choices.append('Lower Cases')
-                     if parameters.should_remove_punctuation():
-                         preprocess_set_choices.append('Remove Punctuation')
-                     if parameters.should_remove_specific_pos():
-                         preprocess_set_choices.append('Remove Adverbs')
-                     if parameters.should_remove_stopwords():
-                         preprocess_set_choices.append('Remove Stop Words')
-                     if parameters.should_lemmatize():
-                         preprocess_set_choices.append('Lemmatize')
-                     if parameters.should_merge_spaces():
-                         preprocess_set_choices.append('Merge Spaces')
-                     if parameters.should_strip():
-                         preprocess_set_choices.append('Strip Edges')
-
-                     preprocess_pipeline = gr.CheckboxGroup(label='Preprocessing pipeline', choices=[
-                         'Lower Cases',
-                         'Remove Punctuation',
-                         'Remove Adverbs',
-                         'Remove Stop Words',
-                         'Lemmatize',
-                         'Merge Spaces',
-                         'Strip Edges',
-                     ], value=preprocess_set_choices, interactive=True, info='How to preprocess the text before it is turned into an embedding.')
-
-                     with gr.Row():
-                         num_conversion = gr.Dropdown(choices=[parameters.NUM_TO_WORD_METHOD, parameters.NUM_TO_CHAR_METHOD, parameters.NUM_TO_CHAR_LONG_METHOD, 'None'], value=parameters.get_num_conversion_strategy(), label="Number Conversion Method", info='How to preprocess numbers before creating the embeddings.', interactive=True)
-                         min_number_length = gr.Number(value=parameters.get_min_num_length(), label='Number Length Threshold', info='In digits. Only numbers that have at least that many digits will be converted.', interactive=True)
-
-                     delta_start = gr.Number(value=parameters.get_delta_start(), label='Delta Start Index', info='If the system encounters two identical embeddings, and they both start within the same delta, then only the first will be considered.', interactive=True)
-                     new_dist_strat = gr.Dropdown(choices=[parameters.DIST_MIN_STRATEGY, parameters.DIST_HARMONIC_STRATEGY, parameters.DIST_GEOMETRIC_STRATEGY, parameters.DIST_ARITHMETIC_STRATEGY], value=parameters.get_new_dist_strategy(), label="Distance Strategy", info='When two embedding texts are merged, the distance of the new piece will be decided using one of these strategies.', interactive=True)
-                     min_sentences = gr.Number(value=parameters.get_min_num_sentences(), label='Summary Threshold', info='In sentences. The minimum number of sentences to trigger text-rank summarization.', interactive=True)
-                     significant_level = gr.Slider(0.8, 2, value=parameters.get_significant_level(), label='Significant Level', info='Defines the cut-off for what is considered a "significant" distance relative to the median distance among the returned samples.', interactive=True)
-                     time_steepness = gr.Slider(0.01, 1.0, value=parameters.get_time_steepness(), label='Time Weighing Steepness', info='How differently two close excerpts are going to be weighed.')
-                     time_power = gr.Slider(0.0, 1.0, value=parameters.get_time_power(), label='Time Weighing Power', info='How influential the weighing is. At 1.0, old entries won\'t be considered.')
-
-             with gr.Tab("Benchmark"):
-                 benchmark_button = gr.Button('Benchmark')
-                 optimize_button = gr.Button('Optimize')
-                 optimization_steps = gr.Number(value=parameters.get_optimization_steps(), label='Optimization Steps', info='For how many steps to optimize.', interactive=True)
-
-             clear_button = gr.Button('❌ Clear Data')
-
-         with gr.Column():
-             last_updated = gr.Markdown()
-
-     all_params = [optimization_steps, time_power, time_steepness, significant_level, min_sentences, new_dist_strat, delta_start, min_number_length, num_conversion,
-                   preprocess_pipeline, api_port, api_on, injection_strategy, add_chat_to_data, manual, postfix, data_separator, prefix, max_token_count,
-                   chunk_count, chunk_sep, context_len, chunk_regex, chunk_len, threads, strong_cleanup]
-     optimizable_params = [time_power, time_steepness, significant_level, min_sentences, new_dist_strat, delta_start, min_number_length, num_conversion,
-                           preprocess_pipeline, chunk_count, context_len, chunk_len]
-
-     update_data.click(_feed_data_into_collector, [data_input], last_updated, show_progress=False)
-     update_url.click(_feed_url_into_collector, [url_input], last_updated, show_progress=False)
-     update_file.click(_feed_file_into_collector, [file_input], last_updated, show_progress=False)
-     benchmark_button.click(_begin_benchmark, [], last_updated, show_progress=True)
-     optimize_button.click(_begin_optimization, [], [last_updated] + optimizable_params, show_progress=True)
-     clear_button.click(_clear_data, [], last_updated, show_progress=False)
-
-     optimization_steps.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     time_power.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     time_steepness.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     significant_level.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     min_sentences.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     new_dist_strat.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     delta_start.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     min_number_length.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     num_conversion.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     preprocess_pipeline.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     api_port.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     api_on.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     injection_strategy.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     add_chat_to_data.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     manual.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     postfix.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     data_separator.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     prefix.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     max_token_count.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     chunk_count.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     chunk_sep.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     context_len.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     chunk_regex.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     chunk_len.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     threads.input(fn=_apply_settings, inputs=all_params, show_progress=False)
-     strong_cleanup.input(fn=_apply_settings, inputs=all_params, show_progress=False)
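As a small sketch of the notebook-mode contract described in the markdown above: extract the tagged user input, substitute retrieved chunks at the injection point, and strip the special tokens before generation. The regexes and the stand-in chunk text are illustrative, not the extension's actual implementation:

import re

prompt = (
    "USER:\n<|injection-point|>\n\n"
    "<|begin-user-input|>What datasets are mentioned?<|end-user-input|>\nASSISTANT:")

user_input = re.search(
    r'<\|begin-user-input\|>(.*?)<\|end-user-input\|>', prompt, re.S).group(1)
retrieved = "Consider the excerpts below as additional context:\n- excerpt one"  # stand-in for ChromaDB results
prompt = prompt.replace('<|injection-point|>', retrieved)
prompt = re.sub(r'<\|(begin-user-input|end-user-input)\|>', '', prompt)
print(user_input, prompt, sep='\n---\n')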
spaces/AquaSuisei/ChatGPTXE/assets/custom.js DELETED
@@ -1,70 +0,0 @@
- // custom javascript here
- const MAX_HISTORY_LENGTH = 32;
-
- var key_down_history = [];
- var currentIndex = -1;
- var user_input_ta;
-
- var ga = document.getElementsByTagName("gradio-app");
- var targetNode = ga[0];
- var observer = new MutationObserver(function(mutations) {
-     for (var i = 0; i < mutations.length; i++) {
-         if (mutations[i].addedNodes.length) {
-             var user_input_tb = document.getElementById('user_input_tb');
-             if (user_input_tb) {
-                 // user_input_tb has been added to the DOM tree;
-                 // code that must run after the element has loaded goes here
-                 user_input_ta = user_input_tb.querySelector("textarea");
-                 if (user_input_ta) {
-                     observer.disconnect(); // stop observing
-                     // listen for keydown events on the textarea
-                     user_input_ta.addEventListener("keydown", function (event) {
-                         var value = user_input_ta.value.trim();
-                         // check whether an arrow key was pressed
-                         if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
-                             // if an arrow key was pressed while the input box has content
-                             // that is not in the history, do nothing
-                             if (value && key_down_history.indexOf(value) === -1)
-                                 return;
-                             // prevent the default behavior for the actions we handle
-                             event.preventDefault();
-                             var length = key_down_history.length;
-                             if (length === 0) {
-                                 currentIndex = -1; // if the history is empty, just reset the current selection
-                                 return;
-                             }
-                             if (currentIndex === -1) {
-                                 currentIndex = length;
-                             }
-                             if (event.code === 'ArrowUp' && currentIndex > 0) {
-                                 currentIndex--;
-                                 user_input_ta.value = key_down_history[currentIndex];
-                             } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
-                                 currentIndex++;
-                                 user_input_ta.value = key_down_history[currentIndex];
-                             }
-                             user_input_ta.selectionStart = user_input_ta.value.length;
-                             user_input_ta.selectionEnd = user_input_ta.value.length;
-                             const input_event = new InputEvent("input", {bubbles: true, cancelable: true});
-                             user_input_ta.dispatchEvent(input_event);
-                         } else if (event.code === "Enter") {
-                             if (value) {
-                                 currentIndex = -1;
-                                 if (key_down_history.indexOf(value) === -1) {
-                                     key_down_history.push(value);
-                                     if (key_down_history.length > MAX_HISTORY_LENGTH) {
-                                         key_down_history.shift();
-                                     }
-                                 }
-                             }
-                         }
-                     });
-                     break;
-                 }
-             }
-         }
-     }
- });
-
- // watch the target node's child list for changes
- observer.observe(targetNode, { childList: true, subtree: true });
spaces/AsakuraMizu/moe-tts/text/shanghainese.py DELETED
@@ -1,64 +0,0 @@
- import re
- import cn2an
- import opencc
-
-
- converter = opencc.OpenCC('chinese_dialect_lexicons/zaonhe')
-
- # List of (Latin alphabet, ipa) pairs:
- _latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('A', 'ᴇ'),
-     ('B', 'bi'),
-     ('C', 'si'),
-     ('D', 'di'),
-     ('E', 'i'),
-     ('F', 'ᴇf'),
-     ('G', 'dʑi'),
-     ('H', 'ᴇtɕʰ'),
-     ('I', 'ᴀi'),
-     ('J', 'dʑᴇ'),
-     ('K', 'kʰᴇ'),
-     ('L', 'ᴇl'),
-     ('M', 'ᴇm'),
-     ('N', 'ᴇn'),
-     ('O', 'o'),
-     ('P', 'pʰi'),
-     ('Q', 'kʰiu'),
-     ('R', 'ᴀl'),
-     ('S', 'ᴇs'),
-     ('T', 'tʰi'),
-     ('U', 'ɦiu'),
-     ('V', 'vi'),
-     ('W', 'dᴀbɤliu'),
-     ('X', 'ᴇks'),
-     ('Y', 'uᴀi'),
-     ('Z', 'zᴇ')
- ]]
-
-
- def _number_to_shanghainese(num):
-     num = cn2an.an2cn(num).replace('一十', '十').replace('二十', '廿').replace('二', '两')
-     return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num)
-
-
- def number_to_shanghainese(text):
-     return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text)
-
-
- def latin_to_ipa(text):
-     for regex, replacement in _latin_to_ipa:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def shanghainese_to_ipa(text):
-     text = number_to_shanghainese(text.upper())
-     text = converter.convert(text).replace('-', '').replace('$', ' ')
-     text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group()) + ' ', text)
-     text = re.sub(r'[、;:]', ',', text)
-     text = re.sub(r'\s*,\s*', ', ', text)
-     text = re.sub(r'\s*。\s*', '. ', text)
-     text = re.sub(r'\s*?\s*', '? ', text)
-     text = re.sub(r'\s*!\s*', '! ', text)
-     text = re.sub(r'\s*$', '', text)
-     return text
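A quick usage sketch of the Latin-letter pass above; it exercises only the `_latin_to_ipa`-style substitution, so it runs without the OpenCC lexicon (the table here is truncated to three entries for illustration):

import re

# Same shape as the module's _latin_to_ipa table, shortened to three entries.
pairs = [(re.compile(p), r) for p, r in [('A', 'ᴇ'), ('B', 'bi'), ('C', 'si')]]

text = 'ABC'
for regex, replacement in pairs:
    text = regex.sub(replacement, text)
print(text)  # -> ᴇbisi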
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/winterm.py DELETED
@@ -1,195 +0,0 @@
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
- try:
-     from msvcrt import get_osfhandle
- except ImportError:
-     def get_osfhandle(_):
-         raise OSError("This isn't windows!")
-
-
- from . import win32
-
- # from wincon.h
- class WinColor(object):
-     BLACK = 0
-     BLUE = 1
-     GREEN = 2
-     CYAN = 3
-     RED = 4
-     MAGENTA = 5
-     YELLOW = 6
-     GREY = 7
-
- # from wincon.h
- class WinStyle(object):
-     NORMAL = 0x00  # dim text, dim background
-     BRIGHT = 0x08  # bright text, dim background
-     BRIGHT_BACKGROUND = 0x80  # dim text, bright background
-
- class WinTerm(object):
-
-     def __init__(self):
-         self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
-         self.set_attrs(self._default)
-         self._default_fore = self._fore
-         self._default_back = self._back
-         self._default_style = self._style
-         # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
-         # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
-         # we track them separately, since LIGHT_EX is overwritten by Fore/Back
-         # and BRIGHT is overwritten by Style codes.
-         self._light = 0
-
-     def get_attrs(self):
-         return self._fore + self._back * 16 + (self._style | self._light)
-
-     def set_attrs(self, value):
-         self._fore = value & 7
-         self._back = (value >> 4) & 7
-         self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
-
-     def reset_all(self, on_stderr=None):
-         self.set_attrs(self._default)
-         self.set_console(attrs=self._default)
-         self._light = 0
-
-     def fore(self, fore=None, light=False, on_stderr=False):
-         if fore is None:
-             fore = self._default_fore
-         self._fore = fore
-         # Emulate LIGHT_EX with BRIGHT Style
-         if light:
-             self._light |= WinStyle.BRIGHT
-         else:
-             self._light &= ~WinStyle.BRIGHT
-         self.set_console(on_stderr=on_stderr)
-
-     def back(self, back=None, light=False, on_stderr=False):
-         if back is None:
-             back = self._default_back
-         self._back = back
-         # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
-         if light:
-             self._light |= WinStyle.BRIGHT_BACKGROUND
-         else:
-             self._light &= ~WinStyle.BRIGHT_BACKGROUND
-         self.set_console(on_stderr=on_stderr)
-
-     def style(self, style=None, on_stderr=False):
-         if style is None:
-             style = self._default_style
-         self._style = style
-         self.set_console(on_stderr=on_stderr)
-
-     def set_console(self, attrs=None, on_stderr=False):
-         if attrs is None:
-             attrs = self.get_attrs()
-         handle = win32.STDOUT
-         if on_stderr:
-             handle = win32.STDERR
-         win32.SetConsoleTextAttribute(handle, attrs)
-
-     def get_position(self, handle):
-         position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
-         # Because Windows coordinates are 0-based,
-         # and win32.SetConsoleCursorPosition expects 1-based.
-         position.X += 1
-         position.Y += 1
-         return position
-
-     def set_cursor_position(self, position=None, on_stderr=False):
-         if position is None:
-             # I'm not currently tracking the position, so there is no default.
-             # position = self.get_position()
-             return
-         handle = win32.STDOUT
-         if on_stderr:
-             handle = win32.STDERR
-         win32.SetConsoleCursorPosition(handle, position)
-
-     def cursor_adjust(self, x, y, on_stderr=False):
-         handle = win32.STDOUT
-         if on_stderr:
-             handle = win32.STDERR
-         position = self.get_position(handle)
-         adjusted_position = (position.Y + y, position.X + x)
-         win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
-
-     def erase_screen(self, mode=0, on_stderr=False):
-         # 0 should clear from the cursor to the end of the screen.
-         # 1 should clear from the cursor to the beginning of the screen.
-         # 2 should clear the entire screen, and move cursor to (1,1)
-         handle = win32.STDOUT
-         if on_stderr:
-             handle = win32.STDERR
-         csbi = win32.GetConsoleScreenBufferInfo(handle)
-         # get the number of character cells in the current buffer
-         cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
-         # get number of character cells before current cursor position
-         cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
-         if mode == 0:
-             from_coord = csbi.dwCursorPosition
-             cells_to_erase = cells_in_screen - cells_before_cursor
-         elif mode == 1:
-             from_coord = win32.COORD(0, 0)
-             cells_to_erase = cells_before_cursor
-         elif mode == 2:
-             from_coord = win32.COORD(0, 0)
-             cells_to_erase = cells_in_screen
-         else:
-             # invalid mode
-             return
-         # fill the entire screen with blanks
-         win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
-         # now set the buffer's attributes accordingly
-         win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
-         if mode == 2:
-             # put the cursor where needed
-             win32.SetConsoleCursorPosition(handle, (1, 1))
-
-     def erase_line(self, mode=0, on_stderr=False):
-         # 0 should clear from the cursor to the end of the line.
-         # 1 should clear from the cursor to the beginning of the line.
-         # 2 should clear the entire line.
-         handle = win32.STDOUT
-         if on_stderr:
-             handle = win32.STDERR
-         csbi = win32.GetConsoleScreenBufferInfo(handle)
-         if mode == 0:
-             from_coord = csbi.dwCursorPosition
-             cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
-         elif mode == 1:
-             from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
-             cells_to_erase = csbi.dwCursorPosition.X
-         elif mode == 2:
-             from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
-             cells_to_erase = csbi.dwSize.X
-         else:
-             # invalid mode
-             return
-         # fill the entire screen with blanks
-         win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
-         # now set the buffer's attributes accordingly
-         win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
-
-     def set_title(self, title):
-         win32.SetConsoleTitle(title)
-
-
- def enable_vt_processing(fd):
-     if win32.windll is None or not win32.winapi_test():
-         return False
-
-     try:
-         handle = get_osfhandle(fd)
-         mode = win32.GetConsoleMode(handle)
-         win32.SetConsoleMode(
-             handle,
-             mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
-         )
-
-         mode = win32.GetConsoleMode(handle)
-         if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING:
-             return True
-     # Can get TypeError in testsuite where 'fd' is a Mock()
-     except (OSError, TypeError):
-         return False
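
The get_attrs/set_attrs pair above packs the foreground color into the low three bits, the background color into bits 4-6, and the brightness flags into the remaining style bits of the single attribute word that Windows console APIs use. A minimal, platform-independent sketch of that packing (the constants mirror WinColor/WinStyle above; no win32 calls involved):

    FORE_RED, BACK_BLUE = 4, 1              # WinColor.RED, WinColor.BLUE
    BRIGHT, BRIGHT_BACKGROUND = 0x08, 0x80  # WinStyle flags

    # pack exactly as WinTerm.get_attrs does: fore + back * 16 + style
    attrs = FORE_RED + BACK_BLUE * 16 + BRIGHT
    assert attrs & 7 == FORE_RED                            # low 3 bits: foreground
    assert (attrs >> 4) & 7 == BACK_BLUE                    # bits 4-6: background
    assert attrs & (BRIGHT | BRIGHT_BACKGROUND) == BRIGHT   # style flags survive the round trip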
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/utils.py DELETED
@@ -1,1086 +0,0 @@
- """
- requests.utils
- ~~~~~~~~~~~~~~
-
- This module provides utility functions that are used within Requests
- that are also useful for external consumption.
- """
-
- import codecs
- import contextlib
- import io
- import os
- import re
- import socket
- import struct
- import sys
- import tempfile
- import warnings
- import zipfile
- from collections import OrderedDict
-
- from pip._vendor.urllib3.util import make_headers, parse_url
-
- from . import certs
- from .__version__ import __version__
-
- # to_native_string is unused here, but imported here for backwards compatibility
- from ._internal_utils import HEADER_VALIDATORS, to_native_string  # noqa: F401
- from .compat import (
-     Mapping,
-     basestring,
-     bytes,
-     getproxies,
-     getproxies_environment,
-     integer_types,
- )
- from .compat import parse_http_list as _parse_list_header
- from .compat import (
-     proxy_bypass,
-     proxy_bypass_environment,
-     quote,
-     str,
-     unquote,
-     urlparse,
-     urlunparse,
- )
- from .cookies import cookiejar_from_dict
- from .exceptions import (
-     FileModeWarning,
-     InvalidHeader,
-     InvalidURL,
-     UnrewindableBodyError,
- )
- from .structures import CaseInsensitiveDict
-
- NETRC_FILES = (".netrc", "_netrc")
-
- DEFAULT_CA_BUNDLE_PATH = certs.where()
-
- DEFAULT_PORTS = {"http": 80, "https": 443}
-
- # Ensure that ', ' is used to preserve previous delimiter behavior.
- DEFAULT_ACCEPT_ENCODING = ", ".join(
-     re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
- )
-
-
- if sys.platform == "win32":
-     # provide a proxy_bypass version on Windows without DNS lookups
-
-     def proxy_bypass_registry(host):
-         try:
-             import winreg
-         except ImportError:
-             return False
-
-         try:
-             internetSettings = winreg.OpenKey(
-                 winreg.HKEY_CURRENT_USER,
-                 r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
-             )
-             # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
-             proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
-             # ProxyOverride is almost always a string
-             proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
-         except (OSError, ValueError):
-             return False
-         if not proxyEnable or not proxyOverride:
-             return False
-
-         # make a check value list from the registry entry: replace the
-         # '<local>' string by the localhost entry and the corresponding
-         # canonical entry.
-         proxyOverride = proxyOverride.split(";")
-         # now check if we match one of the registry values.
-         for test in proxyOverride:
-             if test == "<local>":
-                 if "." not in host:
-                     return True
-             test = test.replace(".", r"\.")  # mask dots
-             test = test.replace("*", r".*")  # change glob sequence
-             test = test.replace("?", r".")  # change glob char
-             if re.match(test, host, re.I):
-                 return True
-         return False
-
-     def proxy_bypass(host):  # noqa
-         """Return True, if the host should be bypassed.
-
-         Checks proxy settings gathered from the environment, if specified,
-         or the registry.
-         """
-         if getproxies_environment():
-             return proxy_bypass_environment(host)
-         else:
-             return proxy_bypass_registry(host)
-
-
- def dict_to_sequence(d):
-     """Returns an internal sequence dictionary update."""
-
-     if hasattr(d, "items"):
-         d = d.items()
-
-     return d
-
-
- def super_len(o):
-     total_length = None
-     current_position = 0
-
-     if hasattr(o, "__len__"):
-         total_length = len(o)
-
-     elif hasattr(o, "len"):
-         total_length = o.len
-
-     elif hasattr(o, "fileno"):
-         try:
-             fileno = o.fileno()
-         except (io.UnsupportedOperation, AttributeError):
-             # AttributeError is a surprising exception, seeing as how we've just checked
-             # that `hasattr(o, 'fileno')`. It happens for objects obtained via
-             # `Tarfile.extractfile()`, per issue 5229.
-             pass
-         else:
-             total_length = os.fstat(fileno).st_size
-
-             # Having used fstat to determine the file length, we need to
-             # confirm that this file was opened up in binary mode.
-             if "b" not in o.mode:
-                 warnings.warn(
-                     (
-                         "Requests has determined the content-length for this "
-                         "request using the binary size of the file: however, the "
-                         "file has been opened in text mode (i.e. without the 'b' "
-                         "flag in the mode). This may lead to an incorrect "
-                         "content-length. In Requests 3.0, support will be removed "
-                         "for files in text mode."
-                     ),
-                     FileModeWarning,
-                 )
-
-     if hasattr(o, "tell"):
-         try:
-             current_position = o.tell()
-         except OSError:
-             # This can happen in some weird situations, such as when the file
-             # is actually a special file descriptor like stdin. In this
-             # instance, we don't know what the length is, so set it to zero and
-             # let requests chunk it instead.
-             if total_length is not None:
-                 current_position = total_length
-         else:
-             if hasattr(o, "seek") and total_length is None:
-                 # StringIO and BytesIO have seek but no usable fileno
-                 try:
-                     # seek to end of file
-                     o.seek(0, 2)
-                     total_length = o.tell()
-
-                     # seek back to current position to support
-                     # partially read file-like objects
-                     o.seek(current_position or 0)
-                 except OSError:
-                     total_length = 0
-
-     if total_length is None:
-         total_length = 0
-
-     return max(0, total_length - current_position)
-
-
- def get_netrc_auth(url, raise_errors=False):
-     """Returns the Requests tuple auth for a given url from netrc."""
-
-     netrc_file = os.environ.get("NETRC")
-     if netrc_file is not None:
-         netrc_locations = (netrc_file,)
-     else:
-         netrc_locations = (f"~/{f}" for f in NETRC_FILES)
-
-     try:
-         from netrc import NetrcParseError, netrc
-
-         netrc_path = None
-
-         for f in netrc_locations:
-             try:
-                 loc = os.path.expanduser(f)
-             except KeyError:
-                 # os.path.expanduser can fail when $HOME is undefined and
-                 # getpwuid fails. See https://bugs.python.org/issue20164 &
-                 # https://github.com/psf/requests/issues/1846
-                 return
-
-             if os.path.exists(loc):
-                 netrc_path = loc
-                 break
-
-         # Abort early if there isn't one.
-         if netrc_path is None:
-             return
-
-         ri = urlparse(url)
-
-         # Strip port numbers from netloc. This weird `if...encode`` dance is
-         # used for Python 3.2, which doesn't support unicode literals.
-         splitstr = b":"
-         if isinstance(url, str):
-             splitstr = splitstr.decode("ascii")
-         host = ri.netloc.split(splitstr)[0]
-
-         try:
-             _netrc = netrc(netrc_path).authenticators(host)
-             if _netrc:
-                 # Return with login / password
-                 login_i = 0 if _netrc[0] else 1
-                 return (_netrc[login_i], _netrc[2])
-         except (NetrcParseError, OSError):
-             # If there was a parsing error or a permissions issue reading the file,
-             # we'll just skip netrc auth unless explicitly asked to raise errors.
-             if raise_errors:
-                 raise
-
-     # App Engine hackiness.
-     except (ImportError, AttributeError):
-         pass
-
-
- def guess_filename(obj):
-     """Tries to guess the filename of the given object."""
-     name = getattr(obj, "name", None)
-     if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
-         return os.path.basename(name)
-
-
- def extract_zipped_paths(path):
-     """Replace nonexistent paths that look like they refer to a member of a zip
-     archive with the location of an extracted copy of the target, or else
-     just return the provided path unchanged.
-     """
-     if os.path.exists(path):
-         # this is already a valid path, no need to do anything further
-         return path
-
-     # find the first valid part of the provided path and treat that as a zip archive
-     # assume the rest of the path is the name of a member in the archive
-     archive, member = os.path.split(path)
-     while archive and not os.path.exists(archive):
-         archive, prefix = os.path.split(archive)
-         if not prefix:
-             # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
-             # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
-             break
-         member = "/".join([prefix, member])
-
-     if not zipfile.is_zipfile(archive):
-         return path
-
-     zip_file = zipfile.ZipFile(archive)
-     if member not in zip_file.namelist():
-         return path
-
-     # we have a valid zip archive and a valid member of that archive
-     tmp = tempfile.gettempdir()
-     extracted_path = os.path.join(tmp, member.split("/")[-1])
-     if not os.path.exists(extracted_path):
-         # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
-         with atomic_open(extracted_path) as file_handler:
-             file_handler.write(zip_file.read(member))
-     return extracted_path
-
-
- @contextlib.contextmanager
- def atomic_open(filename):
-     """Write a file to the disk in an atomic fashion"""
-     tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
-     try:
-         with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
-             yield tmp_handler
-         os.replace(tmp_name, filename)
-     except BaseException:
-         os.remove(tmp_name)
-         raise
-
-
- def from_key_val_list(value):
-     """Take an object and test to see if it can be represented as a
-     dictionary. Unless it can not be represented as such, return an
-     OrderedDict, e.g.,
-
-     ::
-
-         >>> from_key_val_list([('key', 'val')])
-         OrderedDict([('key', 'val')])
-         >>> from_key_val_list('string')
-         Traceback (most recent call last):
-         ...
-         ValueError: cannot encode objects that are not 2-tuples
-         >>> from_key_val_list({'key': 'val'})
-         OrderedDict([('key', 'val')])
-
-     :rtype: OrderedDict
-     """
-     if value is None:
-         return None
-
-     if isinstance(value, (str, bytes, bool, int)):
-         raise ValueError("cannot encode objects that are not 2-tuples")
-
-     return OrderedDict(value)
-
-
- def to_key_val_list(value):
-     """Take an object and test to see if it can be represented as a
-     dictionary. If it can be, return a list of tuples, e.g.,
-
-     ::
-
-         >>> to_key_val_list([('key', 'val')])
-         [('key', 'val')]
-         >>> to_key_val_list({'key': 'val'})
-         [('key', 'val')]
-         >>> to_key_val_list('string')
-         Traceback (most recent call last):
-         ...
-         ValueError: cannot encode objects that are not 2-tuples
-
-     :rtype: list
-     """
-     if value is None:
-         return None
-
-     if isinstance(value, (str, bytes, bool, int)):
-         raise ValueError("cannot encode objects that are not 2-tuples")
-
-     if isinstance(value, Mapping):
-         value = value.items()
-
-     return list(value)
-
-
- # From mitsuhiko/werkzeug (used with permission).
- def parse_list_header(value):
-     """Parse lists as described by RFC 2068 Section 2.
-
-     In particular, parse comma-separated lists where the elements of
-     the list may include quoted-strings. A quoted-string could
-     contain a comma. A non-quoted string could have quotes in the
-     middle. Quotes are removed automatically after parsing.
-
-     It basically works like :func:`parse_set_header` just that items
-     may appear multiple times and case sensitivity is preserved.
-
-     The return value is a standard :class:`list`:
-
-     >>> parse_list_header('token, "quoted value"')
-     ['token', 'quoted value']
-
-     To create a header from the :class:`list` again, use the
-     :func:`dump_header` function.
-
-     :param value: a string with a list header.
-     :return: :class:`list`
-     :rtype: list
-     """
-     result = []
-     for item in _parse_list_header(value):
-         if item[:1] == item[-1:] == '"':
-             item = unquote_header_value(item[1:-1])
-         result.append(item)
-     return result
-
-
- # From mitsuhiko/werkzeug (used with permission).
- def parse_dict_header(value):
-     """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
-     convert them into a python dict:
-
-     >>> d = parse_dict_header('foo="is a fish", bar="as well"')
-     >>> type(d) is dict
-     True
-     >>> sorted(d.items())
-     [('bar', 'as well'), ('foo', 'is a fish')]
-
-     If there is no value for a key it will be `None`:
-
-     >>> parse_dict_header('key_without_value')
-     {'key_without_value': None}
-
-     To create a header from the :class:`dict` again, use the
-     :func:`dump_header` function.
-
-     :param value: a string with a dict header.
-     :return: :class:`dict`
-     :rtype: dict
-     """
-     result = {}
-     for item in _parse_list_header(value):
-         if "=" not in item:
-             result[item] = None
-             continue
-         name, value = item.split("=", 1)
-         if value[:1] == value[-1:] == '"':
-             value = unquote_header_value(value[1:-1])
-         result[name] = value
-     return result
-
-
- # From mitsuhiko/werkzeug (used with permission).
- def unquote_header_value(value, is_filename=False):
-     r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
-     This does not use the real unquoting but what browsers are actually
-     using for quoting.
-
-     :param value: the header value to unquote.
-     :rtype: str
-     """
-     if value and value[0] == value[-1] == '"':
-         # this is not the real unquoting, but fixing this so that the
-         # RFC is met will result in bugs with internet explorer and
-         # probably some other browsers as well. IE for example is
-         # uploading files with "C:\foo\bar.txt" as filename
-         value = value[1:-1]
-
-         # if this is a filename and the starting characters look like
-         # a UNC path, then just return the value without quotes. Using the
-         # replace sequence below on a UNC path has the effect of turning
-         # the leading double slash into a single slash and then
-         # _fix_ie_filename() doesn't work correctly. See #458.
-         if not is_filename or value[:2] != "\\\\":
-             return value.replace("\\\\", "\\").replace('\\"', '"')
-     return value
-
-
- def dict_from_cookiejar(cj):
-     """Returns a key/value dictionary from a CookieJar.
-
-     :param cj: CookieJar object to extract cookies from.
-     :rtype: dict
-     """
-
-     cookie_dict = {}
-
-     for cookie in cj:
-         cookie_dict[cookie.name] = cookie.value
-
-     return cookie_dict
-
-
- def add_dict_to_cookiejar(cj, cookie_dict):
-     """Returns a CookieJar from a key/value dictionary.
-
-     :param cj: CookieJar to insert cookies into.
-     :param cookie_dict: Dict of key/values to insert into CookieJar.
-     :rtype: CookieJar
-     """
-
-     return cookiejar_from_dict(cookie_dict, cj)
-
-
- def get_encodings_from_content(content):
-     """Returns encodings from given content string.
-
-     :param content: bytestring to extract encodings from.
-     """
-     warnings.warn(
-         (
-             "In requests 3.0, get_encodings_from_content will be removed. For "
-             "more information, please see the discussion on issue #2266. (This"
-             " warning should only appear once.)"
-         ),
-         DeprecationWarning,
-     )
-
-     charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
-     pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
-     xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
-
-     return (
-         charset_re.findall(content)
-         + pragma_re.findall(content)
-         + xml_re.findall(content)
-     )
-
-
- def _parse_content_type_header(header):
-     """Returns content type and parameters from given header
-
-     :param header: string
-     :return: tuple containing content type and dictionary of
-         parameters
-     """
-
-     tokens = header.split(";")
-     content_type, params = tokens[0].strip(), tokens[1:]
-     params_dict = {}
-     items_to_strip = "\"' "
-
-     for param in params:
-         param = param.strip()
-         if param:
-             key, value = param, True
-             index_of_equals = param.find("=")
-             if index_of_equals != -1:
-                 key = param[:index_of_equals].strip(items_to_strip)
-                 value = param[index_of_equals + 1 :].strip(items_to_strip)
-             params_dict[key.lower()] = value
-     return content_type, params_dict
-
-
- def get_encoding_from_headers(headers):
-     """Returns encodings from given HTTP Header Dict.
-
-     :param headers: dictionary to extract encoding from.
-     :rtype: str
-     """
-
-     content_type = headers.get("content-type")
-
-     if not content_type:
-         return None
-
-     content_type, params = _parse_content_type_header(content_type)
-
-     if "charset" in params:
-         return params["charset"].strip("'\"")
-
-     if "text" in content_type:
-         return "ISO-8859-1"
-
-     if "application/json" in content_type:
-         # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
-         return "utf-8"
-
-
- def stream_decode_response_unicode(iterator, r):
-     """Stream decodes an iterator."""
-
-     if r.encoding is None:
-         yield from iterator
-         return
-
-     decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
-     for chunk in iterator:
-         rv = decoder.decode(chunk)
-         if rv:
-             yield rv
-     rv = decoder.decode(b"", final=True)
-     if rv:
-         yield rv
-
-
- def iter_slices(string, slice_length):
-     """Iterate over slices of a string."""
-     pos = 0
-     if slice_length is None or slice_length <= 0:
-         slice_length = len(string)
-     while pos < len(string):
-         yield string[pos : pos + slice_length]
-         pos += slice_length
-
-
- def get_unicode_from_response(r):
-     """Returns the requested content back in unicode.
-
-     :param r: Response object to get unicode content from.
-
-     Tried:
-
-     1. charset from content-type
-     2. fall back and replace all unicode characters
-
-     :rtype: str
-     """
-     warnings.warn(
-         (
-             "In requests 3.0, get_unicode_from_response will be removed. For "
-             "more information, please see the discussion on issue #2266. (This"
-             " warning should only appear once.)"
-         ),
-         DeprecationWarning,
-     )
-
-     tried_encodings = []
-
-     # Try charset from content-type
-     encoding = get_encoding_from_headers(r.headers)
-
-     if encoding:
-         try:
-             return str(r.content, encoding)
-         except UnicodeError:
-             tried_encodings.append(encoding)
-
-     # Fall back:
-     try:
-         return str(r.content, encoding, errors="replace")
-     except TypeError:
-         return r.content
-
-
- # The unreserved URI characters (RFC 3986)
- UNRESERVED_SET = frozenset(
-     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
- )
-
-
- def unquote_unreserved(uri):
-     """Un-escape any percent-escape sequences in a URI that are unreserved
-     characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
-
-     :rtype: str
-     """
-     parts = uri.split("%")
-     for i in range(1, len(parts)):
-         h = parts[i][0:2]
-         if len(h) == 2 and h.isalnum():
-             try:
-                 c = chr(int(h, 16))
-             except ValueError:
-                 raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
-
-             if c in UNRESERVED_SET:
-                 parts[i] = c + parts[i][2:]
-             else:
-                 parts[i] = f"%{parts[i]}"
-         else:
-             parts[i] = f"%{parts[i]}"
-     return "".join(parts)
-
-
- def requote_uri(uri):
-     """Re-quote the given URI.
-
-     This function passes the given URI through an unquote/quote cycle to
-     ensure that it is fully and consistently quoted.
-
-     :rtype: str
-     """
-     safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
-     safe_without_percent = "!#$&'()*+,/:;=?@[]~"
-     try:
-         # Unquote only the unreserved characters
-         # Then quote only illegal characters (do not quote reserved,
-         # unreserved, or '%')
-         return quote(unquote_unreserved(uri), safe=safe_with_percent)
-     except InvalidURL:
-         # We couldn't unquote the given URI, so let's try quoting it, but
-         # there may be unquoted '%'s in the URI. We need to make sure they're
-         # properly quoted so they do not cause issues elsewhere.
-         return quote(uri, safe=safe_without_percent)
-
-
- def address_in_network(ip, net):
-     """This function allows you to check if an IP belongs to a network subnet
-
-     Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
-              returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
-
-     :rtype: bool
-     """
-     ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
-     netaddr, bits = net.split("/")
-     netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
-     network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
-     return (ipaddr & netmask) == (network & netmask)
-
-
- def dotted_netmask(mask):
-     """Converts mask from /xx format to xxx.xxx.xxx.xxx
-
-     Example: if mask is 24 function returns 255.255.255.0
-
-     :rtype: str
-     """
-     bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
-     return socket.inet_ntoa(struct.pack(">I", bits))
-
-
- def is_ipv4_address(string_ip):
-     """
-     :rtype: bool
-     """
-     try:
-         socket.inet_aton(string_ip)
-     except OSError:
-         return False
-     return True
-
-
- def is_valid_cidr(string_network):
-     """
-     Very simple check of the cidr format in no_proxy variable.
-
-     :rtype: bool
-     """
-     if string_network.count("/") == 1:
-         try:
-             mask = int(string_network.split("/")[1])
-         except ValueError:
-             return False
-
-         if mask < 1 or mask > 32:
-             return False
-
-         try:
-             socket.inet_aton(string_network.split("/")[0])
-         except OSError:
-             return False
-     else:
-         return False
-     return True
-
-
- @contextlib.contextmanager
- def set_environ(env_name, value):
-     """Set the environment variable 'env_name' to 'value'
-
-     Save previous value, yield, and then restore the previous value stored in
-     the environment variable 'env_name'.
-
-     If 'value' is None, do nothing"""
-     value_changed = value is not None
-     if value_changed:
-         old_value = os.environ.get(env_name)
-         os.environ[env_name] = value
-     try:
-         yield
-     finally:
-         if value_changed:
-             if old_value is None:
-                 del os.environ[env_name]
-             else:
-                 os.environ[env_name] = old_value
-
-
- def should_bypass_proxies(url, no_proxy):
-     """
-     Returns whether we should bypass proxies or not.
-
-     :rtype: bool
-     """
-     # Prioritize lowercase environment variables over uppercase
-     # to keep a consistent behaviour with other http projects (curl, wget).
-     def get_proxy(key):
-         return os.environ.get(key) or os.environ.get(key.upper())
-
-     # First check whether no_proxy is defined. If it is, check that the URL
-     # we're getting isn't in the no_proxy list.
-     no_proxy_arg = no_proxy
-     if no_proxy is None:
-         no_proxy = get_proxy("no_proxy")
-     parsed = urlparse(url)
-
-     if parsed.hostname is None:
-         # URLs don't always have hostnames, e.g. file:/// urls.
-         return True
-
-     if no_proxy:
-         # We need to check whether we match here. We need to see if we match
-         # the end of the hostname, both with and without the port.
-         no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
-
-         if is_ipv4_address(parsed.hostname):
-             for proxy_ip in no_proxy:
-                 if is_valid_cidr(proxy_ip):
-                     if address_in_network(parsed.hostname, proxy_ip):
-                         return True
-                 elif parsed.hostname == proxy_ip:
-                     # If no_proxy ip was defined in plain IP notation instead of cidr notation &
-                     # matches the IP of the index
-                     return True
-         else:
-             host_with_port = parsed.hostname
-             if parsed.port:
-                 host_with_port += f":{parsed.port}"
-
-             for host in no_proxy:
-                 if parsed.hostname.endswith(host) or host_with_port.endswith(host):
-                     # The URL does match something in no_proxy, so we don't want
-                     # to apply the proxies on this URL.
-                     return True
-
-     with set_environ("no_proxy", no_proxy_arg):
-         # parsed.hostname can be `None` in cases such as a file URI.
-         try:
-             bypass = proxy_bypass(parsed.hostname)
-         except (TypeError, socket.gaierror):
-             bypass = False
-
-     if bypass:
-         return True
-
-     return False
-
-
- def get_environ_proxies(url, no_proxy=None):
-     """
-     Return a dict of environment proxies.
-
-     :rtype: dict
-     """
-     if should_bypass_proxies(url, no_proxy=no_proxy):
-         return {}
-     else:
-         return getproxies()
-
-
- def select_proxy(url, proxies):
-     """Select a proxy for the url, if applicable.
-
-     :param url: The url being for the request
-     :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
-     """
-     proxies = proxies or {}
-     urlparts = urlparse(url)
-     if urlparts.hostname is None:
-         return proxies.get(urlparts.scheme, proxies.get("all"))
-
-     proxy_keys = [
-         urlparts.scheme + "://" + urlparts.hostname,
-         urlparts.scheme,
-         "all://" + urlparts.hostname,
-         "all",
-     ]
-     proxy = None
-     for proxy_key in proxy_keys:
-         if proxy_key in proxies:
-             proxy = proxies[proxy_key]
-             break
-
-     return proxy
-
-
- def resolve_proxies(request, proxies, trust_env=True):
-     """This method takes proxy information from a request and configuration
-     input to resolve a mapping of target proxies. This will consider settings
-     such as NO_PROXY to strip proxy configurations.
-
-     :param request: Request or PreparedRequest
-     :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
-     :param trust_env: Boolean declaring whether to trust environment configs
-
-     :rtype: dict
-     """
-     proxies = proxies if proxies is not None else {}
-     url = request.url
-     scheme = urlparse(url).scheme
-     no_proxy = proxies.get("no_proxy")
-     new_proxies = proxies.copy()
-
-     if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
-         environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
-
-         proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
-
-         if proxy:
-             new_proxies.setdefault(scheme, proxy)
-     return new_proxies
-
-
- def default_user_agent(name="python-requests"):
-     """
-     Return a string representing the default user agent.
-
-     :rtype: str
-     """
-     return f"{name}/{__version__}"
-
-
- def default_headers():
-     """
-     :rtype: requests.structures.CaseInsensitiveDict
-     """
-     return CaseInsensitiveDict(
-         {
-             "User-Agent": default_user_agent(),
-             "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
-             "Accept": "*/*",
-             "Connection": "keep-alive",
-         }
-     )
-
-
- def parse_header_links(value):
-     """Return a list of parsed link headers.
-
-     i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
-
-     :rtype: list
-     """
-
-     links = []
-
-     replace_chars = " '\""
-
-     value = value.strip(replace_chars)
-     if not value:
-         return links
-
-     for val in re.split(", *<", value):
-         try:
-             url, params = val.split(";", 1)
-         except ValueError:
-             url, params = val, ""
-
-         link = {"url": url.strip("<> '\"")}
-
-         for param in params.split(";"):
-             try:
-                 key, value = param.split("=")
-             except ValueError:
-                 break
-
-             link[key.strip(replace_chars)] = value.strip(replace_chars)
-
-         links.append(link)
-
-     return links
-
-
- # Null bytes; no need to recreate these on each call to guess_json_utf
- _null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
- _null2 = _null * 2
- _null3 = _null * 3
-
-
- def guess_json_utf(data):
-     """
-     :rtype: str
-     """
-     # JSON always starts with two ASCII characters, so detection is as
-     # easy as counting the nulls and from their location and count
-     # determine the encoding. Also detect a BOM, if present.
-     sample = data[:4]
-     if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
-         return "utf-32"  # BOM included
-     if sample[:3] == codecs.BOM_UTF8:
-         return "utf-8-sig"  # BOM included, MS style (discouraged)
-     if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
-         return "utf-16"  # BOM included
-     nullcount = sample.count(_null)
-     if nullcount == 0:
-         return "utf-8"
-     if nullcount == 2:
-         if sample[::2] == _null2:  # 1st and 3rd are null
-             return "utf-16-be"
-         if sample[1::2] == _null2:  # 2nd and 4th are null
-             return "utf-16-le"
-         # Did not detect 2 valid UTF-16 ascii-range characters
-     if nullcount == 3:
-         if sample[:3] == _null3:
-             return "utf-32-be"
-         if sample[1:] == _null3:
-             return "utf-32-le"
-         # Did not detect a valid UTF-32 ascii-range character
-     return None
-
-
- def prepend_scheme_if_needed(url, new_scheme):
-     """Given a URL that may or may not have a scheme, prepend the given scheme.
-     Does not replace a present scheme with the one provided as an argument.
-
-     :rtype: str
-     """
-     parsed = parse_url(url)
-     scheme, auth, host, port, path, query, fragment = parsed
-
-     # A defect in urlparse determines that there isn't a netloc present in some
-     # urls. We previously assumed parsing was overly cautious, and swapped the
-     # netloc and path. Due to a lack of tests on the original defect, this is
-     # maintained with parse_url for backwards compatibility.
-     netloc = parsed.netloc
-     if not netloc:
-         netloc, path = path, netloc
-
-     if auth:
-         # parse_url doesn't provide the netloc with auth
-         # so we'll add it ourselves.
-         netloc = "@".join([auth, netloc])
-     if scheme is None:
-         scheme = new_scheme
-     if path is None:
-         path = ""
-
-     return urlunparse((scheme, netloc, path, "", query, fragment))
-
-
- def get_auth_from_url(url):
-     """Given a url with authentication components, extract them into a tuple of
-     username,password.
-
-     :rtype: (str,str)
-     """
-     parsed = urlparse(url)
-
-     try:
-         auth = (unquote(parsed.username), unquote(parsed.password))
-     except (AttributeError, TypeError):
-         auth = ("", "")
-
-     return auth
-
-
- def check_header_validity(header):
-     """Verifies that header parts don't contain leading whitespace
-     reserved characters, or return characters.
-
-     :param header: tuple, in the format (name, value).
-     """
-     name, value = header
-
-     for part in header:
-         if type(part) not in HEADER_VALIDATORS:
-             raise InvalidHeader(
-                 f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
-                 f"of type str or bytes, not {type(part)}"
-             )
-
-     _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
-     _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
-
-
- def _validate_header_part(header_part, header_kind, validator):
-     if not validator.match(header_part):
-         raise InvalidHeader(
-             f"Invalid leading whitespace, reserved character(s), or return "
-             f"character(s) in header {header_kind}: {header_part!r}"
-         )
-
-
- def urldefragauth(url):
-     """
-     Given a url remove the fragment and the authentication part.
-
-     :rtype: str
-     """
-     scheme, netloc, path, params, query, fragment = urlparse(url)
-
-     # see func:`prepend_scheme_if_needed`
-     if not netloc:
-         netloc, path = path, netloc
-
-     netloc = netloc.rsplit("@", 1)[-1]
-
-     return urlunparse((scheme, netloc, path, params, query, ""))
-
-
- def rewind_body(prepared_request):
-     """Move file pointer back to its recorded starting position
-     so it can be read again on redirect.
-     """
-     body_seek = getattr(prepared_request.body, "seek", None)
-     if body_seek is not None and isinstance(
-         prepared_request._body_position, integer_types
-     ):
-         try:
-             body_seek(prepared_request._body_position)
-         except OSError:
-             raise UnrewindableBodyError(
-                 "An error occurred when rewinding request body for redirect."
-             )
-     else:
-         raise UnrewindableBodyError("Unable to rewind request body for redirect.")
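
For a quick check of the null-counting heuristic in guess_json_utf above, the same helper is importable from the public requests package (the byte strings are illustrative):

    from requests.utils import guess_json_utf

    assert guess_json_utf(b'{"a": 1}') == "utf-8"                      # no nulls in the first 4 bytes
    assert guess_json_utf('{"a"'.encode("utf-16-le")) == "utf-16-le"   # nulls in positions 2 and 4
    assert guess_json_utf('{"a"'.encode("utf-16-be")) == "utf-16-be"   # nulls in positions 1 and 3
    assert guess_json_utf("{".encode("utf-32-le")) == "utf-32-le"      # three trailing nulls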
 
 
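The netmask helpers behave exactly as their docstrings describe; a short sanity check (again via the public requests package):

    from requests.utils import address_in_network, dotted_netmask

    assert dotted_netmask(24) == "255.255.255.0"
    assert address_in_network("192.168.1.1", "192.168.1.0/24")
    assert not address_in_network("192.168.1.1", "192.168.100.0/24")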
 
 
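super_len above reports the bytes remaining from the current read position, which is what Requests uses for Content-Length; a small demonstration with an in-memory file:

    import io
    from requests.utils import super_len

    buf = io.BytesIO(b"hello world")
    assert super_len(buf) == 11   # seeks to the end, then restores the position
    buf.read(6)
    assert super_len(buf) == 5    # only the unread tail counts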
 
spaces/Audio-AGI/AudioSep/data/waveform_mixers.py DELETED
@@ -1,127 +0,0 @@
- import random
- import numpy as np
- import torch
- import torch.nn as nn
- import pyloudnorm as pyln
-
-
- class SegmentMixer(nn.Module):
-     def __init__(self, max_mix_num, lower_db, higher_db):
-         super(SegmentMixer, self).__init__()
-
-         self.max_mix_num = max_mix_num
-         self.loudness_param = {
-             'lower_db': lower_db,
-             'higher_db': higher_db,
-         }
-
-     def __call__(self, waveforms):
-
-         batch_size = waveforms.shape[0]
-
-         data_dict = {
-             'segment': [],
-             'mixture': [],
-         }
-
-         for n in range(0, batch_size):
-
-             segment = waveforms[n].clone()
-
-             # create zero tensors as the background template
-             noise = torch.zeros_like(segment)
-
-             mix_num = random.randint(2, self.max_mix_num)
-             assert mix_num >= 2
-
-             for i in range(1, mix_num):
-                 next_segment = waveforms[(n + i) % batch_size]
-                 rescaled_next_segment = dynamic_loudnorm(audio=next_segment, reference=segment, **self.loudness_param)
-                 noise += rescaled_next_segment
-
-             # randomly normalize background noise
-             noise = dynamic_loudnorm(audio=noise, reference=segment, **self.loudness_param)
-
-             # create audio mixture
-             mixture = segment + noise
-
-             # declipping if needed
-             max_value = torch.max(torch.abs(mixture))
-             if max_value > 1:
-                 segment *= 0.9 / max_value
-                 mixture *= 0.9 / max_value
-
-             data_dict['segment'].append(segment)
-             data_dict['mixture'].append(mixture)
-
-         for key in data_dict.keys():
-             data_dict[key] = torch.stack(data_dict[key], dim=0)
-
-         # return data_dict
-         return data_dict['mixture'], data_dict['segment']
-
-
- def rescale_to_match_energy(segment1, segment2):
-
-     ratio = get_energy_ratio(segment1, segment2)
-     rescaled_segment1 = segment1 / ratio
-     return rescaled_segment1
-
-
- def get_energy(x):
-     return torch.mean(x ** 2)
-
-
- def get_energy_ratio(segment1, segment2):
-
-     energy1 = get_energy(segment1)
-     energy2 = max(get_energy(segment2), 1e-10)
-     ratio = (energy1 / energy2) ** 0.5
-     ratio = torch.clamp(ratio, 0.02, 50)
-     return ratio
-
-
- def dynamic_loudnorm(audio, reference, lower_db=-10, higher_db=10):
-     rescaled_audio = rescale_to_match_energy(audio, reference)
-
-     delta_loudness = random.randint(lower_db, higher_db)
-
-     gain = np.power(10.0, delta_loudness / 20.0)
-
-     return gain * rescaled_audio
-
-
- def torch_to_numpy(tensor):
-     """Convert a PyTorch tensor to a NumPy array."""
-     if isinstance(tensor, torch.Tensor):
-         return tensor.detach().cpu().numpy()
-     else:
-         raise ValueError("Input must be a PyTorch tensor.")
-
-
- def numpy_to_torch(array):
-     """Convert a NumPy array to a PyTorch tensor."""
-     if isinstance(array, np.ndarray):
-         return torch.from_numpy(array)
-     else:
-         raise ValueError("Input must be a NumPy array.")
-
-
- # deprecated
- def random_loudness_norm(audio, lower_db=-35, higher_db=-15, sr=32000):
-     device = audio.device
-     audio = torch_to_numpy(audio.squeeze(0))
-     # randomly select a norm volume
-     norm_vol = random.randint(lower_db, higher_db)
-
-     # measure the loudness first
-     meter = pyln.Meter(sr)  # create BS.1770 meter
-     loudness = meter.integrated_loudness(audio)
-     # loudness normalize audio
-     normalized_audio = pyln.normalize.loudness(audio, loudness, norm_vol)
-
-     normalized_audio = numpy_to_torch(normalized_audio).unsqueeze(0)
-
-     return normalized_audio.to(device)
-
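
A minimal sketch of driving the SegmentMixer defined above (assumes torch is installed; the batch shape and dB bounds are illustrative, not prescribed by this file):

    import torch

    mixer = SegmentMixer(max_mix_num=3, lower_db=-10, higher_db=10)
    waveforms = torch.randn(4, 1, 32000)   # (batch, channels, samples)
    # each item is mixed with 1-2 loudness-matched neighbours from the batch
    mixture, segment = mixer(waveforms)
    print(mixture.shape, segment.shape)    # both torch.Size([4, 1, 32000])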
 
spaces/BAAI/dreambooth-altdiffusion/train_dreambooth.py DELETED
@@ -1,907 +0,0 @@
1
- import argparse
2
- import itertools
3
- import math
4
- import os
5
- from pathlib import Path
6
- from typing import Optional
7
- import subprocess
8
- import sys
9
- import gc
10
- import random
11
-
12
- import torch
13
- import torch.nn.functional as F
14
- import torch.utils.checkpoint
15
- from torch.utils.data import Dataset
16
-
17
- from accelerate import Accelerator
18
- from accelerate.logging import get_logger
19
- from accelerate.utils import set_seed
20
- from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
21
- from diffusers.optimization import get_scheduler
22
- from huggingface_hub import HfFolder, Repository, whoami
23
- from PIL import Image
24
- from torchvision import transforms
25
- from tqdm.auto import tqdm
26
- from transformers import AutoTokenizer, PretrainedConfig
27
-
28
-
29
- logger = get_logger(__name__)
30
-
31
- def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str):
32
- text_encoder_config = PretrainedConfig.from_pretrained(
33
- pretrained_model_name_or_path,
34
- subfolder="text_encoder",
35
- )
36
- model_class = text_encoder_config.architectures[0]
37
-
38
- if model_class == "CLIPTextModel":
39
- from transformers import CLIPTextModel
40
-
41
- return CLIPTextModel
42
- elif model_class == "RobertaSeriesModelWithTransformation":
43
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
44
-
45
- return RobertaSeriesModelWithTransformation
46
- else:
47
- raise ValueError(f"{model_class} is not supported.")
48
-
49
- def parse_args():
50
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
51
- parser.add_argument(
52
- "--pretrained_model_name_or_path",
53
- type=str,
54
- default=None,
55
- #required=True,
56
- help="Path to pretrained model or model identifier from huggingface.co/models.",
57
- )
58
- parser.add_argument(
59
- "--tokenizer_name",
60
- type=str,
61
- default=None,
62
- help="Pretrained tokenizer name or path if not the same as model_name",
63
- )
64
- parser.add_argument(
65
- "--instance_data_dir",
66
- type=str,
67
- default=None,
68
- #required=True,
69
- help="A folder containing the training data of instance images.",
70
- )
71
- parser.add_argument(
72
- "--class_data_dir",
73
- type=str,
74
- default=None,
75
- #required=False,
76
- help="A folder containing the training data of class images.",
77
- )
78
- parser.add_argument(
79
- "--instance_prompt",
80
- type=str,
81
- default=None,
82
- help="The prompt with identifier specifying the instance",
83
- )
84
- parser.add_argument(
85
- "--class_prompt",
86
- type=str,
87
- default="",
88
- help="The prompt to specify images in the same class as provided instance images.",
89
- )
90
- parser.add_argument(
91
- "--with_prior_preservation",
92
- default=False,
93
- action="store_true",
94
- help="Flag to add prior preservation loss.",
95
- )
96
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
97
- parser.add_argument(
98
- "--num_class_images",
99
- type=int,
100
- default=100,
101
- help=(
102
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
103
- " sampled with class_prompt."
104
- ),
105
- )
106
- parser.add_argument(
107
- "--output_dir",
108
- type=str,
109
- default="",
110
- help="The output directory where the model predictions and checkpoints will be written.",
111
- )
112
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
113
- parser.add_argument(
114
- "--resolution",
115
- type=int,
116
- default=512,
117
- help=(
118
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
119
- " resolution"
120
- ),
121
- )
122
- parser.add_argument(
123
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
124
- )
125
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
126
- parser.add_argument(
127
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
128
- )
129
- parser.add_argument(
130
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
131
- )
132
- parser.add_argument("--num_train_epochs", type=int, default=1)
133
- parser.add_argument(
134
- "--max_train_steps",
135
- type=int,
136
- default=None,
137
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
138
- )
139
- parser.add_argument(
140
- "--gradient_accumulation_steps",
141
- type=int,
142
- default=1,
143
- help="Number of updates steps to accumulate before performing a backward/update pass.",
144
- )
145
- parser.add_argument(
146
- "--gradient_checkpointing",
147
- action="store_true",
148
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
149
- )
150
- parser.add_argument(
151
- "--learning_rate",
152
- type=float,
153
- default=5e-6,
154
- help="Initial learning rate (after the potential warmup period) to use.",
155
- )
156
- parser.add_argument(
157
- "--scale_lr",
158
- action="store_true",
159
- default=False,
160
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
161
- )
162
- parser.add_argument(
163
- "--lr_scheduler",
164
- type=str,
165
- default="constant",
166
- help=(
167
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
168
- ' "constant", "constant_with_warmup"]'
169
- ),
170
- )
171
- parser.add_argument(
172
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
173
- )
174
- parser.add_argument(
175
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
176
- )
177
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
178
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
179
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
180
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
181
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
182
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
183
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
184
- parser.add_argument(
185
- "--hub_model_id",
186
- type=str,
187
- default=None,
188
- help="The name of the repository to keep in sync with the local `output_dir`.",
189
- )
190
- parser.add_argument(
191
- "--logging_dir",
192
- type=str,
193
- default="logs",
194
- help=(
195
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
196
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
197
- ),
198
- )
199
- parser.add_argument(
200
- "--mixed_precision",
201
- type=str,
202
- default="fp16",
203
- choices=["no", "fp16", "bf16"],
204
- help=(
205
- "Whether to use mixed precision. Choose"
206
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
207
- "and an Nvidia Ampere GPU."
208
- ),
209
- )
210
-
211
- parser.add_argument(
212
- "--save_n_steps",
213
- type=int,
214
- default=1,
215
- help=("Save the model every n global_steps"),
216
- )
217
-
218
-
219
- parser.add_argument(
220
- "--save_starting_step",
221
- type=int,
222
- default=1,
223
- help=("The step from which it starts saving intermediary checkpoints"),
224
- )
225
-
226
- parser.add_argument(
227
- "--stop_text_encoder_training",
228
- type=int,
229
- default=1000000,
230
- help=("The step at which the text_encoder is no longer trained"),
231
- )
232
-
233
-
234
- parser.add_argument(
235
- "--image_captions_filename",
236
- action="store_true",
237
- help="Get captions from filename",
238
- )
239
-
240
-
241
- parser.add_argument(
242
- "--dump_only_text_encoder",
243
- action="store_true",
244
- default=False,
245
- help="Dump only text encoder",
246
- )
247
-
248
- parser.add_argument(
249
- "--train_only_unet",
250
- action="store_true",
251
- default=False,
252
- help="Train only the unet",
253
- )
254
-
255
- parser.add_argument(
256
- "--cache_latents",
257
- action="store_true",
258
- default=False,
259
- help="Train only the unet",
260
- )
261
-
262
- parser.add_argument(
263
- "--Session_dir",
264
- type=str,
265
- default="",
266
- help="Current session directory",
267
- )
268
-
269
-
270
-
271
-
272
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
273
-
274
- args = parser.parse_args()
275
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
276
- if env_local_rank != -1 and env_local_rank != args.local_rank:
277
- args.local_rank = env_local_rank
278
-
279
- #if args.instance_data_dir is None:
280
- # raise ValueError("You must specify a train data directory.")
281
-
282
- #if args.with_prior_preservation:
283
- # if args.class_data_dir is None:
284
- # raise ValueError("You must specify a data directory for class images.")
285
- # if args.class_prompt is None:
286
- # raise ValueError("You must specify prompt for class images.")
287
-
288
- return args
289
-
290
-
291
- class DreamBoothDataset(Dataset):
-     """
-     A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
-     It pre-processes the images and tokenizes the prompts.
-     """
-
-     def __init__(
-         self,
-         instance_data_root,
-         instance_prompt,
-         tokenizer,
-         args,
-         class_data_root=None,
-         class_prompt=None,
-         size=512,
-         center_crop=False,
-     ):
-         self.size = size
-         self.center_crop = center_crop
-         self.tokenizer = tokenizer
-         self.image_captions_filename = None
-
-         self.instance_data_root = Path(instance_data_root)
-         if not self.instance_data_root.exists():
-             raise ValueError("Instance images root doesn't exist.")
-
-         self.instance_images_path = list(Path(instance_data_root).iterdir())
-         self.num_instance_images = len(self.instance_images_path)
-         self.instance_prompt = instance_prompt
-         self._length = self.num_instance_images
-
-         if args.image_captions_filename:
-             self.image_captions_filename = True
-
-         if class_data_root is not None:
-             self.class_data_root = Path(class_data_root)
-             self.class_data_root.mkdir(parents=True, exist_ok=True)
-             self.class_images_path = list(self.class_data_root.iterdir())
-             random.shuffle(self.class_images_path)
-             self.num_class_images = len(self.class_images_path)
-             self._length = max(self.num_class_images, self.num_instance_images)
-             self.class_prompt = class_prompt
-         else:
-             self.class_data_root = None
-
-         self.image_transforms = transforms.Compose(
-             [
-                 transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
-                 transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
-                 transforms.ToTensor(),
-                 transforms.Normalize([0.5], [0.5]),
-             ]
-         )
-
-     def __len__(self):
-         return self._length
-
-     def __getitem__(self, index):
-         example = {}
-         path = self.instance_images_path[index % self.num_instance_images]
-         instance_image = Image.open(path)
-         if not instance_image.mode == "RGB":
-             instance_image = instance_image.convert("RGB")
-
-         instance_prompt = self.instance_prompt
-
-         if self.image_captions_filename:
-             # Derive the caption from the filename: drop digits, parentheses, and dashes.
-             filename = Path(path).stem
-             pt = "".join(i for i in filename if not i.isdigit())
-             pt = pt.replace("_", " ")
-             pt = pt.replace("(", "")
-             pt = pt.replace(")", "")
-             pt = pt.replace("-", "")
-             instance_prompt = pt
-             sys.stdout.write(" " + instance_prompt + " ")
-             sys.stdout.flush()
-
-         example["instance_images"] = self.image_transforms(instance_image)
-         example["instance_prompt_ids"] = self.tokenizer(
-             instance_prompt,
-             padding="do_not_pad",
-             truncation=True,
-             max_length=self.tokenizer.model_max_length,
-         ).input_ids
-
-         if self.class_data_root:
-             class_image = Image.open(self.class_images_path[index % self.num_class_images])
-             if not class_image.mode == "RGB":
-                 class_image = class_image.convert("RGB")
-             example["class_images"] = self.image_transforms(class_image)
-             example["class_prompt_ids"] = self.tokenizer(
-                 self.class_prompt,
-                 padding="do_not_pad",
-                 truncation=True,
-                 max_length=self.tokenizer.model_max_length,
-             ).input_ids
-
-         return example
-
- class PromptDataset(Dataset):
-     "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
-     def __init__(self, prompt, num_samples):
-         self.prompt = prompt
-         self.num_samples = num_samples
-
-     def __len__(self):
-         return self.num_samples
-
-     def __getitem__(self, index):
-         example = {}
-         example["prompt"] = self.prompt
-         example["index"] = index
-         return example
-
- class LatentsDataset(Dataset):
-     def __init__(self, latents_cache, text_encoder_cache):
-         self.latents_cache = latents_cache
-         self.text_encoder_cache = text_encoder_cache
-
-     def __len__(self):
-         return len(self.latents_cache)
-
-     def __getitem__(self, index):
-         return self.latents_cache[index], self.text_encoder_cache[index]
-
- def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
-     if token is None:
-         token = HfFolder.get_token()
-     if organization is None:
-         username = whoami(token)["name"]
-         return f"{username}/{model_id}"
-     else:
-         return f"{organization}/{model_id}"
-
- def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
-     """
-     Starts from the base starting dict, then adds the key/value pairs from the updater dict,
-     replacing values from the starting/base dict with those of the updater dict on collision.
-     (Note: d = {**d1, **d2} resolves collisions the same way -- keys in d2 win.)
-
-     :param starting_dict:
-     :param updater_dict:
-     :return:
-     """
-     new_dict: dict = starting_dict.copy()  # start with keys and values of starting_dict
-     new_dict.update(updater_dict)  # override with keys and values of updater_dict
-     return new_dict
-
- def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
-     """
-     Merge two argparse namespaces; on collision, values from args2 win.
-
-     ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
-     :param args1:
-     :param args2:
-     :return:
-     """
-     # vars() returns the object's __dict__, i.e. a {field: value} mapping.
-     merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
-     args = argparse.Namespace(**merged_key_values_for_namespace)
-     return args
-
- def run_training(args_imported):
-     args_default = parse_args()
-     args = merge_args(args_default, args_imported)
-     print(args)
-     logging_dir = Path(args.output_dir, args.logging_dir)
-     i = args.save_starting_step
-     accelerator = Accelerator(
-         gradient_accumulation_steps=args.gradient_accumulation_steps,
-         mixed_precision=args.mixed_precision,
-         log_with="tensorboard",
-         logging_dir=logging_dir,
-     )
-
-     # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
-     # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
-     # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
-     if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
-         raise ValueError(
-             "Gradient accumulation is not supported when training the text encoder in distributed training. "
-             "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
-         )
-
-     if args.seed is not None:
-         set_seed(args.seed)
-
-     if args.with_prior_preservation:
-         class_images_dir = Path(args.class_data_dir)
-         if not class_images_dir.exists():
-             class_images_dir.mkdir(parents=True)
-         cur_class_images = len(list(class_images_dir.iterdir()))
-
-         if cur_class_images < args.num_class_images:
-             torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path, torch_dtype=torch_dtype
-             )
-             pipeline.set_progress_bar_config(disable=True)
-
-             num_new_images = args.num_class_images - cur_class_images
-             logger.info(f"Number of class images to sample: {num_new_images}.")
-
-             sample_dataset = PromptDataset(args.class_prompt, num_new_images)
-             sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
-             sample_dataloader = accelerator.prepare(sample_dataloader)
-             pipeline.to(accelerator.device)
-
-             for example in tqdm(
-                 sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
-             ):
-                 with torch.autocast("cuda"):
-                     images = pipeline(example["prompt"]).images
-
-                 # Use a separate loop variable so the checkpoint counter `i` above isn't clobbered.
-                 for idx, image in enumerate(images):
-                     image.save(class_images_dir / f"{example['index'][idx] + cur_class_images}.jpg")
-
-             del pipeline
-             if torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-
-     # Handle the repository creation
-     if accelerator.is_main_process:
-         if args.push_to_hub:
-             if args.hub_model_id is None:
-                 repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
-             else:
-                 repo_name = args.hub_model_id
-             repo = Repository(args.output_dir, clone_from=repo_name)
-
-             with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
-                 if "step_*" not in gitignore:
-                     gitignore.write("step_*\n")
-                 if "epoch_*" not in gitignore:
-                     gitignore.write("epoch_*\n")
-         elif args.output_dir is not None:
-             os.makedirs(args.output_dir, exist_ok=True)
-
-     # Load the tokenizer
-     if args.tokenizer_name:
-         tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=False)
-     elif args.pretrained_model_name_or_path:
-         tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer", use_fast=False)
-
-     # Support for AltDiffusion
-     text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path)
-
-     # Load models and create wrapper for stable diffusion
-     if args.train_only_unet:
-         if os.path.exists(str(args.output_dir + "/text_encoder_trained")):
-             # text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
-             text_encoder = text_encoder_cls.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
-         elif os.path.exists(str(args.output_dir + "/text_encoder")):
-             # text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
-             text_encoder = text_encoder_cls.from_pretrained(args.output_dir, subfolder="text_encoder")
-         else:
-             # text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-             text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-     else:
-         # text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-         text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-     vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
-     unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-
-     vae.requires_grad_(False)
-     if not args.train_text_encoder:
-         text_encoder.requires_grad_(False)
-
-     if args.gradient_checkpointing:
-         unet.enable_gradient_checkpointing()
-         if args.train_text_encoder:
-             text_encoder.gradient_checkpointing_enable()
-
-     if args.scale_lr:
-         args.learning_rate = (
-             args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
-         )
-
-     # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
-     if args.use_8bit_adam:
-         try:
-             import bitsandbytes as bnb
-         except ImportError:
-             raise ImportError(
-                 "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
-             )
-
-         optimizer_class = bnb.optim.AdamW8bit
-     else:
-         optimizer_class = torch.optim.AdamW
-
-     params_to_optimize = (
-         itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
-     )
-     optimizer = optimizer_class(
-         params_to_optimize,
-         lr=args.learning_rate,
-         betas=(args.adam_beta1, args.adam_beta2),
-         weight_decay=args.adam_weight_decay,
-         eps=args.adam_epsilon,
-     )
-
-     noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
-
-     train_dataset = DreamBoothDataset(
-         instance_data_root=args.instance_data_dir,
-         instance_prompt=args.instance_prompt,
-         class_data_root=args.class_data_dir if args.with_prior_preservation else None,
-         class_prompt=args.class_prompt,
-         tokenizer=tokenizer,
-         size=args.resolution,
-         center_crop=args.center_crop,
-         args=args,
-     )
-
-     def collate_fn(examples):
-         input_ids = [example["instance_prompt_ids"] for example in examples]
-         pixel_values = [example["instance_images"] for example in examples]
-
-         # Concat class and instance examples for prior preservation.
-         # We do this to avoid doing two forward passes.
-         if args.with_prior_preservation:
-             input_ids += [example["class_prompt_ids"] for example in examples]
-             pixel_values += [example["class_images"] for example in examples]
-
-         pixel_values = torch.stack(pixel_values)
-         pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
-         input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
-         batch = {
-             "input_ids": input_ids,
-             "pixel_values": pixel_values,
-         }
-         return batch
-
-     train_dataloader = torch.utils.data.DataLoader(
-         train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
-     )
-
-     # Scheduler and math around the number of training steps.
-     overrode_max_train_steps = False
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if args.max_train_steps is None:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-         overrode_max_train_steps = True
-
-     lr_scheduler = get_scheduler(
-         args.lr_scheduler,
-         optimizer=optimizer,
-         num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
-         num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-     )
-
-     if args.train_text_encoder:
-         unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-             unet, text_encoder, optimizer, train_dataloader, lr_scheduler
-         )
-     else:
-         unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-             unet, optimizer, train_dataloader, lr_scheduler
-         )
-
-     weight_dtype = torch.float32
-     if args.mixed_precision == "fp16":
-         weight_dtype = torch.float16
-     elif args.mixed_precision == "bf16":
-         weight_dtype = torch.bfloat16
-
-     # Move text_encoder and vae to GPU.
-     # For mixed precision training we cast the text_encoder and vae weights to half-precision
-     # as these models are only used for inference; keeping weights in full precision is not required.
-     vae.to(accelerator.device, dtype=weight_dtype)
-     if not args.train_text_encoder:
-         text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-     if args.cache_latents:
-         latents_cache = []
-         text_encoder_cache = []
-         for batch in tqdm(train_dataloader, desc="Caching latents"):
-             with torch.no_grad():
-                 batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
-                 batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
-                 latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
-                 if args.train_text_encoder:
-                     text_encoder_cache.append(batch["input_ids"])
-                 else:
-                     text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
-         train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
-         train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
-
-         del vae
-         # if not args.train_text_encoder:
-         #     del text_encoder
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-
-     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if overrode_max_train_steps:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-     # Afterwards we recalculate our number of training epochs
-     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-     # We need to initialize the trackers we use, and also store our configuration.
-     # The trackers initialize automatically on the main process.
-     if accelerator.is_main_process:
-         accelerator.init_trackers("dreambooth", config=vars(args))
-
-     def bar(prg):
-         br = "|" + "█" * prg + " " * (25 - prg) + "|"
-         return br
-
-     # Train!
-     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
-     logger.info("***** Running training *****")
-     logger.info(f"  Num examples = {len(train_dataset)}")
-     logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
-     logger.info(f"  Num Epochs = {args.num_train_epochs}")
-     logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-     logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
-     logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
-     logger.info(f"  Total optimization steps = {args.max_train_steps}")
-     # Only show the progress bar once on each machine.
-     progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
-     global_step = 0
-
-     for epoch in range(args.num_train_epochs):
-         unet.train()
-         if args.train_text_encoder:
-             text_encoder.train()
-         for step, batch in enumerate(train_dataloader):
-             with accelerator.accumulate(unet):
-                 # Convert images to latent space
-                 with torch.no_grad():
-                     if args.cache_latents:
-                         latents_dist = batch[0][0]
-                     else:
-                         latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
-                     latents = latents_dist.sample() * 0.18215
-
-                 # Sample noise that we'll add to the latents
-                 noise = torch.randn_like(latents)
-                 bsz = latents.shape[0]
-                 # Sample a random timestep for each image
-                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-                 timesteps = timesteps.long()
-
-                 # Add noise to the latents according to the noise magnitude at each timestep
-                 # (this is the forward diffusion process)
-                 noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
-                 # Get the text embedding for conditioning
-                 if args.cache_latents:
-                     if args.train_text_encoder:
-                         encoder_hidden_states = text_encoder(batch[0][1])[0]
-                     else:
-                         encoder_hidden_states = batch[0][1]
-                 else:
-                     encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
-                 # Predict the noise residual
-                 model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
-                 # Get the target for loss depending on the prediction type
-                 if noise_scheduler.config.prediction_type == "epsilon":
-                     target = noise
-                 elif noise_scheduler.config.prediction_type == "v_prediction":
-                     target = noise_scheduler.get_velocity(latents, noise, timesteps)
-                 else:
-                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-                 if args.with_prior_preservation:
-                     # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
-                     model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
-                     target, target_prior = torch.chunk(target, 2, dim=0)
-
-                     # Compute instance loss
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
-                     # Compute prior loss
-                     prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
-                     # Add the prior loss to the instance loss.
-                     loss = loss + args.prior_loss_weight * prior_loss
-                 else:
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
-                 accelerator.backward(loss)
-                 if accelerator.sync_gradients:
-                     params_to_clip = (
-                         itertools.chain(unet.parameters(), text_encoder.parameters())
-                         if args.train_text_encoder
-                         else unet.parameters()
-                     )
-                     accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
-                 optimizer.step()
-                 lr_scheduler.step()
-                 optimizer.zero_grad()
-
-             # Checks if the accelerator has performed an optimization step behind the scenes
-             if accelerator.sync_gradients:
-                 progress_bar.update(1)
-                 global_step += 1
-
-                 fll = round((global_step * 100) / args.max_train_steps)
-                 fll = round(fll / 4)
-                 pr = bar(fll)
-
-                 logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
-                 progress_bar.set_postfix(**logs)
-                 progress_bar.set_description_str("Progress:" + pr)
-                 accelerator.log(logs, step=global_step)
-
-             if global_step >= args.max_train_steps:
-                 break
-
-             if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
-                 if accelerator.is_main_process:
-                     print(" Freezing the text_encoder ...")
-                     frz_dir = args.output_dir + "/text_encoder_frozen"
-                     if os.path.exists(frz_dir):
-                         subprocess.call("rm -r " + frz_dir, shell=True)
-                     os.mkdir(frz_dir)
-                     pipeline = DiffusionPipeline.from_pretrained(
-                         args.pretrained_model_name_or_path,
-                         unet=accelerator.unwrap_model(unet),
-                         text_encoder=accelerator.unwrap_model(text_encoder),
-                     )
-                     pipeline.text_encoder.save_pretrained(frz_dir)
-
-             if args.save_n_steps >= 200:
-                 if global_step < args.max_train_steps and global_step + 1 == i:
-                     ckpt_name = "_step_" + str(global_step + 1)
-                     save_dir = Path(args.output_dir + ckpt_name)
-                     save_dir = str(save_dir)
-                     save_dir = save_dir.replace(" ", "_")
-                     if not os.path.exists(save_dir):
-                         os.mkdir(save_dir)
-                     inst = save_dir[16:]  # strip the leading path prefix (hard-coded offset)
-                     inst = inst.replace(" ", "_")
-                     print(" SAVING CHECKPOINT: " + args.Session_dir + "/" + inst + ".ckpt")
-                     # Create the pipeline using the trained modules and save it.
-                     if accelerator.is_main_process:
-                         pipeline = DiffusionPipeline.from_pretrained(
-                             args.pretrained_model_name_or_path,
-                             unet=accelerator.unwrap_model(unet),
-                             text_encoder=accelerator.unwrap_model(text_encoder),
-                         )
-                         pipeline.save_pretrained(save_dir)
-                         frz_dir = args.output_dir + "/text_encoder_frozen"
-                         if args.train_text_encoder and os.path.exists(frz_dir):
-                             subprocess.call("rm -r " + save_dir + "/text_encoder/*.*", shell=True)
-                             subprocess.call("cp -f " + frz_dir + "/*.* " + save_dir + "/text_encoder", shell=True)
-                         chkpth = args.Session_dir + "/" + inst + ".ckpt"
-                         subprocess.call(
-                             "python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path "
-                             + save_dir + " --checkpoint_path " + chkpth + " --half",
-                             shell=True,
-                         )
-                         subprocess.call("rm -r " + save_dir, shell=True)
-                         i = i + args.save_n_steps
-
-     accelerator.wait_for_everyone()
-
-     # Create the pipeline using the trained modules and save it.
-     if accelerator.is_main_process:
-         if args.dump_only_text_encoder:
-             txt_dir = args.output_dir + "/text_encoder_trained"
-             if not os.path.exists(txt_dir):
-                 os.mkdir(txt_dir)
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path,
-                 unet=accelerator.unwrap_model(unet),
-                 text_encoder=accelerator.unwrap_model(text_encoder),
-             )
-             pipeline.text_encoder.save_pretrained(txt_dir)
-
-         elif args.train_only_unet:
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path,
-                 unet=accelerator.unwrap_model(unet),
-                 text_encoder=accelerator.unwrap_model(text_encoder),
-             )
-             pipeline.save_pretrained(args.output_dir)
-             txt_dir = args.output_dir + "/text_encoder_trained"
-             subprocess.call("rm -r " + txt_dir, shell=True)
-
-         else:
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path,
-                 unet=accelerator.unwrap_model(unet),
-                 text_encoder=accelerator.unwrap_model(text_encoder),
-             )
-             frz_dir = args.output_dir + "/text_encoder_frozen"
-             pipeline.save_pretrained(args.output_dir)
-             if args.train_text_encoder and os.path.exists(frz_dir):
-                 subprocess.call("mv -f " + frz_dir + "/*.* " + args.output_dir + "/text_encoder", shell=True)
-                 subprocess.call("rm -r " + frz_dir, shell=True)
-
-     if args.push_to_hub:
-         repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
-     accelerator.end_training()
-     del pipeline
-     torch.cuda.empty_cache()
-     gc.collect()
-
- if __name__ == "__main__":
-     pass
-     # main()
-
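
As a side note on the merge helpers in the deleted script above: the docstring asks how d = {**d1, **d2} resolves collisions. A minimal, runnable sketch (standalone, not part of the commit; the `defaults`/`overrides` values are invented for illustration) shows that the right-hand dict wins, which is exactly what merge_args relies on:

import argparse

def merge_namespaces(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
    # {**d1, **d2} keeps the union of keys; on collision the value from d2 (the updater) wins,
    # matching the copy()+update() implementation in the script above.
    return argparse.Namespace(**{**vars(args1), **vars(args2)})

defaults = argparse.Namespace(learning_rate=5e-6, mixed_precision="fp16")
overrides = argparse.Namespace(mixed_precision="bf16")
print(merge_namespaces(defaults, overrides))
# -> Namespace(learning_rate=5e-06, mixed_precision='bf16')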
 
spaces/Banbri/zcvzcv/src/app/queries/getStory.ts DELETED
@@ -1,87 +0,0 @@
- import { createLlamaPrompt } from "@/lib/createLlamaPrompt"
- import { dirtyLLMResponseCleaner } from "@/lib/dirtyLLMResponseCleaner"
- import { dirtyLLMJsonParser } from "@/lib/dirtyLLMJsonParser"
- import { dirtyCaptionCleaner } from "@/lib/dirtyCaptionCleaner"
-
- import { predict } from "./predict"
- import { Preset } from "../engine/presets"
- import { LLMResponse } from "@/types"
- import { cleanJson } from "@/lib/cleanJson"
-
- export const getStory = async ({
-   preset,
-   prompt = "",
- }: {
-   preset: Preset;
-   prompt: string;
- }): Promise<LLMResponse> => {
-   // throw new Error("Planned maintenance")
-
-   // In case you need to quickly debug the RENDERING engine you can uncomment this:
-   // return mockLLMResponse
-
-   const query = createLlamaPrompt([
-     {
-       role: "system",
-       content: [
-         `You are a comic book author specialized in ${preset.llmPrompt}`,
-         `Please write detailed drawing instructions and a one-sentence short caption for the 4 panels of a new silent comic book page.`,
-         `Give your response as a VALID JSON array like this: \`Array<{ panel: number; instructions: string; caption: string}>\`.`,
-         // `Give your response as Markdown bullet points.`,
-         `Be brief in your 4 instructions and captions, don't add your own comments. Be straight to the point, and never reply things like "Sure, I can.." etc. Reply using valid JSON.`
-       ].filter(item => item).join("\n")
-     },
-     {
-       role: "user",
-       content: `The story is: ${prompt}`,
-     }
-   ]) + "```json\n["
-
-   let result = ""
-
-   try {
-     result = `${await predict(query) || ""}`.trim()
-     if (!result.length) {
-       throw new Error("empty result!")
-     }
-   } catch (err) {
-     console.log(`prediction of the story failed, trying again..`)
-     try {
-       result = `${await predict(query + ".") || ""}`.trim()
-       if (!result.length) {
-         throw new Error("empty result!")
-       }
-     } catch (err) {
-       console.error(`prediction of the story failed again!`)
-       throw new Error(`failed to generate the story ${err}`)
-     }
-   }
-
-   // console.log("Raw response from LLM:", result)
-   const tmp = cleanJson(result)
-
-   let llmResponse: LLMResponse = []
-
-   try {
-     llmResponse = dirtyLLMJsonParser(tmp)
-   } catch (err) {
-     console.log(`failed to read LLM response: ${err}`)
-     console.log(`original response was:`, result)
-
-     // in case of failure here, it might be because the LLM hallucinated a completely different response,
-     // such as markdown. There is no real solution.. but we can try a fallback:
-     llmResponse = (
-       tmp.split("*")
-         .map(item => item.trim())
-         .map((cap, i) => ({
-           panel: i,
-           caption: cap,
-           instructions: cap,
-         }))
-     )
-   }
-
-   return llmResponse.map(res => dirtyCaptionCleaner(res))
- }
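
The getStory flow above boils down to: ask the model once, retry once with a slightly perturbed prompt, then fall back to a crude split-based parse when JSON parsing fails. A minimal sketch of the same strategy in Python (standalone, not part of the commit; `predict` here is a hypothetical stand-in for the `predict` helper imported above, and its sample return value is invented):

import json

def predict(query: str) -> str:
    # Hypothetical LLM call; returns the model's raw text.
    return '[{"panel": 0, "instructions": "wide shot of a city", "caption": "A new day."}]'

def get_story(query: str) -> list:
    result = (predict(query) or "").strip()
    if not result:
        # One retry with a slightly different prompt, mirroring predict(query + ".").
        result = (predict(query + ".") or "").strip()
        if not result:
            raise RuntimeError("failed to generate the story")
    try:
        return json.loads(result)
    except json.JSONDecodeError:
        # The model may have hallucinated markdown instead of JSON; salvage bullet items.
        items = [part.strip() for part in result.split("*") if part.strip()]
        return [{"panel": i, "caption": cap, "instructions": cap} for i, cap in enumerate(items)]

print(get_story("a cat detective story"))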
 
spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Mod Apk Desbloqueado Todo La ltima Versin.md DELETED
@@ -1,73 +0,0 @@
- <br />
- <h1>Car Parking Multiplayer Mod APK: Unlocked Everything Latest Version Download</h1>
- <h2>Introduction</h2>
- <p>Car Parking Multiplayer is a popular simulation game that lets you experience the thrill of driving and parking a variety of vehicles in an open world. You can also customize your cars, interact with other players, and explore different locations. But what if you want to unlock everything in the game without spending real money or completing the challenges? That's where Car Parking Multiplayer Mod APK comes in handy.</p>
- <h2>What is Car Parking Multiplayer Mod APK?</h2>
- <p>Car Parking Multiplayer Mod APK is a modified version of the original game that gives you unlimited money and access to all of the game's features and content. With this mod, you can enjoy the game without limits or restrictions. You can buy any car you want, upgrade it to your liking, and drive it anywhere you like. You can also use the menu to activate various cheats and hacks, such as speed boosts, teleporting, and more.</p>
- <h2>car parking multiplayer mod apk unlocked everything latest version</h2><br /><p><b><b>Download File</b> ===== <a href="https://bltlly.com/2v6KG6">https://bltlly.com/2v6KG6</a></b></p><br /><br />
- <h2>What are the benefits of Car Parking Multiplayer Mod APK?</h2>
- <p>There are many benefits to using Car Parking Multiplayer Mod APK, such as:</p>
- <ul>
- <li>You can unlock all the cars and customize them with different parts, colors, and stickers.</li>
- <li>You can unlock all the maps and explore them freely.</li>
- <li>You can join any multiplayer mode and compete or chat with other players.</li>
- <li>You can use the voice chat feature to communicate with friends or strangers.</li>
- <li>You can use the police mode to chase or be chased by other players.</li>
- <li>You can use the menu to activate various cheats and hacks, such as speed boosts, teleporting, and more.</li>
- </ul>
- <h2>What are the drawbacks of Car Parking Multiplayer Mod APK?</h2>
- <p>While Car Parking Multiplayer Mod APK has many advantages, it also has some drawbacks, such as:</p>
- <ul>
- <li>It may not be compatible with some devices or versions of the game.</li>
-
- <li>It can be detected by the game's developers and result in a ban or suspension.</li>
- <li>It may not be safe to download or install from unknown sources.</li>
- </ul>
- <h2>How to download and install Car Parking Multiplayer Mod APK?</h2>
- <p>If you want to download and install Car Parking Multiplayer Mod APK, you need to follow these steps:</p>
- <ol>
- <li>Go to a trusted website that offers Car Parking Multiplayer Mod APK, such as or .</li>
- <li>Click the download button and wait for the file to download.</li>
- <li>Go to your device settings and allow the installation of apps from unknown sources.</li>
- <li>Locate the downloaded file and tap on it to start the installation process.</li>
- <li>Follow the on-screen instructions and wait for the installation to complete.</li>
- <li>Launch the game and enjoy the mod features.</li>
- </ol>
- <h2>Safety tips for using Car Parking Multiplayer Mod APK</h2>
- <p>To stay safe and secure while using Car Parking Multiplayer Mod APK, you should follow these tips:</p>
- <ul>
- <li>Download and install Car Parking Multiplayer Mod APK only from trusted and verified sources, such as or .</li>
- <li>Scan the file with an antivirus or a malware scanner before installing it on your device.</li>
- <li>Create a backup of your game data before using the mod in case something goes wrong.</li>
- <li>Use a VPN or a proxy service to hide your IP address and location while playing online.</li>
- <li>Don't use the mod features excessively or abuse them in multiplayer mode, to avoid being reported or banned by other players or the game's developers.</li>
- </ul>
- <h2>Conclusion</h2>
-
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Car Parking Multiplayer Mod APK:</p>
- <h3>Is Car Parking Multiplayer Mod APK free?</h3>
- <p>Yes, Car Parking Multiplayer Mod APK is free to download and use. You don't need to pay anything to access the mod features and content.</p>
- <h3>Is Car Parking Multiplayer Mod APK legal?</h3>
- <p>No, Car Parking Multiplayer Mod APK is not legal. It violates the terms and conditions of the original game and the Google Play Store. Using this mod can result in a ban or suspension from the game or the store.</p>
- <h3>Is Car Parking Multiplayer Mod APK safe?</h3>
- <p>Not necessarily. Car Parking Multiplayer Mod APK may contain viruses or malware that can damage your device or steal your personal information. It can also cause crashes or bugs in the game that may affect your gameplay. Therefore, you should download and install Car Parking Multiplayer Mod APK only from trusted and verified sources, such as or . You should also scan the file with an antivirus or a malware scanner before installing it on your device.</p>
- <p></p>
- <h3>How to update Car Parking Multiplayer Mod APK?</h3>
- <p>To update Car Parking Multiplayer Mod APK, you need to follow these steps:</p>
- <ol>
- <li>Delete the previous version of the mod from your device.</li>
- <li>Go to a trusted website that offers the latest version of Car Parking Multiplayer Mod APK, such as or .</li>
- <li>Download the new version of the mod and install it on your device.</li>
- <li>Launch the game and enjoy the updated mod features.</li>
- </ol>
- <h3>How to uninstall Car Parking Multiplayer Mod APK?</h3>
- <p>To uninstall Car Parking Multiplayer Mod APK, you need to follow these steps:</p>
- <ol>
- <li>Go to your device settings and find the app manager.</li>
- <li>Find and select Car Parking Multiplayer Mod APK in the list of apps.</li>
- <li>Tap the uninstall button and confirm your action.</li>
- <li>Wait for the uninstallation process to complete.</li>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Bet365gr.md DELETED
@@ -1,62 +0,0 @@
- <br />
- <h1>Bet365gr: The Ultimate Online Betting Platform for Greek Players</h1>
- <p>If you are looking for a reliable, safe, and fun online betting platform that caters to Greek players, look no further than Bet365gr. Bet365gr is the Greek version of Bet365, one of the world's leading online gambling companies. In this article, we will tell you everything you need to know about Bet365gr, including how to register, how to claim your welcome bonus, what its main features and benefits are, and some tips and tricks for getting the most out of it. Let's get started!</p>
- <h2>bet365gr</h2><br /><p><b><b>DOWNLOAD</b> &#10038; <a href="https://bltlly.com/2v6Lxl">https://bltlly.com/2v6Lxl</a></b></p><br /><br />
- <h2>What is Bet365gr and why should you choose it?</h2>
- <p>Bet365gr is the Greek version of Bet365, one of the world's leading online gambling companies. Bet365 was founded in 2000 and has since grown into one of the most popular and trusted brands in the industry. Bet365 operates in more than 200 countries and has over 45 million customers worldwide. Bet365gr was launched in 2015 to provide a service tailored to Greek players.</p>
- <h3>Bet365gr offers a wide range of sports betting options, live streaming, casino games, and more</h3>
- <p>One of the main reasons Bet365gr is so popular among Greek players is that it offers a wide range of sports betting options, covering both local and international events. You can bet on football, basketball, tennis, volleyball, handball, water polo, and many other sports. You can also bet on politics, entertainment, esports, and virtual sports. Whether you prefer to bet pre-match or in-play, you will find plenty of markets and odds to suit your preferences.</p>
-
- <p>But wait, there's more. If you are looking for some casino action, Bet365gr will not disappoint. Bet365gr has a casino section offering hundreds of games from top providers such as NetEnt, Playtech, Microgaming, and more. You can play slots, jackpots, table games, card games, video poker, and more. You can also try your luck at the live casino, where you can play with real dealers and interact with other players. You can play live roulette, live blackjack, live baccarat, live poker, and more.</p>
- <h3>Bet365gr is licensed and regulated by the Greek authorities and guarantees fair and safe play</h3>
- <p>Another reason Bet365gr is so trustworthy is that it is licensed and regulated by the Greek authorities. Bet365gr operates under a license from the Hellenic Gaming Commission (HGC), the official body that oversees and regulates all forms of gambling in Greece. Bet365gr complies with all the rules and regulations set by the HGC and undergoes regular audits and checks to ensure fair and safe play.</p>
- <p></p>
- <p>Bet365gr also uses the latest encryption technology to protect your personal and financial data. Bet365gr also has a strict privacy policy that ensures your information is not shared with third parties without your consent. Bet365gr also supports responsible gambling and provides various tools and resources to help you control your spending and prevent gambling addiction.</p>
- <h2>How to register and claim your welcome bonus at Bet365gr?</h2>
- <p>Registering at Bet365gr is quick and easy; just follow these steps:</p>
- <h3>Visit the official Bet365gr website or download the app on your device</h3>
-
- <h3>Click the "Join Now" button and fill in the required details</h3>
- <p>The next step is to click the "Join Now" button and fill in the required details. You will need to provide basic information such as your name, date of birth, email address, phone number, country of residence, preferred currency, username, password, and security code. You will also need to accept the terms and conditions and confirm that you are over 18 years old.</p>
- <h3>Verify your identity and make your first deposit using your preferred payment method</h3>
- <p>The final step is to verify your identity and make your first deposit using your preferred payment method. You will need to provide documents such as your ID card, passport, or driver's license to prove your identity and age. You will also need to provide documents such as a bank statement, utility bill, or credit card statement to prove your address. You can upload these documents online or send them by email.</p>
- <p>Once you have verified your identity, you can make your first deposit using your preferred payment method. Bet365gr supports a variety of payment methods such as credit cards, debit cards, e-wallets, prepaid cards, bank transfers, and more. You can choose from options such as Visa, Mastercard, Skrill, Neteller, Paysafecard, Entropay, bank transfer, and more. The minimum deposit amount is €5 for most methods.</p>
- <h3>Claim your 100% deposit bonus up to €100 and start betting</h3>
- <p>The last step is to claim your 100% deposit bonus up to €100 and start betting. Bet365gr offers a generous welcome bonus for new customers who make a first deposit of at least €10. You will receive a 100% bonus of up to €100 that you can use on any sports market. To claim this bonus, you will need to enter the bonus code BONUS100 when making your deposit.</p>
-
- <h2>What are the main features and benefits of Bet365gr?</h2>
- <p>Bet365gr has many features and benefits that make it one of the best online betting platforms for Greek players. Here are some of them:</p>
- <h3>Bet365gr has a comprehensive in-play service that lets you bet on live events</h3>
- <p>One of the main features of Bet365gr is its comprehensive in-play service, which lets you bet on live events as they unfold. You can bet on a variety of sports and markets, such as the next goalscorer, the next corner, the next card, the next point, and more. You can also use in-game statistics and live scores to keep track of the action and make informed decisions. You can also use the cash out and edit bet features to adjust your bets as the situation changes.</p>
- <h3>Bet365gr has a live streaming feature that lets you watch over 100,000 events per year</h3>
- <p>Another feature of Bet365gr that enhances your in-play betting experience is its live streaming feature, which lets you watch over 100,000 events per year on your device. You can watch football, basketball, tennis, and more live from the top leagues and tournaments around the world. You can also watch live horse and greyhound racing from the UK and Ireland. All you need to do to access the live streaming service is have a funded account or have placed a bet in the last 24 hours. You can also use the live streaming calendar to see which events are available to watch.</p>
- <h3>Bet365gr has a casino section featuring hundreds of games from top providers</h3>
-
- <h3>Bet365gr has a customer support team available 24/7 via phone, email, or live chat</h3>
- <p>One of the benefits of Bet365gr is its customer support team, available 24/7 via phone, email, or live chat. You can contact them any time you have a question or a problem regarding your account, your bets, your payments, or anything else. The customer support team is friendly, professional, and helpful. They will do their best to resolve your issue as quickly as possible. You can also use the FAQ section to find answers to common questions.</p>
- <h2>What are some tips and tricks to get the most out of Bet365gr?</h2>
- <p>Bet365gr is a great online betting platform that offers many features and benefits for Greek players. However, if you want to get the most out of it, you should follow some tips and tricks that will help you improve your chances of winning and have more fun. Here are some of them:</p>
- <h3>Use the bet builder feature to create your own custom bets</h3>
- <p>One tip that will help you place more personalized and profitable bets is to use the bet builder feature. The bet builder lets you create your own custom bets by combining different selections from the same event. For example, you can create a bet that includes the match result, total goals, first goalscorer, and number of corners in a football match. You can then view the odds for your bet and place it as a single wager.</p>
- <h3>Use the cash out feature to lock in your winnings or minimize your losses</h3>
-
- <h3>Use the edit bet feature to add, swap, or remove selections from your bets</h3>
- <p>Another tip that will help you adjust your bets to changing circumstances is to use the edit bet feature. The edit bet feature lets you add, swap, or remove selections from your bets before they are settled. You can use this feature to add more selections to increase your potential return, or to swap or remove selections to reduce your risk or change your mind. The new bet and odds will be calculated based on current prices.</p>
- <h3>Use the responsible gambling tools to set limits and control your spending</h3>
- <p>The final tip that will help you enjoy Bet365gr responsibly and safely is to use the responsible gambling tools. Bet365gr supports responsible gambling and provides various tools and resources to help you set limits and control your spending. You can use deposit limits to cap the amount you can deposit in a given period, reality checks to remind you how long you have been playing, time-outs to take a break from gambling, self-exclusion to block access to your account for a set period, and account closure to permanently close your account. You can also use the help and support section to find links to external organizations that can help with gambling problems.</p>
- <h2>Conclusion</h2>
-
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Bet365gr:</p>
- <h3>Is Bet365gr legal in Greece?</h3>
- <p>Yes, Bet365gr is legal in Greece. Bet365gr operates under a license from the Hellenic Gaming Commission (HGC), the official body that oversees and regulates all forms of gambling in Greece.</p>
- <h3>How can I contact Bet365gr?</h3>
- <p>You can contact Bet365gr by phone, email, or live chat. The customer support team is available 24/7 and will help you with any question or problem. You can also use the FAQ section to find answers to common questions.</p>
- <h3>What payment methods does Bet365gr support?</h3>
- <p>Bet365gr supports a variety of payment methods such as credit cards, debit cards, e-wallets, prepaid cards, bank transfers, and more. You can choose from options such as Visa, Mastercard, Skrill, Neteller, Paysafecard, Entropay, bank transfer, and more. The minimum deposit amount is €5 for most methods.</p>
- <h3>What are the wagering requirements for the welcome bonus?</h3>
- <p>The wagering requirements for the welcome bonus are 12 times your deposit and bonus amount on sports bets with odds of at least 1.50 within 90 days. For example, if you deposit €50 and receive a €50 bonus, you will need to wager €1200 (€100 x 12) on sports bets with odds of at least 1.50 within 90 days.</p>
- <h3>How can I watch live streaming on Bet365gr?</h3>
- <p>You can watch live streaming on Bet365gr by having a funded account or having placed a bet in the last 24 hours. You can watch football, basketball, tennis, and more live from the top leagues and tournaments around the world. You can also watch live horse and greyhound racing from the UK and Ireland. You can use the live streaming calendar to see which events are available to watch.</p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Bitcoin Wallet Mod Apk.md DELETED
@@ -1,57 +0,0 @@
-
- <h1>Bitcoin Wallet Mod APK: Everything You Need to Know</h1>
- <p>Bitcoin is a digital currency that lets you send and receive money online without intermediaries or central authorities. To use bitcoin, you need a bitcoin wallet, a software program that stores your private keys and lets you access your coins. A bitcoin wallet also has a public address, which is like an account number you can share with others to receive payments.</p>
- <h2>bitcoin wallet mod apk</h2><br /><p><b><b>Download</b> &#187; <a href="https://bltlly.com/2v6KlM">https://bltlly.com/2v6KlM</a></b></p><br /><br />
- <p>However, not all bitcoin wallets are created equal. Some wallets may have limited features, security issues, or compatibility problems. That's why some people opt for a bitcoin wallet mod apk, a modified version of an original wallet app that offers more functionality and customization. But what exactly is a bitcoin wallet mod apk and how does it work? In this article, we will explain everything you need to know about bitcoin wallet mod apks, including their benefits, risks, and how to choose the best one for your needs.</p>
- <h2>What is a bitcoin wallet mod apk and how does it work?</h2>
- <p>A bitcoin wallet mod apk is a modified version of an original bitcoin wallet app that has been altered by a third-party developer to add or remove certain features. For example, a bitcoin wallet mod apk may offer more coin support, more exchange options, more security settings, or more customization of the user interface. A bitcoin wallet mod apk may also remove features considered unnecessary or annoying, such as ads, fees, or verification requirements.</p>
-
- <h2>What are the benefits of using a bitcoin wallet mod apk?</h2>
- <p>Using a bitcoin wallet mod apk can have some advantages over using an original bitcoin wallet app. Some of the benefits are:</p>
- <p></p>
- <ul>
- <li><b>More features:</b> A bitcoin wallet mod apk can offer features not available in the original app, such as more coin support, more exchange options, more security settings, or more customization of the user interface. This can improve your user experience and give you more control over your funds.</li>
- <li><b>More flexibility:</b> A bitcoin wallet mod apk can let you customize its settings to your preferences and needs. For example, you can choose which coins to display, which exchange rates to use, which notifications to receive, or which theme to apply. You can also switch between different bitcoin wallet mod apks depending on your mood or situation.</li>
- <li><b>More fun:</b> A bitcoin wallet mod apk can make using bitcoin more fun and enjoyable by adding entertaining or humorous elements. For example, some bitcoin wallet mod apks may have funny sounds, animations, or graphics that make sending and receiving coins more amusing.</li>
- </ul>
- <h2>What are the risks and challenges of using a bitcoin wallet mod apk?</h2>
- <p>Using a bitcoin wallet mod apk can also have some drawbacks and challenges you should be aware of before downloading one. Some of the risks are:</p>
- <ul>
- <li><b>Less security:</b> A bitcoin wallet mod apk may not have the same level of security as the original app. This means your private keys may be exposed to hackers or malware that can steal your funds or compromise your privacy. You should always check the reputation and reviews of the developer and the source of the bitcoin wallet mod apk before installing it. You should also back up your private keys and use encryption and password protection whenever possible.</li>
-
- <li><b>Less legality:</b> A bitcoin wallet mod apk may not have the same level of legality as the original app. This means it may violate the terms and conditions of the original app or the laws and regulations of your country or region. You should always check the legality of the bitcoin wallet mod apk before using it and be prepared to face the consequences if you are caught or reported.</li>
- </ul>
- <h2>How to choose the best bitcoin wallet mod apk for your needs?</h2>
- <p>Choosing the best bitcoin wallet mod apk for your needs can be a difficult task, as there are many factors to consider and many options to choose from. Here are some tips and recommendations to help you make an informed decision:</p>
- <ul>
- <li><b>Do your research:</b> Before downloading any bitcoin wallet mod apk, you should research the developer, source, features, security, reliability, and legality of the app. You can read reviews, ratings, comments, and feedback from other users and experts. You can also compare different bitcoin wallet mod apks and see which one suits your preferences and needs.</li>
- <li><b>Try it out:</b> Before using any bitcoin wallet mod apk, you should test it with a small amount of coins or a dummy account. You can see how it works, how it looks, how it feels, and how it performs. You can also check for bugs, errors, or issues that may affect your user experience or security.</li>
- <li><b>Trust your instincts:</b> Ultimately, you should trust your instincts and choose the bitcoin wallet mod apk that you feel comfortable and confident with. You should also listen to your gut and avoid any bitcoin wallet mod apk that seems shady, suspicious, or too good to be true.</li>
- </ul>
- <h2>Conclusion: Summarize the main points and provide some tips and recommendations</h2>
-
- <p>We hope this article has helped you understand everything you need to know about bitcoin wallet mod apks. If you have any questions or comments, please feel free to leave them below. Thanks for reading!</p>
- <h2>FAQs: Answering some common questions about bitcoin wallet mod apks</h2>
- <h3>What is the difference between a bitcoin wallet app and a bitcoin wallet mod apk?</h3>
- <p>A bitcoin wallet app is an original version of a software program that lets you store and manage your bitcoins. A bitcoin wallet mod apk is a modified version of a bitcoin wallet app that has been altered by a third-party developer to add or remove certain features.</p>
- <h3>Is using a bitcoin wallet mod apk legal?</h3>
- <p>Using a bitcoin wallet mod apk may not be legal in some countries or regions, as it may violate the terms and conditions of the original app or the laws and regulations of your jurisdiction. You should always check the legality of the bitcoin wallet mod apk before using it and be prepared to face the consequences if you are caught or reported.</p>
- <h3>Is using a bitcoin wallet mod apk safe?</h3>
- <p>Using a bitcoin wallet mod apk may not be safe, as it may not have the same level of security as the original app. This means your private keys may be exposed to hackers or malware that can steal your funds or compromise your privacy. You should always check the reputation and reviews of the developer and the source of the bitcoin wallet mod apk before installing it. You should also back up your private keys and use encryption and password protection whenever possible.</p>
- <h3>What are some examples of popular bitcoin wallet mod apks?</h3>
- <p>Some examples of popular bitcoin wallet mod apks are:</p>
- <ul>
- <li><b>Coinomi Mod APK:</b> A multi-coin wallet that supports over 100 cryptocurrencies and tokens. It offers more exchange options, more security settings, and more customization of the user interface.</li>
-
- <li><b>Mycelium Mod APK:</b> An advanced, feature-rich bitcoin wallet that offers more coin support, more exchange options, more security settings, and more customization of the user interface.</li>
- <li><b>Electrum Mod APK:</b> A fast, lightweight bitcoin wallet that connects to your own server or a trusted node. It offers more coin compatibility, more exchange options, more security settings, and more customization of the user interface.</li>
- </ul>
- <h3>How can I back up my private keys from a bitcoin wallet mod apk?</h3>
- <p>You can back up your private keys from a bitcoin wallet mod apk using one of the following methods:</p>
- <ul>
- <li><b>Seed phrase:</b> A seed phrase is a list of words that can be used to restore your wallet in case you lose your device or your app. You should write down your seed phrase and store it in a safe place. You should never share your seed phrase with anyone or store it online.</li>
- <li><b>QR code:</b> A QR code is a graphical representation of your private keys that can be scanned by another device or app. You should save your QR code as an image file and store it in a safe place. You should never share your QR code with anyone or store it online.</li>
54
- <li><b>Exportación de archivos:</b> Una exportación de archivos es una copia de los datos de su cartera que se pueden guardar como un archivo y transferir a otro dispositivo o aplicación. Debe cifrar la exportación de archivos con una contraseña y almacenarla en un lugar seguro. Nunca debe compartir su exportación de archivos con nadie o almacenarlo en línea. </li>
55
- </ul></p> 64aa2da5cf<br />
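Since the file-export method depends on password encryption, here is a minimal sketch of password-based file encryption in Python. It assumes the third-party `cryptography` package and is purely illustrative; it is not tied to any particular wallet app, and the function name is hypothetical.

    # Minimal sketch: password-encrypt a wallet export before backing it up.
    # Assumes the third-party `cryptography` package; illustrative only.
    import base64
    import os

    from cryptography.fernet import Fernet
    from cryptography.hazmat.primitives.hashes import SHA256
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

    def encrypt_export(path: str, password: str) -> None:
        salt = os.urandom(16)  # random salt, stored next to the ciphertext
        kdf = PBKDF2HMAC(algorithm=SHA256(), length=32, salt=salt,
                         iterations=480_000)
        key = base64.urlsafe_b64encode(kdf.derive(password.encode()))
        with open(path, "rb") as f:
            token = Fernet(key).encrypt(f.read())
        with open(path + ".enc", "wb") as f:
            f.write(salt + token)  # prepend the salt for later decryption

Decryption reverses the steps: read the first 16 bytes back as the salt, re-derive the key from the password, and call Fernet(key).decrypt() on the rest.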
spaces/Benson/text-generation/Examples/Descargar Cardfight Vanguard Online.md DELETED
@@ -1,84 +0,0 @@
- <br />
- <h1>Marble Clash: Crazy Fun Shooter Download - A Beginner's Guide</h1>
- <p>If you are looking for a fun, addictive game that combines 3D action, shooting, and transforming robots, you should check out Marble Clash: Crazy Fun Shooter. This game puts you in the cockpit of a rolling robot armed with guns and challenges you to collect coins and fight other players in a battle royale mode. In this article, we will show you what Marble Clash is all about, how to download and play it on PC with BlueStacks, and how to master it and win every round. Let's get started!</p>
- <h2>download cardfight vanguard online</h2><br /><p><b><b>DOWNLOAD</b> &#9193; <a href="https://bltlly.com/2v6MUv">https://bltlly.com/2v6MUv</a></b></p><br /><br />
- <h2>What is Marble Clash?</h2>
- <p>Marble Clash is an action game developed by MAD PIXEL, a studio that specializes in fun, casual games for Android devices. Marble Clash is one of its most popular titles, with more than 5 million downloads and 4.6 stars on the Google Play Store. Here are some of the features that make Marble Clash so enjoyable:</p>
- <h3>A fun 3D action game with cute transforming robots</h3>
- <p>In Marble Clash, you control a robot that can transform into a marble ball at the press of a button. This lets you switch between shooting mode and rolling mode depending on the situation. You can use your weapons to attack your enemies, or use your speed and agility to dodge their bullets. You can also use your marble ball to jump over obstacles, slide down ramps, and bounce off walls. The graphics are colorful and cartoonish, and the animations are smooth and fluid. The game also has an upbeat soundtrack and sound effects that add to the fun atmosphere.</p>
- <h3>A challenging but rewarding battle royale mode</h3>
-
- <h3>A variety of weapons, parts, and skins to customize your robot</h3>
- <p>Marble Clash lets you customize your robot with different weapons, parts, and skins. You can unlock new items by leveling up or by spending coins that you earn from playing. You can choose from several weapon types, such as miniguns, rockets, shotguns, lasers, and more. Each weapon has its own advantages and disadvantages, so you need to find the one that suits your playstyle. You can also equip different parts that affect your robot's speed, health, armor, and power, and mix and match combinations to create your own unique robot. In addition, you can change your robot's appearance by applying different skins; there are more than 30 paint options to choose from, ranging from solid colors to patterns to flags.</p>
- <h2>How to download and play Marble Clash on PC?</h2>
- <p>Marble Clash is an Android game that you can play on your smartphone or tablet. However, if you want to enjoy it on a bigger screen with better performance and controls, you should try playing it on PC with BlueStacks. BlueStacks is an Android gaming platform that lets you play popular Android games on your PC or Mac. Here are some of the benefits of playing Marble Clash on PC with BlueStacks:</p>
- <h3>The benefits of playing Marble Clash on PC with BlueStacks</h3>
- <ul>
- <li>You can experience Marble Clash in full screen at true HD resolutions and enjoy its stunning graphics and animations.</li>
- <li>You can tap the raw power of your home computer and enjoy blazing-fast speeds and smooth frame rates.</li>
-
- <li>You can use BlueStacks' advanced features and enhancements, such as Shooting Mode, macros, recording, and more, to improve your gameplay and create quality online video content.</li>
- </ul>
- <h3>The steps to install Marble Clash on PC with BlueStacks</h3>
- <ol>
- <li>Download and install BlueStacks on your PC from [here]( 1 ).</li>
- <li>Complete the Google sign-in to access the Play Store, or do it later.</li>
- <li>Search for Marble Clash: Crazy Fun Shooter in the search bar at the top-right corner.</li>
- <li>Click to install Marble Clash: Crazy Fun Shooter from the search results.</li>
- <li>Complete the Google sign-in (if you skipped step 2) to install Marble Clash: Crazy Fun Shooter.</li>
- <li>Click the Marble Clash: Crazy Fun Shooter icon on the home screen to start playing.</li>
- </ol>
- <h3>BlueStacks features and enhancements for Marble Clash</h3>
- <p>BlueStacks has many features and enhancements that can make your Marble Clash experience even better. Here are some of them:</p>
- <h4>Shooting Mode</h4>
- <p>With BlueStacks Shooting Mode, you get PC-like gameplay in Marble Clash. Press F1 to aim and shoot with the mouse. You can also customize the key bindings to your preference.</p>
- <h4>High FPS</h4>
- <p>With BlueStacks High FPS, you can enjoy Marble Clash with smoother graphics and faster performance. You can enable this feature in the settings menu and choose the FPS level that suits your device.</p>
- <h4>Script</h4>
- <p>With BlueStacks Script, you can automate repetitive tasks and actions in Marble Clash. You can create a script using simple commands and run it with a single keypress, and use the built-in script editor to modify or create your own scripts.</p>
- <h4>Free Look</h4>
-
- <h2>How to master Marble Clash and win every round?</h2>
- <p>Marble Clash takes skill, strategy, and luck to win. If you want to become a pro player and dominate every round, follow these tips and tricks to improve your game:</p>
- <h3>Tips and tricks for collecting coins and avoiding enemies</h3>
- <ul>
- <li>Coins are scattered all over the map and are marked with yellow circles on the mini-map. Try to collect as many coins as possible before time runs out, as they determine your score and rank.</li>
- <li>Enemies are shown as red dots on the mini-map. Avoid them if you are low on health or ammo, or if you are outnumbered. You can also use marble ball mode to escape from them faster.</li>
- <li>Use the environment to your advantage. You can hide behind walls, trees, or buildings, or use ramps, bridges, or tunnels to move around. You can also use explosive barrels or crates to damage your enemies.</li>
- </ul>
- <h3>The best weapons and parts for different playstyles</h3>
- <ul>
- <li>The weapons and parts you equip affect your robot's speed, health, armor, power, and range. Choose the ones that suit your playstyle and strategy.</li>
- <li>If you prefer a fast, agile robot, equip weapons and parts that boost your speed and mobility, such as miniguns, rockets, wheels, and wings. These let you move quickly and dodge enemy fire.</li>
- <li>If you prefer a strong, durable robot, equip weapons and parts that boost your health and armor, such as shotguns, lasers, tracks, and shields. These let you withstand more damage and survive longer.</li>
-
- </ul>
- <h3>Strategies and tactics for different maps and regions</h3>
- <ul>
- <li>The maps in Marble Clash are divided into regions, such as the city, the desert, the forest, the snow, and the volcano. Each region has its own terrain, obstacles, and hazards, so adapt your strategy and tactics to the region you are in.</li>
- <li>In the city region, use the buildings and streets to hide and ambush your enemies, and use rooftops and windows to snipe them from above. But watch out for the traffic and police cars that can run you over or shoot at you.</li>
- <li>In the desert region, use the sand dunes and rocks for cover and to flank your enemies, and use the oasis and its water to replenish your health and ammo. But watch out for sandstorms and cacti that can damage you.</li>
- <li>In the forest region, use the trees and bushes to camouflage yourself and surprise your enemies, and use logs and bridges to cross the rivers and lakes. But watch out for animals and traps that can attack you or slow you down.</li>
- <li>In the snow region, use the snow hills and icebergs to slide and jump over your enemies, and use igloos and cabins for shelter and healing. But watch out for avalanches and snowmen that can crush you or explode.</li>
- <li>In the volcano region, use the lava pools and geysers to burn and destroy your enemies, and use the caves and tunnels to escape and ambush them. But watch out for eruptions and meteors that can rain fire down on you.</li>
- </ul>
- <h2>Conclusion</h2>
-
- <h3>FAQs</h3>
- <p>Here are some of the most frequently asked questions about Marble Clash:</p>
- <h4>Q: How can I get more coins in Marble Clash?</h4>
- <p>A: You can get more coins by playing more rounds in battle royale mode, completing daily missions, watching ads, or buying them with real money.</p>
- <h4>Q: How can I level up in Marble Clash?</h4>
- <p>A: You level up by earning experience points (XP) from playing rounds in battle royale mode or completing daily missions. You will get rewards such as coins, weapons, parts, and skins when you level up.</p>
- <h4>Q: How can I unlock new weapons, parts, and skins in Marble Clash?</h4>
- <p>A: You can unlock new weapons, parts, and skins by leveling up, spending coins, or opening chests. You can get chests by playing rounds in battle royale mode, completing daily missions, or buying them with real money.</p>
- <h4>Q: How can I change my robot's name and avatar in Marble Clash?</h4>
- <p>A: You can change your robot's name and avatar by tapping the settings icon in the top-right corner of the screen, entering a new name, and choosing an avatar from the available options.</p>
- <h4>Q: How can I chat with other players in Marble Clash?</h4>
- <p>A: You can chat with other players by tapping the chat icon in the bottom-left corner of the screen. You can send text or voice messages to your friends or to the public, and use emojis and stickers to express yourself.</p>
- <h4>Q: How can I report a bug or an issue in Marble Clash?</h4>
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/params.py DELETED
@@ -1,303 +0,0 @@
1
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- from botocore.docs.shape import ShapeDocumenter
14
- from botocore.docs.utils import py_type_name
15
-
16
-
17
- class BaseParamsDocumenter(ShapeDocumenter):
18
- def document_params(self, section, shape, include=None, exclude=None):
19
- """Fills out the documentation for a section given a model shape.
20
-
21
- :param section: The section to write the documentation to.
22
-
23
- :param shape: The shape of the operation.
24
-
25
- :type include: Dictionary where keys are parameter names and
26
- values are the shapes of the parameter names.
27
- :param include: The parameter shapes to include in the documentation.
28
-
29
- :type exclude: List of the names of the parameters to exclude.
30
- :param exclude: The names of the parameters to exclude from
31
- documentation.
32
- """
33
- history = []
34
- self.traverse_and_document_shape(
35
- section=section,
36
- shape=shape,
37
- history=history,
38
- name=None,
39
- include=include,
40
- exclude=exclude,
41
- )
42
-
43
- def document_recursive_shape(self, section, shape, **kwargs):
44
- self._add_member_documentation(section, shape, **kwargs)
45
-
46
- def document_shape_default(
47
- self, section, shape, history, include=None, exclude=None, **kwargs
48
- ):
49
- self._add_member_documentation(section, shape, **kwargs)
50
-
51
- def document_shape_type_list(
52
- self, section, shape, history, include=None, exclude=None, **kwargs
53
- ):
54
- self._add_member_documentation(section, shape, **kwargs)
55
- param_shape = shape.member
56
- param_section = section.add_new_section(
57
- param_shape.name, context={'shape': shape.member.name}
58
- )
59
- self._start_nested_param(param_section)
60
- self.traverse_and_document_shape(
61
- section=param_section,
62
- shape=param_shape,
63
- history=history,
64
- name=None,
65
- )
66
- section = section.add_new_section('end-list')
67
- self._end_nested_param(section)
68
-
69
- def document_shape_type_map(
70
- self, section, shape, history, include=None, exclude=None, **kwargs
71
- ):
72
- self._add_member_documentation(section, shape, **kwargs)
73
-
74
- key_section = section.add_new_section(
75
- 'key', context={'shape': shape.key.name}
76
- )
77
- self._start_nested_param(key_section)
78
- self._add_member_documentation(key_section, shape.key)
79
-
80
- param_section = section.add_new_section(
81
- shape.value.name, context={'shape': shape.value.name}
82
- )
83
- param_section.style.indent()
84
- self._start_nested_param(param_section)
85
- self.traverse_and_document_shape(
86
- section=param_section,
87
- shape=shape.value,
88
- history=history,
89
- name=None,
90
- )
91
-
92
- end_section = section.add_new_section('end-map')
93
- self._end_nested_param(end_section)
94
- self._end_nested_param(end_section)
95
-
96
- def document_shape_type_structure(
97
- self,
98
- section,
99
- shape,
100
- history,
101
- include=None,
102
- exclude=None,
103
- name=None,
104
- **kwargs,
105
- ):
106
- members = self._add_members_to_shape(shape.members, include)
107
- self._add_member_documentation(section, shape, name=name)
108
- for param in members:
109
- if exclude and param in exclude:
110
- continue
111
- param_shape = members[param]
112
- param_section = section.add_new_section(
113
- param, context={'shape': param_shape.name}
114
- )
115
- self._start_nested_param(param_section)
116
- self.traverse_and_document_shape(
117
- section=param_section,
118
- shape=param_shape,
119
- history=history,
120
- name=param,
121
- )
122
- section = section.add_new_section('end-structure')
123
- self._end_nested_param(section)
124
-
125
- def _add_member_documentation(self, section, shape, **kwargs):
126
- pass
127
-
128
- def _add_members_to_shape(self, members, include):
129
- if include:
130
- members = members.copy()
131
- for param in include:
132
- members[param.name] = param
133
- return members
134
-
135
- def _document_non_top_level_param_type(self, type_section, shape):
136
- special_py_type = self._get_special_py_type_name(shape)
137
- py_type = py_type_name(shape.type_name)
138
-
139
- type_format = '(%s) --'
140
- if special_py_type is not None:
141
- # Special type can reference a linked class.
142
- # Italicizing it blows away the link.
143
- type_section.write(type_format % special_py_type)
144
- else:
145
- type_section.style.italics(type_format % py_type)
146
- type_section.write(' ')
147
-
148
- def _start_nested_param(self, section):
149
- section.style.indent()
150
- section.style.new_line()
151
-
152
- def _end_nested_param(self, section):
153
- section.style.dedent()
154
- section.style.new_line()
155
-
156
-
157
- class ResponseParamsDocumenter(BaseParamsDocumenter):
158
- """Generates the description for the response parameters"""
159
-
160
- EVENT_NAME = 'response-params'
161
-
162
- def _add_member_documentation(self, section, shape, name=None, **kwargs):
163
- name_section = section.add_new_section('param-name')
164
- name_section.write('- ')
165
- if name is not None:
166
- name_section.style.bold('%s' % name)
167
- name_section.write(' ')
168
- type_section = section.add_new_section('param-type')
169
- self._document_non_top_level_param_type(type_section, shape)
170
-
171
- documentation_section = section.add_new_section('param-documentation')
172
- if shape.documentation:
173
- documentation_section.style.indent()
174
- if getattr(shape, 'is_tagged_union', False):
175
- tagged_union_docs = section.add_new_section(
176
- 'param-tagged-union-docs'
177
- )
178
- note = (
179
- '.. note::'
180
- ' This is a Tagged Union structure. Only one of the '
181
- ' following top level keys will be set: %s. '
182
- ' If a client receives an unknown member it will '
183
- ' set ``SDK_UNKNOWN_MEMBER`` as the top level key, '
184
- ' which maps to the name or tag of the unknown '
185
- ' member. The structure of ``SDK_UNKNOWN_MEMBER`` is '
186
- ' as follows'
187
- )
188
- tagged_union_members_str = ', '.join(
189
- ['``%s``' % key for key in shape.members.keys()]
190
- )
191
- unknown_code_example = (
192
- '\'SDK_UNKNOWN_MEMBER\': '
193
- '{\'name\': \'UnknownMemberName\'}'
194
- )
195
- tagged_union_docs.write(note % (tagged_union_members_str))
196
- example = section.add_new_section('param-unknown-example')
197
- example.style.codeblock(unknown_code_example)
198
- documentation_section.include_doc_string(shape.documentation)
199
- section.style.new_paragraph()
200
-
201
- def document_shape_type_event_stream(
202
- self, section, shape, history, **kwargs
203
- ):
204
- self.document_shape_type_structure(section, shape, history, **kwargs)
205
-
206
-
207
- class RequestParamsDocumenter(BaseParamsDocumenter):
208
- """Generates the description for the request parameters"""
209
-
210
- EVENT_NAME = 'request-params'
211
-
212
- def document_shape_type_structure(
213
- self, section, shape, history, include=None, exclude=None, **kwargs
214
- ):
215
- if len(history) > 1:
216
- self._add_member_documentation(section, shape, **kwargs)
217
- section.style.indent()
218
- members = self._add_members_to_shape(shape.members, include)
219
- for i, param in enumerate(members):
220
- if exclude and param in exclude:
221
- continue
222
- param_shape = members[param]
223
- param_section = section.add_new_section(
224
- param, context={'shape': param_shape.name}
225
- )
226
- param_section.style.new_line()
227
- is_required = param in shape.required_members
228
- self.traverse_and_document_shape(
229
- section=param_section,
230
- shape=param_shape,
231
- history=history,
232
- name=param,
233
- is_required=is_required,
234
- )
235
- section = section.add_new_section('end-structure')
236
- if len(history) > 1:
237
- section.style.dedent()
238
- section.style.new_line()
239
-
240
- def _add_member_documentation(
241
- self,
242
- section,
243
- shape,
244
- name=None,
245
- is_top_level_param=False,
246
- is_required=False,
247
- **kwargs,
248
- ):
249
- py_type = self._get_special_py_type_name(shape)
250
- if py_type is None:
251
- py_type = py_type_name(shape.type_name)
252
- if is_top_level_param:
253
- type_section = section.add_new_section('param-type')
254
- type_section.write(f':type {name}: {py_type}')
255
- end_type_section = type_section.add_new_section('end-param-type')
256
- end_type_section.style.new_line()
257
- name_section = section.add_new_section('param-name')
258
- name_section.write(':param %s: ' % name)
259
-
260
- else:
261
- name_section = section.add_new_section('param-name')
262
- name_section.write('- ')
263
- if name is not None:
264
- name_section.style.bold('%s' % name)
265
- name_section.write(' ')
266
- type_section = section.add_new_section('param-type')
267
- self._document_non_top_level_param_type(type_section, shape)
268
-
269
- if is_required:
270
- is_required_section = section.add_new_section('is-required')
271
- is_required_section.style.indent()
272
- is_required_section.style.bold('[REQUIRED]')
273
- is_required_section.write(' ')
274
- if shape.documentation:
275
- documentation_section = section.add_new_section(
276
- 'param-documentation'
277
- )
278
- documentation_section.style.indent()
279
- if getattr(shape, 'is_tagged_union', False):
280
- tagged_union_docs = section.add_new_section(
281
- 'param-tagged-union-docs'
282
- )
283
- note = (
284
- '.. note::'
285
- ' This is a Tagged Union structure. Only one of the '
286
- ' following top level keys can be set: %s. '
287
- )
288
- tagged_union_members_str = ', '.join(
289
- ['``%s``' % key for key in shape.members.keys()]
290
- )
291
- tagged_union_docs.write(note % (tagged_union_members_str))
292
- documentation_section.include_doc_string(shape.documentation)
293
- self._add_special_trait_documentation(documentation_section, shape)
294
- end_param_section = section.add_new_section('end-param')
295
- end_param_section.style.new_paragraph()
296
-
297
- def _add_special_trait_documentation(self, section, shape):
298
- if 'idempotencyToken' in shape.metadata:
299
- self._append_idempotency_documentation(section)
300
-
301
- def _append_idempotency_documentation(self, section):
302
- docstring = 'This field is autopopulated if not provided.'
303
- section.write(docstring)
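The documenters above all rely on one idea: recursively walk a shape tree, opening an indented subsection for each nested structure, list, or map member. A minimal, self-contained sketch of that traversal pattern follows; the `Shape` dataclass here is a hypothetical stand-in for illustration, not botocore's actual shape model.

    # Sketch of the recursive shape traversal used by the documenters above.
    # `Shape` is a hypothetical stand-in, not botocore's shape class.
    from dataclasses import dataclass, field
    from typing import Dict, Optional

    @dataclass
    class Shape:
        type_name: str                       # 'structure', 'list', or a scalar
        members: Dict[str, "Shape"] = field(default_factory=dict)
        member: Optional["Shape"] = None     # element shape for lists

    def document(shape: Shape, name: str = "", indent: int = 0) -> str:
        pad = "  " * indent
        lines = [f"{pad}- {name or shape.type_name} ({shape.type_name})"]
        if shape.type_name == "structure":
            for member_name, member_shape in shape.members.items():
                lines.append(document(member_shape, member_name, indent + 1))
        elif shape.type_name == "list" and shape.member is not None:
            lines.append(document(shape.member, "item", indent + 1))
        return "\n".join(lines)

    # A structure with a scalar member and a list member:
    tags = Shape("list", member=Shape("string"))
    print(document(Shape("structure", {"Name": Shape("string"), "Tags": tags})))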
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/check.py DELETED
@@ -1,149 +0,0 @@
1
- """Validation of dependencies of packages
2
- """
3
-
4
- import logging
5
- from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple
6
-
7
- from pip._vendor.packaging.requirements import Requirement
8
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
9
-
10
- from pip._internal.distributions import make_distribution_for_install_requirement
11
- from pip._internal.metadata import get_default_environment
12
- from pip._internal.metadata.base import DistributionVersion
13
- from pip._internal.req.req_install import InstallRequirement
14
-
15
- logger = logging.getLogger(__name__)
16
-
17
-
18
- class PackageDetails(NamedTuple):
19
- version: DistributionVersion
20
- dependencies: List[Requirement]
21
-
22
-
23
- # Shorthands
24
- PackageSet = Dict[NormalizedName, PackageDetails]
25
- Missing = Tuple[NormalizedName, Requirement]
26
- Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement]
27
-
28
- MissingDict = Dict[NormalizedName, List[Missing]]
29
- ConflictingDict = Dict[NormalizedName, List[Conflicting]]
30
- CheckResult = Tuple[MissingDict, ConflictingDict]
31
- ConflictDetails = Tuple[PackageSet, CheckResult]
32
-
33
-
34
- def create_package_set_from_installed() -> Tuple[PackageSet, bool]:
35
- """Converts a list of distributions into a PackageSet."""
36
- package_set = {}
37
- problems = False
38
- env = get_default_environment()
39
- for dist in env.iter_installed_distributions(local_only=False, skip=()):
40
- name = dist.canonical_name
41
- try:
42
- dependencies = list(dist.iter_dependencies())
43
- package_set[name] = PackageDetails(dist.version, dependencies)
44
- except (OSError, ValueError) as e:
45
- # Don't crash on unreadable or broken metadata.
46
- logger.warning("Error parsing requirements for %s: %s", name, e)
47
- problems = True
48
- return package_set, problems
49
-
50
-
51
- def check_package_set(
52
- package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None
53
- ) -> CheckResult:
54
- """Check if a package set is consistent
55
-
56
- If should_ignore is passed, it should be a callable that takes a
57
- package name and returns a boolean.
58
- """
59
-
60
- missing = {}
61
- conflicting = {}
62
-
63
- for package_name, package_detail in package_set.items():
64
- # Info about dependencies of package_name
65
- missing_deps: Set[Missing] = set()
66
- conflicting_deps: Set[Conflicting] = set()
67
-
68
- if should_ignore and should_ignore(package_name):
69
- continue
70
-
71
- for req in package_detail.dependencies:
72
- name = canonicalize_name(req.name)
73
-
74
- # Check if it's missing
75
- if name not in package_set:
76
- missed = True
77
- if req.marker is not None:
78
- missed = req.marker.evaluate({"extra": ""})
79
- if missed:
80
- missing_deps.add((name, req))
81
- continue
82
-
83
- # Check if there's a conflict
84
- version = package_set[name].version
85
- if not req.specifier.contains(version, prereleases=True):
86
- conflicting_deps.add((name, version, req))
87
-
88
- if missing_deps:
89
- missing[package_name] = sorted(missing_deps, key=str)
90
- if conflicting_deps:
91
- conflicting[package_name] = sorted(conflicting_deps, key=str)
92
-
93
- return missing, conflicting
94
-
95
-
96
- def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:
97
- """For checking if the dependency graph would be consistent after \
98
- installing given requirements
99
- """
100
- # Start from the current state
101
- package_set, _ = create_package_set_from_installed()
102
- # Install packages
103
- would_be_installed = _simulate_installation_of(to_install, package_set)
104
-
105
- # Only warn about directly-dependent packages; create a whitelist of them
106
- whitelist = _create_whitelist(would_be_installed, package_set)
107
-
108
- return (
109
- package_set,
110
- check_package_set(
111
- package_set, should_ignore=lambda name: name not in whitelist
112
- ),
113
- )
114
-
115
-
116
- def _simulate_installation_of(
117
- to_install: List[InstallRequirement], package_set: PackageSet
118
- ) -> Set[NormalizedName]:
119
- """Computes the version of packages after installing to_install."""
120
- # Keep track of packages that were installed
121
- installed = set()
122
-
123
- # Modify it as installing requirement_set would (assuming no errors)
124
- for inst_req in to_install:
125
- abstract_dist = make_distribution_for_install_requirement(inst_req)
126
- dist = abstract_dist.get_metadata_distribution()
127
- name = dist.canonical_name
128
- package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
129
-
130
- installed.add(name)
131
-
132
- return installed
133
-
134
-
135
- def _create_whitelist(
136
- would_be_installed: Set[NormalizedName], package_set: PackageSet
137
- ) -> Set[NormalizedName]:
138
- packages_affected = set(would_be_installed)
139
-
140
- for package_name in package_set:
141
- if package_name in packages_affected:
142
- continue
143
-
144
- for req in package_set[package_name].dependencies:
145
- if canonicalize_name(req.name) in packages_affected:
146
- packages_affected.add(package_name)
147
- break
148
-
149
- return packages_affected
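For context, a minimal usage sketch of how these helpers fit together. pip's internals are not a public API, so treat the import path as illustrative and subject to change.

    # Usage sketch for the consistency checkers above; illustrative only,
    # since pip._internal is not a public API.
    from pip._internal.operations.check import (
        check_package_set,
        create_package_set_from_installed,
    )

    package_set, parse_problems = create_package_set_from_installed()
    missing, conflicting = check_package_set(package_set)

    for name, deps in missing.items():
        for dep_name, req in deps:
            print(f"{name} requires {dep_name}, which is not installed ({req})")
    for name, conflicts in conflicting.items():
        for dep_name, version, req in conflicts:
            print(f"{name} has {dep_name} {version}, but requires {req}")

This mirrors what `pip check` reports: unmet dependencies, plus installed versions that fall outside a requirement's specifier.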
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/_log.py DELETED
@@ -1,38 +0,0 @@
1
- """Customize logging
2
-
3
- Defines custom logger class for the `logger.verbose(...)` method.
4
-
5
- init_logging() must be called before any other modules that call logging.getLogger.
6
- """
7
-
8
- import logging
9
- from typing import Any, cast
10
-
11
- # custom log level for `--verbose` output
12
- # between DEBUG and INFO
13
- VERBOSE = 15
14
-
15
-
16
- class VerboseLogger(logging.Logger):
17
- """Custom Logger, defining a verbose log-level
18
-
19
- VERBOSE is between INFO and DEBUG.
20
- """
21
-
22
- def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None:
23
- return self.log(VERBOSE, msg, *args, **kwargs)
24
-
25
-
26
- def getLogger(name: str) -> VerboseLogger:
27
- """logging.getLogger, but ensures our VerboseLogger class is returned"""
28
- return cast(VerboseLogger, logging.getLogger(name))
29
-
30
-
31
- def init_logging() -> None:
32
- """Register our VerboseLogger and VERBOSE log level.
33
-
34
- Should be called before any calls to getLogger(),
35
- i.e. in pip._internal.__init__
36
- """
37
- logging.setLoggerClass(VerboseLogger)
38
- logging.addLevelName(VERBOSE, "VERBOSE")
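A minimal usage sketch for the module above; as the docstring notes, init_logging() must run before any getLogger() call so the custom logger class is registered. The import path assumes pip's internal layout, which is not a public API.

    # Usage sketch; pip._internal is not a public API, illustrative only.
    import logging

    from pip._internal.utils._log import VERBOSE, getLogger, init_logging

    init_logging()                      # register VerboseLogger and VERBOSE
    logging.basicConfig(level=VERBOSE)  # let VERBOSE-level records through
    logger = getLogger(__name__)
    logger.verbose("shown at level %d, between DEBUG and INFO", VERBOSE)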
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/retry.py DELETED
@@ -1,272 +0,0 @@
1
- # Copyright 2016–2021 Julien Danjou
2
- # Copyright 2016 Joshua Harlow
3
- # Copyright 2013-2014 Ray Holder
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
17
- import abc
18
- import re
19
- import typing
20
-
21
- if typing.TYPE_CHECKING:
22
- from pip._vendor.tenacity import RetryCallState
23
-
24
-
25
- class retry_base(abc.ABC):
26
- """Abstract base class for retry strategies."""
27
-
28
- @abc.abstractmethod
29
- def __call__(self, retry_state: "RetryCallState") -> bool:
30
- pass
31
-
32
- def __and__(self, other: "retry_base") -> "retry_all":
33
- return retry_all(self, other)
34
-
35
- def __or__(self, other: "retry_base") -> "retry_any":
36
- return retry_any(self, other)
37
-
38
-
39
- RetryBaseT = typing.Union[retry_base, typing.Callable[["RetryCallState"], bool]]
40
-
41
-
42
- class _retry_never(retry_base):
43
- """Retry strategy that never rejects any result."""
44
-
45
- def __call__(self, retry_state: "RetryCallState") -> bool:
46
- return False
47
-
48
-
49
- retry_never = _retry_never()
50
-
51
-
52
- class _retry_always(retry_base):
53
- """Retry strategy that always rejects any result."""
54
-
55
- def __call__(self, retry_state: "RetryCallState") -> bool:
56
- return True
57
-
58
-
59
- retry_always = _retry_always()
60
-
61
-
62
- class retry_if_exception(retry_base):
63
- """Retry strategy that retries if an exception verifies a predicate."""
64
-
65
- def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None:
66
- self.predicate = predicate
67
-
68
- def __call__(self, retry_state: "RetryCallState") -> bool:
69
- if retry_state.outcome is None:
70
- raise RuntimeError("__call__() called before outcome was set")
71
-
72
- if retry_state.outcome.failed:
73
- exception = retry_state.outcome.exception()
74
- if exception is None:
75
- raise RuntimeError("outcome failed but the exception is None")
76
- return self.predicate(exception)
77
- else:
78
- return False
79
-
80
-
81
- class retry_if_exception_type(retry_if_exception):
82
- """Retries if an exception has been raised of one or more types."""
83
-
84
- def __init__(
85
- self,
86
- exception_types: typing.Union[
87
- typing.Type[BaseException],
88
- typing.Tuple[typing.Type[BaseException], ...],
89
- ] = Exception,
90
- ) -> None:
91
- self.exception_types = exception_types
92
- super().__init__(lambda e: isinstance(e, exception_types))
93
-
94
-
95
- class retry_if_not_exception_type(retry_if_exception):
96
- """Retries except an exception has been raised of one or more types."""
97
-
98
- def __init__(
99
- self,
100
- exception_types: typing.Union[
101
- typing.Type[BaseException],
102
- typing.Tuple[typing.Type[BaseException], ...],
103
- ] = Exception,
104
- ) -> None:
105
- self.exception_types = exception_types
106
- super().__init__(lambda e: not isinstance(e, exception_types))
107
-
108
-
109
- class retry_unless_exception_type(retry_if_exception):
110
- """Retries until an exception is raised of one or more types."""
111
-
112
- def __init__(
113
- self,
114
- exception_types: typing.Union[
115
- typing.Type[BaseException],
116
- typing.Tuple[typing.Type[BaseException], ...],
117
- ] = Exception,
118
- ) -> None:
119
- self.exception_types = exception_types
120
- super().__init__(lambda e: not isinstance(e, exception_types))
121
-
122
- def __call__(self, retry_state: "RetryCallState") -> bool:
123
- if retry_state.outcome is None:
124
- raise RuntimeError("__call__() called before outcome was set")
125
-
126
- # always retry if no exception was raised
127
- if not retry_state.outcome.failed:
128
- return True
129
-
130
- exception = retry_state.outcome.exception()
131
- if exception is None:
132
- raise RuntimeError("outcome failed but the exception is None")
133
- return self.predicate(exception)
134
-
135
-
136
- class retry_if_exception_cause_type(retry_base):
137
- """Retries if any of the causes of the raised exception is of one or more types.
138
-
139
- The check on the type of the cause of the exception is done recursively (until finding
140
- an exception in the chain that has no `__cause__`)
141
- """
142
-
143
- def __init__(
144
- self,
145
- exception_types: typing.Union[
146
- typing.Type[BaseException],
147
- typing.Tuple[typing.Type[BaseException], ...],
148
- ] = Exception,
149
- ) -> None:
150
- self.exception_cause_types = exception_types
151
-
152
- def __call__(self, retry_state: "RetryCallState") -> bool:
153
- if retry_state.outcome is None:
154
- raise RuntimeError("__call__ called before outcome was set")
155
-
156
- if retry_state.outcome.failed:
157
- exc = retry_state.outcome.exception()
158
- while exc is not None:
159
- if isinstance(exc.__cause__, self.exception_cause_types):
160
- return True
161
- exc = exc.__cause__
162
-
163
- return False
164
-
165
-
166
- class retry_if_result(retry_base):
167
- """Retries if the result verifies a predicate."""
168
-
169
- def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
170
- self.predicate = predicate
171
-
172
- def __call__(self, retry_state: "RetryCallState") -> bool:
173
- if retry_state.outcome is None:
174
- raise RuntimeError("__call__() called before outcome was set")
175
-
176
- if not retry_state.outcome.failed:
177
- return self.predicate(retry_state.outcome.result())
178
- else:
179
- return False
180
-
181
-
182
- class retry_if_not_result(retry_base):
183
- """Retries if the result refutes a predicate."""
184
-
185
- def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
186
- self.predicate = predicate
187
-
188
- def __call__(self, retry_state: "RetryCallState") -> bool:
189
- if retry_state.outcome is None:
190
- raise RuntimeError("__call__() called before outcome was set")
191
-
192
- if not retry_state.outcome.failed:
193
- return not self.predicate(retry_state.outcome.result())
194
- else:
195
- return False
196
-
197
-
198
- class retry_if_exception_message(retry_if_exception):
199
- """Retries if an exception message equals or matches."""
200
-
201
- def __init__(
202
- self,
203
- message: typing.Optional[str] = None,
204
- match: typing.Optional[str] = None,
205
- ) -> None:
206
- if message and match:
207
- raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both")
208
-
209
- # set predicate
210
- if message:
211
-
212
- def message_fnc(exception: BaseException) -> bool:
213
- return message == str(exception)
214
-
215
- predicate = message_fnc
216
- elif match:
217
- prog = re.compile(match)
218
-
219
- def match_fnc(exception: BaseException) -> bool:
220
- return bool(prog.match(str(exception)))
221
-
222
- predicate = match_fnc
223
- else:
224
- raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'")
225
-
226
- super().__init__(predicate)
227
-
228
-
229
- class retry_if_not_exception_message(retry_if_exception_message):
230
- """Retries until an exception message equals or matches."""
231
-
232
- def __init__(
233
- self,
234
- message: typing.Optional[str] = None,
235
- match: typing.Optional[str] = None,
236
- ) -> None:
237
- super().__init__(message, match)
238
- # invert predicate
239
- if_predicate = self.predicate
240
- self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
241
-
242
- def __call__(self, retry_state: "RetryCallState") -> bool:
243
- if retry_state.outcome is None:
244
- raise RuntimeError("__call__() called before outcome was set")
245
-
246
- if not retry_state.outcome.failed:
247
- return True
248
-
249
- exception = retry_state.outcome.exception()
250
- if exception is None:
251
- raise RuntimeError("outcome failed but the exception is None")
252
- return self.predicate(exception)
253
-
254
-
255
- class retry_any(retry_base):
256
- """Retries if any of the retries condition is valid."""
257
-
258
- def __init__(self, *retries: retry_base) -> None:
259
- self.retries = retries
260
-
261
- def __call__(self, retry_state: "RetryCallState") -> bool:
262
- return any(r(retry_state) for r in self.retries)
263
-
264
-
265
- class retry_all(retry_base):
266
- """Retries if all the retries condition are valid."""
267
-
268
- def __init__(self, *retries: retry_base) -> None:
269
- self.retries = retries
270
-
271
- def __call__(self, retry_state: "RetryCallState") -> bool:
272
- return all(r(retry_state) for r in self.retries)
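A minimal usage sketch of the predicates above, combined with tenacity's @retry decorator and stop_after_attempt (assumed to be available as in the upstream tenacity package; this module only defines the retry predicates). The `|` combinator below is the retry_base.__or__ defined above.

    # Usage sketch; assumes the upstream `tenacity` package for @retry and
    # stop_after_attempt, which are not defined in this module.
    from tenacity import retry, stop_after_attempt
    from tenacity import retry_if_exception_message, retry_if_exception_type

    @retry(
        retry=(
            retry_if_exception_type(ConnectionError)          # type-based
            | retry_if_exception_message(match=r"temporar")   # message-based
        ),
        stop=stop_after_attempt(3),
    )
    def fetch() -> bytes:
        raise ConnectionError("temporarily unavailable")

    # fetch() is attempted three times; the final error then propagates
    # (wrapped in tenacity's RetryError unless reraise=True is passed).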
spaces/Binettebob22/fast_diffusion2/app.py DELETED
@@ -1,956 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
-
6
- models = [
7
- "Yntec/GalenaVAE",
8
- "Yntec/a-ZovyaRemix",
9
- "Yntec/a-ZovyaRPGV3VAE",
10
- "Yntec/a-ZoviaRPGArtistV2VAE",
11
- "Yntec/GameAssetsDigitalUnitsCreationKit",
12
- "Yntec/InsaneRealisticCVAE",
13
- "Yntec/Lunar",
14
- "Yntec/LunarLuma",
15
- "Yntec/QToriReloaded",
16
- "Yntec/Chik2",
17
- "Yntec/InsaneM3U",
18
- "Yntec/DucHaiten-StyleLikeMeVAE",
19
- "Yntec/CuteYuki2",
20
- "Yntec/Luma",
21
- "Yntec/Noosphere_v3_CVAE",
22
- "Yntec/RealRainbows",
23
- "Yntec/Ninja-Diffusers",
24
- "Yntec/ChildrenStoriesAnime",
25
- "Yntec/theallysMixIV-verisimilar",
26
- "Yntec/DucHaitenAnime768",
27
- "Yntec/RainbowClassicAnime",
28
- "Yntec/DucHaitenClassicAnime768",
29
- "Yntec/WesternAnimation",
30
- "Yntec/GOLDFish",
31
- "Yntec/NeverExisted",
32
- "Yntec/Rainbowsphere",
33
- "Yntec/DreamAnything",
34
- "Yntec/Dreamsphere",
35
- "Yntec/Photosphere",
36
- "Yntec/yabalMixTrue25D_v2_VAE",
37
- "dreamlike-art/dreamlike-anime-1.0",
38
- "Yntec/RainbowDreams",
39
- "dreamlike-art/dreamlike-photoreal-2.0",
40
- "Yntec/rainbowpatch",
41
- "Yntec/DucHaiten-Retro-Diffusers",
42
- "Yntec/ElldrethsRetroMix_Diffusers",
43
- "Yntec/sexyToons",
44
- "digiplay/wantan25D_prototype",
45
- "digiplay/PotoPhotoRealism_v1",
46
- "digiplay/LunarDiffusion_v1.27",
47
- "digiplay/insaneRealistic_v1",
48
- "digiplay/OLDFish_2348_diffusers",
49
- "DucHaiten/DucHaitenDreamWorld",
50
- "digiplay/LemonteaMixPainterly2_v1",
51
- "digiplay/SweetMuse_diffusers",
52
- "dreamlike-art/dreamlike-diffusion-1.0",
53
- "digiplay/Realisian_v1",
54
- "Hius/DreamFul-V2",
55
- "digiplay/m3u", #263
56
- "digiplay/RMHF_2.5D_v2",
57
- "digiplay/FishMix_v1.1",
58
- "stablediffusionapi/icomix-2",
59
- "digiplay/Remedy",
60
- "Hemlok/QuinceMix",
61
- "digiplay/K-main",
62
- "digiplay/LusterMix_v1.5_safetensors", #256
63
- "digiplay/perfectLewdFantasy_v1.01",
64
- "digiplay/Opiate_v2",
65
- "digiplay/PhotoSomnia_vFinal",
66
- "Yntec/KIDSILLUSTRATIONS",
67
- "digiplay/polla_mix_2.5D",
68
- "Yntec/COOLKIDSV2",
69
- "Yntec/Pavo-Mix-Diffusers",
70
- "Yntec/RPG_Remix",
71
- "Yntec/OrangeRemix",
72
- "Yntec/PeachMix3",
73
- "Yntec/DucHaitenAIart-beta",
74
- "stablediffusionapi/all-526-animated",
75
- "AstraliteHeart/pony-diffusion",
76
- "stablediffusionapi/chilloutmixsf",
77
- "Masagin/Deliberate", #235
78
- "DucHaiten/DucHaitenSuperCute",
79
- "stablediffusionapi/all-526",
80
- "theintuitiveye/HARDblend",
81
- "stablediffusionapi/cusp-of-serenity",
82
- "stablediffusionapi/cyberrealistic",
83
- "SG161222/Realistic_Vision_V1.4",
84
- "digiplay/paulEberSRealismMix_v1",
85
- "Ojimi/anime-kawai-diffusion",
86
- "hassanblend/hassanblend1.4",
87
- "digiplay/zodiac_eclipse_DAY1",
88
- "LottePeisch/RevAnimated-Diffusers",
89
- "claudfuen/photorealistic-fuen-v1",
90
- "stablediffusionapi/chillout-app-factory",
91
- "DucHaiten/DucHaitenJourney",
92
- "robotjung/SemiRealMix",
93
- "Joeythemonster/anything-midjourney-v-4-1",
94
- "prompthero/midjourney-v4-diffusion",
95
- "prompthero/openjourney-v4",
96
- "x67/shortjourney",
97
- "darkstorm2150/Protogen_v2.2_Official_Release",
98
- "FredZhang7/paint-journey-v2",
99
- "digiplay/PersonaStyleCheckpoint",
100
- "darkstorm2150/Protogen_Infinity_Official_Release",
101
- "PeggyWang/openjourney-v2",
102
- "darkstorm2150/Protogen_x3.4_Official_Release",
103
- "stablediffusionapi/deliberateappfactory", #236
104
- "digiplay/CrossoverMix_v2",
105
- "stablediffusionapi/spybg",
106
- "stablediffusionapi/dreamshaper-v6", #239
107
- "stablediffusionapi/the-ally",
108
- "darkstorm2150/Protogen_x5.8_Official_Release",
109
- "coreco/seek.art_MEGA",
110
- "digiplay/BlankCanvas_v1", #07.11
111
- "digiplay/OnlyAnime_v2.3",
112
- "Korakoe/OpenNiji",
113
- "digiplay/Photon_v1",
114
- "digiplay/Pika_v2",
115
- "digiplay/RealCartoon3D_F16full_v3.1", #254
116
- "digiplay/realidefmix_3.5VAE",
117
- "digiplay/realmixUnrealjourney_v1",
118
- "digiplay/SyncMix_v1.5",
119
- "digiplay/TWingshadow_v1.2",
120
- "digiplay/V3_by_Hans_Asian",
121
- "digiplay/whatamix_v1",
122
-
123
- "digiplay/2K", #216
124
- "digiplay/AIGEN_v1.4_diffusers",
125
- "digiplay/BrickAndMortarMix_v2.0_diffusers", #224
126
- "digiplay/BeautyFool_v1.2VAE_pruned",
127
- "digiplay/breakdomainrealistic_R2333",
128
- "digiplay/CCTV2.5d_v1", #219
129
- "digiplay/ChikMix_V3", #253
130
- "stablediffusionapi/chilledremixsazyou-r", #195
131
- "digiplay/CityEdge_StyleMix_v1.44",
132
- "stablediffusionapi/dalcefopainting2", #199
133
- "digiplay/EdisonNilMix_v1", #07.10
134
- "digiplay/DiamondCoalMix_v2_pruned_diffusers",
135
- "digiplay/DreamShaper_7", #259
136
- "digiplay/elegantEntropy_v1.1", #221
137
- "digiplay/EtherRealMix_LUX2",
138
- "digiplay/KawaiiRealisticAnimeMix_A0.3",
139
- "digiplay/highQualityCGMIX_v1",
140
- "digiplay/HIMAWARI_v1",
141
- "digiplay/Hodgepodge_v2.1", #217
142
- "digiplay/illustro1stEdition_illustroV1", #214
143
- "digiplay/Juggernaut_final", #07.11
144
- "digiplay/Landscape_PhotoReal_v1",
145
- "digiplay/LuckyStrikeMix0.2Realistic", #07.10
146
- "digiplay/Matrix_Stellar_VAE_v1",
147
- "digiplay/PrefixRealisticMix_v1",
148
- "digiplay/RealEpicMajicRevolution_v1", #07.11
149
- "digiplay/ShampooMix_4", #252
150
- "digiplay/SoapMix2.5D_v1",
151
- "digiplay/ZemiHR_v2_diffusers",
152
-
153
- "Redamancy2299/dreambooth",
154
- "Lykon/DreamShaper", #240
155
- "trysem/DreamShaper-3.3",
156
- "HusseinHE/hussein-deliberate-1000steps", #237
157
- "stablediffusionapi/majicmixfantasy",
158
- "stablediffusionapi/majicmixsombre", #247
159
- "wavymulder/modelshoot",
160
- "digiplay/ChillyMix_v1", #215
161
- "stablediffusionapi/foto-assisted-diffusion", #197
162
- "wavymulder/portraitplus",
163
- "stablediffusionapi/chilloutmix-4264",
164
- "stablediffusionapi/product-design", #194
165
- "kandinsky-community/kandinsky-2-1", #251
166
-
167
- "digiplay/2.5DSET_diffusers", #227
168
- "digiplay/2-KWI", #213
169
- "digiplay/alstroemeriaMix_v1",
170
- "wavymulder/Analog-Diffusion",
171
- "digiplay/AniRealityMix_v1", #257
172
- "digiplay/ARRealVX1.1",
173
- "digiplay/BadAnime_v1",
174
- "digiplay/BasilKorea_v2", #07.11
175
- "digiplay/bluePencilRealistic_v01",
176
- "digiplay/bra_v40_diffusers",
177
- "digiplay/Burger_Mix_semiR2Lite", #222
178
- "digiplay/calicomixreal_v2.0_diffusers",
179
- "digiplay/CampurSari_Gen1",
180
- "digiplay/cocotifacute_v1", #07.10
181
- "digiplay/cosfMix_v1", #223
182
- "digiplay/CounterMix_v2", #211
183
- "digiplay/CuriousMerge2.5D_v5",
184
- "digiplay/dosmix",
185
- "digiplay/epi_2.5Dphotogodess_diffusers",
186
- "stablediffusionapi/droodlyrielv15",
187
- "digiplay/fantexi_v0.7",
188
- "digiplay/fishmix_other_v1",
189
- "digiplay/FormCleansingMix_v1", #228
190
- "digiplay/FumizukiMix_v1",
191
- "digiplay/helloworld_v3",
192
- "digiplay/HenmixArt_v1",
193
- "digiplay/ISOmix_v3.22",
194
- "digiplay/kencanmix_v2.0beta",
195
- "wavymulder/lomo-diffusion",
196
- "stablediffusionapi/majicmixv5", #192
197
- "digiplay/mecha_musume_vivid_soft",
198
- "digiplay/MiracleMixGlitter_v1",
199
- "digiplay/MixTape_RocknRoll_v3punk_bake_fp16",
200
- "digiplay/NextPhoto_v1",
201
- "digiplay/Noosphere_v3",
202
- "digiplay/nk15_diffusers", #230
203
- "digiplay/PeachMixsRelistic_R0", #262
204
- "wavymulder/timeless-diffusion",
205
- "digiplay/WhiteDreamyHillMix_v1", #220
206
- "digiplay/ya3p_VAE", #258
207
-
208
- "DucHaiten/DucHaitenAnime",
209
- "DucHaiten/DucHaitenAIart",
210
- "Manseo/Colorful-v4.5-Plus", #244
211
- "Guizmus/SDArt_ChaosAndOrder",
212
- "DucHaiten/DH_ClassicAnime",
213
- "stablediffusionapi/disneypixar",
214
- "johnslegers/epic-diffusion-v1.1",
215
- "emilianJR/epiCRealism",
216
- "johnslegers/epic-diffusion",
217
- "digiplay/endlessMixRenatus_v1.1", #07.10
218
- "digiplay/fantasticAnime_diffusers",
219
- "stablediffusionapi/ghostmix",
220
- "Duskfallcrew/EpicMix_Realism",
221
- "nitrosocke/Nitro-Diffusion",
222
- "prompthero/openjourney",
223
- "Guizmus/SDArt_something",
224
- "DucHaiten/DucHaiten-StyleLikeMe",
225
- "ddPn08/subtly", #250
226
- "22h/vintedois-diffusion-v0-1",
227
-
228
- "circulus/sd-anireal-v2.7",
229
- "0xJustin/Dungeons-and-Diffusion",
230
- "Guizmus/SDArt_AliceInDiffusionLand",
231
- "stablediffusionapi/realistic-vision-v20-2047",
232
- "redstonehero/RPG-v5-itr17_A10T",
233
-
234
- "stablediffusionapi/camelliamix25d",
235
- "Guizmus/SDArt_cosmichorrors",
236
- "DGSpitzer/DGSpitzer-Art-Diffusion",
237
- "stablediffusionapi/emotion-puppeteer-v2",
238
- "stablediffusionapi/fengjing",
239
- "stablediffusionapi/fuwafuwamix",
240
- "Fred99774/girlnew1",
241
- "stablediffusionapi/majicmixrealistic",
242
- "badmonk/nxka",
243
- "ItsJayQz/SynthwavePunk-v2",
244
- "zhyemmmm/ToonYou",
245
- "stablediffusionapi/uber-realistic-merge",
-     "stablediffusionapi/vne732h9dh4",
-     "stablediffusionapi/wand-magic2",
-     "stablediffusionapi/waifu-journey-2",
-     "stablediffusionapi/zovya",
-
-     "Guizmus/SDArt_cosmichorrors768",
-     "stablediffusionapi/counterfeit-v30",
-     "stablediffusionapi/amireal",
-     #"JamesFlare/pastel-mix", #"andite/pastel-mix",
-     "stablediffusionapi/rev-anim",
-     "aipicasso/picasso-diffusion-1-1",
-     "xiaolxl/Gf_style2",
-     "circulus/sd-semireal-v2.8",
-     "Crosstyan/BPModel", #07.11
-
-     "digiplay/Dusk-1",
-     "ogkalu/Comic-Diffusion",
-     "Guizmus/SDArt_ChaosAndOrder768",
-     "gsdf/Counterfeit-V2.0",
-     "dwancin/memoji", #07.11
-     "nousr/robo-diffusion-2-base",
-
-     ##"hakurei/waifu-diffusion",
-     "WarriorMama777/AbyssOrangeMix2",
-     "stablediffusionapi/abyssorangemix2nsfw", #200
-     "cag/anything-v3-1",
-     "iZELX1/Anything-V3-X",
-     "xyn-ai/anything-v4.0", #"andite/anything-v4.0",
-     "D1b4l4p/AsianMix",
-     "Fred99774/chilloutvlara",
-     "aipicasso/cool-japan-diffusion-2-1-2",
-     "stablediffusionapi/corneos-7th-heaven-m", #196
-     "DGSpitzer/Cyberpunk-Anime-Diffusion",
-     "stablediffusionapi/dark-sushi-mix",
-     "joachimsallstrom/Double-Exposure-Diffusion",
-     "eimiss/EimisAnimeDiffusion_1.0v",
-     "prompthero/funko-diffusion",
-     "nitrosocke/Ghibli-Diffusion",
-     ###"iZELX1/Grapefruit",
-     "xiaolxl/GuoFeng3",
-     "stablediffusionapi/tmnd-mix",
-     "coder119/Vectorartz_Diffusion", #203
-
-     "WarriorMama777/AbyssOrangeMix",
-     "AIARTCHAN/7pa",
-     "JosephusCheung/ACertainModel",
-     "JosephusCheung/ACertainThing",
-     "AIARTCHAN/AbyssHellHero",
-     "JosephusCheung/ACertainty",
-     "AIARTCHAN/AbyssHellVer3",
-     "AIARTCHAN/AbyssMapleVer3",
-     "stablediffusionapi/abyssorangemixsfw",
-     "AIARTCHAN/anidosmixV2",
-     "stablediffusionapi/anime-model-v2",
-     "kubanemil/AnyLORA",
-     "stablediffusionapi/hc-anything-v3-vae", #231
-     "mm00/anything-v3.0-light",
-     "stablediffusionapi/anythingelse-v4",
-     "stablediffusionapi/anything-v45-fixed",
-     "stablediffusionapi/anything-v5",
-     "nitrosocke/Arcane-Diffusion",
-     "nitrosocke/archer-diffusion",
-     "stablediffusionapi/architecture-tuned-model",
-     "WarriorMama777/BloodOrangeMix",
-     "wavymulder/collage-diffusion",
-     "stablediffusionapi/camelliamixline",
-     "digiplay/chrysanthemumMix_v1",
-     "digiplay/CiderMix_ciderR", #260
-     "Johnhex/Clam", #243
-     "stablediffusionapi/cosmic-babes",
-     "digiplay/CoffeeDonut_v1",
-     "stablediffusionapi/dark-sushi-25d",
-     "digiplay/Defacta_v1_diffusers", #226
-     ## "WarriorMama777/EerieOrangeMix",
-     "digiplay/DuelAnimeMix_v1", #225
-     "Envvi/Inkpunk-Diffusion",
-     "digiplay/kotosmix_diffusers", #229
-     "stablediffusionapi/meinaalter",
-     "Nacholmo/meinamixv7-diffusers",
-     "stablediffusionapi/meinapastel",
-     "AIARTCHAN/MIX-Pro-V4",
-     "Lykon/NeverEnding-Dream",
-     "stablediffusionapi/shirataki-mix", #191
-     "NoCrypt/SomethingV2_2",
-     "NoCrypt/SomethingV2",
-     "badmonk/sxzumi",
-     ## "stablediffusionapi/three-delicacy",
-     ## "stablediffusionapi/three-delicacy-wonto",
-     "etherealxx/systemy-csrmodel-cutesexyrobutts", #"andite/cutesexyrobutts-diffusion",
-     "sd-dreambooth-library/true-guweiz-style", # "andite/guweiz-diffusion",
-     "stablediffusionapi/vector-art", #198
-     "digiplay/xxMix_4",
-     ###"mio/hiten", #"andite/hiten-diffusion",
-     ### "andite/mashuu-diffusion",
-     ### "andite/mignon-diffusion",
-     ### "andite/mikapikazo-diffusion",
-     ### "andite/piromizu-diffusion",
-     "digiplay/Zevinemix_v1.0",
-
-     "digiplay/AnaMix_v2", #07.11
-     "stablediffusionapi/animetestmodelv3",
-     "yulet1de/anything", #232
-     "hakurei/artstation-diffusion", #07.11
-     "Fictiverse/Stable_Diffusion_BalloonArt_Model",
-     "stablediffusionapi/bg-dream-irl",
-     "stablediffusionapi/bg-dream-model-b", #193
-     "Rardilit/Ciffusion_v0.1",
-     "circulus/sd-anireal-2d-v2",
-     "circulus/sd-photoreal-v2.7",
-     "circulus/sd-photoreal-photo-v2",
-     "circulus/sd-anireal-2.5d-v2",
-     "circulus/sd-anireal-v2.5",
-     "circulus/sd-photoreal-semi-v2",
-     "circulus/sd-photoreal-real-v2",
-     "circulus/sd-photoreal-v2.5",
-     "circulus/sd-anireal-3d-v2",
-     "circulus/sd-anireal-v2.8",
-     "nitrosocke/classic-anim-diffusion",
-     "Conflictx/Complex-Lineart", #245
-     "sayakpaul/da-vinci-sd-pokemon",
-     "nitrosocke/elden-ring-diffusion",
-     "digiplay/EtherBluMix_1", #07.11
-     "digiplay/fantasticmix_v40_test", #261
-     "theintuitiveye/FantasyMix",
-     "Fictiverse/Stable_Diffusion_FluidArt_Model",
-     "nitrosocke/Future-Diffusion",
-     "ItsJayQz/GTA5_Artwork_Diffusion", #205
-     "digiplay/hellopure_v2.23",
-     "TheLastBen/hrrzg-style-768px", #246
-     "nevernotsean/IllustratedPaperMini", #242
-     "dallinmackay/JWST-Deep-Space-diffusion",
-     "prompthero/linkedin-diffusion",
-     "mann-e/mann-e_4_rev-0-1", #210
-     "ItsJayQz/Marvel_WhatIf_Diffusion", #206
-     "yuanbit/max-15-1e-6-1500",
-     "MyneFactory/MF-Base", #248
-     "Fictiverse/Stable_Diffusion_Microscopic_model", #249
-     "nitrosocke/mo-di-diffusion",
-     "luongphamit/NeverEnding-Dream2", #241
-     "lambdalabs/sd-naruto-diffusers", #201
-     "Vernon-2/output_test",
-     "Fictiverse/Stable_Diffusion_PaperCut_Model",
-     "bsuutari/path_to_saved_model",
-     "bsuutari/path_to_saved_model_rafa",
-     "digiplay/PlanetBumix_v1",
-     "lambdalabs/sd-pokemon-diffusers", #202
-     "prompthero/poolsuite-diffusion",
-     "digiplay/RealismEngine_v1",
-     "nitrosocke/redshift-diffusion",
-     "nitrosocke/redshift-diffusion-768",
-     "nousr/robo-diffusion",
-     "digiplay/SDVN1-Real_v1", #255
-     "nitrosocke/spider-verse-diffusion",
-     #"runwayml/stable-diffusion-v1-5",
-     "nicky007/stable-diffusion-logo-fine-tuned",
-     "stablediffusionapi/three-delicacy", #233
-     "stablediffusionapi/three-delicacy-wonto", #234
-     "naclbit/trinart_stable_diffusion_v2",
-     "dallinmackay/Tron-Legacy-diffusion",
-     "digiplay/unstableDiffusersYamerMIX_v3",
-     "dallinmackay/Van-Gogh-diffusion",
-     "ItsJayQz/Valorant_Diffusion",
-     "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204
-     "wavymulder/wavyfusion",
-     "CompVis/stable-diffusion-v1-3", #207
-     "CompVis/stable-diffusion-v1-2", #208
-     "CompVis/stable-diffusion-v1-1", #209
- ]
- current_model = models[0]
-
- text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
-
- models2 = [
-     gr.Interface.load(f"models/{models[i]}", live=True, preprocess=False)
-     for i in range(382)
- ]
- # Loads for models[382:390] are disabled.
-
- def text_it1(inputs, text_gen1=text_gen1):
-     go_t1 = text_gen1(inputs)
-     return go_t1
-
- def set_model(current_model):
-     current_model = models[current_model]
-     return gr.update(label=(f"{current_model}"))
-
- def send_it1(inputs, model_choice):
-     proc1 = models2[model_choice]
-     output1 = proc1(inputs)
-     return output1
-
- css = ""
-
- with gr.Blocks(css=css) as myface:
-     gr.HTML("""<!DOCTYPE html>
- <html lang="en">
- <head>
- <meta charset="utf-8" />
- <meta name="twitter:card" content="player"/>
- <meta name="twitter:site" content=""/>
- <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
- <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
- <meta name="twitter:player:width" content="100%"/>
- <meta name="twitter:player:height" content="600"/>
- <meta property="og:title" content="Embedded Live Viewer"/>
- <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
- <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
- <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
- </head>
- </html>
- """)
-     with gr.Row():
-         with gr.Tab("Title"):
-             gr.HTML(""" <title>Minimum Multiplier</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
-             <h1>Fill the Textbox at the top and click Generate Image</h1>
-             <br><h4>The first time you load a model, it takes 200 seconds</h4>
-             <br><h4>But after it loads, each image takes 20 seconds to generate!</h4>
-             """)
-         with gr.Tab("Description"):
-             gr.HTML("""<div style="text-align:center;">
-             <h4>As many Text-to-Image Models as I can fit here</h4><br>
-             <h4>Suggest more via the "Community" button</h4>
-             </div>""")
-         with gr.Tab("Tools"):
-             with gr.Tab("View"):
-                 with gr.Row():
-                     with gr.Column(style="width=50%, height=70%"):
-                         gr.Pil(label="Crop")
-                     with gr.Column(style="width=50%, height=70%"):
-                         gr.Pil(label="Crop")
-             with gr.Tab("Draw"):
-                 with gr.Column(style="width=50%, height=70%"):
-                     gr.Pil(label="Crop")
-                 with gr.Column(style="width=50%, height=70%"):
-                     gr.Pil(label="Draw")
-                 gr.ImagePaint(label="Draw")
-             with gr.Tab("Text"):
-                 with gr.Row():
-                     with gr.Column(scale=50):
-                         gr.Textbox(label="", lines=8, interactive=True)
-                     with gr.Column(scale=50):
-                         gr.Textbox(label="", lines=8, interactive=True)
-             with gr.Tab("Color Picker"):
-                 with gr.Row():
-                     with gr.Column(scale=50):
-                         gr.ColorPicker(label="Color", interactive=True)
-                     with gr.Column(scale=50):
-                         gr.ImagePaint(label="Draw", interactive=True)
-     with gr.Row():
-         with gr.Column(scale=100):
-             magic1 = gr.Textbox(lines=4)
-             run = gr.Button("Generate Image")
-     with gr.Row():
-         with gr.Column(scale=100):
-             # Model selection dropdown
-             model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
-     with gr.Row():
-         with gr.Column(style="width=800px"):
-             output1 = gr.Image(label=(f"{current_model}"))
-     with gr.Row():
-         with gr.Column(scale=50):
-             input_text = gr.Textbox(label="Prompt Idea", lines=2)
-             use_short = gr.Button("Use Short Prompt")
-             see_prompts = gr.Button("Extend Idea")
-
-     def short_prompt(inputs):
-         return inputs
-
-     model_name1.change(set_model, inputs=model_name1, outputs=[output1])
-     run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
-     use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
-     see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
-
- myface.queue(concurrency_count=200)
- myface.launch(inline=True, show_api=False, max_threads=400)
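For reference, the core pattern in this deleted app is: a prompt box, a dropdown declared with type="index" so it yields an integer, and a dispatch into the matching pre-loaded inference client. A minimal self-contained sketch of that pattern, assuming the Gradio 3.x API the original targets (gr.Interface.load, queue(concurrency_count=...)) and two arbitrarily chosen model ids from the list above:

import gradio as gr

demo_models = [
    "nitrosocke/Ghibli-Diffusion",
    "wavymulder/wavyfusion",
]

# One callable inference client per hosted model, loaded once at startup.
loaded = [gr.Interface.load(f"models/{m}", live=True, preprocess=False) for m in demo_models]

def generate(prompt, model_index):
    # The dropdown is declared with type="index", so it hands us an integer.
    return loaded[model_index](prompt)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    choice = gr.Dropdown(choices=demo_models, type="index", value=demo_models[0], label="Model")
    image = gr.Image(label="Result")
    gr.Button("Generate Image").click(generate, inputs=[prompt, choice], outputs=image)

demo.queue(concurrency_count=2)
demo.launch()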
 
spaces/Boadiwaa/Recipes/openai/api_resources/search.py DELETED
@@ -1,36 +0,0 @@
- import time
-
- from openai import util
- from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
- from openai.error import InvalidRequestError, TryAgain
-
-
- class Search(EngineAPIResource):
-     engine_required = False
-     OBJECT_NAME = "search"
-
-     @classmethod
-     def create(cls, *args, **kwargs):
-         """
-         Creates a new search for the provided input and parameters.
-
-         See https://beta.openai.com/docs/api-reference/search for a list
-         of valid parameters.
-         """
-
-         start = time.time()
-         timeout = kwargs.pop("timeout", None)
-         if kwargs.get("model", None) is None and kwargs.get("engine", None) is None:
-             raise InvalidRequestError(
-                 "Must provide an 'engine' or 'model' parameter to create a Search.",
-                 param="engine",
-             )
-
-         while True:
-             try:
-                 return super().create(*args, **kwargs)
-             except TryAgain as e:
-                 if timeout is not None and time.time() > start + timeout:
-                     raise
-
-                 util.log_info("Waiting for model to warm up", error=e)
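The create() override above is essentially a retry-until-deadline loop around the parent call: transient TryAgain errors are swallowed until either the call succeeds or a wall-clock timeout expires. A standalone sketch of that pattern, with a stand-in TryAgain exception (hypothetical names, not the openai package's internals):

import time

class TryAgain(Exception):
    """Transient failure; the operation may succeed if retried."""

def retry_until_deadline(fn, timeout=None, wait=1.0):
    """Call fn() repeatedly until it returns or the deadline passes."""
    start = time.time()
    while True:
        try:
            return fn()
        except TryAgain:
            if timeout is not None and time.time() > start + timeout:
                raise  # deadline exceeded: surface the last transient error
            time.sleep(wait)  # brief pause before the next attempt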
 
spaces/Boadiwaa/Recipes/openai/version.py DELETED
@@ -1 +0,0 @@
- VERSION = "0.19.0"
 
 
spaces/BoomerangGirl/MagicPrompt-Stable-Diffusion/app.py DELETED
@@ -1,54 +0,0 @@
- from transformers import pipeline, set_seed
- import gradio as grad, random, re
-
-
- gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
- with open("ideas.txt", "r") as f:
-     line = f.readlines()
-
-
- def generate(starting_text):
-     seed = random.randint(100, 1000000)
-     set_seed(seed)
-
-     if starting_text == "":
-         starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
-         starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
-
-     response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
-     response_list = []
-     for x in response:
-         resp = x['generated_text'].strip()
-         if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
-             response_list.append(resp+'\n')
-
-     response_end = "\n".join(response_list)
-     response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
-     response_end = response_end.replace("<", "").replace(">", "")
-
-     if response_end != "":
-         return response_end
-
-
- txt = grad.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
- out = grad.Textbox(lines=4, label="Generated Prompts")
-
- examples = []
- for x in range(8):
-     examples.append(line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize())
-
- title = "Stable Diffusion Prompt Generator"
- description = 'This is a demo of the model series: "MagicPrompt", in this case, aimed at: "Stable Diffusion". To use it, simply submit your text or click on one of the examples. To learn more about the model, [click here](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion).<br>'
-
- grad.Interface(fn=generate,
-                inputs=txt,
-                outputs=out,
-                examples=examples,
-                title=title,
-                description=description,
-                article='',
-                allow_flagging='never',
-                cache_examples=False,
-                theme="default").launch(enable_queue=True, debug=True)
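The same MagicPrompt checkpoint can be exercised without the Gradio wrapper. A minimal sketch, assuming only transformers is installed and mirroring the pipeline parameters used above (the seed and example prompt are arbitrary):

from transformers import pipeline, set_seed

pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
set_seed(42)  # any fixed seed makes the expansion reproducible

# Expand a short idea into richer Stable Diffusion prompts.
for candidate in pipe("A lighthouse at dusk", max_length=80, num_return_sequences=2):
    print(candidate['generated_text'].strip())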
 
spaces/CVPR/Demo-Balanced-MSE/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Demo Balanced MSE
- emoji: 🐨
- colorFrom: green
- colorTo: green
- sdk: gradio
- sdk_version: 2.8.14
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/CVPR/WALT/mmdet/models/roi_heads/base_roi_head.py DELETED
@@ -1,114 +0,0 @@
- from abc import ABCMeta, abstractmethod
-
- import torch.nn as nn
-
- from ..builder import build_shared_head
-
-
- class BaseRoIHead(nn.Module, metaclass=ABCMeta):
-     """Base class for RoIHeads."""
-
-     def __init__(self,
-                  bbox_roi_extractor=None,
-                  bbox_head=None,
-                  mask_roi_extractor=None,
-                  mask_head=None,
-                  gan_roi_extractor=None,
-                  gan_head=None,
-                  shared_head=None,
-                  train_cfg=None,
-                  test_cfg=None):
-         super(BaseRoIHead, self).__init__()
-         self.train_cfg = train_cfg
-         self.test_cfg = test_cfg
-         if shared_head is not None:
-             self.shared_head = build_shared_head(shared_head)
-
-         if bbox_head is not None:
-             self.init_bbox_head(bbox_roi_extractor, bbox_head)
-
-         if mask_head is not None:
-             self.init_mask_head(mask_roi_extractor, mask_head)
-
-         if gan_head is not None:
-             # Initialize the GAN branch with its own extractor and head.
-             self.init_gan_head(gan_roi_extractor, gan_head)
-
-         self.init_assigner_sampler()
-
-     @property
-     def with_bbox(self):
-         """bool: whether the RoI head contains a `bbox_head`"""
-         return hasattr(self, 'bbox_head') and self.bbox_head is not None
-
-     @property
-     def with_mask(self):
-         """bool: whether the RoI head contains a `mask_head`"""
-         return hasattr(self, 'mask_head') and self.mask_head is not None
-
-     @property
-     def with_shared_head(self):
-         """bool: whether the RoI head contains a `shared_head`"""
-         return hasattr(self, 'shared_head') and self.shared_head is not None
-
-     @abstractmethod
-     def init_weights(self, pretrained):
-         """Initialize the weights in head.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         pass
-
-     @abstractmethod
-     def init_bbox_head(self):
-         """Initialize ``bbox_head``"""
-         pass
-
-     @abstractmethod
-     def init_mask_head(self):
-         """Initialize ``mask_head``"""
-         pass
-
-     @abstractmethod
-     def init_gan_head(self):
-         """Initialize ``gan_head``"""
-         pass
-
-     @abstractmethod
-     def init_assigner_sampler(self):
-         """Initialize assigner and sampler."""
-         pass
-
-     @abstractmethod
-     def forward_train(self,
-                       x,
-                       img_meta,
-                       proposal_list,
-                       gt_bboxes,
-                       gt_labels,
-                       gt_bboxes_ignore=None,
-                       gt_masks=None,
-                       **kwargs):
-         """Forward function during training."""
-
-     async def async_simple_test(self, x, img_meta, **kwargs):
-         """Asynchronized test function."""
-         raise NotImplementedError
-
-     def simple_test(self,
-                     x,
-                     proposal_list,
-                     img_meta,
-                     proposals=None,
-                     rescale=False,
-                     **kwargs):
-         """Test without augmentation."""
-
-     def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
-         """Test with augmentations.
-
-         If rescale is False, then returned bboxes and masks will fit the scale
-         of imgs[0].
-         """
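BaseRoIHead leaves every init_* hook abstract, so a concrete head must supply all of them before it can be instantiated. A hypothetical minimal subclass (illustration only, not part of mmdet) showing that contract:

class MinimalRoIHead(BaseRoIHead):
    """Hypothetical concrete head: stores each branch and defines the hooks."""

    def init_weights(self, pretrained):
        pass  # no pretrained weights in this sketch

    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        self.bbox_roi_extractor = bbox_roi_extractor
        self.bbox_head = bbox_head

    def init_mask_head(self, mask_roi_extractor, mask_head):
        self.mask_roi_extractor = mask_roi_extractor
        self.mask_head = mask_head

    def init_gan_head(self, gan_roi_extractor, gan_head):
        self.gan_roi_extractor = gan_roi_extractor
        self.gan_head = gan_head

    def init_assigner_sampler(self):
        # A real head would build these from self.train_cfg.
        self.bbox_assigner = None
        self.bbox_sampler = None

    def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None, gt_masks=None, **kwargs):
        return dict()  # a real head returns a dict of losses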