parquet-converter committed
Commit f6e189b · 1 parent: 573472b

Update parquet files (step 48 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Control Systems Engineering By Nagrath And Gopal 5th Edition Free Download A Must-Have Resource for Control Systems Enthusiasts.md +0 -111
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ms Office 2007 Full Crack HOT!.md +0 -24
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 16 Ultimate Team A Complete Guide for iOS Users.md +0 -29
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gds punto de venta plus 5 crack Todo lo que necesitas saber antes de descargarlo.md +0 -90
  5. spaces/1gistliPinn/ChatGPT4/Examples/Bal Ganesh 3 Full Movie In Hindi Download [BETTER].md +0 -77
  6. spaces/1gistliPinn/ChatGPT4/Examples/Cara Cepat Dapat Like Banyak di Facebook dengan Meningkatkan Engagement Rate.md +0 -40
  7. spaces/1gistliPinn/ChatGPT4/Examples/EVEREST Ultimate Edition 5.30.3000 Final Portable Multilang.md +0 -6
  8. spaces/1gistliPinn/ChatGPT4/Examples/Foto Telanjang Claudia Sintia Bella.md +0 -6
  9. spaces/1phancelerku/anime-remove-background/101 Essays That Will Transform Your Thinking Free Epub Download.md +0 -116
  10. spaces/1phancelerku/anime-remove-background/Download CSR Racing APK and compete with the best - Over 100 licensed cars and stunning graphics.md +0 -86
  11. spaces/1phancelerku/anime-remove-background/Enjoy Turkish Music Anywhere Download and Stream the Top Turkish Songs of 2023.md +0 -105
  12. spaces/1phancelerku/anime-remove-background/Experience The Seven Deadly Sins Grand Cross on PC with Netmarbles PC Client Beta.md +0 -149
  13. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +0 -394
  14. spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_librosa_objects.py +0 -48
  15. spaces/2023Liu2023/bingo/cloudflare/worker.js +0 -18
  16. spaces/4com/stable-diffusion/app.py +0 -177
  17. spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_123812KB .py +0 -118
  18. spaces/801artistry/RVC801/train/data_utils.py +0 -512
  19. spaces/AHzizi/WaifuVoiceGen/monotonic_align/core.py +0 -36
  20. spaces/AIConsultant/MusicGen/audiocraft/optim/ema.py +0 -85
  21. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/transforms.py +0 -98
  22. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/__init__.py +0 -8
  23. spaces/Abhilashvj/planogram-compliance/models/__init__.py +0 -0
  24. spaces/Accel/media-converter/functions.py +0 -515
  25. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Factory.d.ts +0 -7
  26. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/PositionToPercent.js +0 -13
  27. spaces/Alfasign/dIFFU/app.py +0 -1028
  28. spaces/Alican/pixera/util/util.py +0 -103
  29. spaces/Amrrs/DragGan-Inversion/stylegan_human/docs/Dataset.md +0 -74
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +0 -761
  31. spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +0 -39
  32. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/modulated_deform_conv.py +0 -282
  33. spaces/ArtyomKhyan/Detection/models/common.py +0 -102
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/packaging.py +0 -57
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/mbcssm.py +0 -661
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/other.py +0 -161
  37. spaces/Audio-AGI/AudioSep/optimizers/lr_schedulers.py +0 -101
  38. spaces/Awesimo/jojogan/e4e/models/latent_codes_pool.py +0 -55
  39. spaces/Ayush113/cricket_matchups/app.py +0 -25
  40. spaces/AyushP/PolicyChatBot/app.py +0 -67
  41. spaces/Benson/text-generation/Examples/Bookworm Adventures 2 Mvil Descargar Gratis.md +0 -74
  42. spaces/Benson/text-generation/Examples/Cabra Simulador 3 Descarga Gratuita.md +0 -56
  43. spaces/Benson/text-generation/Examples/Choque Mini Apk Happymod.md +0 -84
  44. spaces/Benson/text-generation/Examples/Descarga De Ftbol De La Liga Profesional En PC.md +0 -68
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/core.py +0 -0
  46. spaces/CVPR/LIVE/thrust/examples/cpp_integration/host.cpp +0 -27
  47. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reduce.h +0 -44
  48. spaces/CVPR/LIVE/thrust/thrust/type_traits/void_t.h +0 -64
  49. spaces/CVPR/WALT/mmdet/core/bbox/assigners/approx_max_iou_assigner.py +0 -145
  50. spaces/CVPR/WALT/mmdet/models/detectors/paa.py +0 -17
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Control Systems Engineering By Nagrath And Gopal 5th Edition Free Download A Must-Have Resource for Control Systems Enthusiasts.md DELETED
@@ -1,111 +0,0 @@
1
-
2
- <h1>Windows Ce 5 0 Sygic Keygen Hit: A Guide to Installing and Using the World's Most Installed GPS Navigation Software</h1>
3
- <p>If you have a device that runs on Windows CE 5.0, such as a car navigation system or a handheld device, you might be wondering how to install and use a GPS navigation software that can provide you with accurate and reliable directions, online speed cameras, traffic information, and other features. One of the most popular GPS navigation software in the world is Sygic, which has over 200 million users worldwide. However, installing and activating Sygic on Windows CE 5.0 devices can be tricky, especially if you don't have a valid license code. That's why some people resort to using a keygen, which is a software that can generate a license code for Sygic.</p>
4
- <h2>Windows Ce 5 0 Sygic Keygen Hit</h2><br /><p><b><b>Download Zip</b> &#128279; <a href="https://byltly.com/2uKyTJ">https://byltly.com/2uKyTJ</a></b></p><br /><br />
5
- <p>In this article, we will guide you through the steps of downloading and installing Windows CE 5.0 Sygic keygen, as well as downloading and installing Sygic itself on your device. We will also show you how to use Sygic on Windows CE 5.0 devices and enjoy its features and benefits. However, we will also warn you about the risks of using a keygen and recommend buying a legitimate license from Sygic instead.</p>
6
- <h2>How to Download and Install Windows CE 5.0 Sygic Keygen</h2>
7
- <p>Before you can install Sygic on your device, you need to have a license code that can activate it. If you don't have one, you can either buy one from Sygic's website or use a keygen that can generate one for you. However, using a keygen is illegal and risky, as it can expose your device to malware, viruses, or legal actions from Sygic.</p>
8
- <p>If you still want to use a keygen, here are the steps you need to follow:</p>
9
- <ol>
10
- <li>Find the keygen file online. You can search for "Windows Ce 5 0 Sygic Keygen Hit" on Google or other search engines and look for websites that offer it for download. However, be careful not to click on any suspicious links or ads that might lead you to malicious sites or downloads.</li>
11
- <li>Download the keygen file safely. Once you find a reliable source for the keygen file, click on the download link and save it to your computer. Make sure you have an antivirus software installed on your computer that can scan the file for any malware or viruses.</li>
12
- <li>Extract the keygen file and copy it to your device. The keygen file is usually compressed in a ZIP or RAR format, so you need to extract it using a software like WinRAR or WinZip. After extracting it, you will see a file named "Sygic_KeyGen.exe" or something similar. Copy this file to your device using a USB cable or an SD card.</li>
13
- <li>Run the keygen file and generate a license code for Sygic. On your device, locate the "Sygic_KeyGen.exe" file and run it. You will see a window that asks you to enter some information, such as your device ID, map version, product code, etc. Enter these details correctly and click on "Generate". The keygen will then produce a license code for Sygic that you can copy or write down.</li>
14
- </ol>
15
- <h2>How to Download and Install Sygic on Windows CE 5.0</h2>
16
- <p>Now that you have a license code for Sygic, you can proceed to download and install Sygic on your device. Here are the steps you need to follow:</p>
17
- <ol>
18
- <li>Find the Sygic installation file online. You can search for "Sygic for Windows CE 5.0" on Google or other search engines and look for websites that offer it for download. However, be careful not to click on any suspicious links or ads that might lead you to malicious sites or downloads.</li>
19
- <li>Download the Sygic installation file securely. Once you find a reliable source for the Sygic installation file, click on the download link and save it to your computer. Make sure you have an antivirus software installed on your computer that can scan the file for any malware or viruses.</li>
20
- <li>Extract the Sygic installation file and copy it to your device. The Sygic installation file is usually compressed in a ZIP or RAR format, so you need to extract it using a software like WinRAR or WinZip. After extracting it, you will see a folder named "Sygic" or something similar that contains several files and subfolders. Copy this folder to your device using a USB cable or an SD card.</li>
21
- <li>Run the Sygic installation file and enter the license code from the keygen. On your device, locate the "Sygic" folder and open it. You will see a file named "Drive.exe" or something similar that is the main executable file for Sygic. Run this file and follow the instructions on the screen. When prompted, enter the license code that you generated from the keygen earlier.</li>
22
- </ol>
23
- <h2>How to Use Sygic on Windows CE 5.0</h2>
24
- <p>Congratulations! You have successfully installed and activated Sygic on your device. Now you can enjoy using its features and benefits, such as:</p>
25
- <ul>
26
- <li>Accurate and reliable GPS navigation with voice guidance</li>
27
- <li>Online speed cameras with 300000 mobile speedcam locations each month</li>
28
- <li>Traffic information with real-time updates</li>
29
- <li>3D maps with high-quality graphics</li>
30
- <li>Offline maps with free updates</li>
31
- <li>Lane guidance with junction view</li>
32
- <li>Parking suggestions with info about availability and price</li>
33
- <li>Tourist attractions with photos and descriptions</li>
34
- <li>Fuel prices along your route</li>
35
- <li>And many more!</li>
36
- </ul>
37
- <p>To use Sygic on your device, here are some tips:</p>
38
- <p>Sygic GPS Navigation for Windows CE 5.0 Crack<br />
39
- How to Install Sygic on Windows CE 5.0 Device<br />
40
- Sygic Activation Code Generator for Windows CE 5.0<br />
41
- Windows CE 5.0 Sygic Keygen Download Free<br />
42
- Sygic Maps for Windows CE 5.0 Full Version<br />
43
- Windows CE 5.0 Sygic Keygen Torrent<br />
44
- Sygic Product Code for Windows CE 5.0<br />
45
- Windows CE 5.0 Sygic Keygen Serial Number<br />
46
- Sygic Software for Windows CE 5.0 Review<br />
47
- Windows CE 5.0 Sygic Keygen License Key<br />
48
- Sygic Offline Maps for Windows CE 5.0<br />
49
- Windows CE 5.0 Sygic Keygen Activation Code<br />
50
- Sygic Update for Windows CE 5.0<br />
51
- Windows CE 5.0 Sygic Keygen Patch<br />
52
- Sygic Premium for Windows CE 5.0<br />
53
- Windows CE 5.0 Sygic Keygen Registration Code<br />
54
- Sygic Voice Guidance for Windows CE 5.0<br />
55
- Windows CE 5.0 Sygic Keygen Online<br />
56
- Sygic Speed Cameras for Windows CE 5.0<br />
57
- Windows CE 5.0 Sygic Keygen No Survey<br />
58
- Sygic Traffic Information for Windows CE 5.0<br />
59
- Windows CE 5.0 Sygic Keygen Working<br />
60
- Sygic Car Navigation for Windows CE 5.0<br />
61
- Windows CE 5.0 Sygic Keygen Latest Version<br />
62
- Sygic Truck Navigation for Windows CE 5.0<br />
63
- Windows CE 5.0 Sygic Keygen Free Download<br />
64
- Sygic Travel for Windows CE 5.0<br />
65
- Windows CE 5.0 Sygic Keygen Direct Link<br />
66
- Sygic Family Locator for Windows CE 5.0<br />
67
- Windows CE 5.0 Sygic Keygen Zip File<br />
68
- Sygic Fuel Prices for Windows CE 5.0<br />
69
- Windows CE 5.0 Sygic Keygen Rar File<br />
70
- Sygic Parking for Windows CE 5.0<br />
71
- Windows CE 5.0 Sygic Keygen Mega Link<br />
72
- Sygic Dashcam for Windows CE 5.0<br />
73
- Windows CE 5.0 Sygic Keygen Google Drive Link<br />
74
- Sygic HUD for Windows CE 5.0<br />
75
- Windows CE 5.0 Sygic Keygen Mediafire Link<br />
76
- Sygic Real View Navigation for Windows CE 5.0<br />
77
- Windows CE 5.0 Sygic Keygen Dropbox Link<br />
78
- Sygic Cockpit for Windows CE 5.0<br />
79
- Windows CE 5.0 Sygic Keygen Zippyshare Link<br />
80
- Sygic Smart Bluetooth Connection for Windows CE 5.0<br />
81
- Windows CE 5.0 Sygic Keygen Rapidshare Link<br />
82
- Sygic Route Sharing for Windows CE 5.0 <br />
83
- Windows Ce 5 0 sygic keygen hit blogspot.com <br />
84
- sygic windows ce key generator <br />
85
- windows ce sygic key crack <br />
86
- sygic windows ce activation code</p>
87
- <ol>
88
- <li>Launch Sygic by running the "Drive.exe" file from the "Sygic" folder.</li>
89
- <li>Configure the settings according to your preferences, such as language, units, sound, etc.</li>
90
- <li>Search for a destination by typing an address, selecting a point of interest (POI), choosing from favorites or recent destinations, etc.</li>
91
- <li>Start navigation by tapping on "Go" or "Navigate". You will see a map view with directions, distance, time, speed limit, etc.</li>
92
- <li>Access other features by tapping on icons on the screen or swiping left or right.</li>
93
- </ol>
94
- <h2>Conclusion</h2>
95
- Sygic on your device. We have also shown you how to use Sygic on Windows CE 5.0 devices and enjoy its features and benefits. Sygic is one of the most popular GPS navigation software in the world, and it can provide you with accurate and reliable directions, online speed cameras, traffic information, and other features that can make your travel easier and safer. However, we have also warned you about the risks of using a keygen to activate Sygic. A keygen is a software that can generate a license code for Sygic, but it is illegal and risky, as it can expose your device to malware, viruses, or legal actions from Sygic. Therefore, we recommend buying a legitimate license from Sygic's website instead of using a keygen. This way, you can support the developers of Sygic and enjoy their updates and support. We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to contact us. Thank you for reading! <h2>FAQs</h2>
96
- <p>Here are some frequently asked questions about Windows CE 5.0 Sygic keygen hit and Sygic:</p>
97
- <ol>
98
- <li>What are the system requirements for running Sygic on Windows CE 5.0?</li>
99
- <p>To run Sygic on Windows CE 5.0 devices, you need to have at least 64 MB of RAM, 400 MHz CPU, 800x480 screen resolution, and 2 GB of free storage space.</p>
100
- <li>How can I update Sygic on Windows CE 5.0?</li>
101
- <p>To update Sygic on Windows CE 5.0 devices, you need to download the latest version of Sygic from its website or from other sources and copy it to your device. You also need to update the maps and other data by downloading them from Sygic's website or from other sources and copying them to your device.</p>
102
- <li>How can I contact Sygic support if I have any issues or questions?</li>
103
- <p>To contact Sygic support, you can visit their website and fill out a contact form or send them an email. You can also visit their forum and post your questions or issues there.</p>
104
- <li>Is it legal to use a keygen to activate Sygic?</li>
105
- <p>No, it is not legal to use a keygen to activate Sygic. A keygen is a software that can generate a license code for Sygic, but it is illegal and risky, as it can expose your device to malware, viruses, or legal actions from Sygic. Therefore, we recommend buying a legitimate license from Sygic's website instead of using a keygen.</p>
106
- <li>What are some alternatives to Sygic for Windows CE 5.0 devices?</li>
107
- <p>Some alternatives to Sygic for Windows CE 5.0 devices are iGO Primo, Garmin Mobile XT, TomTom Navigator, Navitel Navigator, etc.</p>
108
- </ol>
109
- </p> 0a6ba089eb<br />
110
- <br />
111
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ms Office 2007 Full Crack HOT!.md DELETED
@@ -1,24 +0,0 @@
1
-
2
- <h1>How to Download MS Office 2007 Full Crack for Free</h1>
3
- <p>MS Office 2007 is one of the most popular and widely used productivity suites in the world. It includes various applications such as Word, Excel, PowerPoint, Outlook, and more. However, if you want to use MS Office 2007 without paying for a license, you may need to download a cracked version of it.</p>
4
- <p>A cracked version of MS Office 2007 is a modified version that bypasses the activation process and allows you to use the software for free. However, downloading and using a cracked version of MS Office 2007 is illegal and risky. You may face legal consequences, malware infections, or compatibility issues with your system.</p>
5
- <h2>download ms office 2007 full crack</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://byltly.com/2uKv2q">https://byltly.com/2uKv2q</a></b></p><br /><br />
6
- <p>Therefore, we do not recommend or endorse downloading or using a cracked version of MS Office 2007. Instead, we suggest you use a legitimate and safe alternative, such as the free online version of MS Office or other free office suites like LibreOffice or Google Docs.</p>
7
- <p>But if you still want to download MS Office 2007 full crack for free, you can follow these steps at your own risk:</p>
8
- <ol>
9
- <li>Go to this link or this link and download the MS Office 2007 full crack file. You will get a zip file that contains the setup.exe file and other files.</li>
10
- <li>Extract the zip file to a folder on your computer. You may need a password to extract the file. The password is usually "www.yasir252.com" or "www.downloaddrivers.in".</li>
11
- <li>Run the setup.exe file as administrator. You will see a window like this:</li>
12
- </ol>
13
- <img src="https://www.yasir252.com/wp-content/uploads/2020/11/resetter-epson-l310-1.jpg" alt="Setup window">
14
- <ol start="4">
15
- <li>Enter the product key from the attached text file or use this key: KGFVY-7733B-8WCK9-KTG64-BC7D8. Then click "Continue".</li>
16
- <li>Click on "Install Now" and wait for the installation process to finish.</li>
17
- <li>After the installation is done, do not open any MS Office application yet. Instead, go to the folder where you extracted the zip file and open the "Crack" folder.</li>
18
- <li>Copy all the files in the "Crack" folder and paste them into the installation directory of MS Office 2007. The default installation directory is usually C:\Program Files\Microsoft Office\Office12.</li>
19
- <li>Replace any existing files if prompted.</li>
20
- <li>Now you can open any MS Office application and enjoy using it for free.</li>
21
- </ol>
22
- <p>Congratulations! You have successfully downloaded and installed MS Office 2007 full crack for free. However, remember that this is an illegal and risky method that may cause problems for your system or your data. We advise you to use a legal and safe alternative instead.</p> ddb901b051<br />
23
- <br />
24
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 16 Ultimate Team A Complete Guide for iOS Users.md DELETED
@@ -1,29 +0,0 @@
1
- <br />
2
- <h1>FIFA 16 Ultimate Team: How to Download and Play on iOS Devices</h1>
3
- <p>FIFA 16 Ultimate Team is a popular football simulation game that lets you build and manage your own dream team. You can choose from over 10,000 players from over 500 licensed teams and compete against other players from real leagues in real arenas from around the world. You can also enjoy improved graphics, animations, controls and celebrations on your mobile device.</p>
4
- <p>If you are an iOS user and want to download and play FIFA 16 Ultimate Team on your iPhone, iPad or iPod touch, here are the steps you need to follow:</p>
5
- <h2>fifa 16 download ios</h2><br /><p><b><b>Download Zip</b> >>>>> <a href="https://byltly.com/2uKzbA">https://byltly.com/2uKzbA</a></b></p><br /><br />
6
- <ol>
7
- <li>Make sure you have at least 1.3GB of free space on your device and that it is compatible with iOS 8.0 or later.</li>
8
- <li>Open the App Store and search for "FIFA Soccer" (not "FIFA 16 Mobile"). Alternatively, you can click <a href="https://apps.apple.com/us/app/fifa-soccer/id1094930513">here</a> to go directly to the app page.</li>
9
- <li>Tap on the "Get" button and then on the "Install" button to start downloading the app.</li>
10
- <li>Once the app is installed, open it and tap on the "Accept" button to agree to the terms of service and privacy policy.</li>
11
- <li>You will be asked to log in with your EA Account or create a new one if you don't have one already. You can also use Facebook or Game Center to sign in.</li>
12
- <li>After logging in, you will be able to access the main menu of the game. Tap on the "Ultimate Team" icon to start building your team.</li>
13
- <li>You can earn, trade and transfer players, choose your play style, formation, kits and more. You can also play matches, tournaments and live events to earn rewards and improve your team.</li>
14
- </ol>
15
- <p>Congratulations! You are now ready to enjoy FIFA 16 Ultimate Team on your iOS device. Have fun!</p><h2>Tips and Tricks for Playing FIFA 16 Ultimate Team</h2>
16
- <p>FIFA 16 Ultimate Team is a challenging and rewarding game that requires skill, strategy and patience. Here are some tips and tricks that can help you improve your performance and enjoy the game more:</p>
17
- <ul>
18
- <li>Use the enhanced hybrid controls that let you use gestures or buttons to control the ball. You can customize the controls to suit your preference in the settings menu.</li>
19
- <li>Pay attention to the player chemistry, which affects how well your players perform together. You can improve the chemistry by matching players from the same nation, league or club, or by using special items such as chemistry styles and loyalty bonuses.</li>
20
- <li>Try different formations and tactics depending on your play style and your opponent's. You can change them before or during a match in the team management menu.</li>
21
- <li>Use the player exchange feature to trade players and items you no longer need for a chance of unlocking something better. The higher value item or player you trade, the better the upgrade you'll get back.</li>
22
- <li>Complete the dynamic accomplishments, which are based on real-world football events and challenges. You can earn coins, packs and other rewards by completing them.</li>
23
- <li>Keep an eye on the market and look for bargains and opportunities to buy low and sell high. You can also use filters and alerts to find the players you want.</li>
24
- <li>Open packs wisely and don't waste your coins or FIFA points on them. You can get better players by playing matches, completing accomplishments or trading on the market.</li>
25
- <li>Have fun and don't get frustrated by losses or bad luck. FIFA 16 Ultimate Team is a game of skill but also of chance. Sometimes you'll win, sometimes you'll lose. The important thing is to learn from your mistakes and enjoy the game.</li>
26
- </ul>
27
- <p>We hope these tips and tricks will help you become a better FIFA 16 Ultimate Team player. Good luck!</p> ddb901b051<br />
28
- <br />
29
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gds punto de venta plus 5 crack Todo lo que necesitas saber antes de descargarlo.md DELETED
@@ -1,90 +0,0 @@
1
-
2
- <h1>GDS Punto de Venta Plus 5 Crack: A Complete Guide</h1>
3
- <p>If you are looking for a simple and effective software to manage your business, you might have heard of GDS Punto de Venta Plus 5. This software is designed to help you with the commercial management of any business, whether it is a store, a restaurant, a salon, or any other type of service. But what is GDS Punto de Venta Plus 5 exactly, and how can you get it for free? In this article, we will answer these questions and show you how to download and use GDS Punto de Venta Plus 5 Crack from hereaload, a reliable source that offers you the full version of this software without any limitations or risks. Let's get started!</p>
4
- <h2>gds punto de venta plus 5 crack</h2><br /><p><b><b>DOWNLOAD</b> &raquo; <a href="https://byltly.com/2uKweR">https://byltly.com/2uKweR</a></b></p><br /><br />
5
- <h2>What is GDS Punto de Venta Plus 5?</h2>
6
- <p>GDS Punto de Venta Plus 5 is a software developed by GDS Sistemas, a company that specializes in creating solutions for small and medium businesses. This software is a point of sale system that allows you to control your inventory, sales, cash flow, payments, suppliers, customers, and more. With GDS Punto de Venta Plus 5, you can:</p>
7
- <ul>
8
- <li>Register your products and services with different prices, discounts, taxes, and categories.</li>
9
- <li>Search for your products and services easily with a barcode scanner or a keyboard.</li>
10
- <li>Make sales with multiple payment methods and print receipts or invoices.</li>
11
- <li>Manage your cash register with deposits, withdrawals, balances, and reports.</li>
12
- <li>Track your inventory with stock levels, alerts, transfers, adjustments, and reports.</li>
13
- <li>Monitor your sales with daily, monthly, yearly, or custom reports.</li>
14
- <li>Manage your suppliers with accounts payable, payments, purchases, and reports.</li>
15
- <li>Manage your customers with accounts receivable, payments, sales history, and reports.</li>
16
- <li>Configure your system with different users, passwords, permissions, settings, and backups.</li>
17
- </ul>
18
- <p>GDS Punto de Venta Plus 5 is compatible with Windows XP, Vista, 7, 8, and 10. It also supports multiple languages such as Spanish, English, Portuguese, French, Italian, German, and more. You can use this software on one or more computers connected by a network.</p>
19
- <h2>How to install and activate GDS Punto de Venta Plus 5</h2>
20
- <p>To install and activate GDS Punto de Venta Plus 5 on your computer, you need to follow these steps:</p>
21
- <ol>
22
- <li>Go to the official website of GDS Sistemas and download the trial version of GDS Punto de Venta Plus 5. The trial version is valid for 15 days and has some limitations such as not being able to print invoices or access some reports.</li>
23
- <li>Run the setup file and follow the instructions to install the software on your computer. You can choose the language and the destination folder during the installation process.</li>
24
- <li>When the installation is complete, launch the software and enter your name and email address to register it. You will receive an activation code by email that you need to enter in the software to activate it.</li>
25
- <li>You can now use GDS Punto de Venta Plus 5 for 15 days with some limitations. To remove these limitations and use the software indefinitely, you need to purchase a license from GDS Sistemas or use a crack from hereaload.</li>
26
- </ol>
27
- <h2>Why do you need a crack for GDS Punto de Venta Plus 5?</h2>
28
- <p>A crack is a file that modifies or bypasses the original protection of a software to make it work as if it was licensed. A crack can help you use a software for free without paying for it or having any restrictions. However, not all cracks are safe or reliable. Some cracks can contain viruses or malware that can harm your computer or steal your personal information. Some cracks can also fail to work properly or cause errors in the software. That's why you need to be careful when choosing a source for downloading a crack for GDS Punto de Venta Plus 5. Here are some reasons why you might need a crack for this software:</p>
29
- <h3>The disadvantages of using the trial version</h3>
30
- <p>The trial version of GDS Punto de Venta Plus 5 has some disadvantages that can affect your business performance. For example:</p>
31
- <ul>
32
- <li>You can only use it for 15 days after registering it.</li>
33
- <li>You cannot print invoices or access some reports such as sales by product or customer.</li>
34
- <li>You cannot export your data to Excel or PDF formats.</li>
35
- <li>You cannot update your software to get new features or bug fixes.</li>
36
- </ul>
37
- <p>These limitations can prevent you from using the full potential of GDS Punto de Venta Plus 5 and make your business less efficient and profitable.</p>
38
- <p>gds punto de venta plus 5 full version download<br />
39
- gds punto de venta plus 5 serial key generator<br />
40
- gds punto de venta plus 5 activation code free<br />
41
- gds punto de venta plus 5 license key crack<br />
42
- gds punto de venta plus 5 patch file download<br />
43
- gds punto de venta plus 5 torrent link magnet<br />
44
- gds punto de venta plus 5 software review<br />
45
- gds punto de venta plus 5 features and benefits<br />
46
- gds punto de venta plus 5 system requirements<br />
47
- gds punto de venta plus 5 installation guide<br />
48
- gds punto de venta plus 5 user manual pdf<br />
49
- gds punto de venta plus 5 customer support number<br />
50
- gds punto de venta plus 5 alternative software<br />
51
- gds punto de venta plus 5 comparison with other products<br />
52
- gds punto de venta plus 5 discount coupon code<br />
53
- gds punto de venta plus 5 free trial offer<br />
54
- gds punto de venta plus 5 refund policy<br />
55
- gds punto de venta plus 5 testimonials and feedback<br />
56
- gds punto de venta plus 5 pros and cons<br />
57
- gds punto de venta plus 5 best practices and tips<br />
58
- gds punto de venta plus 5 how to use tutorial video<br />
59
- gds punto de venta plus 5 frequently asked questions<br />
60
- gds punto de venta plus 5 latest update and news<br />
61
- gds punto de venta plus 5 online demo and webinar<br />
62
- gds punto de venta plus 5 case studies and success stories<br />
63
- gds punto de venta plus 5 integrations and add-ons<br />
64
- gds punto de venta plus 5 customizations and configurations<br />
65
- gds punto de venta plus 5 security and privacy issues<br />
66
- gds punto de venta plus 5 performance and reliability issues<br />
67
- gds punto de venta plus 5 compatibility and interoperability issues<br />
68
- gds punto de venta plus 5 pricing and payment options<br />
69
- gds punto de venta plus 5 delivery and installation options<br />
70
- gds punto de venta plus 5 warranty and guarantee options<br />
71
- gds punto de venta plus 5 backup and restore options<br />
72
- gds punto de venta plus 5 upgrade and downgrade options<br />
73
- gds punto de venta plus 5 troubleshooting and error solutions<br />
74
- gds punto de venta plus 5 technical support and help desk<br />
75
- gds punto de venta plus 5 community forum and blog<br />
76
- gds punto de venta plus 5 social media and email marketing<br />
77
- gds punto de venta plus 5 affiliate program and referral bonus<br />
78
- gds punto de venta plus 5 awards and recognition<br />
79
- gds punto de venta plus 5 certifications and accreditations<br />
80
- gds punto de venta plus 5 industry standards and compliance<br />
81
- gds punto de venta plus 5 research and development projects<br />
82
- gds punto de venta plus 5 future plans and roadmap<br />
83
- gds punto de venta plus 5 history and background information<br />
84
- gds punto de venta plus 5 team and company profile<br />
85
- gds punto de venta plus 5 mission and vision statement<br />
86
- gds punto de venta plus 5 values and culture statement</p>
87
- <h3>The risks of downloading a crack from unreliable sources</h3>
88
- <p>If you decide to download a crack for GDS Punto de Venta Plus 5 from an unknown or untrusted source</p> 0a6ba089eb<br />
89
- <br />
90
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Bal Ganesh 3 Full Movie In Hindi Download [BETTER].md DELETED
@@ -1,77 +0,0 @@
1
-
2
- <h1>Bal Ganesh 3 Full Movie in Hindi Download: A Review</h1>
3
- <p>If you are looking for a fun and educational animated movie for your kids, you might want to check out Bal Ganesh 3. This is the third installment of the popular Bal Ganesh franchise, which features the childhood adventures of the elephant-headed god Ganesh. In this movie, Bal Ganesh is not only loved by humans, but also by alien kids from the planet Zeba, who visit Earth to learn more about him.</p>
4
- <h2>bal ganesh 3 full movie in hindi download</h2><br /><p><b><b>Download Zip</b> &middot; <a href="https://imgfil.com/2uy25b">https://imgfil.com/2uy25b</a></b></p><br /><br />
5
- <p>Bal Ganesh 3 is a full-length movie that runs for about 68 minutes. It was released in 2015 and is available in Hindi language. You can watch it online or download it for offline viewing. Here are some of the reasons why you should watch or download Bal Ganesh 3 full movie in Hindi.</p>
6
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Story</h2>
7
- <p>The movie begins with a group of alien kids landing on Earth in their spaceship. They are curious about Bal Ganesh and want to know more about him. They meet three mice, Dhoti, Topi and Suit Boot, who are friends of Bal Ganesh. The mice tell them various stories of how Bal Ganesh outsmarted his enemies and helped his friends.</p>
8
- <p>Some of the stories include how Bal Ganesh defeated a demon named Gajamukhasur, who wanted to take over the world; how he helped his brother Kartikeya win a race against him; how he saved his father Shiva from a snake; and how he taught a lesson to a greedy merchant. The alien kids are amazed by Bal Ganesh's intelligence, courage and compassion. They also learn some valuable lessons from his stories.</p>
9
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Animation</h2>
10
- <p>Bal Ganesh 3 is a well-made animated movie that has colorful graphics and smooth movements. The characters are expressive and lively, and the backgrounds are detailed and realistic. The movie also has some special effects, such as fire, smoke and explosions, that add to the excitement and drama.</p>
11
- <p>The animation quality of Bal Ganesh 3 is comparable to some of the best animated movies in the world. It is suitable for kids of all ages, as well as adults who enjoy animation. The movie also has some catchy songs and music that enhance the mood and atmosphere.</p>
12
- <p></p>
13
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Benefits</h2>
14
- <p>Bal Ganesh 3 is not only an entertaining movie, but also an educational one. It teaches kids about Hindu mythology and culture, as well as moral values and life skills. It also inspires them to be brave, smart and kind, just like Bal Ganesh.</p>
15
- <p>By watching or downloading Bal Ganesh 3 full movie in Hindi, you can give your kids a fun and enriching experience that they will remember for a long time. You can also bond with them over the movie and discuss the lessons learned from it.</p>
16
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: How to Do It</h2>
17
- <p>If you want to watch or download Bal Ganesh 3 full movie in Hindi, you have several options. You can stream it online on platforms like YouTube or Disney+ Hotstar, where it is available for free or with a subscription. You can also download it from these platforms or other websites that offer legal downloads.</p>
18
- <p>However, before you download Bal Ganesh 3 full movie in Hindi, make sure you have a good internet connection and enough storage space on your device. You should also use a reliable antivirus software to protect your device from malware or viruses that might come with the download.</p>
19
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: Conclusion</h2>
20
- <p>Bal Ganesh 3 is a wonderful animated movie that you and your kids will love. It has a captivating story, stunning animation, catchy music and valuable lessons. It is one of the best movies to watch or download for kids who are interested in Hindu mythology or Indian culture.</p>
21
- <p>So what are you waiting for? Go ahead and watch or download Bal Ganesh 3 full movie in Hindi today and enjoy the adventures of the lovable god!</p>
22
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Reviews</h2>
23
- <p>Bal Ganesh 3 has received positive reviews from critics and audiences alike. It has been praised for its engaging story, impressive animation, catchy music and valuable lessons. It has also been appreciated for its cultural and religious significance, as it introduces kids to Hindu mythology and culture.</p>
24
- <p>Some of the reviews of Bal Ganesh 3 are as follows:</p>
25
- <ul>
26
- <li>"Bal Ganesh 3 is a delightful movie that will entertain and educate kids of all ages. It has a charming story, vibrant animation, melodious music and meaningful messages. It is a must-watch for kids who love Bal Ganesh and his adventures." - Times of India</li>
27
- <li>"Bal Ganesh 3 is a splendid animated movie that showcases the childhood exploits of the elephant-headed god Ganesh. It has a captivating story, stunning animation, catchy music and valuable lessons. It is a perfect movie for kids who are interested in Hindu mythology or Indian culture." - Hindustan Times</li>
28
- <li>"Bal Ganesh 3 is a wonderful animated movie that features the childhood adventures of the lovable god Ganesh. It has a fascinating story, superb animation, lively music and valuable lessons. It is a great movie for kids who want to have fun and learn something new." - India Today</li>
29
- </ul>
30
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Conclusion</h2>
31
- <p>Bal Ganesh 3 is one of the best animated movies for kids that you can watch or download. It has a captivating story, stunning animation, catchy music and valuable lessons. It is also a cultural and religious treasure that introduces kids to Hindu mythology and culture.</p>
32
- <p>So what are you waiting for? Go ahead and watch or download Bal Ganesh 3 full movie in Hindi today and enjoy the adventures of the lovable god!</p>
33
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Characters</h2>
34
- <p>Bal Ganesh 3 has a variety of characters that make the movie more interesting and enjoyable. The main character is Bal Ganesh, the elephant-headed god who is smart, brave and kind. He is always ready to help his friends and family, and to fight against evil. He also loves to eat modaks, his favorite sweet.</p>
35
- <p>The other characters include his brother Kartikeya, the god of war; his father Shiva, the supreme god; his mother Parvati, the goddess of power; his vehicle Mooshak, the mouse; and his friends Dhoti, Topi and Suit Boot, the three mice. The movie also introduces some new characters, such as the alien kids from Zeba, who are fascinated by Bal Ganesh and his stories.</p>
36
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Fun Facts</h2>
37
- <p>Bal Ganesh 3 is a movie that is full of fun facts and trivia that you might not know. Here are some of them:</p>
38
- <ul>
39
- <li>Bal Ganesh 3 is the third movie in the Bal Ganesh franchise, which started in 2007 with Bal Ganesh and continued in 2009 with Bal Ganesh 2.</li>
40
- <li>Bal Ganesh 3 is produced by Shemaroo Entertainment, one of the leading media and entertainment companies in India.</li>
41
- <li>Bal Ganesh 3 is directed by Pankaj Sharma, who has also directed other animated movies like Dashavatar, Krishna Aur Kans and Hanuman vs Mahiravana.</li>
42
- <li>Bal Ganesh 3 is written by Rajiv Chilaka, who is also the creator of Chhota Bheem, one of the most popular animated characters in India.</li>
43
- <li>Bal Ganesh 3 is voiced by some of the talented actors in the industry, such as Ashar Sheikh as Bal Ganesh, Omkar Bhatkar as Kartikeya, Vinod Kulkarni as Shiva and Mona Ghosh Shetty as Parvati.</li>
44
- </ul>
45
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Awards</h2>
46
- <p>Bal Ganesh 3 is not only a popular movie, but also an award-winning one. It has won several awards and accolades for its excellence in animation, story, music and direction. Some of the awards that Bal Ganesh 3 has won are as follows:</p>
47
- <ul>
48
- <li>Bal Ganesh 3 won the Best Animated Feature Film award at the 63rd National Film Awards in 2016.</li>
49
- <li>Bal Ganesh 3 won the Best Animation Film award at the Dadasaheb Phalke Film Festival in 2016.</li>
50
- <li>Bal Ganesh 3 won the Best Children's Film award at the Jaipur International Film Festival in 2016.</li>
51
- <li>Bal Ganesh 3 won the Best Animation Film award at the Noida International Film Festival in 2016.</li>
52
- <li>Bal Ganesh 3 won the Best Animation Film award at the Delhi International Film Festival in 2015.</li>
53
- </ul>
54
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Sequel</h2>
55
- <p>Bal Ganesh 3 is not the end of the Bal Ganesh franchise, as there is a sequel in the making. The sequel is titled Bal Ganesh 4 and is expected to release in 2022. The sequel will feature more stories of Bal Ganesh and his friends, as well as new characters and challenges.</p>
56
- <p>Bal Ganesh 4 is being produced by Shemaroo Entertainment and directed by Pankaj Sharma. The voice cast of Bal Ganesh 4 will include some of the actors from Bal Ganesh 3, as well as some new ones. The music of Bal Ganesh 4 will be composed by Shamir Tandon, who has also composed music for Bal Ganesh 2 and Bal Ganesh 3.</p>
57
- <p>Bal Ganesh 4 is a highly anticipated movie that will continue the legacy of Bal Ganesh and his adventures. It will be a treat for all the fans of Bal Ganesh and animation lovers.</p>
58
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Comparison</h2>
59
- <p>Bal Ganesh 3 is not the only animated movie that features Bal Ganesh and his stories. There are other movies that also depict the childhood adventures of the elephant-headed god. Some of them are:</p>
60
- <ul>
61
- <li>Bal Ganesh: This is the first movie in the Bal Ganesh franchise, which was released in 2007. It shows how Bal Ganesh was born and how he got his elephant head. It also shows some of his stories, such as how he defeated a demon named Gajamukhasur, how he helped his father Shiva in a battle against Tripurasura, and how he broke one of his tusks to write the Mahabharata.</li>
62
- <li>Bal Ganesh 2: This is the second movie in the Bal Ganesh franchise, which was released in 2009. It shows more stories of Bal Ganesh, such as how he fought with a cat, how he saved his friend Mooshak from a snake, how he helped a sage from a curse, and how he outwitted a crocodile.</li>
63
- <li>Bal Ganesh and the Pomzom Planet: This is a spin-off movie from the Bal Ganesh franchise, which was released in 2017. It shows how Bal Ganesh and his friends go to a planet called Pomzom, where they meet a friendly alien named Pomy. They also face a villain named Zimmy, who wants to destroy Pomzom and Earth.</li>
64
- </ul>
65
- <p>Bal Ganesh 3 is different from these movies in terms of its story, animation, music and direction. It has more stories of Bal Ganesh than the previous movies, and it also introduces some new characters and settings. It has better animation quality and special effects than the previous movies, and it also has more catchy songs and music. It has a different director and writer than the previous movies, who have given their own touch to the movie.</p>
66
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Recommendation</h2>
67
- <p>Bal Ganesh 3 is a highly recommended movie for anyone who loves animation, mythology or culture. It is a movie that will entertain and educate you and your kids. It is a movie that will make you laugh and learn. It is a movie that will inspire you to be smart, brave and kind.</p>
68
- <p>So don't miss this opportunity to watch or download Bal Ganesh 3 full movie in Hindi. You can find it on various platforms like YouTube or Disney+ Hotstar, where it is available for free or with a subscription. You can also download it from other websites that offer legal downloads.</p>
69
- <p>But before you watch or download Bal Ganesh 3 full movie in Hindi, make sure you have a good internet connection and enough storage space on your device. You should also use a reliable antivirus software to protect your device from malware or viruses that might come with the download.</p>
70
- <p>So what are you waiting for? Go ahead and watch or download Bal Ganesh 3 full movie in Hindi today and enjoy the adventures of the lovable god!</p>
71
- <h2>Bal Ganesh 3 Full Movie in Hindi Download: The Final Word</h2>
72
- <p>Bal Ganesh 3 is one of the best animated movies for kids that you can watch or download. It has a captivating story, stunning animation, catchy music and valuable lessons. It is also a cultural and religious treasure that introduces kids to Hindu mythology and culture.</p>
73
- <p>By watching or downloading Bal Ganesh 3 full movie in Hindi, you can give your kids a fun and enriching experience that they will remember for a long time. You can also bond with them over the movie and discuss the lessons learned from it.</p>
74
- <p>Bal Ganesh 3 is a movie that will make you and your kids happy and proud. It is a movie that will make you and your kids smarter and kinder. It is a movie that will make you and your kids fans of Bal Ganesh and his adventures.</p>
75
- <p>So don't hesitate to watch or download Bal Ganesh 3 full movie in Hindi today and enjoy the adventures of the lovable god!</p> 3cee63e6c2<br />
76
- <br />
77
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Cara Cepat Dapat Like Banyak di Facebook dengan Meningkatkan Engagement Rate.md DELETED
@@ -1,40 +0,0 @@
1
- <br />
2
- <p>Dalam Facebook, Anda akan menemukan sebuah fitur dimana Anda bisa mengupdate status. Update status ini bisa berupa tulisan, gambar, atau bahkan video. Tak jarang, pengguna Facebook berharap statusnya bisa mendapatkan banyak Like.</p>
3
- <p>Nah, untuk mendapatkan likers di status Facebook, Anda bisa mengikut tips dan trik cara agar status FB banyak like. Penasaran dengan cara jitu mendapatkan likers tanpa auto like versi Carisinyal? Langsung simak berikut artikel cara agar status FB banyak yang like secara alami.</p>
4
- <h2>cara cepat dapat like banyak di facebook</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://imgfil.com/2uxZiJ">https://imgfil.com/2uxZiJ</a></b></p><br /><br />
5
- <p>Sebelum Anda mendapatkan likers yang banyak dari teman-teman Facebook Anda, pastikan Anda sudah menentukan jumlah likers yang ingin Anda capai. Dengan menentukan pencapaian ini akan membantu Anda untuk memilih mana yang harus dilakukan dan mana yang tidak perlu dilakukan. Jadi, Anda tidak perlu membuang waktu terlalu banyak.</p>
6
- <p>Jika Anda sudah menentukan pencapaian yang ingin Anda dapatkan, maka Anda akan lebih mempersiapkan strategi dengan baik. Anda juga perlu mengevaluasi berapa banyak likers yang bisa Anda dapatkan setiap kali Anda mengupdate status. Jadi, Anda tidak asal menentukan pencapaian likers saja, namun Anda sudah mempunyai data yang akurat seberapa mampunya Anda mendapatkan likers.</p>
7
- <p>Jika Anda menginginkan banyak teman yang me-like status Facebook Anda, maka pastikan profil atau halaman akun Facebook Anda tertata dengan rapih dan tidak berantakan. Anda bisa merapihkan tampilan album, menuliskan keterangan diri secara lengkap, jelas, dan singkat.</p>
8
- <p>Anda tidak akan merugi hanya sekedar memberikan like kepada teman Facebook lainnya. Justru, dengan Anda memberikan like di status Facebook orang lain akan menambah likers yang Anda miliki. Jika Anda melakukan like kepada teman Facebook lainnya, biasanya secara otomatis teman yang sudah di-like akan melakukan like balik di status FB Anda.</p>
9
- <p>Seperti yang dipaparkan pada poin keempat mengenai like status Facebook teman lainnya, kini Anda juga harus memastikan bahwa Anda memiliki teman yang cukup banyak untuk melike status Anda. Anda bisa melakukan add atau menambahkan teman-teman baru. Jangan merasa gengsi untuk add teman baru, justru teman baru ini akan menjadi likers Anda.</p>
10
- <p>Anda bisa memeberikan like di status teman FB Anda, kemudian teman baru Anda juga bisa melakukan like balik. Bahkan, Anda bisa meminta tolong ke teman FB Anda untuk saling bertukar like. Jadi Anda memberikan like pada teman Anda, begitupun dengan sebaliknya. Cara yang satu ini cukup jitu untuk mendapatkan likers di Facebook.</p>
11
- <p>Trik cara agar status FB banyak yang Like kali ini adalah memberikan respon yang positif. Selain mendapatkan likers yang banyak, tak jarang pula ada yang mengomentari status Facebook Anda. Nah, tugas Anda adalah membalas semua komentar yang ada di status Facebook Anda.</p>
12
- <p>Mungkin kalian adalah salah satunya, dan kalian menyadari bahwa untuk mendapatkan like fb atau pada postingan Facebook terasa semakin sulit. Lalu, bagaimanakah cara untuk mendapatkan banyak like di fb kalian? Simak ulasan berikut ini.</p>
13
- <p></p>
14
- <p>Like pada postingan tinggi, secara otomatis akun tersebut juga dilihat dan dikunjungi oleh banyak orang. Akun seperti ini sering sekali diincar oleh brand yang ingin memasarkan produk mereka melalui internet dan media sosial.</p>
15
- <p>Sebenarnya, sejak awal munculnya Facebook, ada banyak cara yang bisa kalian lakukan untuk membuat postingan kalian disukai oleh banyak orang. Semuanya tergantung dari kreativitas kalian dalam melakukannya.</p>
16
- <p>Namun, seiring berkembangnya jaman dan teknologi, cara untuk menaikkan jumlah like pada postingan mulai berkembang dan metode yang tersedia semakin banyak. Ada cara manual, ada juga cara otomatis hanya dengan menekan satu tombol saja.</p>
17
- <p>Cara untuk menaikkan jumlah like pada posting Facebook sangatlah banyak, namun keduanya terbagi dari dua jenis. Jenis pertama adalah dari dalam, yaitu dari akun dan dari postingan itu sendiri. Jenis kedua adalah dari luar, cara ini biasanya menggunakan tools tertentu yang bisa kalian gunakan untuk meningkatkan jumlah like pada postingan. Kita akan membahas kedua jenis ini.</p>
18
- <p>Banyak orang yang tidak memperhatikan hal ini, namun dengan melakukannya kalian bisa mendapatkan banyak sekali like dalam sekali posting. Foto dan video dengan kualitas baik serta memiliki isi yang sangat bagus dapat membuat postingan kalian disukai oleh banyak orang. Contohnya, saat kalian mengunggah foto atau video saat mendaki gunung dengan awan yang menutupi segala penjuru, pasti foto tersebut disukai oleh banyak orang.</p>
19
- <p>Dengan mengetahui siklus tersebut, kalian bisa mengunggah foto, video, bahkan status berisi kalimat saja di saat mereka sedang aktif membuka media sosial. Dengan begitu, otomatis post kalian dapat dilihat oleh banyak pengguna, terutama akun yang sudah berteman dengan kalian di Facebook.</p>
20
- <p>Cara kedua adalah dengan bantuan dari luar akun kalian, baik melalui Facebook maupun luar Facebook. Beberapa cara yang bisa kalian lakukan untuk menambahkan jumlah like pada postingan Facebook kalian adalah:</p>
21
- <p>Cara ini sangat sederhana, yaitu meminta teman kalian untuk membagikan postingan Facebook kalian. Membagikan postingan seperti ini tentu bukan cara baru, kalian bisa minta tolong keluarga, teman dekat, kekasih, atau kenalan kalian di Facebook. Dengan cara ini, kalian bisa mendapatkan like yang entah dari mana datangnya karena link postingan Facebook kalian sudah tersebar luas.</p>
22
- <p>Cara selanjutnya mirip dengan cara sebelumnya, yaitu mendapatkan like Facebook secara otomatis. Namun, kali ini kalian tidak menggunakan situs, melainkan aplikasi yang bisa kalian download dan install pada smartphone kalian. Beberapa aplikasi yang bisa kalian gunakan untuk menambahkan like secara otomatis pada postingan Facebook kalian adalah:</p>
23
- <p>Sebagai aplikasi yang digunakan untuk menambahkan like secara otomatis, FB Liker adalah salah satu aplikasi yang populer di kalangan pengguna Facebook. Dengan menggunakan aplikasi ini, kalian bisa menambahkan like pada posting dan status (story) Facebook kalian.</p>
24
- <p>That concludes this review of ways to get more likes on Facebook that you can apply to your own Facebook account. Getting more likes on your Facebook posts is really not difficult; you simply need to follow the steps above.</p>
25
- <p>Do you want to gain more followers for your Facebook Page? <b>Building a business Facebook page</b> is a great way to increase brand awareness. However, with the latest algorithm changes, it is becoming harder to attract more fans.</p>
26
- <p>One of the easiest ways to gain more followers for your Facebook page for free is to run a giveaway. A simple contest such as a giveaway <b>has the potential</b> to promote your Facebook page to a large audience.</p>
27
- <p>You can also run <b>Engagement Ads</b> to gain more followers for your Facebook page. Facebook ads can increase your brand's visibility, so when people see your ads, they are more likely to <i>engage</i> and follow your Facebook page.</p>
28
- <p>In fact, almost every kind of paid promotion on Facebook can increase the <b>visibility of your posts</b>. The more people see your content, the more likely they are to follow your Facebook page.</p>
29
- <p>On the other hand, if you are creative enough, you can create your own viral content to share on your social channels. Think of ways to place your product or service in a humorous situation, or of the kind of content your audience relates to most.</p>
30
- <p>PS: While applying this technique, you can also take a peek at other pages' descriptions to sample examples of attractive Facebook pages. Having a great Facebook page description also helps people find your Facebook page more easily!</p>
31
- <p>The host chats with a brand ambassador about creating beach-style waves in your hair, along with a tutorial on how to do it. With more than 12 thousand views, it <b>reached a very large audience</b>.</p>
32
- <p>Better still, scheduling content ensures that you post regularly. This can send a signal to the algorithm that you publish content consistently, increasing your post reach and attracting potential followers.</p>
33
- <p>There are many other places where you can also share a link to your Facebook page. Think of all the media you use across the internet (and offline) and identify where you can direct people to your business page to follow you.</p>
34
- <p>Try to increase your engagement with your followers. Engagement here means the interaction and the closeness of your relationship with the people who follow you or are friends with you on Facebook. Reply actively and engagingly to their comments on your Facebook posts. Then try opening a discussion by asking their opinion on a topic on your Facebook page. That way, they will feel involved and will enjoy continuing to interact with you on Facebook, and they may even become loyal to you (regularly sharing and liking your posts).</p>
35
- <p>You can actually also grow your follower and like counts by leveraging other social media influencers. But do not do this in a tacky or unethical way. Try natural approaches that earn you more respect from those influencers.</p>
36
- <p>When you engage through Agorapulse, you can track reaction rates and the time you spend replying. The tool also includes analysis of influencers, the most influential users who interact with you or talk about you the most on their Facebook.</p>
37
- <p>You can see a breakdown of paid, organic, and viral traffic. You can understand which types of content perform best, and the tool includes a calculator for working out the ROI of your Facebook marketing. In addition, reports can be customized and downloaded as a 20-slide PowerPoint presentation.</p>
38
- <p><br>Quintly can provide analytics across many social media platforms such as Facebook, Twitter, Google+, LinkedIn, Instagram, and YouTube. The tool is available for free for Facebook analytics. Its main suite is a set of different dashboards; it comes with a standard dashboard that can be customized to your needs if you want to analyze your Facebook presence.</p> aaccfb2cb3<br />
39
- <br />
40
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/EVEREST Ultimate Edition 5.30.3000 Final Portable Multilang.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>EVEREST Ultimate Edition 5.30.3000 Final Portable Multilang</h2><br /><p><b><b>Download</b> &#10026;&#10026;&#10026; <a href="https://imgfil.com/2uxZoY">https://imgfil.com/2uxZoY</a></b></p><br /><br />
2
- <br />
3
- With the largest collection of ultra-accurately modeled gear, creative ... EVEREST Ultimate Edition 5.30.3000 Final Portable Multilang patch 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Foto Telanjang Claudia Sintia Bella.md DELETED
@@ -1,6 +0,0 @@
1
- <br />
2
- <p>Search claudia cynthia bella bugil Photos<br/>Search claudia cynthia bella bugil Unrated Videos<br/>Search claudia cynthia bella bugil XXX Videos<br/>Search claudia cynthia bella bugil Indian Videos<br/>Search claudia cynthia bella bugil MP4 Videos<br/>Search claudia cynthia bella bugil Indian Images<br/>Search claudia cynthia bella bugil Leaked Videos<br/>Search claudia cynthia bella bugil Leaked Pics<br/>Search claudia cynthia bella bugil XXX Posts<br/></p>
3
- <p>Cynthia Martell Nude Leaked Photos Victoria Justice Topless In White Panties Kelly Brook Nude And Leaked (10 Photos) Emmy Rossum Nude Shameless (2017) s08e10 HD 1080p pernikahan laudya cynthia bella, laudya chintya bella instagram, model bugil indo artis, gambar sandra dewi, artis cantik bugil, bela bugil, diana pungky bugil, foto ibu ibu bugil, minang bugil</p>
4
- <h2>Foto Telanjang Claudia Sintia Bella</h2><br /><p><b><b>Download Zip</b> &harr; <a href="https://imgfil.com/2uy1qX">https://imgfil.com/2uy1qX</a></b></p><br /><br /> aaccfb2cb3<br />
5
- <br />
6
- <br />
spaces/1phancelerku/anime-remove-background/101 Essays That Will Transform Your Thinking Free Epub Download.md DELETED
@@ -1,116 +0,0 @@
1
-
2
- <h1>How to Download 101 Essays That Will Change The Way You Think for Free</h1>
3
- <p>If you are looking for a book that will inspire you, challenge you, and transform your perspective on life, you should definitely check out 101 Essays That Will Change The Way You Think by Brianna Wiest. This book is a collection of the author's most popular and insightful pieces of writing, covering topics such as purpose, passion, negative thinking, cognitive biases, daily routine, and more. In this article, we will show you how to download this book for free in EPUB format, which is one of the most widely used and compatible file formats for ebooks. We will also explain what EPUB format is, how to open and read it on different devices, and where to find free EPUB downloads of 101 Essays That Will Change The Way You Think. Let's get started!</p>
4
- <h2>101 essays that will change the way you think epub download free</h2><br /><p><b><b>Download File</b> &hArr; <a href="https://jinyurl.com/2uNTjc">https://jinyurl.com/2uNTjc</a></b></p><br /><br />
5
- <h2>What is 101 Essays That Will Change The Way You Think?</h2>
6
- <h3>A brief summary of the book and its author</h3>
7
- <p>101 Essays That Will Change The Way You Think is a global bestseller and social media phenomenon that has been read by millions of people around the world. It is written by Brianna Wiest, a renowned writer and editor who has been featured in publications such as Forbes, HuffPost, Thought Catalog, and Medium. She is also the author of several other books, such as The Mountain Is You, Salt Water, and I Am The Hero Of My Own Life.</p>
8
- <p>The book consists of 101 short essays that explore various aspects of personal growth, self-awareness, and happiness. Each essay offers a fresh and original perspective that will make you think differently about yourself and your life. Some of the essays include:</p>
9
- <ul>
10
- <li>Why You Should Pursue Purpose Over Passion</li>
11
- <li>How To Embrace Negative Thinking</li>
12
- <li>The Wisdom In Daily Routine</li>
13
- <li>How To Become Aware Of The Cognitive Biases That Are Creating The Way You See Your Life</li>
14
- <li>Why You Should Stop Trying To Fix Yourself And Start Living Instead</li>
15
- <li>How To Find Your True Self In A World That Wants You To Be Someone Else</li>
16
- <li>And many more!</li>
17
- </ul>
18
- <h3>Why you should read this book</h3>
19
- <p>This book is not just a collection of essays; it is a powerful tool that will help you improve your mindset, habits, and actions. By reading this book, you will:</p>
20
- <ul>
21
- <li>Learn how to overcome your fears, doubts, and insecurities</li>
22
- <li>Discover your true purpose and passion in life</li>
23
- <li>Develop a positive and realistic attitude towards yourself and your circumstances</li>
24
- <li>Understand how your thoughts and emotions affect your behavior and outcomes</li>
25
- <li>Create a meaningful and fulfilling daily routine that supports your goals</li>
26
- <li>Enhance your creativity, productivity, and happiness</li>
27
- <li>And much more!</li>
28
- </ul>
29
- <h2>What is EPUB format?</h2>
30
- <h3>The advantages and disadvantages of EPUB files</h3>
31
- <p>EPUB stands for electronic publication. It is a file format including EPUB. It has a variety of genres and categories that you can browse by popularity, rating, reviews, or recommendations. You can also search by keyword or use the advanced search option.</li>
32
- <li><a href="">Free-Ebooks.net</a>: This is a website that offers over 10,000 free ebooks in various formats, including EPUB. It has a selection of fiction and non-fiction books that you can browse by genre, author, title, or language. You can also search by keyword or use the advanced search option.</li>
33
- <li><a href="">BookBub</a>: This is a website that offers free and discounted ebooks in various formats, including EPUB. It has a curated list of bestsellers and new releases that you can browse by genre, category, or popularity. You can also search by keyword or use the advanced search option. You will need to sign up for a free account and provide your email address to access the deals.</li>
34
- </ul>
35
- <h3>How to use these websites to download the book</h3>
36
- <p>To use these websites to download 101 Essays That Will Change The Way You Think for free in EPUB format, you will need to follow these steps:</p>
37
- <p>101 essays that will change your think pdf free download<br />
38
- How to get 101 essays that will change the way you think ebook for free<br />
39
- Download 101 essays that will change the way you think by Brianna Wiest epub<br />
40
- 101 essays that will change the way you think book free online<br />
41
- Read 101 essays that will change the way you think pdf online<br />
42
- 101 essays that will change the way you think epub torrent download<br />
43
- 101 essays that will change the way you think mobi download free<br />
44
- Where to find 101 essays that will change the way you think epub free<br />
45
- 101 essays that will change the way you think pdfdrive download link<br />
46
- 101 essays that will change the way you think internet archive free ebook<br />
47
- 101 essays that will change the way you think thought catalog books pdf<br />
48
- 101 essays that will change the way you think epub vk download<br />
49
- 101 essays that will change the way you think pdf reddit free link<br />
50
- 101 essays that will change the way you think ebook download zip<br />
51
- 101 essays that will change the way you think epub google drive free<br />
52
- 101 essays that will change the way you think pdf scribd download<br />
53
- 101 essays that will change the way you think kindle edition free download<br />
54
- 101 essays that will change the way you think epub zippyshare download link<br />
55
- 101 essays that will change the way you think pdf goodreads free ebook<br />
56
- 101 essays that will change the way you think epub mediafire download free<br />
57
- 101 essays that will change the way you think pdf libgen download link<br />
58
- 101 essays that will change the way you think ebook download epub dump<br />
59
- 101 essays that will change the way you think epub b-ok download free<br />
60
- 101 essays that will change the way you think pdf bookbub free ebook<br />
61
- 101 essays that will change the way you think ebook download mobilism<br />
62
- 101 essays that will change the way you think epub dropbox download link<br />
63
- 101 essays that will change the way you think pdf bookscouter free ebook<br />
64
- 101 essays that will change the way you think ebook download smashwords<br />
65
- 101 essays that will change the way you think epub mega.nz download free<br />
66
- 101 essays that will change the way you think pdf bookfinder free ebook</p>
67
- <ol>
68
- <li>Go to the website of your choice and find the book using the browsing or searching options.</li>
69
- <li>Click on the book title or cover image to go to the book page.</li>
70
- <li>Look for the download button or link and click on it.</li>
71
- <li>Select the EPUB format from the available options and confirm your download.</li>
72
- <li>Save the file to your device or transfer it to your e-reader using a USB cable or wireless connection.</li>
73
- <li>Open the file using your preferred application or software and enjoy reading!</li>
74
- </ol>
75
- <h2>Conclusion</h2>
76
- <h3>A summary of the main points and a call to action</h3>
77
- <p>In this article, we have shown you how to download 101 Essays That Will Change The Way You Think for free in EPUB format. We have also explained what EPUB format is, how to open and read it on different devices, and where to find free EPUB downloads of 101 Essays That Will Change The Way You Think. We hope that this article has been helpful and informative for you. If you are interested in reading this book, we encourage you to download it today and start learning from the wisdom and insights of Brianna Wiest. This book will surely change the way you think and live your life!</p>
78
- <h2>FAQs</h2>
79
- <h3>Q1: Is it legal to download free ebooks?</h3>
80
- <p>A1: It depends on the source and the license of the ebook. Some ebooks are in the public domain or have been released under a Creative Commons license, which means that they are free and legal to download and share. However, some ebooks are protected by copyright laws and require permission or payment from the author or publisher to download and use them. Therefore, you should always check the terms and conditions of the website and the ebook before downloading them.</p>
81
- <h3>Q2: How can I convert EPUB files to other formats?</h3>
82
- <p>A2: If you want to convert EPUB files to other formats, such as PDF, MOBI, TXT, or HTML, you can use an online converter tool or a software program. Some of the most popular ones are:</p>
83
- <ul>
84
- <li><a href="">Online-Convert.com</a>: This is an online tool that can convert EPUB files to various formats for free. You just need to upload your file, choose your output format, and click on convert.</li>
85
- <li><a href="">Zamzar.com</a>: This is another online tool that can convert EPUB files to various formats for free. You just need to upload your file, choose your output format, enter your email address, and click on convert.</li>
86
- <li><a href="">Calibre</a>: This is a software program that can convert EPUB files to various formats for free. You just need to download and install it on your computer, add your file, choose your output format, and click on convert.</li>
87
- </ul>
88
- <h3>Q3: What are some other books that will change the way I think?</h3>
89
- <p>A3: If you enjoyed reading 101 Essays That Will Change The Way You Think, you might also like these books that will change the way you think:</p>
90
- <ul>
91
- <li><a href="">The Power of Now by Eckhart Tolle</a>: This is a book that teaches you how to live in the present moment and free yourself from negative thoughts and emotions.</li>
92
- <li><a href="">The 7 Habits of Highly Effective People by Stephen R. Covey</a>: This is a book that shows you how to develop habits that will help you achieve your personal and professional goals.</li>
93
- <li><a href="">The Subtle Art of Not Giving a F*ck by Mark Manson</a>: This is a book that teaches you how to focus on what matters and let go of what doesn't in life.</li>
94
- <li><a href="">Atomic Habits by James Clear</a>: This is a book that teaches you how to build good habits and break bad ones using simple and effective strategies.</li>
95
- <li><a href="">Think and Grow Rich by Napoleon Hill</a>: This is a book that reveals the secrets of success and wealth that have been proven by thousands of people.</li>
96
- </ul>
97
- <h3>Q4: How can I support the author of 101 Essays That Will Change The Way You Think?</h3>
98
- <p>A4: If you liked reading 101 Essays That Will Change The Way You Think, you can support the author by doing the following:</p>
99
- <ul>
100
- <li>Buy the book from a reputable online or offline store, such as Amazon, Barnes & Noble, or Book Depository.</li>
101
- <li>Leave a positive review and rating on the website where you bought the book or on other platforms, such as Goodreads, Facebook, or Instagram.</li>
102
- <li>Share the book with your friends, family, and social media followers and encourage them to read it.</li>
103
- <li>Follow the author on her website, blog, or social media accounts and subscribe to her newsletter or podcast.</li>
104
- <li>Check out her other books and products and buy them if you are interested.</li>
105
- </ul>
106
- <h3>Q5: Where can I find more resources on personal development and self-improvement?</h3>
107
- <p>A5: If you want to learn more about personal development and self-improvement, you can find more resources on these websites:</p>
108
- <ul>
109
- <li><a href="">TED</a>: This is a website that features inspiring and informative talks from experts and leaders on various topics, including personal growth, happiness, motivation, and more.</li>
110
- <li><a href="">Mindvalley</a>: This is a website that offers online courses, programs, and events on various aspects of personal development, such as health, wealth, relationships, spirituality, and more.</li>
111
- <li><a href="">Lifehack</a>: This is a website that provides practical tips, advice, and hacks on how to improve your life in different areas, such as productivity, communication, creativity, and more.</li>
112
- <li><a href="">Tiny Buddha</a>: This is a website that shares wisdom and stories from people who have overcome challenges and learned valuable lessons in life.</li>
113
- <li><a href="">The School of Life</a>: This is a website that offers videos, articles, books, and events on how to live wisely and well in the modern world.</li>
114
- </ul></p> 401be4b1e0<br />
115
- <br />
116
- <br />
spaces/1phancelerku/anime-remove-background/Download CSR Racing APK and compete with the best - Over 100 licensed cars and stunning graphics.md DELETED
@@ -1,86 +0,0 @@
1
- <br />
2
- <h1>CSR Racing: A Review of the Best-Selling Drag Racing Game</h1>
3
- <p>If you are a fan of racing games, you have probably heard of CSR Racing. It is a free-to-play drag racing game for Android and iOS devices that features over 100 licensed cars from various manufacturers, stunning graphics and realistic physics, different gameplay modes to suit different preferences and challenges, and customization options to upgrade and personalize your cars. In this article, we will review what CSR Racing is all about and why it is one of the best drag racing games of all time.</p>
4
- <h2>Features of CSR Racing</h2>
5
- <h3>Licensed Cars</h3>
6
- <p>One of the main attractions of CSR Racing is its car collection. The game features over 100 licensed cars from some of the world's most prestigious car manufacturers, such as McLaren, Bugatti, Aston Martin, Hennessey, Lamborghini, Ferrari, and more. You can choose from a variety of models, ranging from classic muscle cars to modern supercars. The cars are divided into five tiers, each with different performance and price levels. You have to beat the crew bosses of each tier to unlock the next one and eventually face the international crew, which consists of the best racers from around the world.</p>
7
- <h2>csr racing download apk</h2><br /><p><b><b>Download File</b> &#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNSnX">https://jinyurl.com/2uNSnX</a></b></p><br /><br />
8
- <h3>Stunning Graphics</h3>
9
- <p>Another feature that makes CSR Racing stand out is its graphics. The game has realistic graphics and physics that make the races immersive and thrilling. You can see the details of your car, such as the engine, the interior, and the paint job. You can also see the effects of the weather, the lighting, and the smoke on the screen. The game has different locations and backgrounds, such as city streets, industrial areas, and desert roads. The game also has slow-motion and camera angles that add to the excitement of the races.</p>
10
- <h3>Gameplay Modes</h3>
11
- <p>CSR Racing has different gameplay modes to suit different preferences and challenges. The main mode is the career mode, where you have to beat the crew bosses of each tier and progress through the story. The game also has other modes, such as daily battles, where you can race against a random car and win prizes; restriction races, where you have to race with certain limitations, such as a specific car or a specific fuel level; tournaments, where you can compete with other players for rewards; and online races, where you can race against other players in real-time.</p>
12
- <h3>Customization Options</h3>
13
- <p>CSR Racing also allows you to customize your cars with various upgrades and personalization options. You can upgrade your engine, turbo, intake, nitrous, body, tires, and gearbox to improve your performance and speed. You can also personalize your car with different paint jobs, decals, and plates. You can even change the color of your brake calipers, rims, and interior. You can also tune your car to optimize your gear ratios and nitrous timing.</p>
14
- <h2>Reception of CSR Racing</h2>
15
- <h3>Critical Reviews</h3>
16
- <p>CSR Racing has received positive reviews from critics and players alike. The game has an average score of 4.4 out of 5 on Google Play and 4.7 out of 5 on App Store . The game has also won several awards and nominations, such as the Best Mobile Game at the 2013 BAFTA Games Awards and the Best Racing Game at the 2012 Pocket Gamer Awards . Critics praised the game's graphics, gameplay, and car collection. However, some also criticized its freemium model, which requires real money purchases to unlock some features and cars.</p>
17
- <h3>Player Feedback</h3>
18
- <p>CSR Racing has also received positive feedback from players who enjoyed its graphics, gameplay, and car collection. The game has over 130 million downloads and over 2.8 million ratings on Google Play and over 1 million ratings on App Store . The game has also reached some impressive achievements, such as being one of the top-grossing games on both platforms and being featured in several media outlets . However, some players also complained about its freemium model, which requires real money purchases to unlock some features and cars.</p>
19
- <p>csr racing apk free download for android<br />
20
- csr racing mod apk download unlimited money and gold<br />
21
- csr racing 2 apk download latest version<br />
22
- csr racing hack apk download no root<br />
23
- csr racing game download apk pure<br />
24
- csr racing 5.1.1 apk download<br />
25
- csr racing classic apk download<br />
26
- csr racing offline apk download<br />
27
- csr racing 3d apk download<br />
28
- csr racing apk obb download<br />
29
- csr racing 2 mod apk download revdl<br />
30
- csr racing 2 hack apk download ios<br />
31
- csr racing 2 mod apk download android 1<br />
32
- csr racing 2 mod apk download rexdl<br />
33
- csr racing 2 mod apk download happymod<br />
34
- csr racing 2 mod apk download unlimited everything<br />
35
- csr racing 2 mod apk download for pc<br />
36
- csr racing 2 mod apk download latest version 2023<br />
37
- csr racing 2 mod apk download highly compressed<br />
38
- csr racing 2 mod apk download mega<br />
39
- csr racing 2 mod apk download old version<br />
40
- csr racing 2 mod apk download uptodown<br />
41
- csr racing 2 mod apk download apkpure<br />
42
- csr racing 2 mod apk download android oyun club<br />
43
- csr racing 2 mod apk download unlimited keys and cash<br />
44
- csr racing 2 mod apk download unlimited cars unlocked<br />
45
- csr racing 2 mod apk download anti ban<br />
46
- csr racing 2 mod apk download all cars unlocked and upgraded<br />
47
- csr racing 2 mod apk download no human verification<br />
48
- csr racing 2 mod apk download no root required<br />
49
- csr racing 2 mod apk download online generator<br />
50
- csr racing 2 mod apk download offline mode<br />
51
- csr racing 2 mod apk download obb file<br />
52
- csr racing 2 mod apk download original version<br />
53
- csr racing 2 mod apk download onhax<br />
54
- csr racing 2 mod apk download pc windows 10<br />
55
- csr racing 2 mod apk download pc windows 7<br />
56
- csr racing 2 mod apk download pc bluestacks<br />
57
- csr racing 2 mod apk download pc nox player<br />
58
- csr racing 2 mod apk download pc gameloop<br />
59
- csr racing 2 mod apk download pc memu play<br />
60
- csr racing 2 mod apk download pc ldplayer<br />
61
- csr racing 2 mod apk download pc genymotion<br />
62
- csr racing 2 mod apk download pc koplayer<br />
63
- csr racing 2 mod apk download pc remix os player<br />
64
- csr racing 2 mod apk download pc droid4x</p>
65
- <h2>CSR Racing 2: The Sequel</h2>
66
- <h3>Improvements over CSR Racing</h3>
67
- <p>CSR Racing has a sequel, CSR Racing 2, which was released in 2016. The sequel improves on the original game with more cars, more customization options, more modes, and more social features. The sequel features over 200 licensed cars from various manufacturers , including some rare and exclusive models that are not available in other games. The sequel also allows you to customize your cars with more parts, paint jobs, decals, and plates. You can also tune your car to optimize your performance and speed. The sequel also has more modes, such as crew battles, ladder races, regulation races, live races, and special events. The sequel also has more social features, such as joining a crew, chatting with other players, competing in leaderboards, and participating in crew championships.</p>
68
- <h3>3D Rendering Engine</h3>
69
- <p>The sequel also has a 3D rendering engine that makes the cars look even more realistic and detailed. The engine uses advanced techniques, such as dynamic lighting, shadows, reflections, and depth of field. The engine also allows you to view your car from different angles and zoom in to see the details. You can also open the doors, hood, and trunk of your car and see the interior and the engine.</p>
70
- <h2>Conclusion</h2>
71
- <p>CSR Racing is a great drag racing game for Android and iOS devices that features over 100 licensed cars from various manufacturers, stunning graphics and realistic physics, different gameplay modes to suit different preferences and challenges, and customization options to upgrade and personalize your cars. The game has received positive reviews from critics and players alike, who praised its graphics, gameplay, and car collection. However, some also criticized its freemium model, which requires real money purchases to unlock some features and cars. The game has a sequel, CSR Racing 2, which improves on the original game with more cars, more customization options, more modes, and more social features. It also has a 3D rendering engine that makes the cars look even more realistic and detailed. If you are looking for a fun and exciting drag racing game for your mobile device, you should definitely check out CSR Racing and CSR Racing 2.</p>
72
- <h2>FAQs</h2>
73
- <p>Here are some frequently asked questions about CSR Racing:</p>
74
- <ul>
75
- <li><b>How do I download CSR Racing?</b></li>
76
- <p>You can download CSR Racing from Google Play or App Store for free. However, you may need to make some in-app purchases to unlock some features and cars.</p>
77
- <li><b>How do I play CSR Racing?</b></li>
78
- <p>You can play CSR Racing by tapping the screen to shift gears and use the nitrous boost at the right time. You can also customize your cars with various upgrades and personalization options. You can also choose from different modes, such as career mode, daily battles, restriction races, tournaments, and online races.</p>
79
- <li><b>What are the best cars in CSR Racing?</b></li>
80
- <p>The best cars in CSR Racing depend on your preference and budget. However, some of the most popular cars in the game are the Bugatti Veyron Super Sport , the McLaren P1 , the Hennessey Venom GT , the Koenigsegg Agera R , and the Pagani Huayra .</p>
81
- <li><b>How do I get free gold in CSR Racing?</b></li>
82
- <p>You can get free gold in CSR Racing by completing achievements, watching videos, liking the game on Facebook , following the game on Twitter , or inviting your friends to play the game.</p>
83
- <li><b>What is the difference between CSR Racing and CSR Racing 2?</b></li>
84
- <p>The difference between CSR Racing and CSR Racing 2 is that the sequel improves on the original game with more cars, more customization options, more modes, and more social features. It also has a 3D rendering engine that makes the cars look even more realistic and detailed.</p> 197e85843d<br />
85
- <br />
86
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy Turkish Music Anywhere Download and Stream the Top Turkish Songs of 2023.md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>Music Download Turkish: How to Enjoy the Rich and Diverse Sounds of Turkey</h1>
3
- <p>Turkey is a country with a long and rich history, culture, and tradition. One of the most distinctive aspects of Turkey is its music, which reflects the influences of various civilizations, religions, and regions that have shaped the country over the centuries. Turkish music is not only a source of entertainment, but also a way of expressing emotions, beliefs, values, and identity.</p>
4
- <p>If you are interested in exploring the musical heritage of Turkey, you might be wondering how to find and download Turkish music online. In this article, we will introduce you to the different types of Turkish music, from classical to modern, and show you where you can access them for free or for a fee. Whether you are looking for relaxing melodies, energetic rhythms, or exotic sounds, you will surely find something that suits your taste and mood.</p>
5
- <h2>music download turkish</h2><br /><p><b><b>Download</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNNxY">https://jinyurl.com/2uNNxY</a></b></p><br /><br />
6
- <h2>Types of Turkish music</h2>
7
- <p>Turkish music can be broadly divided into three main categories: classical, folk, and modern. Each category has its own history, characteristics, genres, and artists. Let's take a closer look at each one.</p>
8
- <h3>Classical Turkish music</h3>
9
- <h4>History and characteristics</h4>
10
- <p>Classical Turkish music, also known as Ottoman classical music or Turkish art music, is the oldest and most refined form of Turkish music. It developed during the Ottoman Empire (1299-1922), which spanned across Asia, Europe, and Africa. Classical Turkish music was influenced by various musical traditions, such as Persian, Arabic, Byzantine, Balkan, and Central Asian.</p>
11
- <p>Classical Turkish music is based on a system of modes called makams, which are similar to scales in Western music. Each makam has a specific set of notes, intervals, and melodic patterns that create a certain mood or emotion. There are hundreds of makams in classical Turkish music, each with its own name and rules.</p>
12
- <p>Classical Turkish music is usually performed by an ensemble of instruments and vocalists. The most common instruments are the ud (a lute-like stringed instrument), the ney (a reed flute), the kanun (a plucked zither), the kemençe (a bowed fiddle), the tanbur (a long-necked lute), and the darbuka (a goblet-shaped drum). The vocalists sing poems or lyrics that are often based on mystical themes or love stories.</p>
13
- <h4>Famous composers and performers</h4>
14
- <p>Some of the most famous composers of classical Turkish music are Buhurizade Mustafa Itri (1640-1712), who is considered the father of classical Turkish music; Dede Efendi (1778-1846), who composed hundreds of works in various makams; and Zeki Müren (1931-1996), who was known as the sun of classical Turkish music.</p>
15
- <p>Some of the most famous performers of classical Turkish music are Munir Nurettin Selçuk (1900-1981), who was a renowned singer and composer; Bekir Sıdkı Sezgin (1919-1995), who was a master of the ud; Niyazi Sayın (1927-2020), who was a virtuoso of the ney; Safiye Ayla (1907-1998), who was one of the first female singers of classical Turkish music; and Kani Karaca (1930-2004), who was a legendary vocalist of religious songs.</p>
16
- <h4>Where to download classical Turkish music</h4> <p>If you are looking for some websites to download classical Turkish music, you have plenty of options to choose from. Here are some of the best ones:</p>
17
- <ul>
18
- <li><a href="(^1^)">Lifewire</a>: This website provides a list of six free classical music download sites, including Classic Cat, Musopen, Free Music Archive, and more. You can browse by composer, instrument, genre, or mood, and stream or download the tracks in MP3 format.</li>
19
- <li><a href="(^2^)">Free Music Archive</a>: This website offers thousands of free classical music downloads from various composers and performers. You can sort the list by artist name, track, album, genre, or rating, and stream or download the tracks in MP3 format.</li>
20
- <li><a href="(^3^)">YouTube</a>: This website features a channel called Classical Turkish Music, which uploads videos of classical Turkish music performances and concerts. You can watch the videos online or use a YouTube downloader tool to save them as MP3 files.</li>
21
- </ul>
22
- <h3>Folk Turkish music</h3>
23
- <h4>History and characteristics</h4>
24
- <p>Folk Turkish music, also known as Anatolian folk music or Turkish folk music, is the traditional and rural music of Turkey. It originated from the various ethnic groups and regions of Anatolia, which is the Asian part of Turkey. Folk Turkish music reflects the diversity of cultures, languages, religions, and lifestyles of the Anatolian people.</p>
25
- <p>music download turkish songs<br />
26
- music download turkish pop<br />
27
- music download turkish folk<br />
28
- music download turkish rap<br />
29
- music download turkish classical<br />
30
- music download turkish rock<br />
31
- music download turkish mp3<br />
32
- music download turkish free<br />
33
- music download turkish online<br />
34
- music download turkish app<br />
35
- music download turkish site<br />
36
- music download turkish best<br />
37
- music download turkish legal<br />
38
- music download turkish quality<br />
39
- music download turkish fast<br />
40
- music download turkish new<br />
41
- music download turkish old<br />
42
- music download turkish hits<br />
43
- music download turkish remixes<br />
44
- music download turkish albums<br />
45
- music download turkish artists<br />
46
- music download turkish bands<br />
47
- music download turkish singers<br />
48
- music download turkish composers<br />
49
- music download turkish genres<br />
50
- music download turkish styles<br />
51
- music download turkish instruments<br />
52
- music download turkish lyrics<br />
53
- music download turkish karaoke<br />
54
- music download turkish ringtones<br />
55
- music download turkish video<br />
56
- music download turkish youtube<br />
57
- music download turkish spotify<br />
58
- music download turkish soundcloud<br />
59
- music download turkish itunes<br />
60
- music download turkish amazon<br />
61
- music download turkish google play<br />
62
- music download turkish deezer<br />
63
- music download turkish tidal<br />
64
- music download turkish napster<br />
65
- music download turkish pandora<br />
66
- music download turkish iheartradio<br />
67
- music download turkish tunein<br />
68
- music download turkish shazam<br />
69
- music download turkish audiomack<br />
70
- music download turkish datpiff<br />
71
- music download turkish bandcamp<br />
72
- music download turkish soundclick<br />
73
- music download turkish reverbnation</p>
74
- <p>Folk Turkish music is based on a system of rhythms called usuls, which are similar to meters in Western music. Each usul has a specific number and pattern of beats that create a certain tempo or groove. There are dozens of usuls in folk Turkish music, each with its own name and rules.</p>
75
- <p>Folk Turkish music is usually performed by soloists or small groups of instruments and vocalists. The most common instruments are the bağlama (a long-necked lute), the kaval (an end-blown flute), the zurna (a double-reed oboe), the davul (a large drum), and the cümbüş (a banjo-like instrument). The vocalists sing poems or lyrics that are often based on folk tales, legends, proverbs, or social issues.</p>
76
- <h4>Regional variations and styles</h4>
77
- <p>Folk Turkish music has many regional variations and styles, depending on the geographic location, climate, history, and culture of each area. Some of the most prominent regions and styles are:</p>
78
- <ul>
79
- <li>Black Sea: This region is known for its lively and upbeat folk music, influenced by the Pontic Greeks. The main instruments are the kemençe (a bowed lyre) and the tulum (a bagpipe). The main genres are horon (a fast dance) and karşılama (a greeting song).</li>
80
- <li>Aegean: This region is known for its melodic and romantic folk music, influenced by the Greeks and the Balkans. The main instruments are the ud (a lute-like stringed instrument) and the kanun (a plucked zither). The main genres are zeybek (a slow dance) and uzun hava (a long song).</li>
81
- <li>Central Anatolia: This region is known for its epic and heroic folk music, influenced by the Turkic nomads. The main instrument is the bağlama (a long-necked lute). The main genres are bozlak (a lament song) and aşık (a troubadour song).</li>
82
- <li>Eastern Anatolia: This region is known for its complex and diverse folk music, influenced by the Kurds, Armenians, Georgians, and Persians. The main instruments are the kaval (an end-blown flute) and the zurna (a double-reed oboe). The main genres are halay (a circle dance) and dengbej (a storytelling song).</li>
83
- <li>Southeastern Anatolia: This region is known for its rhythmic and energetic folk music, influenced by the Arabs and the Kurds. The main instruments are the cümbüş (a banjo-like instrument) and the davul (a large drum). The main genres are çiftetelli (a belly dance) and semah (a religious dance).</li>
84
- </ul> backgrounds, and tastes, and are influenced by both Turkish and Western musical trends.</p>
85
- <li>What is the best way to learn Turkish music?</li>
86
- <p>The best way to learn Turkish music is to listen to it as much as possible, and to try to understand the meaning, structure, and emotion behind each song. You can also learn Turkish music by taking lessons from a teacher, joining a music group, reading books or articles, watching videos or documentaries, or visiting Turkey and experiencing the music live.</p>
87
- <li>What are some of the benefits of listening to Turkish music?</li>
88
- <p>Some of the benefits of listening to Turkish music are:</p>
89
- <ul>
90
- <li>It can improve your mood, reduce stress, and increase happiness.</li>
91
- <li>It can enhance your creativity, memory, and concentration.</li>
92
- <li>It can broaden your cultural horizons, increase your curiosity, and enrich your knowledge.</li>
93
- <li>It can help you learn Turkish language, history, and culture.</li>
94
- <li>It can connect you with other people who share your interest in Turkish music.</li>
95
- </ul>
96
- <li>What are some of the challenges of listening to Turkish music?</li>
97
- <p>Some of the challenges of listening to Turkish music are:</p>
98
- <ul>
99
- <li>It can be difficult to find and access Turkish music online or offline, especially if you live outside of Turkey.</li>
100
- <li>It can be hard to understand the lyrics or the meaning of the songs, especially if you don't speak Turkish or know the cultural context.</li>
101
- <li>It can be confusing to distinguish between the different types, genres, and styles of Turkish music, especially if you are not familiar with the musical terminology or theory.</li>
102
- <li>It can be overwhelming to choose from the vast and diverse repertoire of Turkish music, especially if you don't have a clear preference or taste.</li>
103
- </ul></p> 401be4b1e0<br />
104
- <br />
105
- <br />
spaces/1phancelerku/anime-remove-background/Experience The Seven Deadly Sins Grand Cross on PC with Netmarbles PC Client Beta.md DELETED
@@ -1,149 +0,0 @@
1
-
2
- <h1>How to Download Grand Cross on PC</h1>
3
- <p>Grand Cross is a popular mobile game based on the anime series <em>The Seven Deadly Sins</em>. It is a role-playing game that lets you collect and customize your favorite characters, explore a vast world, and engage in thrilling battles. If you are a fan of Grand Cross, you might be wondering how you can play it on your PC. In this article, we will show you how to download Grand Cross on PC using two methods: Windows 11 and Android emulators. We will also give you some tips and tricks to enhance your gaming experience. Let's get started!</p>
4
- <h2>download grand cross on pc</h2><br /><p><b><b>DOWNLOAD</b> &#10004;&#10004;&#10004; <a href="https://jinyurl.com/2uNTDM">https://jinyurl.com/2uNTDM</a></b></p><br /><br />
5
- <h2>What is Grand Cross?</h2>
6
- <h3>A brief introduction to the game and its features</h3>
7
- <p>Grand Cross is a game developed by Netmarble, one of the leading mobile game developers in the world. It is based on the anime series <em>The Seven Deadly Sins</em>, which follows the adventures of a group of powerful knights who are accused of betraying their kingdom. The game features stunning graphics, voice acting, and original music from the anime. You can relive the story of the anime, or create your own adventure with various modes and events.</p>
8
- <p>Some of the features of Grand Cross include:</p>
9
- <ul>
10
- <li>Collecting and customizing over 200 characters from the anime, each with their own skills, costumes, and equipment.</li>
11
- <li>Exploring a vast open world with different regions, quests, and secrets.</li>
12
- <li>Battling against enemies using a unique card-based combat system that requires strategy and timing.</li>
13
- <li>Joining a guild and cooperating with other players in raids, PvP, and guild wars.</li>
14
- <li>Enjoying various mini-games, such as cooking, fishing, and tavern management.</li>
15
- </ul>
16
- <h3>The benefits of playing Grand Cross on PC</h3>
17
- <p>While Grand Cross is designed for mobile devices, there are many reasons why you might want to play it on your PC. Some of the benefits of playing Grand Cross on PC include:</p>
18
- <ul>
19
- <li>A bigger screen that allows you to appreciate the graphics and animations better.</li>
20
- <li>A more comfortable control scheme that lets you use your keyboard, mouse, or controller instead of touch gestures.</li>
21
- <li>A faster performance that reduces lag, loading times, and crashes.</li>
22
- <li>A longer battery life that lets you play for hours without worrying about draining your phone.</li>
23
- <li>A more secure environment that protects your account and data from hackers and malware.</li>
24
- </ul>
25
- <h2>How to Play Grand Cross on PC with Windows 11</h2>
26
- <h3>The steps to install and run Windows Subsystem for Android</h3>
27
- <p>One of the easiest ways to play Grand Cross on PC is to use Windows 11, the latest operating system from Microsoft. Windows 11 has a feature called Windows Subsystem for Android, which allows you to run Android apps natively on your PC. This means that you don't need to install any third-party emulator or software. You just need to have a compatible PC and a Windows 11 account. Here are the steps to install and run Windows Subsystem for Android:</p>
28
- <p>How to download and play The Seven Deadly Sins: Grand Cross on PC<br />
29
- The Seven Deadly Sins: Grand Cross PC client beta download guide<br />
30
- Best emulator for The Seven Deadly Sins: Grand Cross on PC<br />
31
- The Seven Deadly Sins: Grand Cross PC version release date and features<br />
32
- The Seven Deadly Sins: Grand Cross PC gameplay and review<br />
33
- The Seven Deadly Sins: Grand Cross PC requirements and specifications<br />
34
- The Seven Deadly Sins: Grand Cross PC download link and installation steps<br />
35
- The Seven Deadly Sins: Grand Cross PC tips and tricks<br />
36
- The Seven Deadly Sins: Grand Cross PC cheats and hacks<br />
37
- The Seven Deadly Sins: Grand Cross PC support and troubleshooting<br />
38
- The Seven Deadly Sins: Grand Cross PC vs mobile comparison<br />
39
- The Seven Deadly Sins: Grand Cross PC keyboard and mouse controls<br />
40
- The Seven Deadly Sins: Grand Cross PC graphics and performance settings<br />
41
- The Seven Deadly Sins: Grand Cross PC update and patch notes<br />
42
- The Seven Deadly Sins: Grand Cross PC events and rewards<br />
43
- The Seven Deadly Sins: Grand Cross PC characters and tier list<br />
44
- The Seven Deadly Sins: Grand Cross PC team building and strategy guide<br />
45
- The Seven Deadly Sins: Grand Cross PC best gear and costumes<br />
46
- The Seven Deadly Sins: Grand Cross PC PvP and PvE modes<br />
47
- The Seven Deadly Sins: Grand Cross PC story and lore<br />
48
- The Seven Deadly Sins: Grand Cross PC mod apk download<br />
49
- The Seven Deadly Sins: Grand Cross PC free diamonds and coins generator<br />
50
- The Seven Deadly Sins: Grand Cross PC redeem codes and coupons<br />
51
- The Seven Deadly Sins: Grand Cross PC fan art and wallpapers<br />
52
- The Seven Deadly Sins: Grand Cross PC discord server and community<br />
53
- How to stream The Seven Deadly Sins: Grand Cross on PC<br />
54
- How to record The Seven Deadly Sins: Grand Cross on PC<br />
55
- How to backup and restore The Seven Deadly Sins: Grand Cross on PC<br />
56
- How to transfer The Seven Deadly Sins: Grand Cross account from mobile to PC<br />
57
- How to play The Seven Deadly Sins: Grand Cross on Mac or Linux<br />
58
- How to play The Seven Deadly Sins: Grand Cross on Windows 10 or 11<br />
59
- How to play The Seven Deadly Sins: Grand Cross offline on PC<br />
60
- How to play The Seven Deadly Sins: Grand Cross with friends on PC<br />
61
- How to play The Seven Deadly Sins: Grand Cross with controller on PC<br />
62
- How to play The Seven Deadly Sins: Grand Cross with VPN on PC<br />
63
- How to play The Seven Deadly Sins: Grand Cross in different languages on PC<br />
64
- How to play The Seven Deadly Sins: Grand Cross in full screen or windowed mode on PC<br />
65
- How to play The Seven Deadly Sins: Grand Cross without lag or crash on PC<br />
66
- How to play The Seven Deadly Sins: Grand Cross without ads or in-app purchases on PC<br />
67
- How to play The Seven Deadly Sins: Grand Cross without internet or wifi on PC</p>
68
- <ol>
69
- <li>Make sure your PC meets the minimum system requirements for Windows 11. You can check them here.</li>
70
- <li>Upgrade your PC to Windows 11. You can follow the instructions here.</li>
71
- <li>Open the Microsoft Store app and search for Windows Subsystem for Android. Click on Get and install it on your PC.</li>
72
- <li>Open the Windows Subsystem for Android app and sign in with your Microsoft account.</li>
73
- <li>Go to the Settings tab and enable Developer Mode. This will allow you to install apps from sources other than the Microsoft Store.</li>
74
- <li>Go to the Apps tab and click on Add app. Browse to the APK file of Grand Cross that you have downloaded from a trusted source, such as APKPure. Click on Open and wait for the app to install.</li>
75
- <li>Once the app is installed, you can launch it from the Apps tab or from the Start menu. You can also pin it to your taskbar or desktop for easy access.</li>
76
- </ol>
77
- <h3>The steps to download and play Grand Cross from the Amazon Appstore</h3>
78
- <p>Another way to play Grand Cross on PC with Windows 11 is to use the Amazon Appstore, which is an alternative app store for Android devices. The Amazon Appstore has a large selection of apps and games, including Grand Cross. You can also enjoy some exclusive benefits, such as free coins, discounts, and giveaways. Here are the steps to download and play Grand Cross from the Amazon Appstore:</p>
79
- <ol>
80
- <li>Open the Microsoft Store app and search for Amazon Appstore. Click on Get and install it on your PC.</li>
81
- <li>Open the Amazon Appstore app and sign in with your Amazon account. If you don't have one, you can create one for free here.</li>
82
- <li>Search for Grand Cross in the app store and click on Download. The app will be installed on your PC automatically.</li>
83
- <li>Once the app is installed, you can launch it from the Amazon Appstore app or from the Start menu. You can also pin it to your taskbar or desktop for easy access.</li>
84
- </ol>
85
- <h3>The tips and tricks to optimize your gaming experience</h3>
86
- <p>To make sure you have the best gaming experience possible, here are some tips and tricks to optimize your settings and performance:</p>
87
- <ul>
88
- <li>Adjust the graphics quality and resolution of Grand Cross according to your PC's specifications. You can do this by going to the game's settings and choosing the Display option. You can also enable or disable features such as shadows, anti-aliasing, and frame rate.</li>
89
- <li>Use a keyboard, mouse, or controller to play Grand Cross more comfortably. You can customize your controls by going to the game's settings and choosing the Control option. You can also use preset layouts or create your own.</li>
90
- <li>Use headphones or speakers to enjoy the sound effects and music of Grand Cross more immersively. You can adjust the volume and sound quality by going to the game's settings and choosing the Sound option.</li>
91
- <li>Connect your PC to a stable internet connection to avoid lag, disconnection, or data loss. You can check your connection speed by going to the game's settings and choosing the Network option.</li>
92
- <li>Update your Windows 11, Windows Subsystem for Android, Amazon Appstore, and Grand Cross regularly to get the latest features, fixes, and security patches.</li>
93
- </ul>
94
- <h2>How to Play Grand Cross on PC with Android Emulators</h2>
95
- <h3>The best Android emulators for Grand Cross</h3>
96
- <p>If you don't have Windows 11 or you prefer another method, you can also play Grand Cross on PC using an Android emulator. An Android emulator is a software that simulates an Android device on your PC, allowing you to run Android apps and games. There are many Android emulators available online, but not all of them are compatible or optimized for Grand Cross. Here are some of the best Android emulators for Grand Cross:</p>
97
- <table border="1">
98
- <tr><th>Name</th><th>Features</th><th>Pros</th><th>Cons</th></tr>
99
- <tr><td>BlueStacks</td><td>- Supports high-end graphics and performance<br>- Has a dedicated gaming mode and interface<br>- Allows keyboard, mouse, and controller mapping<br>- Has a built-in Google Play Store and App Center<br>- Supports multiple instances and macros</td><td>- Easy to install and use<br>- Compatible with most games and apps<br>- Offers various customization options<br>- Has a large user base and community</td><td>- Requires a high-end PC<br>- May have some compatibility issues<br>- May - May show some ads or promotions</td></tr>
100
- <tr><td>NoxPlayer</td><td>- Supports high-performance gaming and multitasking<br>- Has a simple and user-friendly interface<br>- Allows keyboard, mouse, and controller mapping<br>- Has a built-in Google Play Store and Browser<br>- Supports multiple instances and scripts</td><td>- Fast and stable<br>- Compatible with most games and apps<br>- Offers various customization options<br>- Has a large user base and community</td><td>- Requires a high-end PC<br>- May have some compatibility issues<br>- May collect some user data or permissions</td></tr>
101
- <tr><td>LDPlayer</td><td>- Supports high-quality graphics and smooth gameplay<br>- Has a dedicated gaming mode and interface<br>- Allows keyboard, mouse, and controller mapping<br>- Has a built-in Google Play Store and LD Store<br>- Supports multiple instances and macros</td><td>- Lightweight and efficient<br>- Compatible with most games and apps<br>- Offers various customization options<br>- Has a large user base and community</td><td>- Requires a high-end PC<br>- May have some compatibility issues<br>- May show some ads or promotions</td></tr>
102
- <tr><td>MEmu Play</td><td>- Supports high-speed gaming and multitasking<br>- Has a simple and user-friendly interface<br>- Allows keyboard, mouse, and controller mapping<br>- Has a built-in Google Play Store and Browser<br>- Supports multiple instances and scripts</td><td>- Fast and stable<br>- Compatible with most games and apps<br>- Offers various customization options<br>- Has a large user base and community</td><td>- Requires a high-end PC<br>- May have some compatibility issues<br>- May collect some user data or permissions</td></tr>
103
- </table>
104
- <h3>The steps to install and configure an Android emulator</h3>
105
- <p>Once you have chosen an Android emulator that suits your needs, you need to install and configure it on your PC. Here are the general steps to do so:</p>
106
- <ol>
107
- <li>Download the installer of the Android emulator from its official website. Make sure you download the latest version that is compatible with your PC.</li>
108
- <li>Run the installer and follow the instructions to install the Android emulator on your PC. You may need to grant some permissions or accept some terms and conditions.</li>
109
- <li>Launch the Android emulator and sign in with your Google account. If you don't have one, you can create one for free here.</li>
110
- <li>Go to the settings of the Android emulator and adjust the parameters according to your preferences. You can change the resolution, frame rate, CPU, RAM, storage, keyboard, mouse, controller, etc.</li>
111
- <li>Restart the Android emulator to apply the changes.</li>
112
- </ol>
113
- <h3>The steps to download and play Grand Cross from the Google Play Store</h3>
114
- <p>After you have installed and configured your Android emulator, you can download and play Grand Cross from the Google Play Store. Here are the steps to do so:</p>
115
- <ol>
116
- <li>Open the Google Play Store app on your Android emulator and search for Grand Cross. Alternatively, you can use this link to go directly to the game's page.</li>
117
- <li>Click on Install and wait for the game to download and install on your PC.</li>
118
- <li>Once the game is installed, you can launch it from the Google Play Store app or from the home screen of your Android emulator. You can also pin it to your taskbar or desktop for easy access.</li>
119
- </ol>
120
- <h3>The pros and cons of using an Android emulator</h3>
121
- <p>Using an Android emulator has its advantages and disadvantages. Here are some of them:</p>
122
- <table border="1">
123
- <tr><th>Pros</th><th>Cons</th></tr>
124
- <tr><td>- You can use any Android app or game on your PC.<br>- You can choose from different Android emulators according to your needs.<br>- You can customize your settings and controls according to your preferences.<br>- You can use multiple accounts or instances on one PC.<br></td><td>- You may need a high-end PC to run an Android emulator smoothly.<br>- You may encounter some compatibility issues or bugs with some apps or games.<br>- You may risk exposing your data or privacy to some malicious emulators or sources.<br></td></tr>
125
- </table>
126
- <h2>Conclusion</h2>
127
- <h3>A summary of the main points and a call to action</h3>
128
- <p>In conclusion, Grand Cross is a fun and exciting game that you can enjoy on your PC. You can use Windows 11 or an Android emulator to download Grand Cross on PC easily. Both methods have their pros and cons, so you can choose the one that suits you best. We hope this article has helped you learn how to download Grand Cross on PC. Now, what are you waiting for? Download Grand Cross on PC and join the adventure of the Seven Deadly Sins!</p>
129
- <h2>FAQs</h2>
130
- <h3>Q1: Is Grand Cross free to play?</h3>
131
- <p>A1: Yes, Grand Cross is free to play. You can download and play the game without spending any money. However, the game also offers some optional in-app purchases, such as gems, costumes, and bundles, that can enhance your gameplay or unlock some features. You can buy these items with real money or earn them through various ways in the game.</p>
132
- <h3>Q2: Can I play Grand Cross with my friends on PC?</h3>
133
- <p>A2: Yes, you can play Grand Cross with your friends on PC. The game has a cross-platform feature that allows you to play with other players who are using different devices, such as mobile phones, tablets, or PCs. You can also join a guild and chat with your guildmates, or challenge other players in PvP and guild wars.</p>
134
- <h3>Q3: How can I update Grand Cross on PC?</h3>
135
- <p>A3: To update Grand Cross on PC, you need to follow the same steps as you would on your mobile device. Depending on the method you used to download Grand Cross on PC, you can either update the game from the Google Play Store, the Amazon Appstore, or the APK file. You can also check the official website or social media pages of Grand Cross for the latest news and updates.</p>
136
- <h3>Q4: What are the system requirements for Grand Cross on PC?</h3>
137
- <p>A4: The system requirements for Grand Cross on PC vary depending on the method you used to download Grand Cross on PC. If you used Windows 11, you need to have a PC that meets the minimum system requirements for Windows 11. You can check them here. If you used an Android emulator, you need to have a PC that meets the minimum system requirements for the Android emulator. You can check them on the official website of the Android emulator.</p>
138
- <h3>Q5: Where can I find more information about Grand Cross?</h3>
139
- <p>A5: If you want to find more information about Grand Cross, you can visit the following sources:</p>
140
- <ul>
141
- <li>The official website of Grand Cross: https://7dsgc.netmarble.com/en</li>
142
- <li>The official Facebook page of Grand Cross: https://www.facebook.com/7ds.en</li>
143
- <li>The official Twitter account of Grand Cross: https://twitter.com/7DS_en</li>
144
- <li>The official YouTube channel of Grand Cross: https://www.youtube.com/channel/UCfIXcW0n6yTm4sDzXoXzcwA</li>
145
- <li>The official Reddit community of Grand Cross: https://www.reddit.com/r/SDSGrandCross/</li>
146
- <li>The official Discord server of Grand Cross: https://discord.gg/grandcross</li>
147
- </ul>
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py DELETED
@@ -1,394 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- from typing import Callable, List, Optional, Union
18
-
19
- import paddle
20
- import PIL
21
- from packaging import version
22
-
23
- from paddlenlp.transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
24
-
25
- from ...configuration_utils import FrozenDict
26
- from ...models import AutoencoderKL, UNet2DConditionModel
27
- from ...pipeline_utils import DiffusionPipeline
28
- from ...schedulers import (
29
- DDIMScheduler,
30
- DPMSolverMultistepScheduler,
31
- EulerAncestralDiscreteScheduler,
32
- EulerDiscreteScheduler,
33
- LMSDiscreteScheduler,
34
- PNDMScheduler,
35
- )
36
- from ...utils import deprecate, logging
37
- from . import StableDiffusionPipelineOutput
38
- from .safety_checker import StableDiffusionSafetyChecker
39
-
40
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
-
42
-
43
- class StableDiffusionImageVariationPipeline(DiffusionPipeline):
44
- r"""
45
- Pipeline to generate variations from an input image using Stable Diffusion.
46
-
47
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
48
- library implements for all the pipelines (such as downloading or saving etc.)
49
-
50
- Args:
51
- vae ([`AutoencoderKL`]):
52
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
53
- image_encoder ([`CLIPVisionModelWithProjection`]):
54
- Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
55
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
56
- specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
57
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
58
- scheduler ([`SchedulerMixin`]):
59
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
60
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
61
- safety_checker ([`StableDiffusionSafetyChecker`]):
62
- Classification module that estimates whether generated images could be considered offensive or harmful.
63
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
64
- feature_extractor ([`CLIPFeatureExtractor`]):
65
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
66
- """
67
- _optional_components = ["safety_checker"]
68
-
69
- def __init__(
70
- self,
71
- vae: AutoencoderKL,
72
- image_encoder: CLIPVisionModelWithProjection,
73
- unet: UNet2DConditionModel,
74
- scheduler: Union[
75
- DDIMScheduler,
76
- PNDMScheduler,
77
- LMSDiscreteScheduler,
78
- EulerDiscreteScheduler,
79
- EulerAncestralDiscreteScheduler,
80
- DPMSolverMultistepScheduler,
81
- ],
82
- safety_checker: StableDiffusionSafetyChecker,
83
- feature_extractor: CLIPFeatureExtractor,
84
- requires_safety_checker: bool = True,
85
- ):
86
- super().__init__()
87
-
88
- if safety_checker is None and requires_safety_checker:
89
- logger.warn(
90
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
91
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
92
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
93
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
94
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
95
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
96
- )
97
- if safety_checker is not None and feature_extractor is None:
98
- raise ValueError(
99
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
100
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
101
- )
102
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
103
- version.parse(unet.config._ppdiffusers_version).base_version
104
- ) < version.parse("0.9.0.dev0")
105
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
106
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
107
- deprecation_message = (
108
- "The configuration file of the unet has set the default `sample_size` to smaller than"
109
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
110
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
111
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
112
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
113
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
114
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
115
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
116
- " the `unet/config.json` file"
117
- )
118
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
119
- new_config = dict(unet.config)
120
- new_config["sample_size"] = 64
121
- unet._internal_dict = FrozenDict(new_config)
122
- self.register_modules(
123
- vae=vae,
124
- image_encoder=image_encoder,
125
- unet=unet,
126
- scheduler=scheduler,
127
- safety_checker=safety_checker,
128
- feature_extractor=feature_extractor,
129
- )
130
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
131
- self.register_to_config(requires_safety_checker=requires_safety_checker)
132
-
133
- def _encode_image(self, image, num_images_per_prompt, do_classifier_free_guidance):
134
- dtype = self.image_encoder.dtype
135
-
136
- if not isinstance(image, paddle.Tensor):
137
- image = self.feature_extractor(images=image, return_tensors="pd").pixel_values
138
-
139
- image = image.cast(dtype)
140
- image_embeddings = self.image_encoder(image, return_dict=True).image_embeds
141
- image_embeddings = image_embeddings.unsqueeze(1)
142
-
143
- # duplicate image embeddings for each generation per prompt, using mps friendly method
144
- bs_embed, seq_len, _ = image_embeddings.shape
145
- image_embeddings = image_embeddings.tile([1, num_images_per_prompt, 1])
146
- image_embeddings = image_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
147
-
148
- if do_classifier_free_guidance:
149
- uncond_embeddings = paddle.zeros_like(image_embeddings)
150
-
151
- # For classifier free guidance, we need to do two forward passes.
152
- # Here we concatenate the unconditional and text embeddings into a single batch
153
- # to avoid doing two forward passes
154
- image_embeddings = paddle.concat([uncond_embeddings, image_embeddings])
155
-
156
- return image_embeddings
157
-
158
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
159
- def run_safety_checker(self, image, dtype):
160
- if self.safety_checker is not None:
161
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
162
- image, has_nsfw_concept = self.safety_checker(
163
- images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
164
- )
165
- else:
166
- has_nsfw_concept = None
167
- return image, has_nsfw_concept
168
-
169
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
170
- def decode_latents(self, latents):
171
- latents = 1 / 0.18215 * latents
172
- image = self.vae.decode(latents).sample
173
- image = (image / 2 + 0.5).clip(0, 1)
174
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
175
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
176
- return image
177
-
178
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
179
- def prepare_extra_step_kwargs(self, generator, eta):
180
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
181
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
182
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
183
- # and should be between [0, 1]
184
-
185
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
186
- extra_step_kwargs = {}
187
- if accepts_eta:
188
- extra_step_kwargs["eta"] = eta
189
-
190
- # check if the scheduler accepts generator
191
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
192
- if accepts_generator:
193
- extra_step_kwargs["generator"] = generator
194
- return extra_step_kwargs
195
-
196
- def check_inputs(self, image, height, width, callback_steps):
197
- if (
198
- not isinstance(image, paddle.Tensor)
199
- and not isinstance(image, PIL.Image.Image)
200
- and not isinstance(image, list)
201
- ):
202
- raise ValueError(
203
- "`image` has to be of type `paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
204
- f" {type(image)}"
205
- )
206
-
207
- if height % 8 != 0 or width % 8 != 0:
208
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
209
-
210
- if (callback_steps is None) or (
211
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
212
- ):
213
- raise ValueError(
214
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
215
- f" {type(callback_steps)}."
216
- )
217
-
218
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
219
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
220
- shape = [batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor]
221
- if isinstance(generator, list) and len(generator) != batch_size:
222
- raise ValueError(
223
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
224
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
225
- )
226
-
227
- if latents is None:
228
- if isinstance(generator, list):
229
- shape = [
230
- 1,
231
- ] + shape[1:]
232
- latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
233
- latents = paddle.concat(latents, axis=0)
234
- else:
235
- latents = paddle.randn(shape, generator=generator, dtype=dtype)
236
- else:
237
- if latents.shape != shape:
238
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
239
-
240
- # scale the initial noise by the standard deviation required by the scheduler
241
- latents = latents * self.scheduler.init_noise_sigma
242
- return latents
243
-
244
- @paddle.no_grad()
245
- def __call__(
246
- self,
247
- image: Union[PIL.Image.Image, List[PIL.Image.Image], paddle.Tensor],
248
- height: Optional[int] = None,
249
- width: Optional[int] = None,
250
- num_inference_steps: int = 50,
251
- guidance_scale: float = 7.5,
252
- num_images_per_prompt: Optional[int] = 1,
253
- eta: float = 0.0,
254
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
255
- latents: Optional[paddle.Tensor] = None,
256
- output_type: Optional[str] = "pil",
257
- return_dict: bool = True,
258
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
259
- callback_steps: Optional[int] = 1,
260
- ):
261
- r"""
262
- Function invoked when calling the pipeline for generation.
263
-
264
- Args:
265
- image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `paddle.Tensor`):
266
- The image or images to guide the image generation. If you provide a tensor, it needs to comply with the
267
- configuration of
268
- [this](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json)
269
- `CLIPFeatureExtractor`
270
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
271
- The height in pixels of the generated image.
272
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
273
- The width in pixels of the generated image.
274
- num_inference_steps (`int`, *optional*, defaults to 50):
275
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
276
- expense of slower inference.
277
- guidance_scale (`float`, *optional*, defaults to 7.5):
278
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
279
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
280
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
281
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
282
- usually at the expense of lower image quality.
283
- num_images_per_prompt (`int`, *optional*, defaults to 1):
284
- The number of images to generate per prompt.
285
- eta (`float`, *optional*, defaults to 0.0):
286
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
287
- [`schedulers.DDIMScheduler`], will be ignored for others.
288
- generator (`paddle.Generator`, *optional*):
289
- A [paddle generator] to make generation
290
- deterministic.
291
- latents (`paddle.Tensor`, *optional*):
292
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
293
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
294
- tensor will be generated by sampling using the supplied random `generator`.
295
- output_type (`str`, *optional*, defaults to `"pil"`):
296
- The output format of the generate image. Choose between
297
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
298
- return_dict (`bool`, *optional*, defaults to `True`):
299
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
300
- plain tuple.
301
- callback (`Callable`, *optional*):
302
- A function that will be called every `callback_steps` steps during inference. The function will be
303
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
304
- callback_steps (`int`, *optional*, defaults to 1):
305
- The frequency at which the `callback` function will be called. If not specified, the callback will be
306
- called at every step.
307
-
308
- Returns:
309
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
310
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
311
- When returning a tuple, the first element is a list with the generated images, and the second element is a
312
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
313
- (nsfw) content, according to the `safety_checker`.
314
- """
315
- # 0. Default height and width to unet
316
- height = height or self.unet.config.sample_size * self.vae_scale_factor
317
- width = width or self.unet.config.sample_size * self.vae_scale_factor
318
-
319
- # 1. Check inputs. Raise error if not correct
320
- self.check_inputs(image, height, width, callback_steps)
321
-
322
- # 2. Define call parameters
323
- if isinstance(image, PIL.Image.Image):
324
- batch_size = 1
325
- elif isinstance(image, list):
326
- batch_size = len(image)
327
- else:
328
- batch_size = image.shape[0]
329
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
330
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
331
- # corresponds to doing no classifier free guidance.
332
- do_classifier_free_guidance = guidance_scale > 1.0
333
-
334
- # 3. Encode input image
335
- image_embeddings = self._encode_image(image, num_images_per_prompt, do_classifier_free_guidance)
336
-
337
- # 4. Prepare timesteps
338
- self.scheduler.set_timesteps(num_inference_steps)
339
- timesteps = self.scheduler.timesteps
340
-
341
- # 5. Prepare latent variables
342
- num_channels_latents = self.unet.in_channels
343
- latents = self.prepare_latents(
344
- batch_size * num_images_per_prompt,
345
- num_channels_latents,
346
- height,
347
- width,
348
- image_embeddings.dtype,
349
- generator,
350
- latents,
351
- )
352
-
353
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
354
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
355
-
356
- # 7. Denoising loop
357
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
358
- with self.progress_bar(total=num_inference_steps) as progress_bar:
359
- for i, t in enumerate(timesteps):
360
- # expand the latents if we are doing classifier free guidance
361
- latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
362
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
363
-
364
- # predict the noise residual
365
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
366
-
367
- # perform guidance
368
- if do_classifier_free_guidance:
369
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
370
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
371
-
372
- # compute the previous noisy sample x_t -> x_t-1
373
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
374
-
375
- # call the callback, if provided
376
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
377
- progress_bar.update()
378
- if callback is not None and i % callback_steps == 0:
379
- callback(i, t, latents)
380
-
381
- # 8. Post-processing
382
- image = self.decode_latents(latents)
383
-
384
- # 9. Run safety checker
385
- image, has_nsfw_concept = self.run_safety_checker(image, image_embeddings.dtype)
386
-
387
- # 10. Convert to PIL
388
- if output_type == "pil":
389
- image = self.numpy_to_pil(image)
390
-
391
- if not return_dict:
392
- return (image, has_nsfw_concept)
393
-
394
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
 
spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_librosa_objects.py DELETED
@@ -1,48 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # This file is autogenerated by the command `make fix-copies`, do not edit.
16
- # flake8: noqa
17
-
18
- from ..utils import DummyObject, requires_backends
19
-
20
-
21
- class AudioDiffusionPipeline(metaclass=DummyObject):
22
- _backends = ["paddle", "librosa"]
23
-
24
- def __init__(self, *args, **kwargs):
25
- requires_backends(self, ["paddle", "librosa"])
26
-
27
- @classmethod
28
- def from_config(cls, *args, **kwargs):
29
- requires_backends(cls, ["paddle", "librosa"])
30
-
31
- @classmethod
32
- def from_pretrained(cls, *args, **kwargs):
33
- requires_backends(cls, ["paddle", "librosa"])
34
-
35
-
36
- class Mel(metaclass=DummyObject):
37
- _backends = ["paddle", "librosa"]
38
-
39
- def __init__(self, *args, **kwargs):
40
- requires_backends(self, ["paddle", "librosa"])
41
-
42
- @classmethod
43
- def from_config(cls, *args, **kwargs):
44
- requires_backends(cls, ["paddle", "librosa"])
45
-
46
- @classmethod
47
- def from_pretrained(cls, *args, **kwargs):
48
- requires_backends(cls, ["paddle", "librosa"])
 
spaces/2023Liu2023/bingo/cloudflare/worker.js DELETED
@@ -1,18 +0,0 @@
1
- const TRAGET_HOST='hf4all-bingo.hf.space' // Replace this domain with your own; the domain info is shown under Settings > Site domain.
2
-
3
- export default {
4
- async fetch(request) {
5
- const uri = new URL(request.url);
6
- if (uri.protocol === 'http:') {
7
- uri.protocol = 'https:';
8
- return new Response('', {
9
- status: 301,
10
- headers: {
11
- location: uri.toString(),
12
- },
13
- })
14
- }
15
- uri.host = TRAGET_HOST
16
- return fetch(new Request(uri.toString(), request));
17
- },
18
- };
 
spaces/4com/stable-diffusion/app.py DELETED
@@ -1,177 +0,0 @@
1
- import numpy as np
2
- import gradio as gr
3
- import requests
4
- import time
5
- import json
6
- import base64
7
- import os
8
- from PIL import Image
9
- from io import BytesIO
10
-
11
- class Prodia:
12
- def __init__(self, api_key, base=None):
13
- self.base = base or "https://api.prodia.com/v1"
14
- self.headers = {
15
- "X-Prodia-Key": api_key
16
- }
17
-
18
- def generate(self, params):
19
- response = self._post(f"{self.base}/sd/generate", params)
20
- return response.json()
21
-
22
- def transform(self, params):
23
- response = self._post(f"{self.base}/sd/transform", params)
24
- return response.json()
25
-
26
- def controlnet(self, params):
27
- response = self._post(f"{self.base}/sd/controlnet", params)
28
- return response.json()
29
-
30
- def get_job(self, job_id):
31
- response = self._get(f"{self.base}/job/{job_id}")
32
- return response.json()
33
-
34
- def wait(self, job):
35
- job_result = job
36
-
37
- while job_result['status'] not in ['succeeded', 'failed']:
38
- time.sleep(0.25)
39
- job_result = self.get_job(job['job'])
40
-
41
- return job_result
42
-
43
- def list_models(self):
44
- response = self._get(f"{self.base}/models/list")
45
- return response.json()
46
-
47
- def _post(self, url, params):
48
- headers = {
49
- **self.headers,
50
- "Content-Type": "application/json"
51
- }
52
- response = requests.post(url, headers=headers, data=json.dumps(params))
53
-
54
- if response.status_code != 200:
55
- raise Exception(f"Bad Prodia Response: {response.status_code}")
56
-
57
- return response
58
-
59
- def _get(self, url):
60
- response = requests.get(url, headers=self.headers)
61
-
62
- if response.status_code != 200:
63
- raise Exception(f"Bad Prodia Response: {response.status_code}")
64
-
65
- return response
66
-
67
-
68
- def image_to_base64(image_path):
69
- # Open the image with PIL
70
- with Image.open(image_path) as image:
71
- # Convert the image to bytes
72
- buffered = BytesIO()
73
- image.save(buffered, format="PNG") # You can change the format (e.g. to JPEG) if needed
74
-
75
- # Encode the bytes to base64
76
- img_str = base64.b64encode(buffered.getvalue())
77
-
78
- return img_str.decode('utf-8') # Convert bytes to string
79
-
80
-
81
-
82
- prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
83
-
84
- def flip_text(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
85
- result = prodia_client.generate({
86
- "prompt": prompt,
87
- "negative_prompt": negative_prompt,
88
- "model": model,
89
- "steps": steps,
90
- "sampler": sampler,
91
- "cfg_scale": cfg_scale,
92
- "width": width,
93
- "height": height,
94
- "seed": seed
95
- })
96
-
97
- job = prodia_client.wait(result)
98
-
99
- return job["imageUrl"]
100
-
101
- css = """
102
- #generate {
103
- height: 100%;
104
- }
105
- """
106
-
107
- with gr.Blocks(css=css, theme="Base") as demo:
108
-
109
-
110
-
111
- with gr.Row():
112
- gr.Markdown("<h1><center>Stable Diffusion Demo</center></h1>")
113
- with gr.Tab("Playground"):
114
- with gr.Row():
115
- with gr.Column(scale=6, min_width=600):
116
- prompt = gr.Textbox(label="Prompt", placeholder="beautiful cat, 8k", show_label=True, lines=2)
117
- negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", placeholder="text, blurry, fuzziness", show_label=True, lines=3)
118
- with gr.Column():
119
- text_button = gr.Button("Generate", variant='primary', elem_id="generate")
120
-
121
- with gr.Row():
122
-
123
-
124
-
125
- with gr.Column(scale=2):
126
- image_output = gr.Image()
127
-
128
- with gr.Accordion("Advanced options", open=False):
129
- with gr.Row():
130
- with gr.Column(scale=6):
131
- model = gr.Dropdown(interactive=True,value="v1-5-pruned-emaonly.safetensors [d7049739]", show_label=True, label="Model", choices=prodia_client.list_models())
132
-
133
-
134
- with gr.Row():
135
- with gr.Column(scale=1):
136
- sampler = gr.Dropdown(value="DPM++ SDE", show_label=True, label="Sampler", choices=[
137
- "Euler",
138
- "Euler a",
139
- "LMS",
140
- "Heun",
141
- "DPM2",
142
- "DPM2 a",
143
- "DPM++ 2S a",
144
- "DPM++ 2M",
145
- "DPM++ SDE",
146
- "DPM fast",
147
- "DPM adaptive",
148
- "LMS Karras",
149
- "DPM2 Karras",
150
- "DPM2 a Karras",
151
- "DPM++ 2S a Karras",
152
- "DPM++ 2M Karras",
153
- "DPM++ SDE Karras",
154
- "DDIM",
155
- "PLMS",
156
- ])
157
-
158
- with gr.Column(scale=1):
159
- steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=30, step=1)
160
-
161
- with gr.Row():
162
- with gr.Column(scale=1):
163
- width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
164
- height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
165
-
166
- with gr.Column(scale=1):
167
- batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
168
- batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
169
-
170
- cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
171
- seed = gr.Slider(label="Seed", maximum=4294967295, minimum = -1, value=-1, step=1, info="""'-1' is random seed""")
172
-
173
-
174
- text_button.click(flip_text, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output)
175
-
176
- demo.queue(concurrency_count=10)
177
- demo.launch(debug=False, share=False, show_error=False, show_api=False)
 
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_123812KB .py DELETED
@@ -1,118 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from torch import nn
4
-
5
- from . import spec_utils
6
-
7
-
8
- class Conv2DBNActiv(nn.Module):
9
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
10
- super(Conv2DBNActiv, self).__init__()
11
- self.conv = nn.Sequential(
12
- nn.Conv2d(
13
- nin,
14
- nout,
15
- kernel_size=ksize,
16
- stride=stride,
17
- padding=pad,
18
- dilation=dilation,
19
- bias=False,
20
- ),
21
- nn.BatchNorm2d(nout),
22
- activ(),
23
- )
24
-
25
- def __call__(self, x):
26
- return self.conv(x)
27
-
28
-
29
- class SeperableConv2DBNActiv(nn.Module):
30
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
31
- super(SeperableConv2DBNActiv, self).__init__()
32
- self.conv = nn.Sequential(
33
- nn.Conv2d(
34
- nin,
35
- nin,
36
- kernel_size=ksize,
37
- stride=stride,
38
- padding=pad,
39
- dilation=dilation,
40
- groups=nin,
41
- bias=False,
42
- ),
43
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
44
- nn.BatchNorm2d(nout),
45
- activ(),
46
- )
47
-
48
- def __call__(self, x):
49
- return self.conv(x)
50
-
51
-
52
- class Encoder(nn.Module):
53
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
54
- super(Encoder, self).__init__()
55
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
56
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
57
-
58
- def __call__(self, x):
59
- skip = self.conv1(x)
60
- h = self.conv2(skip)
61
-
62
- return h, skip
63
-
64
-
65
- class Decoder(nn.Module):
66
- def __init__(
67
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
68
- ):
69
- super(Decoder, self).__init__()
70
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
71
- self.dropout = nn.Dropout2d(0.1) if dropout else None
72
-
73
- def __call__(self, x, skip=None):
74
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
75
- if skip is not None:
76
- skip = spec_utils.crop_center(skip, x)
77
- x = torch.cat([x, skip], dim=1)
78
- h = self.conv(x)
79
-
80
- if self.dropout is not None:
81
- h = self.dropout(h)
82
-
83
- return h
84
-
85
-
86
- class ASPPModule(nn.Module):
87
- def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
88
- super(ASPPModule, self).__init__()
89
- self.conv1 = nn.Sequential(
90
- nn.AdaptiveAvgPool2d((1, None)),
91
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
92
- )
93
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
94
- self.conv3 = SeperableConv2DBNActiv(
95
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
96
- )
97
- self.conv4 = SeperableConv2DBNActiv(
98
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
99
- )
100
- self.conv5 = SeperableConv2DBNActiv(
101
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
102
- )
103
- self.bottleneck = nn.Sequential(
104
- Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
105
- )
106
-
107
- def forward(self, x):
108
- _, _, h, w = x.size()
109
- feat1 = F.interpolate(
110
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
111
- )
112
- feat2 = self.conv2(x)
113
- feat3 = self.conv3(x)
114
- feat4 = self.conv4(x)
115
- feat5 = self.conv5(x)
116
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
117
- bottle = self.bottleneck(out)
118
- return bottle
 
spaces/801artistry/RVC801/train/data_utils.py DELETED
@@ -1,512 +0,0 @@
1
- import os, traceback
2
- import numpy as np
3
- import torch
4
- import torch.utils.data
5
-
6
- from mel_processing import spectrogram_torch
7
- from utils import load_wav_to_torch, load_filepaths_and_text
8
-
9
-
10
- class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
11
- """
12
- 1) loads audio, text pairs
13
- 2) normalizes text and converts them to sequences of integers
14
- 3) computes spectrograms from audio files.
15
- """
16
-
17
- def __init__(self, audiopaths_and_text, hparams):
18
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
19
- self.max_wav_value = hparams.max_wav_value
20
- self.sampling_rate = hparams.sampling_rate
21
- self.filter_length = hparams.filter_length
22
- self.hop_length = hparams.hop_length
23
- self.win_length = hparams.win_length
24
- self.sampling_rate = hparams.sampling_rate
25
- self.min_text_len = getattr(hparams, "min_text_len", 1)
26
- self.max_text_len = getattr(hparams, "max_text_len", 5000)
27
- self._filter()
28
-
29
- def _filter(self):
30
- """
31
- Filter text & store spec lengths
32
- """
33
- # Store spectrogram lengths for Bucketing
34
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
35
- # spec_length = wav_length // hop_length
36
- audiopaths_and_text_new = []
37
- lengths = []
38
- for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
39
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
40
- audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
41
- lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
42
- self.audiopaths_and_text = audiopaths_and_text_new
43
- self.lengths = lengths
44
-
45
- def get_sid(self, sid):
46
- sid = torch.LongTensor([int(sid)])
47
- return sid
48
-
49
- def get_audio_text_pair(self, audiopath_and_text):
50
- # separate filename and text
51
- file = audiopath_and_text[0]
52
- phone = audiopath_and_text[1]
53
- pitch = audiopath_and_text[2]
54
- pitchf = audiopath_and_text[3]
55
- dv = audiopath_and_text[4]
56
-
57
- phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf)
58
- spec, wav = self.get_audio(file)
59
- dv = self.get_sid(dv)
60
-
61
- len_phone = phone.size()[0]
62
- len_spec = spec.size()[-1]
63
- # print(123,phone.shape,pitch.shape,spec.shape)
64
- if len_phone != len_spec:
65
- len_min = min(len_phone, len_spec)
66
- # amor
67
- len_wav = len_min * self.hop_length
68
-
69
- spec = spec[:, :len_min]
70
- wav = wav[:, :len_wav]
71
-
72
- phone = phone[:len_min, :]
73
- pitch = pitch[:len_min]
74
- pitchf = pitchf[:len_min]
75
-
76
- return (spec, wav, phone, pitch, pitchf, dv)
77
-
78
- def get_labels(self, phone, pitch, pitchf):
79
- phone = np.load(phone)
80
- phone = np.repeat(phone, 2, axis=0)
81
- pitch = np.load(pitch)
82
- pitchf = np.load(pitchf)
83
- n_num = min(phone.shape[0], 900) # DistributedBucketSampler
84
- # print(234,phone.shape,pitch.shape)
85
- phone = phone[:n_num, :]
86
- pitch = pitch[:n_num]
87
- pitchf = pitchf[:n_num]
88
- phone = torch.FloatTensor(phone)
89
- pitch = torch.LongTensor(pitch)
90
- pitchf = torch.FloatTensor(pitchf)
91
- return phone, pitch, pitchf
92
-
93
- def get_audio(self, filename):
94
- audio, sampling_rate = load_wav_to_torch(filename)
95
- if sampling_rate != self.sampling_rate:
96
- raise ValueError(
97
- "{} SR doesn't match target {} SR".format(
98
- sampling_rate, self.sampling_rate
99
- )
100
- )
101
- audio_norm = audio
102
- # audio_norm = audio / self.max_wav_value
103
- # audio_norm = audio / np.abs(audio).max()
104
-
105
- audio_norm = audio_norm.unsqueeze(0)
106
- spec_filename = filename.replace(".wav", ".spec.pt")
107
- if os.path.exists(spec_filename):
108
- try:
109
- spec = torch.load(spec_filename)
110
- except:
111
- print(spec_filename, traceback.format_exc())
112
- spec = spectrogram_torch(
113
- audio_norm,
114
- self.filter_length,
115
- self.sampling_rate,
116
- self.hop_length,
117
- self.win_length,
118
- center=False,
119
- )
120
- spec = torch.squeeze(spec, 0)
121
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
122
- else:
123
- spec = spectrogram_torch(
124
- audio_norm,
125
- self.filter_length,
126
- self.sampling_rate,
127
- self.hop_length,
128
- self.win_length,
129
- center=False,
130
- )
131
- spec = torch.squeeze(spec, 0)
132
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
133
- return spec, audio_norm
134
-
135
- def __getitem__(self, index):
136
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
137
-
138
- def __len__(self):
139
- return len(self.audiopaths_and_text)
140
-
141
-
142
- class TextAudioCollateMultiNSFsid:
143
- """Zero-pads model inputs and targets"""
144
-
145
- def __init__(self, return_ids=False):
146
- self.return_ids = return_ids
147
-
148
- def __call__(self, batch):
149
- """Collates the training batch from normalized text and audio
150
- PARAMS
151
- ------
152
- batch: [text_normalized, spec_normalized, wav_normalized]
153
- """
154
- # Right zero-pad all one-hot text sequences to max input length
155
- _, ids_sorted_decreasing = torch.sort(
156
- torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
157
- )
158
-
159
- max_spec_len = max([x[0].size(1) for x in batch])
160
- max_wave_len = max([x[1].size(1) for x in batch])
161
- spec_lengths = torch.LongTensor(len(batch))
162
- wave_lengths = torch.LongTensor(len(batch))
163
- spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
164
- wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
165
- spec_padded.zero_()
166
- wave_padded.zero_()
167
-
168
- max_phone_len = max([x[2].size(0) for x in batch])
169
- phone_lengths = torch.LongTensor(len(batch))
170
- phone_padded = torch.FloatTensor(
171
- len(batch), max_phone_len, batch[0][2].shape[1]
172
- ) # (spec, wav, phone, pitch)
173
- pitch_padded = torch.LongTensor(len(batch), max_phone_len)
174
- pitchf_padded = torch.FloatTensor(len(batch), max_phone_len)
175
- phone_padded.zero_()
176
- pitch_padded.zero_()
177
- pitchf_padded.zero_()
178
- # dv = torch.FloatTensor(len(batch), 256)#gin=256
179
- sid = torch.LongTensor(len(batch))
180
-
181
- for i in range(len(ids_sorted_decreasing)):
182
- row = batch[ids_sorted_decreasing[i]]
183
-
184
- spec = row[0]
185
- spec_padded[i, :, : spec.size(1)] = spec
186
- spec_lengths[i] = spec.size(1)
187
-
188
- wave = row[1]
189
- wave_padded[i, :, : wave.size(1)] = wave
190
- wave_lengths[i] = wave.size(1)
191
-
192
- phone = row[2]
193
- phone_padded[i, : phone.size(0), :] = phone
194
- phone_lengths[i] = phone.size(0)
195
-
196
- pitch = row[3]
197
- pitch_padded[i, : pitch.size(0)] = pitch
198
- pitchf = row[4]
199
- pitchf_padded[i, : pitchf.size(0)] = pitchf
200
-
201
- # dv[i] = row[5]
202
- sid[i] = row[5]
203
-
204
- return (
205
- phone_padded,
206
- phone_lengths,
207
- pitch_padded,
208
- pitchf_padded,
209
- spec_padded,
210
- spec_lengths,
211
- wave_padded,
212
- wave_lengths,
213
- # dv
214
- sid,
215
- )
216
-
217
-
218
- class TextAudioLoader(torch.utils.data.Dataset):
219
- """
220
- 1) loads audio, text pairs
221
- 2) normalizes text and converts them to sequences of integers
222
- 3) computes spectrograms from audio files.
223
- """
224
-
225
- def __init__(self, audiopaths_and_text, hparams):
226
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
227
- self.max_wav_value = hparams.max_wav_value
228
- self.sampling_rate = hparams.sampling_rate
229
- self.filter_length = hparams.filter_length
230
- self.hop_length = hparams.hop_length
231
- self.win_length = hparams.win_length
232
- self.sampling_rate = hparams.sampling_rate
233
- self.min_text_len = getattr(hparams, "min_text_len", 1)
234
- self.max_text_len = getattr(hparams, "max_text_len", 5000)
235
- self._filter()
236
-
237
- def _filter(self):
238
- """
239
- Filter text & store spec lengths
240
- """
241
- # Store spectrogram lengths for Bucketing
242
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
243
- # spec_length = wav_length // hop_length
244
- audiopaths_and_text_new = []
245
- lengths = []
246
- for audiopath, text, dv in self.audiopaths_and_text:
247
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
248
- audiopaths_and_text_new.append([audiopath, text, dv])
249
- lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
250
- self.audiopaths_and_text = audiopaths_and_text_new
251
- self.lengths = lengths
252
-
253
- def get_sid(self, sid):
254
- sid = torch.LongTensor([int(sid)])
255
- return sid
256
-
257
- def get_audio_text_pair(self, audiopath_and_text):
258
- # separate filename and text
259
- file = audiopath_and_text[0]
260
- phone = audiopath_and_text[1]
261
- dv = audiopath_and_text[2]
262
-
263
- phone = self.get_labels(phone)
264
- spec, wav = self.get_audio(file)
265
- dv = self.get_sid(dv)
266
-
267
- len_phone = phone.size()[0]
268
- len_spec = spec.size()[-1]
269
- if len_phone != len_spec:
270
- len_min = min(len_phone, len_spec)
271
- len_wav = len_min * self.hop_length
272
- spec = spec[:, :len_min]
273
- wav = wav[:, :len_wav]
274
- phone = phone[:len_min, :]
275
- return (spec, wav, phone, dv)
276
-
277
- def get_labels(self, phone):
278
- phone = np.load(phone)
279
- phone = np.repeat(phone, 2, axis=0)
280
- n_num = min(phone.shape[0], 900) # DistributedBucketSampler
281
- phone = phone[:n_num, :]
282
- phone = torch.FloatTensor(phone)
283
- return phone
284
-
285
- def get_audio(self, filename):
286
- audio, sampling_rate = load_wav_to_torch(filename)
287
- if sampling_rate != self.sampling_rate:
288
- raise ValueError(
289
- "{} SR doesn't match target {} SR".format(
290
- sampling_rate, self.sampling_rate
291
- )
292
- )
293
- audio_norm = audio
294
- # audio_norm = audio / self.max_wav_value
295
- # audio_norm = audio / np.abs(audio).max()
296
-
297
- audio_norm = audio_norm.unsqueeze(0)
298
- spec_filename = filename.replace(".wav", ".spec.pt")
299
- if os.path.exists(spec_filename):
300
- try:
301
- spec = torch.load(spec_filename)
302
- except:
303
- print(spec_filename, traceback.format_exc())
304
- spec = spectrogram_torch(
305
- audio_norm,
306
- self.filter_length,
307
- self.sampling_rate,
308
- self.hop_length,
309
- self.win_length,
310
- center=False,
311
- )
312
- spec = torch.squeeze(spec, 0)
313
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
314
- else:
315
- spec = spectrogram_torch(
316
- audio_norm,
317
- self.filter_length,
318
- self.sampling_rate,
319
- self.hop_length,
320
- self.win_length,
321
- center=False,
322
- )
323
- spec = torch.squeeze(spec, 0)
324
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
325
- return spec, audio_norm
326
-
327
- def __getitem__(self, index):
328
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
329
-
330
- def __len__(self):
331
- return len(self.audiopaths_and_text)
332
-
333
-
334
- class TextAudioCollate:
335
- """Zero-pads model inputs and targets"""
336
-
337
- def __init__(self, return_ids=False):
338
- self.return_ids = return_ids
339
-
340
- def __call__(self, batch):
341
- """Collates the training batch from normalized text and audio
342
- PARAMS
343
- ------
344
- batch: [text_normalized, spec_normalized, wav_normalized]
345
- """
346
- # Right zero-pad all one-hot text sequences to max input length
347
- _, ids_sorted_decreasing = torch.sort(
348
- torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
349
- )
350
-
351
- max_spec_len = max([x[0].size(1) for x in batch])
352
- max_wave_len = max([x[1].size(1) for x in batch])
353
- spec_lengths = torch.LongTensor(len(batch))
354
- wave_lengths = torch.LongTensor(len(batch))
355
- spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
356
- wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
357
- spec_padded.zero_()
358
- wave_padded.zero_()
359
-
360
- max_phone_len = max([x[2].size(0) for x in batch])
361
- phone_lengths = torch.LongTensor(len(batch))
362
- phone_padded = torch.FloatTensor(
363
- len(batch), max_phone_len, batch[0][2].shape[1]
364
- )
365
- phone_padded.zero_()
366
- sid = torch.LongTensor(len(batch))
367
-
368
- for i in range(len(ids_sorted_decreasing)):
369
- row = batch[ids_sorted_decreasing[i]]
370
-
371
- spec = row[0]
372
- spec_padded[i, :, : spec.size(1)] = spec
373
- spec_lengths[i] = spec.size(1)
374
-
375
- wave = row[1]
376
- wave_padded[i, :, : wave.size(1)] = wave
377
- wave_lengths[i] = wave.size(1)
378
-
379
- phone = row[2]
380
- phone_padded[i, : phone.size(0), :] = phone
381
- phone_lengths[i] = phone.size(0)
382
-
383
- sid[i] = row[3]
384
-
385
- return (
386
- phone_padded,
387
- phone_lengths,
388
- spec_padded,
389
- spec_lengths,
390
- wave_padded,
391
- wave_lengths,
392
- sid,
393
- )
394
-
395
-
396
- class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
397
- """
398
- Maintain similar input lengths in a batch.
399
- Length groups are specified by boundaries.
400
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
401
-
402
- It removes samples which are not included in the boundaries.
403
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
404
- """
405
-
406
- def __init__(
407
- self,
408
- dataset,
409
- batch_size,
410
- boundaries,
411
- num_replicas=None,
412
- rank=None,
413
- shuffle=True,
414
- ):
415
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
416
- self.lengths = dataset.lengths
417
- self.batch_size = batch_size
418
- self.boundaries = boundaries
419
-
420
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
421
- self.total_size = sum(self.num_samples_per_bucket)
422
- self.num_samples = self.total_size // self.num_replicas
423
-
424
- def _create_buckets(self):
425
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
426
- for i in range(len(self.lengths)):
427
- length = self.lengths[i]
428
- idx_bucket = self._bisect(length)
429
- if idx_bucket != -1:
430
- buckets[idx_bucket].append(i)
431
-
432
- for i in range(len(buckets) - 1, -1, -1): #
433
- if len(buckets[i]) == 0:
434
- buckets.pop(i)
435
- self.boundaries.pop(i + 1)
436
-
437
- num_samples_per_bucket = []
438
- for i in range(len(buckets)):
439
- len_bucket = len(buckets[i])
440
- total_batch_size = self.num_replicas * self.batch_size
441
- rem = (
442
- total_batch_size - (len_bucket % total_batch_size)
443
- ) % total_batch_size
444
- num_samples_per_bucket.append(len_bucket + rem)
445
- return buckets, num_samples_per_bucket
446
-
447
- def __iter__(self):
448
- # deterministically shuffle based on epoch
449
- g = torch.Generator()
450
- g.manual_seed(self.epoch)
451
-
452
- indices = []
453
- if self.shuffle:
454
- for bucket in self.buckets:
455
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
456
- else:
457
- for bucket in self.buckets:
458
- indices.append(list(range(len(bucket))))
459
-
460
- batches = []
461
- for i in range(len(self.buckets)):
462
- bucket = self.buckets[i]
463
- len_bucket = len(bucket)
464
- ids_bucket = indices[i]
465
- num_samples_bucket = self.num_samples_per_bucket[i]
466
-
467
- # add extra samples to make it evenly divisible
468
- rem = num_samples_bucket - len_bucket
469
- ids_bucket = (
470
- ids_bucket
471
- + ids_bucket * (rem // len_bucket)
472
- + ids_bucket[: (rem % len_bucket)]
473
- )
474
-
475
- # subsample
476
- ids_bucket = ids_bucket[self.rank :: self.num_replicas]
477
-
478
- # batching
479
- for j in range(len(ids_bucket) // self.batch_size):
480
- batch = [
481
- bucket[idx]
482
- for idx in ids_bucket[
483
- j * self.batch_size : (j + 1) * self.batch_size
484
- ]
485
- ]
486
- batches.append(batch)
487
-
488
- if self.shuffle:
489
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
490
- batches = [batches[i] for i in batch_ids]
491
- self.batches = batches
492
-
493
- assert len(self.batches) * self.batch_size == self.num_samples
494
- return iter(self.batches)
495
-
496
- def _bisect(self, x, lo=0, hi=None):
497
- if hi is None:
498
- hi = len(self.boundaries) - 1
499
-
500
- if hi > lo:
501
- mid = (hi + lo) // 2
502
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
503
- return mid
504
- elif x <= self.boundaries[mid]:
505
- return self._bisect(x, lo, mid)
506
- else:
507
- return self._bisect(x, mid + 1, hi)
508
- else:
509
- return -1
510
-
511
- def __len__(self):
512
- return self.num_samples // self.batch_size
 
spaces/AHzizi/WaifuVoiceGen/monotonic_align/core.py DELETED
@@ -1,36 +0,0 @@
1
- import numba
2
-
3
-
4
- @numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
5
- nopython=True, nogil=True)
6
- def maximum_path_jit(paths, values, t_ys, t_xs):
7
- b = paths.shape[0]
8
- max_neg_val = -1e9
9
- for i in range(int(b)):
10
- path = paths[i]
11
- value = values[i]
12
- t_y = t_ys[i]
13
- t_x = t_xs[i]
14
-
15
- v_prev = v_cur = 0.0
16
- index = t_x - 1
17
-
18
- for y in range(t_y):
19
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
20
- if x == y:
21
- v_cur = max_neg_val
22
- else:
23
- v_cur = value[y - 1, x]
24
- if x == 0:
25
- if y == 0:
26
- v_prev = 0.
27
- else:
28
- v_prev = max_neg_val
29
- else:
30
- v_prev = value[y - 1, x - 1]
31
- value[y, x] += max(v_prev, v_cur)
32
-
33
- for y in range(t_y - 1, -1, -1):
34
- path[y, index] = 1
35
- if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
36
- index = index - 1
 
spaces/AIConsultant/MusicGen/audiocraft/optim/ema.py DELETED
@@ -1,85 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # ModelEMA implementation is taken from
8
- # https://github.com/facebookresearch/demucs
9
-
10
- from collections import defaultdict
11
- import typing as tp
12
-
13
- import torch
14
- import torch.nn as nn
15
-
16
-
17
- def _get_all_non_persistent_buffers_set(module: nn.Module, root: str = "") -> set:
18
- names: set = set()
19
- for (name, sub_module) in module.named_modules():
20
- if name == '':
21
- buffer_names = module._non_persistent_buffers_set
22
- buffer_names = {f"{root}.{buff_name}" if len(root) > 0 else buff_name
23
- for buff_name in buffer_names}
24
- names.update(buffer_names)
25
- else:
26
- sub_name = f"{root}.{name}" if len(root) > 0 else name
27
- sub_buffer_names = _get_all_non_persistent_buffers_set(sub_module, sub_name)
28
- names.update(sub_buffer_names)
29
- return names
30
-
31
-
32
- def _get_named_tensors(module: nn.Module):
33
- non_persistent_buffers_set = _get_all_non_persistent_buffers_set(module)
34
- named_buffers = [(name, buffer) for (name, buffer) in module.named_buffers()
35
- if name not in non_persistent_buffers_set]
36
- named_parameters = list(module.named_parameters())
37
- return named_parameters + named_buffers
38
-
39
-
40
- class ModuleDictEMA:
41
- """Exponential Moving Average over a nn.ModuleDict.
42
-
43
- You can switch to the EMA weights temporarily.
44
- """
45
- def __init__(self, module_dict: nn.ModuleDict, decay: float = 0.999,
46
- unbias: bool = True, device: tp.Union[torch.device, str] = 'cpu'):
47
- self.decay = decay
48
- self.module_dict = module_dict
49
- self.state: dict = defaultdict(dict)
50
- self.count = 0
51
- self.device = device
52
- self.unbias = unbias
53
- self._init()
54
-
55
- def _init(self):
56
- for module_name, module in self.module_dict.items():
57
- for key, val in _get_named_tensors(module):
58
- if not val.is_floating_point():
59
- continue
60
- device = self.device or val.device
61
- if key not in self.state[module_name]:
62
- self.state[module_name][key] = val.detach().to(device, copy=True)
63
-
64
- def step(self):
65
- if self.unbias:
66
- self.count = self.count * self.decay + 1
67
- w = 1 / self.count
68
- else:
69
- w = 1 - self.decay
70
- for module_name, module in self.module_dict.items():
71
- for key, val in _get_named_tensors(module):
72
- if not val.is_floating_point():
73
- continue
74
- device = self.device or val.device
75
- self.state[module_name][key].mul_(1 - w)
76
- self.state[module_name][key].add_(val.detach().to(device), alpha=w)
77
-
78
- def state_dict(self):
79
- return {'state': self.state, 'count': self.count}
80
-
81
- def load_state_dict(self, state):
82
- self.count = state['count']
83
- for module_name, module in state['state'].items():
84
- for key, val in module.items():
85
- self.state[module_name][key].copy_(val)
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/transforms.py DELETED
@@ -1,98 +0,0 @@
1
- import logging
2
- import os
3
- from pathlib import Path
4
-
5
- import albumentations
6
- import numpy as np
7
- import torch
8
- from tqdm import tqdm
9
-
10
- logger = logging.getLogger(f'main.{__name__}')
11
-
12
-
13
- class StandardNormalizeAudio(object):
14
- '''
15
- Frequency-wise normalization
16
- '''
17
- def __init__(self, specs_dir, train_ids_path='./data/vggsound_train.txt', cache_path='./data/'):
18
- self.specs_dir = specs_dir
19
- self.train_ids_path = train_ids_path
20
- # making the stats filename to match the specs dir name
21
- self.cache_path = os.path.join(cache_path, f'train_means_stds_{Path(specs_dir).stem}.txt')
22
- logger.info('Assuming that the input stats are calculated using preprocessed spectrograms (log)')
23
- self.train_stats = self.calculate_or_load_stats()
24
-
25
- def __call__(self, item):
26
- # just to generalize the input handling. Useful for FID, IS eval and training other stuff
27
- if isinstance(item, dict):
28
- if 'input' in item:
29
- input_key = 'input'
30
- elif 'image' in item:
31
- input_key = 'image'
32
- else:
33
- raise NotImplementedError
34
- item[input_key] = (item[input_key] - self.train_stats['means']) / self.train_stats['stds']
35
- elif isinstance(item, torch.Tensor):
36
- # broadcasts np.ndarray (80, 1) to (1, 80, 1) because item is torch.Tensor (B, 80, T)
37
- item = (item - self.train_stats['means']) / self.train_stats['stds']
38
- else:
39
- raise NotImplementedError
40
- return item
41
-
42
- def calculate_or_load_stats(self):
43
- try:
44
- # (F, 2)
45
- train_stats = np.loadtxt(self.cache_path)
46
- means, stds = train_stats.T
47
- logger.info('Trying to load train stats for Standard Normalization of inputs')
48
- except OSError:
49
- logger.info('Could not find the precalculated stats for Standard Normalization. Calculating...')
50
- train_vid_ids = open(self.train_ids_path)
51
- specs_paths = [os.path.join(self.specs_dir, f'{i.rstrip()}_mel.npy') for i in train_vid_ids]
52
- means = [None] * len(specs_paths)
53
- stds = [None] * len(specs_paths)
54
- for i, path in enumerate(tqdm(specs_paths)):
55
- spec = np.load(path)
56
- means[i] = spec.mean(axis=1)
57
- stds[i] = spec.std(axis=1)
58
- # (F) <- (num_files, F)
59
- means = np.array(means).mean(axis=0)
60
- stds = np.array(stds).mean(axis=0)
61
- # saving in two columns
62
- np.savetxt(self.cache_path, np.vstack([means, stds]).T, fmt='%0.8f')
63
- means = means.reshape(-1, 1)
64
- stds = stds.reshape(-1, 1)
65
- return {'means': means, 'stds': stds}
66
-
67
- class ToTensor(object):
68
-
69
- def __call__(self, item):
70
- item['input'] = torch.from_numpy(item['input']).float()
71
- # if 'target' in item:
72
- item['target'] = torch.tensor(item['target'])
73
- return item
74
-
75
- class Crop(object):
76
-
77
- def __init__(self, cropped_shape=None, random_crop=False):
78
- self.cropped_shape = cropped_shape
79
- if cropped_shape is not None:
80
- mel_num, spec_len = cropped_shape
81
- if random_crop:
82
- self.cropper = albumentations.RandomCrop
83
- else:
84
- self.cropper = albumentations.CenterCrop
85
- self.preprocessor = albumentations.Compose([self.cropper(mel_num, spec_len)])
86
- else:
87
- self.preprocessor = lambda **kwargs: kwargs
88
-
89
- def __call__(self, item):
90
- item['input'] = self.preprocessor(image=item['input'])['image']
91
- return item
92
-
93
-
94
- if __name__ == '__main__':
95
- cropper = Crop([80, 848])
96
- item = {'input': torch.rand([80, 860])}
97
- outputs = cropper(item)
98
- print(outputs['input'].shape)
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/__init__.py DELETED
@@ -1,8 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # flake8: noqa
8
- from . import audio, audio_dataset
spaces/Abhilashvj/planogram-compliance/models/__init__.py DELETED
File without changes
spaces/Accel/media-converter/functions.py DELETED
@@ -1,515 +0,0 @@
1
- """
2
- util functions and classes
3
- """
4
- import json
5
- from pprint import pprint
6
- from tempfile import _TemporaryFileWrapper
7
- from typing import List
8
-
9
- import gradio as gr
10
- from gradio.components import Component
11
-
12
- def parse(param: json) -> dict:
13
- with open(param) as file:
14
- return json.load(file)
15
-
16
-
17
- data = parse("./data.json")
18
- codecs = parse("./codecs.json")
19
-
20
- """Video"""
21
- containers = [j.get("name") for i in data["containers"]
22
- for j in data["containers"][i]]
23
- video_containers = [i.get("name") for i in data["containers"]["video"]]
24
- video_codecs = [i.get("value") for i in data["codecs"]["video"]]
25
- video_aspect_ratio = [i.get("name") for i in data["aspects"]]
26
- video_scaling = [i.get("name") for i in data["scalings"]]
27
- """ Audio """
28
- audio_containers = [i.get("name") for i in data["containers"]["audio"]]
29
- audio_codecs = [i.get("value") for i in data["codecs"]["audio"]]
30
- audio_channels = [i.get("name") for i in data["audioChannels"]]
31
- audio_quality = [i.get("name") for i in data["audioQualities"]]
32
- audio_sample_rates = [i.get("name") for i in data["sampleRates"]]
33
-
34
- """ Video & Audio Filters """
35
- # deband=[i.get("name") for i in data["deband"]]
36
- # deflicker=[i.get("name") for i in data["deflicker"]]
37
- # deshake=[i.get("name") for i in data["deshake"]]
38
- # dejudder=[i.get("name") for i in data["dejudder"]]
39
- # denoise=[i.get("name") for i in data["denoise"]]
40
- # deinterlace=[i.get("name") for i in data["deinterlace"]]
41
- filters = ["deband", "deflicker", "deshake",
42
- "dejudder", "denoise", "deinterlace"]
43
- vf = [{vFilter: names} for vFilter in filters for names in [
44
- [i for i in data[vFilter]]]]
45
-
46
- presets = [i.get("name") for i in data["presets"]]
47
- profiles = [i.get("name") for i in data["profiles"]]
48
- speeds = [i.get("name") for i in data["speeds"]]
49
-
50
-
51
- outputMap = parse("./mappings.json")
52
- newoutputMap = parse("./new_mappings.json")
53
- """Output Mappings of commands to value
54
- audioQuality -b:a 128k
55
- """
56
-
57
-
58
- class CommandBuilder():
59
- """Takes a collection of gradio layout elements and attaches
60
- a function to each component in the context
61
- to build an array of ffmpeg commands"""
62
-
63
- def __call__(self, *args, **kwds):
64
- return [i.value for i in self._component]
65
-
66
- def do(self, *inputs, **kwds):
67
- for comp in self._component:
68
- if comp.label is not None:
69
- self.changefunc(comp, "", comp.value)
70
-
71
- def reset(self):
72
- self.outputDict = {"vf": {}, "af": {}}
73
- self.commands=""
74
- self.vf, self.af, self.extra = ([] for _ in range(3))
75
-
76
- def __init__(self, *inputs: gr.Blocks) -> None:
77
- """
78
- Parameters:
79
- *inputs: A tuple of layout blocks containing components(Textbox,Button...).
80
- """
81
-
82
- self.outputDict = {"vf": {}, "af": {}}
83
- self.formatOutputDict = {"vf": {}, "af": {}}
84
- # state=gr.Variable()
85
- # state2=gr.Variable()
86
-
87
- self._component: List[Component] = []
88
- self.vf, self.af, self.extra = ([] for _ in range(3))
89
- self.commands = ""
90
- if inputs is None:
91
- return None
92
- for i in inputs:
93
- self._component += self._get_component_instance(i)
94
- for comp in self._component:
95
- state = gr.Variable()
96
- state2 = gr.Variable()
97
- if comp.label is not None:
98
- state.value = comp
99
- state2.value = comp.label
100
- comp.change(fn=self.changefunc, inputs=[
101
- state, state2, comp], outputs=[])
102
-
103
- def changefunc(self, input: gr.components.IOComponent, c_label="", newValue=""):
104
- label, *_ = input.label.strip(": \n").lower().split(
105
- ) if type(input.label) != list else "".join(input.label).strip(": ").lower().split()
106
- label += "".join(_).title()
107
- key = newoutputMap.get(label)
108
- lst_extra, vf, af = ([] for _ in range(3))
109
- if newValue not in [None, "Source", "Auto", "", "None", "none", 0]:
110
- self.setVf(label, newValue)
111
- self.setAf(label, newValue)
112
- self.setF(label, newValue)
113
- for val in self.outputDict:
114
- if val == "vf":
115
- vf = self.outputDict.get(val).values()
116
- vf = ",".join(list(vf))
117
- elif val == "af":
118
- af = self.outputDict.get(val).values()
119
- af = ",".join(list(af))
120
- pass
121
- else:
122
- lst_extra.extend([val, self.outputDict.get(val)])
123
-
124
- else:
125
- self.outputDict.pop(key, "No Key Exists")
126
- self.outputDict["vf"].pop(label, "No Key Exists")
127
- self.outputDict["af"].pop(label, "No Key Exists")
128
- self.vf = f"-vf '{vf}'" if vf else ""
129
- self.af = f"-af '{af}'" if af else ""
130
- self.extra = " ".join(lst_extra)
131
- self.commands = f"{self.vf} {self.af} {self.extra}"
132
-
133
- print(self.vf, self.af, self.extra)
134
-
135
- def setVf(self, label:str, newValue:"str| int"):
136
- """Sets Video filters
137
-
138
- Args:
139
- label : label of components
140
- newValue : value of component
141
- """
142
- if newoutputMap["vf"].get(label):
143
- key = newoutputMap["vf"].get(label)
144
- if label in ["deinterlace", "denoise"]:
145
- value = "_".join(newValue.lower().split())
146
- arg = key.get(value, None)
147
- self.outputDict["vf"].update({label: arg})
148
- else:
149
- self.outputDict["vf"].update({key: key})
150
-
151
- def setF(self, label, newValue):
152
- """ Sets Extra filters
153
- Args:
154
- label : label of components
155
- newValue : value of component
156
- """
157
- if newoutputMap.get(label):
158
- key = newoutputMap.get(label)
159
- if label in ["video", "audio"]:
160
- value=codecs.get(label).get(newValue,newValue)
161
- print(value)
162
- self.outputDict.update({key:value})
163
- elif label in ["startTime", "stopTime"]:
164
- self.outputDict.update({key: newValue})
165
- else:
166
- value = "".join([i.get("value", "None") for i in data.get(
167
- label) if i.get("name", None) == newValue])
168
- self.outputDict.update({key: value})
169
-
170
- def setAf(self, label:str, newValue:"str|int"):
171
-         """ Sets Audio filters
172
- Args:
173
- label : label of components
174
- newValue : value of component
175
- """
176
- if newoutputMap["af"].get(label):
177
- value = int(newValue)/100
178
- arg = f"{label}={value}"
179
- self.outputDict["af"].update({label: arg})
180
-
181
- def update(self, Component: Component):
182
- for comp in self._component:
183
- comp.change(lambda: gr.update(
184
- value=self.outputDict), [], [Component])
185
-
186
- def _get_component_instance(self, inputs: gr.Blocks) -> List[Component]:
187
- """
188
- returns components present in a layout block
189
- Parameters:
190
- inputs: layout block
191
- """
192
- res=[]
193
- for i in inputs.children:
194
- # print(i,hasattr(i,"children"))
195
- if not (hasattr(i,"children")):
196
- # res.append(gr.components.get_component_instance(i,render=True))
197
- res+=[gr.components.get_component_instance(i,render=True)]
198
- # print(res)
199
- elif hasattr(i,"children"):
200
- res+=self._get_component_instance(i)
201
- # print(res)
202
- return res
203
- # return [gr.components.get_component_instance(i, render=True) for i in inputs.children if not hasattr(i, "children")]
204
-
205
- def setVideoFilters(self, options):
206
- value = self.outputDict.get(options, "-")
207
- filters = newoutputMap.get(options, None)
208
- arg = ""
209
- if options in ["deinterlace", "denoise"]:
210
- value = "_".join(value.lower().split())
211
- arg = filters.get(value, None)
212
- # self.vf.append(arg)
213
- self.outputDict["vf"].update({options: arg})
214
- return True
215
- if options in ["deband", "deflicker", "deshake", "dejudder"]:
216
- arg = filters
217
- self.outputDict["vf"].update({options: arg})
218
- return True
219
-
220
- return
221
-
222
- def setAudioFilters(self, options):
223
- value = self.outputDict.get(options, "-")
224
- if options in ["acontrast"]:
225
- value = int(value)/100
226
- arg = f"{options}={value}"
227
-
228
- self.outputDict["af"].update({options: arg})
229
- return True
230
- return
231
-
232
- def setFormat(self, options):
233
- value = self.outputDict.get(options, "-")
234
- filters = newoutputMap.get(options, None)
235
- if options in ["video", "audio"]:
236
- value = "".join([i.get("value", "None") for i in data.get(
237
- "codecs").get(options) if i.get("name", None) == value])
238
- arg = f"{filters} {value}"
239
- self.outputDict.update({options: arg})
240
- return True
241
- elif data.get(options) == None:
242
- arg = f"{filters} {value}"
243
- self.outputDict.update({options: arg})
244
- return True
245
- elif options != "clip":
246
- value = "".join([i.get("value", "None") for i in data.get(
247
- options) if i.get("name", None) == value])
248
- arg = f"{filters} {value}"
249
- self.outputDict.update({options: arg})
250
-
251
- def build(self):
252
- for i in self.outputDict:
253
- if self.setVideoFilters(i):
254
- continue
255
- elif self.setAudioFilters(i):
256
- continue
257
- else:
258
- self.setFormat(i)
259
- lst_extra, vf, af = ([] for _ in range(3))
260
- for val in self.outputDict:
261
- if val == "vf":
262
- vf = self.outputDict.get(val).values()
263
- vf = ",".join(list(vf))
264
- elif val == "af":
265
- af = self.outputDict.get(val).values()
266
- af = ",".join(list(af))
267
- else:
268
- lst_extra.append(self.outputDict.get(val))
269
- # print(lst_extra, "temp x")
270
- # if vf:self.vf=f"-vf '{vf}'"
271
- # if af:self.af=f"-af '{af}'"
272
- self.vf = f"-vf '{vf}'" if vf else ""
273
- self.af = f"-af '{af}'" if af else ""
274
- self.extra = " ".join(lst_extra)
275
- self.commands = f"{self.vf} {self.af} {self.extra}"
276
-
277
- def startfunc(self, input: gr.components.IOComponent, c_label="", newValue=""):
278
- label, *_ = input.label.strip(": ").lower().split(
279
- ) if type(input.label) != list else "".join(input.label).strip(": ").lower().split()
280
- label += "".join(_).title()
281
- if newValue not in [None, "Source", "Auto", "", "None", 0]:
282
- self.outputDict["vf"].update({label: newValue})
283
- self.outputDict["af"].update({label: newValue})
284
- self.outputDict.update({label: newValue})
285
- else:
286
- self.outputDict.pop(label, "No Key Exists")
287
- self.outputDict["vf"].pop(label, "No Key Exists")
288
- self.outputDict["af"].pop(label, "No Key Exists")
289
- # self.formatOutputDict["vf"].pop(label, "Key is None or similar")
290
- # self.formatOutputDict["af"].pop(label, "Key is None or similar")
291
- # self.formatOutputDict.pop(label, "Key is None or similar")
292
- print(self.outputDict)
293
- self.build()
294
-
295
-
296
- # def somefunc(input: gr.components.IOComponent, c_label=""):
297
- # label = ""
298
- # output = {}
299
- # print(input, c_label)
300
- # label, *_ = input.label.strip(": ").lower().split(
301
- # ) if type(input.label) != list else "".join(input.label).strip(": ").lower().split()
302
- # label += "".join(_).title()
303
- # print(newoutputMap.get(label), label, c_label)
304
- # if c_label not in [None, "Source", "Auto", ""]:
305
- # print(input.value)
306
- # output.update({label: c_label})
307
- # else:
308
- # output.pop(label, "No Key Exists")
309
- # pprint(output)
310
-
311
- # def mediaChange(option):
312
- # no_=gr.update(visible=False)
313
- # if option in video_containers:
314
- # output=gr.update(visible=True)
315
- # return [no_,output]
316
- # elif option in audio_containers:
317
- # output=gr.update(visible=True)
318
- # return [output,no_]
319
- # else:
320
- # output=gr.update(visible=False)
321
- # return [no_,no_]
322
-
323
-
324
- def mediaChange(option:str,state)-> List[Component]:
325
- """
326
-     Allows playing the media as one of several options:
327
- Video, Audio or File
328
-
329
- Args:
330
- option : Clicked buttons value
331
-
332
- Returns:
333
- List[Component]: list of toggled output components to display
334
- """
335
- ops = {"Audio": gr.update(visible=True,value=state)}
336
- ops2 = {"Video": gr.update(visible=True,value=state)}
337
- ops3 = {"File": gr.update(visible=True,value=state, interactive=False)}
338
-
339
- def chosen(x): return x.get(option, gr.update(visible=False))
340
- return [chosen(ops), chosen(ops2), chosen(ops3)]
341
-
342
-
343
- # def videoChange(value):
344
- # print(value.name)
345
-
346
- # if option in video_containers:
347
- # output=gr.update(visible=True)
348
- # return [no_,output]
349
- # elif option in audio_containers:
350
- # output=gr.update(visible=True)
351
- # return [output,no_]
352
- # else:
353
- # output=gr.update(visible=False)
354
- # return [no_,no_]
355
-
356
-
357
-
358
-
359
- """Helper Functions for Processing """
360
-
361
-
362
- # def clear(*input):
363
- # print(input, " clear_func")
364
- # # for i in [inp for i in input for inp in i]:
365
- # # print(i, hasattr(i,"cleared_value"),type(i))
366
- # # a=default_clear(input_components)
367
- # def clear_func(x): return [component.cleared_value if hasattr(
368
- # component, "cleared_value") else None for component in x]
369
- # print(clear_func(input))
370
- # return clear_func(input)
371
-
372
- def customBitrate(choice:int)-> Component:
373
- """
374
- Toggle a component for custom Audio Quality
375
- visible/none
376
- Args:
377
- choice : Custom audio quality
378
-
379
- Returns:
380
- Component: component toggle state
381
- """
382
- if choice == "Custom":
383
- return gr.update(visible=True)
384
- else:
385
- return gr.update(visible=False, value=0)
386
-
387
-
388
- def supported_codecs(format: str)-> List[Component]:
389
- """
390
- Changes video and audio components with appropriate
391
- options according to passed format
392
-
393
- Args:
394
- format: passed media codec (x264,x265)
395
-
396
- Returns:
397
- List[Component]: list of components with updated choices
398
- """
399
- if format:
400
- format = format.lower()
401
- video_lst = [val.get("value") for val in data["codecs"]["video"]
402
- if val.get("supported") == None or format in val["supported"]]
403
- audio_lst = [val.get("value") for val in data["codecs"]["audio"]
404
- if val.get("supported") == None or format in val["supported"]]
405
- return [gr.update(choices=video_lst), gr.update(choices=audio_lst)]
406
-
407
-
408
- def supported_presets(format: str)-> Component:
409
- """
410
- Changes presets component with appropriate
411
- options according to passed format
412
- Args:
413
- format: passed media codec (x264,x265)
414
-
415
- Returns:
416
- Component: component with updated choice list (video codecs)
417
- """
418
- if format:
419
- format = format.lower()
420
- video_lst = [val.get("name") for val in data["presets"]
421
- if val.get("supported") == None or format in val["supported"]]
422
- return gr.update(choices=video_lst)
423
-
424
-
425
- def change_clipbox(choice:str)-> List[Component]:
426
- """
427
- Toggles the clipping Textbox
428
-
429
- Args:
430
- choice: Enabled/None
431
-
432
- Returns:
433
- List[Component]: list of components with visible state of the clip components
434
- """
435
- if choice == "Enabled":
436
- return [gr.update(visible=True, value="00:00"), gr.update(visible=True, value="00:10")]
437
- else:
438
- return [gr.update(visible=False, value=""), gr.update(visible=False, value="")]
439
-
440
-
441
- def updateOutput(file: _TemporaryFileWrapper)-> Component:
442
- if file:
443
- print(file.name)
444
- return gr.update(value=file.name)
445
-
446
-
447
- def get_component_instance(inputs: gr.Blocks)-> List[Component]:
448
- """ returns only components
449
-
450
- Args:
451
- inputs: layout elements
452
-
453
- Returns:
454
- List[Component]: components
455
- """
456
- return [gr.components.get_component_instance(i, render=True) for i in inputs.children]
457
-
458
-
459
- class Clear(CommandBuilder):
460
- """ Class for clearing components in layouts
461
- """
462
-
463
- def __call__(self, *args, **kwds):
464
- return self._component
465
-
466
- def __str__(self):
467
- return f"{self._component} __clear__ class"
468
-
469
- def __repr__(self):
470
- return self._component
471
-
472
- def __init__(self, *input_component: gr.Blocks()) -> None:
473
- """
474
- Parameters:
475
- *input_component: A tuple of layout blocks containing components
476
- """
477
- self._component = []
478
- if input_component is not None:
479
- for i in input_component:
480
- # self._component += super()._get_component_instance(i)
481
- self._component += self.__get_component_instance(i)
482
-
483
- def __get_component_instance(self, inputs: gr.Blocks) -> list:
484
- # print(inputs, " class instance")
485
- res=[]
486
- # print(*inputs.children)
487
- for i in inputs.children:
488
- # print(i,hasattr(i,"children"))
489
- if not (hasattr(i,"children")):
490
- # res.append(gr.components.get_component_instance(i,render=True))
491
- res+=[gr.components.get_component_instance(i,render=True)]
492
- # print(i)
493
- elif hasattr(i,"children"):
494
- # print(*i.children)
495
- res+=self.__get_component_instance(i)
496
- # res=[gr.components.get_component_instance(i, render=True) for i in inputs.children if not hasattr(i, "children")]
497
- # print(res,"__ result")
498
- # print(res)
499
- return res
500
- # return [gr.components.get_component_instance(i, render=True) for i in inputs.children if not hasattr(i, "children")]
501
-
502
- def add(self, *args):
503
- print(args, type(args))
504
- if args is not None:
505
- for i in args:
506
- self._component += super().__get_component_instance(i)
507
- return self._component
508
-
509
- def clear(self, *args):
510
- """
511
- Function to clear components from a Block in the class instance
512
- """
513
- def clear_func(x): return [component.cleared_value if hasattr(
514
- component, "cleared_value") else component.value for component in x]
515
- return clear_func(self._component)
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Factory.d.ts DELETED
@@ -1,7 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Shake from "./Shake";
3
-
4
- export default function (
5
- gameObject: Phaser.GameObjects.GameObject | Phaser.Scene,
6
- config?: Shake.IConfig
7
- ): Shake;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/PositionToPercent.js DELETED
@@ -1,13 +0,0 @@
1
- const Percent = Phaser.Math.Percent;
2
-
3
- var PositionToPercent = function (startPoint, endPoint, currentPoint) {
4
- var value;
5
- if (startPoint.y === endPoint.y) {
6
- value = Percent(currentPoint.x, startPoint.x, endPoint.x);
7
- } else if (startPoint.x === endPoint.x) {
8
- value = Percent(currentPoint.y, startPoint.y, endPoint.y);
9
- }
10
- return value
11
- }
12
-
13
- export default PositionToPercent;
spaces/Alfasign/dIFFU/app.py DELETED
@@ -1,1028 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
-
6
- models = [
7
- "Yntec/OpenGenDiffusers",
8
- "Yntec/DeliShaper",
9
- "Yntec/Dreamlike",
10
- "Yntec/dreamlike-photoreal-remix",
11
- "Yntec/DreamShaperRemix",
12
- "Yntec/DeliberateRemix",
13
- "Yntec/epiCVision",
14
- "Yntec/realistic-vision-v12",
15
- "Yntec/epiCRealismVAE",
16
- "Yntec/MangledMerge3_768",
17
- "Yntec/OpenNijiRemix",
18
- "Linaqruf/animagine-xl",
19
- "nerijs/pixel-art-xl",
20
- "stabilityai/stable-diffusion-xl-base-1.0",
21
- "Yntec/OpenLexica",
22
- "Yntec/MapleSyrup",
23
- "Yntec/WoopWoopRemix",
24
- "Yntec/DreamLikeRemix",
25
- "Yntec/Toonify2",
26
- "Yntec/ArcticFowl",
27
- "Yntec/iComixRemix",
28
- "Yntec/Infinite80s",
29
- "Yntec/SamaritanDoesArt",
30
- "Yntec/samaritan3dCartoon2MVAE",
31
- "Yntec/CartoonStyleClassic",
32
- "Yntec/CultClassic",
33
- "Yntec/photoMovieX",
34
- "Yntec/photoMovieRealistic",
35
- "Yntec/CinemaE",
36
- "Yntec/GalenaVAE",
37
- "Yntec/a-ZovyaRemix",
38
- "Yntec/a-ZovyaRPGV3VAE",
39
- "Yntec/a-ZoviaRPGArtistV2VAE",
40
- "Yntec/GameAssetsDigitalUnitsCreationKit",
41
- "Yntec/InsaneRealisticCVAE",
42
- "Yntec/Lunar",
43
- "Yntec/LunarLuma",
44
- "Yntec/QToriReloaded",
45
- "Yntec/Chik2",
46
- "Yntec/InsaneM3U",
47
- "Yntec/DucHaiten-StyleLikeMeVAE",
48
- "Yntec/Luma",
49
- "Yntec/Noosphere_v3_CVAE",
50
- "Yntec/RealRainbows",
51
- "Yntec/Ninja-Diffusers",
52
- "Yntec/ChildrenStoriesAnime",
53
- "Yntec/theallysMixIV-verisimilar",
54
- "Yntec/DucHaitenAnime768",
55
- "Yntec/RainbowClassicAnime",
56
- "Yntec/DucHaitenClassicAnime768",
57
- "Yntec/GOLDFish",
58
- "Yntec/WesternAnimation",
59
- "Yntec/NeverExisted",
60
- "Yntec/Rainbowsphere",
61
- "Yntec/DreamAnything",
62
- "Yntec/Dreamsphere",
63
- "Yntec/Photosphere",
64
- "Yntec/yabalMixTrue25D_v2_VAE",
65
- "dreamlike-art/dreamlike-anime-1.0",
66
- "Yntec/RainbowDreams",
67
- "dreamlike-art/dreamlike-photoreal-2.0",
68
- "Yntec/rainbowpatch",
69
- "Yntec/DucHaiten-Retro-Diffusers",
70
- "Yntec/ElldrethsRetroMix_Diffusers",
71
- "Yntec/sexyToons",
72
- "digiplay/BeenYouLiteL11_diffusers",
73
- "Yntec/CuteYuki2",
74
- "digiplay/AI-infinity-V1-fp16",
75
- "digiplay/wantan25D_prototype",
76
- "digiplay/PotoPhotoRealism_v1",
77
- "digiplay/LunarDiffusion_v1.27",
78
- "digiplay/insaneRealistic_v1",
79
- "digiplay/OLDFish_2348_diffusers",
80
- "DucHaiten/DucHaitenDreamWorld",
81
- "digiplay/LemonteaMixPainterly2_v1",
82
- "digiplay/SweetMuse_diffusers",
83
- "dreamlike-art/dreamlike-diffusion-1.0",
84
- "digiplay/Realisian_v1",
85
- "Hius/DreamFul-V2",
86
- "digiplay/m3u", #263
87
- "digiplay/RMHF_2.5D_v2",
88
- "digiplay/FishMix_v1.1",
89
- "stablediffusionapi/icomix-2",
90
- "digiplay/Remedy",
91
- "Hemlok/QuinceMix",
92
- "digiplay/K-main",
93
- "digiplay/LusterMix_v1.5_safetensors", #256
94
- "digiplay/perfectLewdFantasy_v1.01",
95
- "digiplay/Opiate_v2",
96
- "digiplay/PhotoSomnia_vFinal",
97
- "Yntec/KIDSILLUSTRATIONS",
98
- "digiplay/polla_mix_2.5D",
99
- "Yntec/COOLKIDSV2",
100
- "Yntec/Pavo-Mix-Diffusers",
101
- "Yntec/RPG_Remix",
102
- "Yntec/OrangeRemix",
103
- "Yntec/PeachMix3",
104
- "Yntec/DucHaitenAIart-beta",
105
- "Yntec/samdoesartsUlt",
106
- "stablediffusionapi/all-526-animated",
107
- "AstraliteHeart/pony-diffusion",
108
- "stablediffusionapi/chilloutmixsf",
109
- "Masagin/Deliberate", #235
110
- "DucHaiten/DucHaitenSuperCute",
111
- "stablediffusionapi/all-526",
112
- "theintuitiveye/HARDblend",
113
- "stablediffusionapi/cusp-of-serenity",
114
- "stablediffusionapi/cyberrealistic",
115
- "SG161222/Realistic_Vision_V1.4",
116
- "digiplay/paulEberSRealismMix_v1",
117
- "Ojimi/anime-kawai-diffusion",
118
- "hassanblend/hassanblend1.4",
119
- "digiplay/zodiac_eclipse_DAY1",
120
- "LottePeisch/RevAnimated-Diffusers",
121
- "claudfuen/photorealistic-fuen-v1",
122
- "stablediffusionapi/chillout-app-factory",
123
- "DucHaiten/DucHaitenJourney",
124
- "robotjung/SemiRealMix",
125
- "Joeythemonster/anything-midjourney-v-4-1",
126
- "prompthero/midjourney-v4-diffusion",
127
- "prompthero/openjourney-v4",
128
- "x67/shortjourney",
129
- "darkstorm2150/Protogen_v2.2_Official_Release",
130
- "FredZhang7/paint-journey-v2",
131
- "digiplay/PersonaStyleCheckpoint",
132
- "darkstorm2150/Protogen_Infinity_Official_Release",
133
- "PeggyWang/openjourney-v2",
134
- "darkstorm2150/Protogen_x3.4_Official_Release",
135
- "stablediffusionapi/deliberateappfactory", #236
136
- "digiplay/CrossoverMix_v2",
137
- "stablediffusionapi/spybg",
138
- "stablediffusionapi/dreamshaper-v6", #239
139
- "stablediffusionapi/the-ally",
140
- "darkstorm2150/Protogen_x5.8_Official_Release",
141
- "coreco/seek.art_MEGA",
142
- "digiplay/BlankCanvas_v1", #07.11
143
- "digiplay/OnlyAnime_v2.3",
144
- "Korakoe/OpenNiji",
145
- "digiplay/Photon_v1",
146
- "digiplay/Pika_v2",
147
- "digiplay/RealCartoon3D_F16full_v3.1", #254
148
- "digiplay/realidefmix_3.5VAE",
149
- "digiplay/realmixUnrealjourney_v1",
150
- "digiplay/SyncMix_v1.5",
151
- "digiplay/TWingshadow_v1.2",
152
- "digiplay/V3_by_Hans_Asian",
153
- "digiplay/whatamix_v1",
154
-
155
- "digiplay/2K", #216
156
- "digiplay/AIGEN_v1.4_diffusers",
157
- "digiplay/BrickAndMortarMix_v2.0_diffusers", #224
158
- "digiplay/BeautyFool_v1.2VAE_pruned",
159
- "digiplay/breakdomainrealistic_R2333",
160
- "digiplay/CCTV2.5d_v1", #219
161
- "digiplay/ChikMix_V3", #253
162
- "stablediffusionapi/chilledremixsazyou-r", #195
163
- "digiplay/CityEdge_StyleMix_v1.44",
164
- "stablediffusionapi/dalcefopainting2", #199
165
- "digiplay/EdisonNilMix_v1", #07.10
166
- "digiplay/DiamondCoalMix_v2_pruned_diffusers",
167
- "digiplay/DreamShaper_7", #259
168
- "digiplay/elegantEntropy_v1.1", #221
169
- "digiplay/EtherRealMix_LUX2",
170
- "digiplay/KawaiiRealisticAnimeMix_A0.3",
171
- "digiplay/highQualityCGMIX_v1",
172
- "digiplay/HIMAWARI_v1",
173
- "digiplay/Hodgepodge_v2.1", #217
174
- "digiplay/illustro1stEdition_illustroV1", #214
175
- "digiplay/Juggernaut_final", #07.11
176
- "digiplay/Landscape_PhotoReal_v1",
177
- "digiplay/LuckyStrikeMix0.2Realistic", #07.10
178
- "digiplay/Matrix_Stellar_VAE_v1",
179
- "digiplay/PrefixRealisticMix_v1",
180
- "digiplay/RealEpicMajicRevolution_v1", #07.11
181
- "digiplay/ShampooMix_4", #252
182
- "digiplay/SoapMix2.5D_v1",
183
- "digiplay/ZemiHR_v2_diffusers",
184
-
185
- "Redamancy2299/dreambooth",
186
- "Lykon/DreamShaper", #240
187
- "trysem/DreamShaper-3.3",
188
- "HusseinHE/hussein-deliberate-1000steps", #237
189
- "stablediffusionapi/majicmixfantasy",
190
- "stablediffusionapi/majicmixsombre", #247
191
- "wavymulder/modelshoot",
192
- "digiplay/ChillyMix_v1", #215
193
- "stablediffusionapi/foto-assisted-diffusion", #197
194
- "wavymulder/portraitplus",
195
- "stablediffusionapi/chilloutmix-4264",
196
- "stablediffusionapi/product-design", #194
197
- "kandinsky-community/kandinsky-2-1", #251
198
-
199
- "digiplay/2.5DSET_diffusers", #227
200
- "digiplay/2-KWI", #213
201
- "digiplay/alstroemeriaMix_v1",
202
- "wavymulder/Analog-Diffusion",
203
- "digiplay/AniRealityMix_v1", #257
204
- "digiplay/ARRealVX1.1",
205
- "digiplay/BadAnime_v1",
206
- "digiplay/BasilKorea_v2", #07.11
207
- "digiplay/bluePencilRealistic_v01",
208
- "digiplay/bra_v40_diffusers",
209
- "digiplay/Burger_Mix_semiR2Lite", #222
210
- "digiplay/calicomixreal_v2.0_diffusers",
211
- "digiplay/CampurSari_Gen1",
212
- "digiplay/cocotifacute_v1", #07.10
213
- "digiplay/cosfMix_v1", #223
214
- "digiplay/CounterMix_v2", #211
215
- "digiplay/CuriousMerge2.5D_v5",
216
- "digiplay/dosmix",
217
- "digiplay/epi_2.5Dphotogodess_diffusers",
218
- "stablediffusionapi/droodlyrielv15",
219
- "digiplay/fantexi_v0.7",
220
- "digiplay/fishmix_other_v1",
221
- "digiplay/FormCleansingMix_v1", #228
222
- "digiplay/FumizukiMix_v1",
223
- "digiplay/helloworld_v3",
224
- "digiplay/HenmixArt_v1",
225
- "digiplay/ISOmix_v3.22",
226
- "digiplay/JF-Cu_v1",
227
- "digiplay/kencanmix_v2.0beta",
228
- "wavymulder/lomo-diffusion",
229
- "stablediffusionapi/majicmixv5", #192
230
- "digiplay/mecha_musume_vivid_soft",
231
- "digiplay/MiracleMixGlitter_v1",
232
- "digiplay/MixTape_RocknRoll_v3punk_bake_fp16",
233
- "digiplay/NextPhoto_v1",
234
- "digiplay/Noosphere_v3",
235
- "digiplay/nk15_diffusers", #230
236
- "digiplay/PeachMixsRelistic_R0", #262
237
- "wavymulder/timeless-diffusion",
238
- "digiplay/WhiteDreamyHillMix_v1", #220
239
- "digiplay/ya3p_VAE", #258
240
-
241
- "DucHaiten/DucHaitenAnime",
242
- "DucHaiten/DucHaitenAIart",
243
- "Manseo/Colorful-v4.5-Plus", #244
244
- "Guizmus/SDArt_ChaosAndOrder",
245
- "DucHaiten/DH_ClassicAnime",
246
- "stablediffusionapi/disneypixar",
247
- "johnslegers/epic-diffusion-v1.1",
248
- "emilianJR/epiCRealism",
249
- "johnslegers/epic-diffusion",
250
- "digiplay/endlessMixRenatus_v1.1", #07.10
251
- "digiplay/fantasticAnime_diffusers",
252
- "stablediffusionapi/ghostmix",
253
- "Duskfallcrew/EpicMix_Realism",
254
- "nitrosocke/Nitro-Diffusion",
255
- "prompthero/openjourney",
256
- "Guizmus/SDArt_something",
257
- "DucHaiten/DucHaiten-StyleLikeMe",
258
- "ddPn08/subtly", #250
259
- "22h/vintedois-diffusion-v0-1",
260
-
261
- "circulus/sd-anireal-v2.7",
262
- "0xJustin/Dungeons-and-Diffusion",
263
- "Guizmus/SDArt_AliceInDiffusionLand",
264
- "stablediffusionapi/realistic-vision-v20-2047",
265
- "redstonehero/RPG-v5-itr17_A10T",
266
-
267
- "stablediffusionapi/camelliamix25d",
268
- "Guizmus/SDArt_cosmichorrors",
269
- "DGSpitzer/DGSpitzer-Art-Diffusion",
270
- "stablediffusionapi/emotion-puppeteer-v2",
271
- "stablediffusionapi/fengjing",
272
- "stablediffusionapi/fuwafuwamix",
273
- "Fred99774/girlnew1",
274
- "stablediffusionapi/majicmixrealistic",
275
- "badmonk/nxka",
276
- "ItsJayQz/SynthwavePunk-v2",
277
- "zhyemmmm/ToonYou",
278
- "stablediffusionapi/uber-realistic-merge",
279
- "stablediffusionapi/vne732h9dh4",
280
- "stablediffusionapi/wand-magic2",
281
- "stablediffusionapi/waifu-journey-2",
282
- "stablediffusionapi/zovya",
283
-
284
- "Guizmus/SDArt_cosmichorrors768",
285
- "stablediffusionapi/counterfeit-v30",
286
- "stablediffusionapi/amireal",
287
- #"JamesFlare/pastel-mix", #"andite/pastel-mix",
288
- "stablediffusionapi/rev-anim",
289
- "aipicasso/picasso-diffusion-1-1",
290
- "xiaolxl/Gf_style2",
291
- "circulus/sd-semireal-v2.8",
292
- "Crosstyan/BPModel", #07.11
293
-
294
- "digiplay/Dusk-1",
295
- "ogkalu/Comic-Diffusion",
296
- "Guizmus/SDArt_ChaosAndOrder768",
297
- "gsdf/Counterfeit-V2.0",
298
- "dwancin/memoji", #07.11
299
- "nousr/robo-diffusion-2-base",
300
-
301
- ##"hakurei/waifu-diffusion",
302
- "WarriorMama777/AbyssOrangeMix2",
303
- "stablediffusionapi/abyssorangemix2nsfw", #200
304
- "cag/anything-v3-1",
305
- "iZELX1/Anything-V3-X",
306
- "xyn-ai/anything-v4.0", #"andite/anything-v4.0",
307
- "D1b4l4p/AsianMix",
308
- #"Fred99774/chilloutvlara",
309
- "aipicasso/cool-japan-diffusion-2-1-2",
310
- "stablediffusionapi/corneos-7th-heaven-m", #196
311
- "DGSpitzer/Cyberpunk-Anime-Diffusion",
312
- "stablediffusionapi/dark-sushi-mix",
313
- "joachimsallstrom/Double-Exposure-Diffusion",
314
- "eimiss/EimisAnimeDiffusion_1.0v",
315
- "prompthero/funko-diffusion",
316
- "nitrosocke/Ghibli-Diffusion",
317
- ###"iZELX1/Grapefruit",
318
- "xiaolxl/GuoFeng3",
319
- "stablediffusionapi/tmnd-mix",
320
- "coder119/Vectorartz_Diffusion", #203
321
-
322
- "WarriorMama777/AbyssOrangeMix",
323
- "AIARTCHAN/7pa",
324
- "JosephusCheung/ACertainModel",
325
- "JosephusCheung/ACertainThing",
326
- "AIARTCHAN/AbyssHellHero",
327
- "JosephusCheung/ACertainty",
328
- "AIARTCHAN/AbyssHellVer3",
329
- "AIARTCHAN/AbyssMapleVer3",
330
- "stablediffusionapi/abyssorangemixsfw",
331
- "AIARTCHAN/anidosmixV2",
332
- "stablediffusionapi/anime-model-v2",
333
- "kubanemil/AnyLORA",
334
- "stablediffusionapi/hc-anything-v3-vae", #231
335
- "mm00/anything-v3.0-light",
336
- "stablediffusionapi/anythingelse-v4",
337
- "stablediffusionapi/anything-v45-fixed",
338
- "stablediffusionapi/anything-v5",
339
- "nitrosocke/Arcane-Diffusion",
340
- "nitrosocke/archer-diffusion",
341
- "stablediffusionapi/architecture-tuned-model",
342
- "WarriorMama777/BloodOrangeMix",
343
- "wavymulder/collage-diffusion",
344
- "stablediffusionapi/camelliamixline",
345
- "digiplay/chrysanthemumMix_v1",
346
- "digiplay/CiderMix_ciderR", #260
347
- "Johnhex/Clam", #243
348
- "stablediffusionapi/cosmic-babes",
349
- "digiplay/CoffeeDonut_v1",
350
- "stablediffusionapi/dark-sushi-25d",
351
- "digiplay/Defacta_v1_diffusers", #226
352
- ## "WarriorMama777/EerieOrangeMix",
353
- "digiplay/DuelAnimeMix_v1", #225
354
- "Envvi/Inkpunk-Diffusion",
355
- "digiplay/kotosmix_diffusers", #229
356
- "stablediffusionapi/meinaalter",
357
- "Nacholmo/meinamixv7-diffusers",
358
- "stablediffusionapi/meinapastel",
359
- "AIARTCHAN/MIX-Pro-V4",
360
- "Lykon/NeverEnding-Dream",
361
- "stablediffusionapi/shirataki-mix", #191
362
- "NoCrypt/SomethingV2_2",
363
- "NoCrypt/SomethingV2",
364
- "badmonk/sxzumi",
365
- ## "stablediffusionapi/three-delicacy",
366
- ## "stablediffusionapi/three-delicacy-wonto",
367
- "etherealxx/systemy-csrmodel-cutesexyrobutts", #"andite/cutesexyrobutts-diffusion",
368
- "sd-dreambooth-library/true-guweiz-style", # "andite/guweiz-diffusion",
369
- "stablediffusionapi/vector-art", #198
370
- "digiplay/xxMix_4",
371
- ###"mio/hiten", #"andite/hiten-diffusion",
372
- ### "andite/mashuu-diffusion",
373
- ### "andite/mignon-diffusion",
374
- ### "andite/mikapikazo-diffusion",
375
- ### "andite/piromizu-diffusion",
376
- "digiplay/Zevinemix_v1.0/",
377
-
378
- "digiplay/AnaMix_v2", #07.11
379
- "stablediffusionapi/animetestmodelv3",
380
- "yulet1de/anything", #232
381
- "hakurei/artstation-diffusion", #07.11
382
- "Fictiverse/Stable_Diffusion_BalloonArt_Model",
383
- "stablediffusionapi/bg-dream-irl",
384
- "stablediffusionapi/bg-dream-model-b", #193
385
- "Rardilit/Ciffusion_v0.1",
386
- "circulus/sd-anireal-2d-v2",
387
- "circulus/sd-photoreal-v2.7",
388
- "circulus/sd-photoreal-photo-v2",
389
- "circulus/sd-anireal-2.5d-v2",
390
- "circulus/sd-anireal-v2.5",
391
- "circulus/sd-photoreal-semi-v2",
392
- "circulus/sd-photoreal-real-v2",
393
- "circulus/sd-photoreal-v2.5",
394
- "circulus/sd-anireal-3d-v2",
395
- "circulus/sd-anireal-v2.8",
396
- "nitrosocke/classic-anim-diffusion",
397
- "Conflictx/Complex-Lineart", #245
398
- "sayakpaul/da-vinci-sd-pokemon",
399
- "nitrosocke/elden-ring-diffusion",
400
- "digiplay/EtherBluMix_1", #07.11
401
- "digiplay/fantasticmix_v40_test", #261
402
- "theintuitiveye/FantasyMix",
403
- "Fictiverse/Stable_Diffusion_FluidArt_Model",
404
- "nitrosocke/Future-Diffusion",
405
- "ItsJayQz/GTA5_Artwork_Diffusion", #205
406
- "digiplay/hellopure_v2.23",
407
- "TheLastBen/hrrzg-style-768px", #246
408
- "nevernotsean/IllustratedPaperMini", #242
409
- "dallinmackay/JWST-Deep-Space-diffusion",
410
- "prompthero/linkedin-diffusion",
411
- "mann-e/mann-e_4_rev-0-1", #210
412
- "ItsJayQz/Marvel_WhatIf_Diffusion", #206
413
- "yuanbit/max-15-1e-6-1500",
414
- "MyneFactory/MF-Base", #248
415
- "Fictiverse/Stable_Diffusion_Microscopic_model", #249
416
- "nitrosocke/mo-di-diffusion",
417
- "luongphamit/NeverEnding-Dream2", #241
418
- "lambdalabs/sd-naruto-diffusers", #201
419
- "Vernon-2/output_test",
420
- "Fictiverse/Stable_Diffusion_PaperCut_Model",
421
- "bsuutari/path_to_saved_model",
422
- "bsuutari/path_to_saved_model_rafa",
423
- "digiplay/PlanetBumix_v1",
424
- "lambdalabs/sd-pokemon-diffusers", #202
425
- "prompthero/poolsuite-diffusion",
426
- "digiplay/RealismEngine_v1",
427
- "nitrosocke/redshift-diffusion",
428
- "nitrosocke/redshift-diffusion-768",
429
- "nousr/robo-diffusion",
430
- "digiplay/SDVN1-Real_v1", #255
431
- "nitrosocke/spider-verse-diffusion",
432
- #"runwayml/stable-diffusion-v1-5",
433
- "nicky007/stable-diffusion-logo-fine-tuned",
434
- "stablediffusionapi/three-delicacy", #233
435
- "stablediffusionapi/three-delicacy-wonto", #234
436
- "naclbit/trinart_stable_diffusion_v2",
437
- "dallinmackay/Tron-Legacy-diffusion",
438
- "digiplay/unstableDiffusersYamerMIX_v3",
439
- "dallinmackay/Van-Gogh-diffusion",
440
- "ItsJayQz/Valorant_Diffusion",
441
- "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204
442
- "wavymulder/wavyfusion",
443
- "CompVis/stable-diffusion-v1-3", #207
444
- "CompVis/stable-diffusion-v1-2", #208
445
- "CompVis/stable-diffusion-v1-1", #209
446
- "Yntec/CinematicReality",
447
- ]
448
- current_model = models[0]
449
-
450
- text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
451
-
452
- models2=[
453
- gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False),
454
- gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False),
455
- gr.Interface.load(f"models/{models[2]}",live=True,preprocess=False),
456
- gr.Interface.load(f"models/{models[3]}",live=True,preprocess=False),
457
- gr.Interface.load(f"models/{models[4]}",live=True,preprocess=False),
458
- gr.Interface.load(f"models/{models[5]}",live=True,preprocess=False),
459
- gr.Interface.load(f"models/{models[6]}",live=True,preprocess=False),
460
- gr.Interface.load(f"models/{models[7]}",live=True,preprocess=False),
461
- gr.Interface.load(f"models/{models[8]}",live=True,preprocess=False),
462
- gr.Interface.load(f"models/{models[9]}",live=True,preprocess=False),
463
- gr.Interface.load(f"models/{models[10]}",live=True,preprocess=False),
464
- gr.Interface.load(f"models/{models[11]}",live=True,preprocess=False),
465
- gr.Interface.load(f"models/{models[12]}",live=True,preprocess=False),
466
- gr.Interface.load(f"models/{models[13]}",live=True,preprocess=False),
467
- gr.Interface.load(f"models/{models[14]}",live=True,preprocess=False),
468
- gr.Interface.load(f"models/{models[15]}",live=True,preprocess=False),
469
- gr.Interface.load(f"models/{models[16]}",live=True,preprocess=False),
470
- gr.Interface.load(f"models/{models[17]}",live=True,preprocess=False),
471
- gr.Interface.load(f"models/{models[18]}",live=True,preprocess=False),
472
- gr.Interface.load(f"models/{models[19]}",live=True,preprocess=False),
473
- gr.Interface.load(f"models/{models[20]}",live=True,preprocess=False),
474
- gr.Interface.load(f"models/{models[21]}",live=True,preprocess=False),
475
- gr.Interface.load(f"models/{models[22]}",live=True,preprocess=False),
476
- gr.Interface.load(f"models/{models[23]}",live=True,preprocess=False),
477
- gr.Interface.load(f"models/{models[24]}",live=True,preprocess=False),
478
- gr.Interface.load(f"models/{models[25]}",live=True,preprocess=False),
479
- gr.Interface.load(f"models/{models[26]}",live=True,preprocess=False),
480
- gr.Interface.load(f"models/{models[27]}",live=True,preprocess=False),
481
- gr.Interface.load(f"models/{models[28]}",live=True,preprocess=False),
482
- gr.Interface.load(f"models/{models[29]}",live=True,preprocess=False),
483
- gr.Interface.load(f"models/{models[30]}",live=True,preprocess=False),
484
- gr.Interface.load(f"models/{models[31]}",live=True,preprocess=False),
485
- gr.Interface.load(f"models/{models[32]}",live=True,preprocess=False),
486
- gr.Interface.load(f"models/{models[33]}",live=True,preprocess=False),
487
- gr.Interface.load(f"models/{models[34]}",live=True,preprocess=False),
488
- gr.Interface.load(f"models/{models[35]}",live=True,preprocess=False),
489
- gr.Interface.load(f"models/{models[36]}",live=True,preprocess=False),
490
- gr.Interface.load(f"models/{models[37]}",live=True,preprocess=False),
491
- gr.Interface.load(f"models/{models[38]}",live=True,preprocess=False),
492
- gr.Interface.load(f"models/{models[39]}",live=True,preprocess=False),
493
- gr.Interface.load(f"models/{models[40]}",live=True,preprocess=False),
494
- gr.Interface.load(f"models/{models[41]}",live=True,preprocess=False),
495
- gr.Interface.load(f"models/{models[42]}",live=True,preprocess=False),
496
- gr.Interface.load(f"models/{models[43]}",live=True,preprocess=False),
497
- gr.Interface.load(f"models/{models[44]}",live=True,preprocess=False),
498
- gr.Interface.load(f"models/{models[45]}",live=True,preprocess=False),
499
- gr.Interface.load(f"models/{models[46]}",live=True,preprocess=False),
500
- gr.Interface.load(f"models/{models[47]}",live=True,preprocess=False),
501
- gr.Interface.load(f"models/{models[48]}",live=True,preprocess=False),
502
- gr.Interface.load(f"models/{models[49]}",live=True,preprocess=False),
503
- gr.Interface.load(f"models/{models[50]}",live=True,preprocess=False),
504
- gr.Interface.load(f"models/{models[51]}",live=True,preprocess=False),
505
- gr.Interface.load(f"models/{models[52]}",live=True,preprocess=False),
506
- gr.Interface.load(f"models/{models[53]}",live=True,preprocess=False),
507
- gr.Interface.load(f"models/{models[54]}",live=True,preprocess=False),
508
- gr.Interface.load(f"models/{models[55]}",live=True,preprocess=False),
509
- gr.Interface.load(f"models/{models[56]}",live=True,preprocess=False),
510
- gr.Interface.load(f"models/{models[57]}",live=True,preprocess=False),
511
- gr.Interface.load(f"models/{models[58]}",live=True,preprocess=False),
512
- gr.Interface.load(f"models/{models[59]}",live=True,preprocess=False),
513
- gr.Interface.load(f"models/{models[60]}",live=True,preprocess=False),
514
- gr.Interface.load(f"models/{models[61]}",live=True,preprocess=False),
515
- gr.Interface.load(f"models/{models[62]}",live=True,preprocess=False),
516
- gr.Interface.load(f"models/{models[63]}",live=True,preprocess=False),
517
- gr.Interface.load(f"models/{models[64]}",live=True,preprocess=False),
518
- gr.Interface.load(f"models/{models[65]}",live=True,preprocess=False),
519
- gr.Interface.load(f"models/{models[66]}",live=True,preprocess=False),
520
- gr.Interface.load(f"models/{models[67]}",live=True,preprocess=False),
521
- gr.Interface.load(f"models/{models[68]}",live=True,preprocess=False),
522
- gr.Interface.load(f"models/{models[69]}",live=True,preprocess=False),
523
- gr.Interface.load(f"models/{models[70]}",live=True,preprocess=False),
524
- gr.Interface.load(f"models/{models[71]}",live=True,preprocess=False),
525
- gr.Interface.load(f"models/{models[72]}",live=True,preprocess=False),
526
- gr.Interface.load(f"models/{models[73]}",live=True,preprocess=False),
527
- gr.Interface.load(f"models/{models[74]}",live=True,preprocess=False),
528
- gr.Interface.load(f"models/{models[75]}",live=True,preprocess=False),
529
- gr.Interface.load(f"models/{models[76]}",live=True,preprocess=False),
530
- gr.Interface.load(f"models/{models[77]}",live=True,preprocess=False),
531
- gr.Interface.load(f"models/{models[78]}",live=True,preprocess=False),
532
- gr.Interface.load(f"models/{models[79]}",live=True,preprocess=False),
533
- gr.Interface.load(f"models/{models[80]}",live=True,preprocess=False),
534
- gr.Interface.load(f"models/{models[81]}",live=True,preprocess=False),
535
- gr.Interface.load(f"models/{models[82]}",live=True,preprocess=False),
536
- gr.Interface.load(f"models/{models[83]}",live=True,preprocess=False),
537
- gr.Interface.load(f"models/{models[84]}",live=True,preprocess=False),
538
- gr.Interface.load(f"models/{models[85]}",live=True,preprocess=False),
539
- gr.Interface.load(f"models/{models[86]}",live=True,preprocess=False),
540
- gr.Interface.load(f"models/{models[87]}",live=True,preprocess=False),
541
- gr.Interface.load(f"models/{models[88]}",live=True,preprocess=False),
542
- gr.Interface.load(f"models/{models[89]}",live=True,preprocess=False),
543
- gr.Interface.load(f"models/{models[90]}",live=True,preprocess=False),
544
- gr.Interface.load(f"models/{models[91]}",live=True,preprocess=False),
545
- gr.Interface.load(f"models/{models[92]}",live=True,preprocess=False),
546
- gr.Interface.load(f"models/{models[93]}",live=True,preprocess=False),
547
- gr.Interface.load(f"models/{models[94]}",live=True,preprocess=False),
548
- gr.Interface.load(f"models/{models[95]}",live=True,preprocess=False),
549
- gr.Interface.load(f"models/{models[96]}",live=True,preprocess=False),
550
- gr.Interface.load(f"models/{models[97]}",live=True,preprocess=False),
551
- gr.Interface.load(f"models/{models[98]}",live=True,preprocess=False),
552
- gr.Interface.load(f"models/{models[99]}",live=True,preprocess=False),
553
- gr.Interface.load(f"models/{models[100]}",live=True,preprocess=False),
554
- gr.Interface.load(f"models/{models[101]}",live=True,preprocess=False),
555
- gr.Interface.load(f"models/{models[102]}",live=True,preprocess=False),
556
- gr.Interface.load(f"models/{models[103]}",live=True,preprocess=False),
557
- gr.Interface.load(f"models/{models[104]}",live=True,preprocess=False),
558
- gr.Interface.load(f"models/{models[105]}",live=True,preprocess=False),
559
- gr.Interface.load(f"models/{models[106]}",live=True,preprocess=False),
560
- gr.Interface.load(f"models/{models[107]}",live=True,preprocess=False),
561
- gr.Interface.load(f"models/{models[108]}",live=True,preprocess=False),
562
- gr.Interface.load(f"models/{models[109]}",live=True,preprocess=False),
563
- gr.Interface.load(f"models/{models[110]}",live=True,preprocess=False),
564
- gr.Interface.load(f"models/{models[111]}",live=True,preprocess=False),
565
- gr.Interface.load(f"models/{models[112]}",live=True,preprocess=False),
566
- gr.Interface.load(f"models/{models[113]}",live=True,preprocess=False),
567
- gr.Interface.load(f"models/{models[114]}",live=True,preprocess=False),
568
- gr.Interface.load(f"models/{models[115]}",live=True,preprocess=False),
569
- gr.Interface.load(f"models/{models[116]}",live=True,preprocess=False),
570
- gr.Interface.load(f"models/{models[117]}",live=True,preprocess=False),
571
- gr.Interface.load(f"models/{models[118]}",live=True,preprocess=False),
572
- gr.Interface.load(f"models/{models[119]}",live=True,preprocess=False),
573
- gr.Interface.load(f"models/{models[120]}",live=True,preprocess=False),
574
- gr.Interface.load(f"models/{models[121]}",live=True,preprocess=False),
575
- gr.Interface.load(f"models/{models[122]}",live=True,preprocess=False),
576
- gr.Interface.load(f"models/{models[123]}",live=True,preprocess=False),
577
- gr.Interface.load(f"models/{models[124]}",live=True,preprocess=False),
578
- gr.Interface.load(f"models/{models[125]}",live=True,preprocess=False),
579
- gr.Interface.load(f"models/{models[126]}",live=True,preprocess=False),
580
- gr.Interface.load(f"models/{models[127]}",live=True,preprocess=False),
581
- gr.Interface.load(f"models/{models[128]}",live=True,preprocess=False),
582
- gr.Interface.load(f"models/{models[129]}",live=True,preprocess=False),
583
- gr.Interface.load(f"models/{models[130]}",live=True,preprocess=False),
584
- gr.Interface.load(f"models/{models[131]}",live=True,preprocess=False),
585
- gr.Interface.load(f"models/{models[132]}",live=True,preprocess=False),
586
- gr.Interface.load(f"models/{models[133]}",live=True,preprocess=False),
587
- gr.Interface.load(f"models/{models[134]}",live=True,preprocess=False),
588
- gr.Interface.load(f"models/{models[135]}",live=True,preprocess=False),
589
- gr.Interface.load(f"models/{models[136]}",live=True,preprocess=False),
590
- gr.Interface.load(f"models/{models[137]}",live=True,preprocess=False),
591
- gr.Interface.load(f"models/{models[138]}",live=True,preprocess=False),
592
- gr.Interface.load(f"models/{models[139]}",live=True,preprocess=False),
593
- gr.Interface.load(f"models/{models[140]}",live=True,preprocess=False),
594
- gr.Interface.load(f"models/{models[141]}",live=True,preprocess=False),
595
- gr.Interface.load(f"models/{models[142]}",live=True,preprocess=False),
596
- gr.Interface.load(f"models/{models[143]}",live=True,preprocess=False),
597
- gr.Interface.load(f"models/{models[144]}",live=True,preprocess=False),
598
- gr.Interface.load(f"models/{models[145]}",live=True,preprocess=False),
599
- gr.Interface.load(f"models/{models[146]}",live=True,preprocess=False),
600
- gr.Interface.load(f"models/{models[147]}",live=True,preprocess=False),
601
- gr.Interface.load(f"models/{models[148]}",live=True,preprocess=False),
602
- gr.Interface.load(f"models/{models[149]}",live=True,preprocess=False),
603
- gr.Interface.load(f"models/{models[150]}",live=True,preprocess=False),
604
- gr.Interface.load(f"models/{models[151]}",live=True,preprocess=False),
605
- gr.Interface.load(f"models/{models[152]}",live=True,preprocess=False),
606
- gr.Interface.load(f"models/{models[153]}",live=True,preprocess=False),
607
- gr.Interface.load(f"models/{models[154]}",live=True,preprocess=False),
608
- gr.Interface.load(f"models/{models[155]}",live=True,preprocess=False),
609
- gr.Interface.load(f"models/{models[156]}",live=True,preprocess=False),
610
- gr.Interface.load(f"models/{models[157]}",live=True,preprocess=False),
611
- gr.Interface.load(f"models/{models[158]}",live=True,preprocess=False),
612
- gr.Interface.load(f"models/{models[159]}",live=True,preprocess=False),
613
-
614
- gr.Interface.load(f"models/{models[160]}",live=True,preprocess=False),
615
- gr.Interface.load(f"models/{models[161]}",live=True,preprocess=False),
616
- gr.Interface.load(f"models/{models[162]}",live=True,preprocess=False),
617
- gr.Interface.load(f"models/{models[163]}",live=True,preprocess=False),
618
- gr.Interface.load(f"models/{models[164]}",live=True,preprocess=False),
619
- gr.Interface.load(f"models/{models[165]}",live=True,preprocess=False),
620
- gr.Interface.load(f"models/{models[166]}",live=True,preprocess=False),
621
- gr.Interface.load(f"models/{models[167]}",live=True,preprocess=False),
622
- gr.Interface.load(f"models/{models[168]}",live=True,preprocess=False),
623
- gr.Interface.load(f"models/{models[169]}",live=True,preprocess=False),
624
-
625
- gr.Interface.load(f"models/{models[170]}",live=True,preprocess=False),
626
- gr.Interface.load(f"models/{models[171]}",live=True,preprocess=False),
627
- gr.Interface.load(f"models/{models[172]}",live=True,preprocess=False),
628
- gr.Interface.load(f"models/{models[173]}",live=True,preprocess=False),
629
- gr.Interface.load(f"models/{models[174]}",live=True,preprocess=False),
630
- gr.Interface.load(f"models/{models[175]}",live=True,preprocess=False),
631
- gr.Interface.load(f"models/{models[176]}",live=True,preprocess=False),
632
- gr.Interface.load(f"models/{models[177]}",live=True,preprocess=False),
633
- gr.Interface.load(f"models/{models[178]}",live=True,preprocess=False),
634
- gr.Interface.load(f"models/{models[179]}",live=True,preprocess=False),
635
-
636
- gr.Interface.load(f"models/{models[180]}",live=True,preprocess=False),
637
- gr.Interface.load(f"models/{models[181]}",live=True,preprocess=False),
638
- gr.Interface.load(f"models/{models[182]}",live=True,preprocess=False),
639
- gr.Interface.load(f"models/{models[183]}",live=True,preprocess=False),
640
- gr.Interface.load(f"models/{models[184]}",live=True,preprocess=False),
641
- gr.Interface.load(f"models/{models[185]}",live=True,preprocess=False),
642
- gr.Interface.load(f"models/{models[186]}",live=True,preprocess=False),
643
- gr.Interface.load(f"models/{models[187]}",live=True,preprocess=False),
644
- gr.Interface.load(f"models/{models[188]}",live=True,preprocess=False),
645
- # Remaining models2 entries, collapsed into a single comprehension that is
- # equivalent to listing gr.Interface.load for models[189] through models[414] one by one.
- *[gr.Interface.load(f"models/{models[i]}", live=True, preprocess=False) for i in range(189, 415)],
- ]
-
-
- def text_it1(inputs, text_gen1=text_gen1):
-     # Run the prompt-extension text generator on the user's input.
-     go_t1 = text_gen1(inputs)
-     return go_t1
-
-
- def set_model(current_model):
-     # Map the dropdown index to a model name and relabel the output image box.
-     current_model = models[current_model]
-     return gr.update(label=f"{current_model}")
-
-
- def send_it1(inputs, model_choice):
-     # Route the prompt to the pre-loaded interface selected in the dropdown.
-     proc1 = models2[model_choice]
-     output1 = proc1(inputs)
-     return output1
-
-
- css = ""
-
- with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as myface:
915
- gr.HTML("""
916
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
917
- <div>
918
- <style>
919
- h1 {
920
- font-size: 6em;
921
- color: #ffc99f;
922
- margin-top: 30px;
923
- margin-bottom: 30px;
924
- text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
925
- }
926
- h3 {
- color: #ffc99f !important;
- }
- h4 {
- color: #ffffff !important;
- }
932
- .gradio-container {
933
- background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
934
- color: #ffaa66 !important;
935
- font-family: 'IBM Plex Sans', sans-serif !important;
936
- }
937
- .text-gray-500 {
938
- color: #ffc99f !important;
939
- }
940
- .gr-box {
941
- background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
942
- border-top-color: #000000 !important;
943
- border-right-color: #ffffff !important;
944
- border-bottom-color: #ffffff !important;
945
- border-left-color: #000000 !important;
946
- }
947
- .gr-input {
- color: #ffc99f !important;
- background-color: #254150 !important;
- }
951
- :root {
952
- --neutral-100: #000000 !important;
953
- }
954
- </style>
955
- <body>
956
- <div class="center"><h1>Einfach.AI</h1>
957
- </div>
958
- </body>
959
- </div>
960
- <p style="margin-bottom: 10px; color: #ffaa66;">
- <h3>Top 411 Stable Diffusion models for your enjoyment!</h3></p>
- <p style="margin-bottom: 10px; font-size: 98%">
- <br><h4>The first time a model is loaded, it takes 200 seconds.</h4>
- <br><h4>But once it is loaded, it takes 20 seconds to generate each new image.</h4></p>
965
- </div>
966
- """)
967
- with gr.Row():
968
- with gr.Column(scale=100):
969
- #Model selection dropdown
970
- model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
971
- with gr.Row():
972
- with gr.Column(scale=100):
973
- magic1=gr.Textbox(label="Your Prompt", lines=4)
974
- gr.HTML("""<style> .gr-button {
975
- color: #ffffff !important;
976
- text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
977
- background-image: linear-gradient(#76635a, #d2a489) !important;
978
- border-radius: 24px !important;
979
- border: solid 1px !important;
980
- border-top-color: #ffc99f !important;
981
- border-right-color: #000000 !important;
982
- border-bottom-color: #000000 !important;
983
- border-left-color: #ffc99f !important;
984
- padding: 6px 30px;
985
- }
986
-
987
- .gr-button:active {
988
- color: #ffc99f !important;
989
- font-size: 98% !important;
990
- text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
991
- background-image: linear-gradient(#d2a489, #76635a) !important;
992
- border-top-color: #000000 !important;
993
- border-right-color: #ffffff !important;
994
- border-bottom-color: #ffffff !important;
995
- border-left-color: #000000 !important;
996
- }
997
-
998
- .gr-button:hover {
999
- filter: brightness(130%);
1000
- }
1001
-
1002
- </style>""")
1003
- run=gr.Button("Generate Image")
1004
- with gr.Row():
1005
- with gr.Column(style="width=800px"):
1006
- output1=gr.Image(label=(f"{current_model}"))
1007
-
1008
-
1009
- with gr.Row():
1010
- with gr.Column(scale=50):
1011
- input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
1012
- use_short=gr.Button("Use Short Prompt")
1013
- see_prompts=gr.Button("Extend Idea")
1014
-
1015
-
1016
- def short_prompt(inputs):
1017
- return(inputs)
1018
-
1019
- model_name1.change(set_model,inputs=model_name1,outputs=[output1])
1020
-
1021
- run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
1022
-
1023
- use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
1024
-
1025
- see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
1026
-
1027
- myface.queue(concurrency_count=200)
1028
- myface.launch(inline=True, show_api=False, max_threads=400)
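For context, a minimal sketch (not part of the original Space) of how one of these loaded endpoints can be queried on its own, mirroring what `send_it1` does inside the UI. The model id below is only a placeholder, and the exact return type of the call depends on the Gradio version in use.

```python
import gradio as gr

# Placeholder model id; any entry of the `models` list could be substituted here.
demo = gr.Interface.load("models/prompthero/openjourney", live=True, preprocess=False)

# A loaded interface is callable, which is exactly how send_it1 uses models2[...]:
# passing a prompt returns the generated image (a file path or image object,
# depending on the Gradio version).
result = demo("a watercolor painting of a lighthouse at dawn")
print(result)
```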
 
spaces/Alican/pixera/util/util.py DELETED
@@ -1,103 +0,0 @@
1
- """This module contains simple helper functions """
2
- from __future__ import print_function
3
- import torch
4
- import numpy as np
5
- from PIL import Image
6
- import os
7
-
8
-
9
- def tensor2im(input_image, imtype=np.uint8):
-     """Convert a Tensor array into a numpy image array.
-
-     Parameters:
-         input_image (tensor) -- the input image tensor array
-         imtype (type)        -- the desired type of the converted numpy array
-     """
-     if not isinstance(input_image, np.ndarray):
-         if isinstance(input_image, torch.Tensor):  # get the data from a variable
-             image_tensor = input_image.data
-         else:
-             return input_image
-         image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
-         if image_numpy.shape[0] == 1:  # grayscale to RGB
-             image_numpy = np.tile(image_numpy, (3, 1, 1))
-         image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
-     else:  # if it is a numpy array, do nothing
-         image_numpy = input_image
-     return image_numpy.astype(imtype)
28
-
29
-
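A quick illustrative usage of `tensor2im` (not part of the original module): it expects a batched CHW tensor scaled to [-1, 1], as handled above, and returns an HWC uint8 array.

```python
import torch

# Fake generator output: batch of one 3x8x8 image in [-1, 1].
fake = torch.rand(1, 3, 8, 8) * 2 - 1

img = tensor2im(fake)          # uses tensor2im defined in this module
print(img.shape, img.dtype)    # (8, 8, 3) uint8
```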
30
- def diagnose_network(net, name='network'):
31
- """Calculate and print the mean of the average absolute gradients.
32
-
33
- Parameters:
34
- net (torch network) -- Torch network
35
- name (str) -- the name of the network
36
- """
37
- mean = 0.0
38
- count = 0
39
- for param in net.parameters():
40
- if param.grad is not None:
41
- mean += torch.mean(torch.abs(param.grad.data))
42
- count += 1
43
- if count > 0:
44
- mean = mean / count
45
- print(name)
46
- print(mean)
47
-
48
-
49
- def save_image(image_numpy, image_path, aspect_ratio=1.0):
50
- """Save a numpy image to the disk.
-
- Parameters:
- image_numpy (numpy array) -- input numpy array
- image_path (str) -- the path of the image
- aspect_ratio (float) -- scale one side of the saved image by this ratio (1.0 keeps the original size)
- """
56
-
57
- image_pil = Image.fromarray(image_numpy)
58
- h, w, _ = image_numpy.shape
59
-
60
- if aspect_ratio > 1.0:
61
- image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
62
- if aspect_ratio < 1.0:
63
- image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
64
- image_pil.save(image_path)
65
-
66
-
67
- def print_numpy(x, val=True, shp=False):
68
- """Print the mean, min, max, median, std, and size of a numpy array
69
-
70
- Parameters:
71
- val (bool) -- if print the values of the numpy array
72
- shp (bool) -- if print the shape of the numpy array
73
- """
74
- x = x.astype(np.float64)
75
- if shp:
76
- print('shape,', x.shape)
77
- if val:
78
- x = x.flatten()
79
- print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
80
- np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
81
-
82
-
83
- def mkdirs(paths):
84
- """create empty directories if they don't exist
85
-
86
- Parameters:
87
- paths (str list) -- a list of directory paths
88
- """
89
- if isinstance(paths, list) and not isinstance(paths, str):
90
- for path in paths:
91
- mkdir(path)
92
- else:
93
- mkdir(paths)
94
-
95
-
96
- def mkdir(path):
97
- """create a single empty directory if it didn't exist
98
-
99
- Parameters:
100
- path (str) -- a single directory path
101
- """
102
- if not os.path.exists(path):
103
- os.makedirs(path)
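For illustration only (not in the original file), typical usage of `mkdirs`; note that `os.makedirs(path, exist_ok=True)` is the modern standard-library equivalent of `mkdir`.

```python
# Create several output directories in one call; existing ones are left untouched.
mkdirs(['./results/images', './results/checkpoints'])

# Standard-library one-liner equivalent for a single directory:
# os.makedirs('./results/images', exist_ok=True)
```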
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/docs/Dataset.md DELETED
@@ -1,74 +0,0 @@
1
- # SHHQ Dataset
2
- <img src="../img/preview_samples1.png" width="96%" height="96%">
3
-
4
- ## Overview
5
- SHHQ is a dataset of high-quality full-body human images at a resolution of 1024 × 512.
- Because every release must pass a rigorous legal review at our institute, we cannot release all of the data at once.
-
- For now, SHHQ-1.0 with 40K images is released! More data will be released in later versions.
9
-
10
-
11
- ## Data Sources
12
- Images are collected in two main ways:
13
- 1) From the Internet.
14
- We developed a crawler tool with an official API, mainly downloading images from Flickr, Pixabay and Pexels. So you need to meet all the following licenses when using the dataset: CC0, [Pixabay License](https://pixabay.com/service/license/), and [Pexels Licenses](https://www.pexels.com/license/).
15
- 2) From the data providers.
16
- We purchased images from databases of individual photographers, modeling agencies and other suppliers.
17
- Images were reviewed by our legal team prior to purchase to ensure permission for use in research.
18
-
19
- ### Note:
20
- The composition of SHHQ-1.0:
21
-
22
- 1) Images obtained from the above sources.
23
- 2) Processed 9991 DeepFashion [[1]](#1) images (retain only full body images).
24
- 3) 1940 African images from the InFashAI [[2]](#2) dataset to increase data diversity.
25
-
26
- ## Data License
27
- We are aware of privacy concerns and take licensing and privacy issues seriously. All released data is provided under the CC0 license and is free for research use. In addition, persons in the dataset are anonymised, and no additional private or sensitive metadata is included.
28
-
29
- ## Agreement
30
- The SHHQ is available for non-commercial research purposes only.
31
-
32
- You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit any portion of the images and any portion of the derived data for commercial purposes.
33
-
34
- You agree NOT to further copy, publish or distribute any portion of SHHQ to any third party for any purpose. Making copies of the dataset for internal use at a single site within the same organization is, however, permitted.
35
-
36
- Shanghai AI Lab reserves the right to terminate your access to the SHHQ at any time.
37
-
38
- ## Dataset Preview
39
- For those interested in our dataset, we provide a preview version with 100 images randomly sampled from SHHQ-1.0: [SHHQ-1.0_samples](https://drive.google.com/file/d/1tnNFfmFtzRbYL3qEnNXQ_ShaN9YV5tI5/view?usp=sharing).
40
-
41
- In SHHQ-1.0, we provide aligned raw images along with machine-generated segmentation masks. We are also planning to release a manually annotated human-parsing version of these 40,000 images later. Please stay tuned.
42
-
43
- > We also provide script [bg_white.py](../bg_white.py) to whiten the background of the raw image using its segmentation mask.
44
-
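The released `bg_white.py` should be used for this, but as a rough illustration of the operation, here is a minimal sketch of background whitening with a binary segmentation mask. The file names and the mask convention (non-zero pixels mark the person) are assumptions, not the actual interface of the script.

```python
import numpy as np
from PIL import Image

# Assumed inputs: an aligned raw image and its segmentation mask.
img = np.array(Image.open("raw.png").convert("RGB"))
mask = np.array(Image.open("mask.png").convert("L")) > 0   # True = person (assumed convention)

out = img.copy()
out[~mask] = 255                      # paint every background pixel white
Image.fromarray(out).save("raw_white_bg.png")
```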
45
- If you want to access the full SHHQ-1.0, please read the following instructions.
46
-
47
- ## Model trained using SHHQ-1.0
48
-
49
- | Structure | 1024x512 | Metric | Scores | 512x256 | Metric | Scores |
50
- | --------- |:----------:| :----------:| :----------:| :-----: | :-----: | :-----: |
51
- | StyleGAN1 | to be released | - | - | to be released | - | - |
52
- | StyleGAN2 | [SHHQ-1.0_sg2_1024.pkl](https://drive.google.com/file/d/1PuvE72xpc69Zq4y58dohuKbG9dFnnjEX/view?usp=sharing) | fid50k_full | 3.56 | [SHHQ-1.0_sg2_512.pkl](https://drive.google.com/file/d/170t2FRWxR8_TG3_y0nVtDBogLPOClnyf/view?usp=sharing) | fid50k_full | 3.68 |
53
- | StyleGAN3 | to be released | - | - |to be released | - | - |
54
-
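A hedged sketch of sampling from one of the released StyleGAN2 checkpoints, assuming the standard StyleGAN2-ADA pickle layout (a dict with a `'G_ema'` generator) and that the repository's `dnnlib`/`torch_utils` packages are importable; the file name is a placeholder taken from the table above.

```python
import pickle
import torch

# Placeholder path; use the checkpoint link obtained via the table above.
with open("SHHQ-1.0_sg2_1024.pkl", "rb") as f:
    G = pickle.load(f)["G_ema"].to("cuda").eval()   # assumed StyleGAN2-ADA layout

z = torch.randn(1, G.z_dim, device="cuda")
img = G(z, None)   # unconditional generation; NCHW tensor roughly in [-1, 1]
```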
55
-
56
- ## Download Instructions
57
- Please download the SHHQ Dataset Release Agreement from [link](./SHHQ_Dataset_Release_Agreement.pdf).
58
- Read it carefully, complete and sign it appropriately.
59
-
60
- Please send the completed form to Jianglin Fu ([email protected]) and Shikai Li ([email protected]), and cc to Wayne Wu ([email protected]) using institutional email address. The email Subject Title is "SHHQ Dataset Release Agreement". We will verify your request and contact you with the dataset link and password to unzip the image data.
61
-
62
- Note:
63
-
64
- 1. We are currently receiving a large number of applications and need to verify every applicant carefully. Please be patient; we will reply to you as soon as possible.
65
-
66
- 2. The signature in the agreement should be hand-written.
67
-
68
- ## References
69
- <a id="1">[1]</a>
70
- Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou. DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations. CVPR (2016)
71
-
72
- <a id="2">[2]</a>
73
- Hacheme, Gilles and Sayouti, Noureini. Neural fashion image captioning: Accounting for data diversity. arXiv preprint arXiv:2106.12154 (2021)
74
-
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py DELETED
@@ -1,761 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Callable, List, Optional, Union
16
-
17
- import PIL
18
- import torch
19
- from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection
20
-
21
- from ...models import PriorTransformer, UNet2DConditionModel, VQModel
22
- from ...schedulers import DDPMScheduler, UnCLIPScheduler
23
- from ...utils import (
24
- logging,
25
- replace_example_docstring,
26
- )
27
- from ..pipeline_utils import DiffusionPipeline
28
- from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
29
- from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
30
- from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
31
- from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline
32
-
33
-
34
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
-
36
- TEXT2IMAGE_EXAMPLE_DOC_STRING = """
37
- Examples:
38
- ```py
39
- from diffusers import AutoPipelineForText2Image
40
- import torch
41
-
42
- pipe = AutoPipelineForText2Image.from_pretrained(
43
- "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
44
- )
45
- pipe.enable_model_cpu_offload()
46
-
47
- prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
48
-
49
- image = pipe(prompt=prompt, num_inference_steps=25).images[0]
50
- ```
51
- """
52
-
53
- IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
54
- Examples:
55
- ```py
56
- from diffusers import AutoPipelineForImage2Image
57
- import torch
58
- import requests
59
- from io import BytesIO
60
- from PIL import Image
61
- import os
62
-
63
- pipe = AutoPipelineForImage2Image.from_pretrained(
64
- "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
65
- )
66
- pipe.enable_model_cpu_offload()
67
-
68
- prompt = "A fantasy landscape, Cinematic lighting"
69
- negative_prompt = "low quality, bad quality"
70
-
71
- url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
72
-
73
- response = requests.get(url)
74
- image = Image.open(BytesIO(response.content)).convert("RGB")
75
- image.thumbnail((768, 768))
76
-
77
- image = pipe(prompt=prompt, image=image, num_inference_steps=25).images[0]
78
- ```
79
- """
80
-
81
- INPAINT_EXAMPLE_DOC_STRING = """
82
- Examples:
83
- ```py
84
- from diffusers import AutoPipelineForInpainting
85
- from diffusers.utils import load_image
86
- import torch
87
- import numpy as np
88
-
89
- pipe = AutoPipelineForInpainting.from_pretrained(
90
- "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
91
- )
92
- pipe.enable_model_cpu_offload()
93
-
94
- prompt = "A fantasy landscape, Cinematic lighting"
95
- negative_prompt = "low quality, bad quality"
96
-
97
- original_image = load_image(
98
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
99
- )
100
-
101
- mask = np.zeros((768, 768), dtype=np.float32)
102
- # Let's mask out an area above the cat's head
103
- mask[:250, 250:-250] = 1
104
-
105
- image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
106
- ```
107
- """
108
-
109
-
110
- class KandinskyV22CombinedPipeline(DiffusionPipeline):
111
- """
112
- Combined Pipeline for text-to-image generation using Kandinsky
113
-
114
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
115
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
116
-
117
- Args:
118
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
119
- A scheduler to be used in combination with `unet` to generate image latents.
120
- unet ([`UNet2DConditionModel`]):
121
- Conditional U-Net architecture to denoise the image embedding.
122
- movq ([`VQModel`]):
123
- MoVQ Decoder to generate the image from the latents.
124
- prior_prior ([`PriorTransformer`]):
125
- The canonical unCLIP prior to approximate the image embedding from the text embedding.
126
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
127
- Frozen image-encoder.
128
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
129
- Frozen text-encoder.
130
- prior_tokenizer (`CLIPTokenizer`):
131
- Tokenizer of class
132
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
133
- prior_scheduler ([`UnCLIPScheduler`]):
134
- A scheduler to be used in combination with `prior` to generate image embedding.
135
- prior_image_processor ([`CLIPImageProcessor`]):
136
- An image processor used to preprocess images for the CLIP image encoder.
137
- """
138
-
139
- _load_connected_pipes = True
140
-
141
- def __init__(
142
- self,
143
- unet: UNet2DConditionModel,
144
- scheduler: DDPMScheduler,
145
- movq: VQModel,
146
- prior_prior: PriorTransformer,
147
- prior_image_encoder: CLIPVisionModelWithProjection,
148
- prior_text_encoder: CLIPTextModelWithProjection,
149
- prior_tokenizer: CLIPTokenizer,
150
- prior_scheduler: UnCLIPScheduler,
151
- prior_image_processor: CLIPImageProcessor,
152
- ):
153
- super().__init__()
154
-
155
- self.register_modules(
156
- unet=unet,
157
- scheduler=scheduler,
158
- movq=movq,
159
- prior_prior=prior_prior,
160
- prior_image_encoder=prior_image_encoder,
161
- prior_text_encoder=prior_text_encoder,
162
- prior_tokenizer=prior_tokenizer,
163
- prior_scheduler=prior_scheduler,
164
- prior_image_processor=prior_image_processor,
165
- )
166
- self.prior_pipe = KandinskyV22PriorPipeline(
167
- prior=prior_prior,
168
- image_encoder=prior_image_encoder,
169
- text_encoder=prior_text_encoder,
170
- tokenizer=prior_tokenizer,
171
- scheduler=prior_scheduler,
172
- image_processor=prior_image_processor,
173
- )
174
- self.decoder_pipe = KandinskyV22Pipeline(
175
- unet=unet,
176
- scheduler=scheduler,
177
- movq=movq,
178
- )
179
-
180
- def enable_model_cpu_offload(self, gpu_id=0):
181
- r"""
182
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
183
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
184
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
185
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
186
- """
187
- self.prior_pipe.enable_model_cpu_offload()
188
- self.decoder_pipe.enable_model_cpu_offload()
189
-
190
- def progress_bar(self, iterable=None, total=None):
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
194
-
195
- def set_progress_bar_config(self, **kwargs):
196
- self.prior_pipe.set_progress_bar_config(**kwargs)
197
- self.decoder_pipe.set_progress_bar_config(**kwargs)
198
-
199
- @torch.no_grad()
200
- @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
201
- def __call__(
202
- self,
203
- prompt: Union[str, List[str]],
204
- negative_prompt: Optional[Union[str, List[str]]] = None,
205
- num_inference_steps: int = 100,
206
- guidance_scale: float = 4.0,
207
- num_images_per_prompt: int = 1,
208
- height: int = 512,
209
- width: int = 512,
210
- prior_guidance_scale: float = 4.0,
211
- prior_num_inference_steps: int = 25,
212
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
213
- latents: Optional[torch.FloatTensor] = None,
214
- output_type: Optional[str] = "pil",
215
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
216
- callback_steps: int = 1,
217
- return_dict: bool = True,
218
- ):
219
- """
220
- Function invoked when calling the pipeline for generation.
221
-
222
- Args:
223
- prompt (`str` or `List[str]`):
224
- The prompt or prompts to guide the image generation.
225
- negative_prompt (`str` or `List[str]`, *optional*):
226
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
227
- if `guidance_scale` is less than `1`).
228
- num_images_per_prompt (`int`, *optional*, defaults to 1):
229
- The number of images to generate per prompt.
230
- num_inference_steps (`int`, *optional*, defaults to 100):
231
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
232
- expense of slower inference.
233
- height (`int`, *optional*, defaults to 512):
234
- The height in pixels of the generated image.
235
- width (`int`, *optional*, defaults to 512):
236
- The width in pixels of the generated image.
237
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
238
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
239
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
240
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
241
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
242
- usually at the expense of lower image quality.
243
- prior_num_inference_steps (`int`, *optional*, defaults to 25):
244
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
245
- expense of slower inference.
246
- guidance_scale (`float`, *optional*, defaults to 4.0):
247
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
248
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
249
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
250
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
251
- usually at the expense of lower image quality.
252
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
253
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
254
- to make generation deterministic.
255
- latents (`torch.FloatTensor`, *optional*):
256
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
257
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
258
- tensor will be generated by sampling using the supplied random `generator`.
259
- output_type (`str`, *optional*, defaults to `"pil"`):
260
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
261
- (`np.array`) or `"pt"` (`torch.Tensor`).
262
- callback (`Callable`, *optional*):
263
- A function that calls every `callback_steps` steps during inference. The function is called with the
264
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
265
- callback_steps (`int`, *optional*, defaults to 1):
266
- The frequency at which the `callback` function is called. If not specified, the callback is called at
267
- every step.
268
- return_dict (`bool`, *optional*, defaults to `True`):
269
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
270
-
271
- Examples:
272
-
273
- Returns:
274
- [`~pipelines.ImagePipelineOutput`] or `tuple`
275
- """
276
- prior_outputs = self.prior_pipe(
277
- prompt=prompt,
278
- negative_prompt=negative_prompt,
279
- num_images_per_prompt=num_images_per_prompt,
280
- num_inference_steps=prior_num_inference_steps,
281
- generator=generator,
282
- latents=latents,
283
- guidance_scale=prior_guidance_scale,
284
- output_type="pt",
285
- return_dict=False,
286
- )
287
- image_embeds = prior_outputs[0]
288
- negative_image_embeds = prior_outputs[1]
289
-
290
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
291
-
292
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
293
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
294
-
295
- outputs = self.decoder_pipe(
296
- image_embeds=image_embeds,
297
- negative_image_embeds=negative_image_embeds,
298
- width=width,
299
- height=height,
300
- num_inference_steps=num_inference_steps,
301
- generator=generator,
302
- guidance_scale=guidance_scale,
303
- output_type=output_type,
304
- callback=callback,
305
- callback_steps=callback_steps,
306
- return_dict=return_dict,
307
- )
308
- return outputs
309
-
310
-
311
- class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline):
312
- """
313
- Combined Pipeline for image-to-image generation using Kandinsky
314
-
315
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
316
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
317
-
318
- Args:
319
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
320
- A scheduler to be used in combination with `unet` to generate image latents.
321
- unet ([`UNet2DConditionModel`]):
322
- Conditional U-Net architecture to denoise the image embedding.
323
- movq ([`VQModel`]):
324
- MoVQ Decoder to generate the image from the latents.
325
- prior_prior ([`PriorTransformer`]):
326
- The canonical unCLIP prior to approximate the image embedding from the text embedding.
327
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
328
- Frozen image-encoder.
329
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
330
- Frozen text-encoder.
331
- prior_tokenizer (`CLIPTokenizer`):
332
- Tokenizer of class
333
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
334
- prior_scheduler ([`UnCLIPScheduler`]):
335
- A scheduler to be used in combination with `prior` to generate image embedding.
336
- prior_image_processor ([`CLIPImageProcessor`]):
337
- An image processor used to preprocess images for the CLIP image encoder.
338
- """
339
-
340
- _load_connected_pipes = True
341
-
342
- def __init__(
343
- self,
344
- unet: UNet2DConditionModel,
345
- scheduler: DDPMScheduler,
346
- movq: VQModel,
347
- prior_prior: PriorTransformer,
348
- prior_image_encoder: CLIPVisionModelWithProjection,
349
- prior_text_encoder: CLIPTextModelWithProjection,
350
- prior_tokenizer: CLIPTokenizer,
351
- prior_scheduler: UnCLIPScheduler,
352
- prior_image_processor: CLIPImageProcessor,
353
- ):
354
- super().__init__()
355
-
356
- self.register_modules(
357
- unet=unet,
358
- scheduler=scheduler,
359
- movq=movq,
360
- prior_prior=prior_prior,
361
- prior_image_encoder=prior_image_encoder,
362
- prior_text_encoder=prior_text_encoder,
363
- prior_tokenizer=prior_tokenizer,
364
- prior_scheduler=prior_scheduler,
365
- prior_image_processor=prior_image_processor,
366
- )
367
- self.prior_pipe = KandinskyV22PriorPipeline(
368
- prior=prior_prior,
369
- image_encoder=prior_image_encoder,
370
- text_encoder=prior_text_encoder,
371
- tokenizer=prior_tokenizer,
372
- scheduler=prior_scheduler,
373
- image_processor=prior_image_processor,
374
- )
375
- self.decoder_pipe = KandinskyV22Img2ImgPipeline(
376
- unet=unet,
377
- scheduler=scheduler,
378
- movq=movq,
379
- )
380
-
381
- def enable_model_cpu_offload(self, gpu_id=0):
382
- r"""
383
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
384
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
385
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
386
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
387
- """
388
- self.prior_pipe.enable_model_cpu_offload()
389
- self.decoder_pipe.enable_model_cpu_offload()
390
-
391
- def progress_bar(self, iterable=None, total=None):
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
395
-
396
- def set_progress_bar_config(self, **kwargs):
397
- self.prior_pipe.set_progress_bar_config(**kwargs)
398
- self.decoder_pipe.set_progress_bar_config(**kwargs)
399
-
400
- @torch.no_grad()
401
- @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
402
- def __call__(
403
- self,
404
- prompt: Union[str, List[str]],
405
- image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
406
- negative_prompt: Optional[Union[str, List[str]]] = None,
407
- num_inference_steps: int = 100,
408
- guidance_scale: float = 4.0,
409
- strength: float = 0.3,
410
- num_images_per_prompt: int = 1,
411
- height: int = 512,
412
- width: int = 512,
413
- prior_guidance_scale: float = 4.0,
414
- prior_num_inference_steps: int = 25,
415
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
416
- latents: Optional[torch.FloatTensor] = None,
417
- output_type: Optional[str] = "pil",
418
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
419
- callback_steps: int = 1,
420
- return_dict: bool = True,
421
- ):
422
- """
423
- Function invoked when calling the pipeline for generation.
424
-
425
- Args:
426
- prompt (`str` or `List[str]`):
427
- The prompt or prompts to guide the image generation.
428
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
429
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
430
- process. Can also accept image latents as `image`; if latents are passed directly, they will not be encoded
431
- again.
432
- negative_prompt (`str` or `List[str]`, *optional*):
433
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
434
- if `guidance_scale` is less than `1`).
435
- num_images_per_prompt (`int`, *optional*, defaults to 1):
436
- The number of images to generate per prompt.
437
- guidance_scale (`float`, *optional*, defaults to 4.0):
438
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
439
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
440
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
441
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
442
- usually at the expense of lower image quality.
443
- strength (`float`, *optional*, defaults to 0.3):
444
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
445
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
446
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
447
- be maximum and the denoising process will run for the full number of iterations specified in
448
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
449
- num_inference_steps (`int`, *optional*, defaults to 100):
450
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
451
- expense of slower inference.
452
- height (`int`, *optional*, defaults to 512):
453
- The height in pixels of the generated image.
454
- width (`int`, *optional*, defaults to 512):
455
- The width in pixels of the generated image.
456
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
457
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
458
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
459
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
460
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
461
- usually at the expense of lower image quality.
462
- prior_num_inference_steps (`int`, *optional*, defaults to 25):
463
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
464
- expense of slower inference.
465
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
466
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
467
- to make generation deterministic.
468
- latents (`torch.FloatTensor`, *optional*):
469
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
470
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
471
- tensor will be generated by sampling using the supplied random `generator`.
472
- output_type (`str`, *optional*, defaults to `"pil"`):
473
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
474
- (`np.array`) or `"pt"` (`torch.Tensor`).
475
- callback (`Callable`, *optional*):
476
- A function that calls every `callback_steps` steps during inference. The function is called with the
477
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
478
- callback_steps (`int`, *optional*, defaults to 1):
479
- The frequency at which the `callback` function is called. If not specified, the callback is called at
480
- every step.
481
- return_dict (`bool`, *optional*, defaults to `True`):
482
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
483
-
484
- Examples:
485
-
486
- Returns:
487
- [`~pipelines.ImagePipelineOutput`] or `tuple`
488
- """
489
- prior_outputs = self.prior_pipe(
490
- prompt=prompt,
491
- negative_prompt=negative_prompt,
492
- num_images_per_prompt=num_images_per_prompt,
493
- num_inference_steps=prior_num_inference_steps,
494
- generator=generator,
495
- latents=latents,
496
- guidance_scale=prior_guidance_scale,
497
- output_type="pt",
498
- return_dict=False,
499
- )
500
- image_embeds = prior_outputs[0]
501
- negative_image_embeds = prior_outputs[1]
502
-
503
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
504
- image = [image] if isinstance(image, PIL.Image.Image) else image
505
-
506
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
507
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
508
-
509
- if (
510
- isinstance(image, (list, tuple))
511
- and len(image) < image_embeds.shape[0]
512
- and image_embeds.shape[0] % len(image) == 0
513
- ):
514
- image = (image_embeds.shape[0] // len(image)) * image
515
-
516
- outputs = self.decoder_pipe(
517
- image=image,
518
- image_embeds=image_embeds,
519
- negative_image_embeds=negative_image_embeds,
520
- width=width,
521
- height=height,
522
- strength=strength,
523
- num_inference_steps=num_inference_steps,
524
- generator=generator,
525
- guidance_scale=guidance_scale,
526
- output_type=output_type,
527
- callback=callback,
528
- callback_steps=callback_steps,
529
- return_dict=return_dict,
530
- )
531
- return outputs
532
-
533
-
534
- class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
535
- """
536
- Combined Pipeline for inpainting generation using Kandinsky
537
-
538
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
539
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
540
-
541
- Args:
542
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
543
- A scheduler to be used in combination with `unet` to generate image latents.
544
- unet ([`UNet2DConditionModel`]):
545
- Conditional U-Net architecture to denoise the image embedding.
546
- movq ([`VQModel`]):
547
- MoVQ Decoder to generate the image from the latents.
548
- prior_prior ([`PriorTransformer`]):
549
- The canonical unCLIP prior to approximate the image embedding from the text embedding.
550
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
551
- Frozen image-encoder.
552
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
553
- Frozen text-encoder.
554
- prior_tokenizer (`CLIPTokenizer`):
555
- Tokenizer of class
556
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
557
- prior_scheduler ([`UnCLIPScheduler`]):
558
- A scheduler to be used in combination with `prior` to generate image embedding.
559
- prior_image_processor ([`CLIPImageProcessor`]):
560
- An image processor used to preprocess images for the CLIP image encoder.
561
- """
562
-
563
- _load_connected_pipes = True
564
-
565
- def __init__(
566
- self,
567
- unet: UNet2DConditionModel,
568
- scheduler: DDPMScheduler,
569
- movq: VQModel,
570
- prior_prior: PriorTransformer,
571
- prior_image_encoder: CLIPVisionModelWithProjection,
572
- prior_text_encoder: CLIPTextModelWithProjection,
573
- prior_tokenizer: CLIPTokenizer,
574
- prior_scheduler: UnCLIPScheduler,
575
- prior_image_processor: CLIPImageProcessor,
576
- ):
577
- super().__init__()
578
-
579
- self.register_modules(
580
- unet=unet,
581
- scheduler=scheduler,
582
- movq=movq,
583
- prior_prior=prior_prior,
584
- prior_image_encoder=prior_image_encoder,
585
- prior_text_encoder=prior_text_encoder,
586
- prior_tokenizer=prior_tokenizer,
587
- prior_scheduler=prior_scheduler,
588
- prior_image_processor=prior_image_processor,
589
- )
590
- self.prior_pipe = KandinskyV22PriorPipeline(
591
- prior=prior_prior,
592
- image_encoder=prior_image_encoder,
593
- text_encoder=prior_text_encoder,
594
- tokenizer=prior_tokenizer,
595
- scheduler=prior_scheduler,
596
- image_processor=prior_image_processor,
597
- )
598
- self.decoder_pipe = KandinskyV22InpaintPipeline(
599
- unet=unet,
600
- scheduler=scheduler,
601
- movq=movq,
602
- )
603
-
604
- def enable_model_cpu_offload(self, gpu_id=0):
605
- r"""
606
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
607
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
608
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
609
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
610
- """
611
- self.prior_pipe.enable_model_cpu_offload()
612
- self.decoder_pipe.enable_model_cpu_offload()
613
-
614
- def progress_bar(self, iterable=None, total=None):
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
618
-
619
- def set_progress_bar_config(self, **kwargs):
620
- self.prior_pipe.set_progress_bar_config(**kwargs)
621
- self.decoder_pipe.set_progress_bar_config(**kwargs)
622
-
623
- @torch.no_grad()
624
- @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING)
625
- def __call__(
626
- self,
627
- prompt: Union[str, List[str]],
628
- image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
629
- mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
630
- negative_prompt: Optional[Union[str, List[str]]] = None,
631
- num_inference_steps: int = 100,
632
- guidance_scale: float = 4.0,
633
- num_images_per_prompt: int = 1,
634
- height: int = 512,
635
- width: int = 512,
636
- prior_guidance_scale: float = 4.0,
637
- prior_num_inference_steps: int = 25,
638
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
639
- latents: Optional[torch.FloatTensor] = None,
640
- output_type: Optional[str] = "pil",
641
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
642
- callback_steps: int = 1,
643
- return_dict: bool = True,
644
- ):
645
- """
646
- Function invoked when calling the pipeline for generation.
647
-
648
- Args:
649
- prompt (`str` or `List[str]`):
650
- The prompt or prompts to guide the image generation.
651
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
652
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
653
- process. Can also accept image latents as `image`; if latents are passed directly, they will not be
- encoded again.
655
- mask_image (`np.array`):
656
- Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
657
- black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
658
- channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
659
- so the expected shape would be `(B, H, W, 1)`.
660
- negative_prompt (`str` or `List[str]`, *optional*):
661
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
662
- if `guidance_scale` is less than `1`).
663
- num_images_per_prompt (`int`, *optional*, defaults to 1):
664
- The number of images to generate per prompt.
665
- guidance_scale (`float`, *optional*, defaults to 4.0):
666
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
667
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
668
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
669
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
670
- usually at the expense of lower image quality.
671
- num_inference_steps (`int`, *optional*, defaults to 100):
672
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
673
- expense of slower inference.
674
- height (`int`, *optional*, defaults to 512):
675
- The height in pixels of the generated image.
676
- width (`int`, *optional*, defaults to 512):
677
- The width in pixels of the generated image.
678
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
679
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
680
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
681
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
682
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
683
- usually at the expense of lower image quality.
684
- prior_num_inference_steps (`int`, *optional*, defaults to 25):
685
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
686
- expense of slower inference.
687
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
688
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
689
- to make generation deterministic.
690
- latents (`torch.FloatTensor`, *optional*):
691
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
692
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
693
- tensor will be generated by sampling using the supplied random `generator`.
694
- output_type (`str`, *optional*, defaults to `"pil"`):
695
- The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
696
- (`np.array`) or `"pt"` (`torch.Tensor`).
697
- callback (`Callable`, *optional*):
698
- A function that will be called every `callback_steps` steps during inference. The function is called with the
699
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
700
- callback_steps (`int`, *optional*, defaults to 1):
701
- The frequency at which the `callback` function is called. If not specified, the callback is called at
702
- every step.
703
- return_dict (`bool`, *optional*, defaults to `True`):
704
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
705
-
706
- Examples:
707
-
708
- Returns:
709
- [`~pipelines.ImagePipelineOutput`] or `tuple`
710
- """
711
- prior_outputs = self.prior_pipe(
712
- prompt=prompt,
713
- negative_prompt=negative_prompt,
714
- num_images_per_prompt=num_images_per_prompt,
715
- num_inference_steps=prior_num_inference_steps,
716
- generator=generator,
717
- latents=latents,
718
- guidance_scale=prior_guidance_scale,
719
- output_type="pt",
720
- return_dict=False,
721
- )
722
- image_embeds = prior_outputs[0]
723
- negative_image_embeds = prior_outputs[1]
724
-
725
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
726
- image = [image] if isinstance(image, PIL.Image.Image) else image
727
- mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
728
-
729
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
730
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
731
-
732
- if (
733
- isinstance(image, (list, tuple))
734
- and len(image) < image_embeds.shape[0]
735
- and image_embeds.shape[0] % len(image) == 0
736
- ):
737
- image = (image_embeds.shape[0] // len(image)) * image
738
-
739
- if (
740
- isinstance(mask_image, (list, tuple))
741
- and len(mask_image) < image_embeds.shape[0]
742
- and image_embeds.shape[0] % len(mask_image) == 0
743
- ):
744
- mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
745
-
746
- outputs = self.decoder_pipe(
747
- image=image,
748
- mask_image=mask_image,
749
- image_embeds=image_embeds,
750
- negative_image_embeds=negative_image_embeds,
751
- width=width,
752
- height=height,
753
- num_inference_steps=num_inference_steps,
754
- generator=generator,
755
- guidance_scale=guidance_scale,
756
- output_type=output_type,
757
- callback=callback,
758
- callback_steps=callback_steps,
759
- return_dict=return_dict,
760
- )
761
- return outputs
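
A minimal usage sketch for the combined inpaint pipeline whose `__call__` is documented above. The class name, checkpoint id, and image URLs below are assumptions not confirmed by this excerpt (the file internally builds a KandinskyV22PriorPipeline and a KandinskyV22InpaintPipeline); the keyword arguments mirror the documented signature.

import torch
from diffusers import KandinskyV22InpaintCombinedPipeline  # assumed class name
from diffusers.utils import load_image

# Load the combined pipeline (prior + decoder in one object); checkpoint id is assumed.
pipe = KandinskyV22InpaintCombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # offloads both the prior and decoder sub-pipelines

init_image = load_image("https://example.com/cat.png")       # hypothetical URL
mask_image = load_image("https://example.com/cat_mask.png")  # white = repaint, black = keep

result = pipe(
    prompt="a cat wearing a top hat",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=100,        # decoder denoising steps
    prior_num_inference_steps=25,   # prior denoising steps
    guidance_scale=4.0,
    prior_guidance_scale=4.0,
)
result.images[0].save("inpainted.png")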
 
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py DELETED
@@ -1,39 +0,0 @@
- _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
- img_norm_cfg = dict(
-     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='Resize',
-         img_scale=[(1333, 640), (1333, 800)],
-         multiscale_mode='value',
-         keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- # learning policy
- lr_config = dict(step=[16, 22])
- runner = dict(type='EpochBasedRunner', max_epochs=24)
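
A sketch of how a config like the one above is typically consumed, assuming an MMDetection 2.x environment; the checkpoint path and test image are placeholders.

from mmcv import Config
from mmdet.apis import init_detector, inference_detector

# Parse the config; everything not overridden here is inherited from the 1x baseline via _base_.
cfg = Config.fromfile(
    'configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py')
print(cfg.runner)  # {'type': 'EpochBasedRunner', 'max_epochs': 24}

# Build a detector from the config and run inference on one image.
model = init_detector(cfg, 'checkpoints/fcos_mstrain_2x.pth', device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # per-class lists of bounding boxes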
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/modulated_deform_conv.py DELETED
@@ -1,282 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import math
3
-
4
- import torch
5
- import torch.nn as nn
6
- from torch.autograd import Function
7
- from torch.autograd.function import once_differentiable
8
- from torch.nn.modules.utils import _pair, _single
9
-
10
- from annotator.uniformer.mmcv.utils import deprecated_api_warning
11
- from ..cnn import CONV_LAYERS
12
- from ..utils import ext_loader, print_log
13
-
14
- ext_module = ext_loader.load_ext(
15
- '_ext',
16
- ['modulated_deform_conv_forward', 'modulated_deform_conv_backward'])
17
-
18
-
19
- class ModulatedDeformConv2dFunction(Function):
20
-
21
- @staticmethod
22
- def symbolic(g, input, offset, mask, weight, bias, stride, padding,
23
- dilation, groups, deform_groups):
24
- input_tensors = [input, offset, mask, weight]
25
- if bias is not None:
26
- input_tensors.append(bias)
27
- return g.op(
28
- 'mmcv::MMCVModulatedDeformConv2d',
29
- *input_tensors,
30
- stride_i=stride,
31
- padding_i=padding,
32
- dilation_i=dilation,
33
- groups_i=groups,
34
- deform_groups_i=deform_groups)
35
-
36
- @staticmethod
37
- def forward(ctx,
38
- input,
39
- offset,
40
- mask,
41
- weight,
42
- bias=None,
43
- stride=1,
44
- padding=0,
45
- dilation=1,
46
- groups=1,
47
- deform_groups=1):
48
- if input is not None and input.dim() != 4:
49
- raise ValueError(
50
- f'Expected 4D tensor as input, got {input.dim()}D tensor \
51
- instead.')
52
- ctx.stride = _pair(stride)
53
- ctx.padding = _pair(padding)
54
- ctx.dilation = _pair(dilation)
55
- ctx.groups = groups
56
- ctx.deform_groups = deform_groups
57
- ctx.with_bias = bias is not None
58
- if not ctx.with_bias:
59
- bias = input.new_empty(0) # fake tensor
60
- # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;
61
- # amp won't cast the type of model (float32), but "offset" is cast
62
- # to float16 by nn.Conv2d automatically, leading to the type
63
- # mismatch with input (when it is float32) or weight.
64
- # The flag for whether to use fp16 or amp is the type of "offset",
65
- # we cast weight and input to temporarily support fp16 and amp
66
- # whatever the pytorch version is.
67
- input = input.type_as(offset)
68
- weight = weight.type_as(input)
69
- ctx.save_for_backward(input, offset, mask, weight, bias)
70
- output = input.new_empty(
71
- ModulatedDeformConv2dFunction._output_size(ctx, input, weight))
72
- ctx._bufs = [input.new_empty(0), input.new_empty(0)]
73
- ext_module.modulated_deform_conv_forward(
74
- input,
75
- weight,
76
- bias,
77
- ctx._bufs[0],
78
- offset,
79
- mask,
80
- output,
81
- ctx._bufs[1],
82
- kernel_h=weight.size(2),
83
- kernel_w=weight.size(3),
84
- stride_h=ctx.stride[0],
85
- stride_w=ctx.stride[1],
86
- pad_h=ctx.padding[0],
87
- pad_w=ctx.padding[1],
88
- dilation_h=ctx.dilation[0],
89
- dilation_w=ctx.dilation[1],
90
- group=ctx.groups,
91
- deformable_group=ctx.deform_groups,
92
- with_bias=ctx.with_bias)
93
- return output
94
-
95
- @staticmethod
96
- @once_differentiable
97
- def backward(ctx, grad_output):
98
- input, offset, mask, weight, bias = ctx.saved_tensors
99
- grad_input = torch.zeros_like(input)
100
- grad_offset = torch.zeros_like(offset)
101
- grad_mask = torch.zeros_like(mask)
102
- grad_weight = torch.zeros_like(weight)
103
- grad_bias = torch.zeros_like(bias)
104
- grad_output = grad_output.contiguous()
105
- ext_module.modulated_deform_conv_backward(
106
- input,
107
- weight,
108
- bias,
109
- ctx._bufs[0],
110
- offset,
111
- mask,
112
- ctx._bufs[1],
113
- grad_input,
114
- grad_weight,
115
- grad_bias,
116
- grad_offset,
117
- grad_mask,
118
- grad_output,
119
- kernel_h=weight.size(2),
120
- kernel_w=weight.size(3),
121
- stride_h=ctx.stride[0],
122
- stride_w=ctx.stride[1],
123
- pad_h=ctx.padding[0],
124
- pad_w=ctx.padding[1],
125
- dilation_h=ctx.dilation[0],
126
- dilation_w=ctx.dilation[1],
127
- group=ctx.groups,
128
- deformable_group=ctx.deform_groups,
129
- with_bias=ctx.with_bias)
130
- if not ctx.with_bias:
131
- grad_bias = None
132
-
133
- return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
134
- None, None, None, None, None)
135
-
136
- @staticmethod
137
- def _output_size(ctx, input, weight):
138
- channels = weight.size(0)
139
- output_size = (input.size(0), channels)
140
- for d in range(input.dim() - 2):
141
- in_size = input.size(d + 2)
142
- pad = ctx.padding[d]
143
- kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1
144
- stride_ = ctx.stride[d]
145
- output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
146
- if not all(map(lambda s: s > 0, output_size)):
147
- raise ValueError(
148
- 'convolution input is too small (output would be ' +
149
- 'x'.join(map(str, output_size)) + ')')
150
- return output_size
151
-
152
-
153
- modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply
154
-
155
-
156
- class ModulatedDeformConv2d(nn.Module):
157
-
158
- @deprecated_api_warning({'deformable_groups': 'deform_groups'},
159
- cls_name='ModulatedDeformConv2d')
160
- def __init__(self,
161
- in_channels,
162
- out_channels,
163
- kernel_size,
164
- stride=1,
165
- padding=0,
166
- dilation=1,
167
- groups=1,
168
- deform_groups=1,
169
- bias=True):
170
- super(ModulatedDeformConv2d, self).__init__()
171
- self.in_channels = in_channels
172
- self.out_channels = out_channels
173
- self.kernel_size = _pair(kernel_size)
174
- self.stride = _pair(stride)
175
- self.padding = _pair(padding)
176
- self.dilation = _pair(dilation)
177
- self.groups = groups
178
- self.deform_groups = deform_groups
179
- # enable compatibility with nn.Conv2d
180
- self.transposed = False
181
- self.output_padding = _single(0)
182
-
183
- self.weight = nn.Parameter(
184
- torch.Tensor(out_channels, in_channels // groups,
185
- *self.kernel_size))
186
- if bias:
187
- self.bias = nn.Parameter(torch.Tensor(out_channels))
188
- else:
189
- self.register_parameter('bias', None)
190
- self.init_weights()
191
-
192
- def init_weights(self):
193
- n = self.in_channels
194
- for k in self.kernel_size:
195
- n *= k
196
- stdv = 1. / math.sqrt(n)
197
- self.weight.data.uniform_(-stdv, stdv)
198
- if self.bias is not None:
199
- self.bias.data.zero_()
200
-
201
- def forward(self, x, offset, mask):
202
- return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
203
- self.stride, self.padding,
204
- self.dilation, self.groups,
205
- self.deform_groups)
206
-
207
-
208
- @CONV_LAYERS.register_module('DCNv2')
209
- class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
210
- """A ModulatedDeformable Conv Encapsulation that acts as normal Conv
211
- layers.
212
-
213
- Args:
214
- in_channels (int): Same as nn.Conv2d.
215
- out_channels (int): Same as nn.Conv2d.
216
- kernel_size (int or tuple[int]): Same as nn.Conv2d.
217
- stride (int): Same as nn.Conv2d, while tuple is not supported.
218
- padding (int): Same as nn.Conv2d, while tuple is not supported.
219
- dilation (int): Same as nn.Conv2d, while tuple is not supported.
220
- groups (int): Same as nn.Conv2d.
221
- bias (bool or str): If specified as `auto`, it will be decided by the
222
- norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
223
- False.
224
- """
225
-
226
- _version = 2
227
-
228
- def __init__(self, *args, **kwargs):
229
- super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
230
- self.conv_offset = nn.Conv2d(
231
- self.in_channels,
232
- self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1],
233
- kernel_size=self.kernel_size,
234
- stride=self.stride,
235
- padding=self.padding,
236
- dilation=self.dilation,
237
- bias=True)
238
- self.init_weights()
239
-
240
- def init_weights(self):
241
- super(ModulatedDeformConv2dPack, self).init_weights()
242
- if hasattr(self, 'conv_offset'):
243
- self.conv_offset.weight.data.zero_()
244
- self.conv_offset.bias.data.zero_()
245
-
246
- def forward(self, x):
247
- out = self.conv_offset(x)
248
- o1, o2, mask = torch.chunk(out, 3, dim=1)
249
- offset = torch.cat((o1, o2), dim=1)
250
- mask = torch.sigmoid(mask)
251
- return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
252
- self.stride, self.padding,
253
- self.dilation, self.groups,
254
- self.deform_groups)
255
-
256
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
257
- missing_keys, unexpected_keys, error_msgs):
258
- version = local_metadata.get('version', None)
259
-
260
- if version is None or version < 2:
261
- # the key is different in early versions
262
- # In version < 2, ModulatedDeformConvPack
263
- # loads previous benchmark models.
264
- if (prefix + 'conv_offset.weight' not in state_dict
265
- and prefix[:-1] + '_offset.weight' in state_dict):
266
- state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(
267
- prefix[:-1] + '_offset.weight')
268
- if (prefix + 'conv_offset.bias' not in state_dict
269
- and prefix[:-1] + '_offset.bias' in state_dict):
270
- state_dict[prefix +
271
- 'conv_offset.bias'] = state_dict.pop(prefix[:-1] +
272
- '_offset.bias')
273
-
274
- if version is not None and version > 1:
275
- print_log(
276
- f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to '
277
- 'version 2.',
278
- logger='root')
279
-
280
- super()._load_from_state_dict(state_dict, prefix, local_metadata,
281
- strict, missing_keys, unexpected_keys,
282
- error_msgs)
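
A small forward-pass sketch for the DCNv2 wrapper defined above. It assumes the compiled `_ext` extension loaded by this module is available (typically a CUDA build); shapes are illustrative.

import torch
from annotator.uniformer.mmcv.ops.modulated_deform_conv import ModulatedDeformConv2dPack

# Drop-in replacement for nn.Conv2d: offsets and modulation masks are predicted
# internally by self.conv_offset, so only the feature map is passed in.
conv = ModulatedDeformConv2dPack(16, 32, kernel_size=3, padding=1, deform_groups=1).cuda()
x = torch.randn(2, 16, 64, 64, device='cuda')
y = conv(x)
print(y.shape)  # torch.Size([2, 32, 64, 64])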
 
spaces/ArtyomKhyan/Detection/models/common.py DELETED
@@ -1,102 +0,0 @@
1
- # This file contains modules common to various models
2
-
3
- from utils.utils import *
4
-
5
-
6
- def autopad(k, p=None): # kernel, padding
7
- # Pad to 'same'
8
- if p is None:
9
- p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
10
- return p
11
-
12
-
13
- def DWConv(c1, c2, k=1, s=1, act=True):
14
- # Depthwise convolution
15
- return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
16
-
17
-
18
- class Conv(nn.Module):
19
- # Standard convolution
20
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
21
- super(Conv, self).__init__()
22
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
23
- self.bn = nn.BatchNorm2d(c2)
24
- self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
25
-
26
- def forward(self, x):
27
- return self.act(self.bn(self.conv(x)))
28
-
29
- def fuseforward(self, x):
30
- return self.act(self.conv(x))
31
-
32
-
33
- class Bottleneck(nn.Module):
34
- # Standard bottleneck
35
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
36
- super(Bottleneck, self).__init__()
37
- c_ = int(c2 * e) # hidden channels
38
- self.cv1 = Conv(c1, c_, 1, 1)
39
- self.cv2 = Conv(c_, c2, 3, 1, g=g)
40
- self.add = shortcut and c1 == c2
41
-
42
- def forward(self, x):
43
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
44
-
45
-
46
- class BottleneckCSP(nn.Module):
47
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
48
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
49
- super(BottleneckCSP, self).__init__()
50
- c_ = int(c2 * e) # hidden channels
51
- self.cv1 = Conv(c1, c_, 1, 1)
52
- self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
53
- self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
54
- self.cv4 = Conv(2 * c_, c2, 1, 1)
55
- self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
56
- self.act = nn.LeakyReLU(0.1, inplace=True)
57
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
58
-
59
- def forward(self, x):
60
- y1 = self.cv3(self.m(self.cv1(x)))
61
- y2 = self.cv2(x)
62
- return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
63
-
64
-
65
- class SPP(nn.Module):
66
- # Spatial pyramid pooling layer used in YOLOv3-SPP
67
- def __init__(self, c1, c2, k=(5, 9, 13)):
68
- super(SPP, self).__init__()
69
- c_ = c1 // 2 # hidden channels
70
- self.cv1 = Conv(c1, c_, 1, 1)
71
- self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
72
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
73
-
74
- def forward(self, x):
75
- x = self.cv1(x)
76
- return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
77
-
78
-
79
- class Flatten(nn.Module):
80
- # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
81
- def forward(self, x):
82
- return x.view(x.size(0), -1)
83
-
84
-
85
- class Focus(nn.Module):
86
- # Focus wh information into c-space
87
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
88
- super(Focus, self).__init__()
89
- self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
90
-
91
- def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
92
- return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
93
-
94
-
95
- class Concat(nn.Module):
96
- # Concatenate a list of tensors along dimension
97
- def __init__(self, dimension=1):
98
- super(Concat, self).__init__()
99
- self.d = dimension
100
-
101
- def forward(self, x):
102
- return torch.cat(x, self.d)
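
A sketch wiring a few of the blocks above into a tiny stem, assuming the file is importable as models.common within the original repository layout (it relies on utils.utils for torch, nn, and math).

import torch
from models.common import Focus, Conv, BottleneckCSP, SPP

x = torch.randn(1, 3, 256, 256)
stem = Focus(3, 32, k=3)        # space-to-depth + conv: (1,3,256,256) -> (1,32,128,128)
down = Conv(32, 64, k=3, s=2)   # strided conv: -> (1,64,64,64)
body = BottleneckCSP(64, 64, n=2)
neck = SPP(64, 128)             # spatial pyramid pooling: -> (1,128,64,64)
out = neck(body(down(stem(x))))
print(out.shape)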
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/packaging.py DELETED
@@ -1,57 +0,0 @@
- import functools
- import logging
- import re
- from typing import NewType, Optional, Tuple, cast
-
- from pip._vendor.packaging import specifiers, version
- from pip._vendor.packaging.requirements import Requirement
-
- NormalizedExtra = NewType("NormalizedExtra", str)
-
- logger = logging.getLogger(__name__)
-
-
- def check_requires_python(
-     requires_python: Optional[str], version_info: Tuple[int, ...]
- ) -> bool:
-     """
-     Check if the given Python version matches a "Requires-Python" specifier.
-
-     :param version_info: A 3-tuple of ints representing a Python
-         major-minor-micro version to check (e.g. `sys.version_info[:3]`).
-
-     :return: `True` if the given Python version satisfies the requirement.
-         Otherwise, return `False`.
-
-     :raises InvalidSpecifier: If `requires_python` has an invalid format.
-     """
-     if requires_python is None:
-         # The package provides no information
-         return True
-     requires_python_specifier = specifiers.SpecifierSet(requires_python)
-
-     python_version = version.parse(".".join(map(str, version_info)))
-     return python_version in requires_python_specifier
-
-
- @functools.lru_cache(maxsize=512)
- def get_requirement(req_string: str) -> Requirement:
-     """Construct a packaging.Requirement object with caching"""
-     # Parsing requirement strings is expensive, and is also expected to happen
-     # with a low diversity of different arguments (at least relative to the number
-     # constructed). This method adds a cache to requirement object creation to
-     # minimize repeated parsing of the same string to construct equivalent
-     # Requirement objects.
-     return Requirement(req_string)
-
-
- def safe_extra(extra: str) -> NormalizedExtra:
-     """Convert an arbitrary string to a standard 'extra' name
-
-     Any runs of non-alphanumeric characters are replaced with a single '_',
-     and the result is always lowercased.
-
-     This function is duplicated from ``pkg_resources``. Note that this is not
-     the same as either ``canonicalize_name`` or ``_egg_link_name``.
-     """
-     return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower())
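
How the three helpers above are typically exercised. pip internals are not a public API, so this is purely illustrative and the requirement strings are made up.

import sys
from pip._internal.utils.packaging import (
    check_requires_python, get_requirement, safe_extra)

# Does the running interpreter satisfy a distribution's Requires-Python?
print(check_requires_python(">=3.8", sys.version_info[:3]))  # True on Python 3.8+
print(check_requires_python(None, sys.version_info[:3]))     # True: no constraint given

# Cached requirement parsing.
req = get_requirement("requests[security]>=2.31; python_version >= '3.8'")
print(req.name, sorted(req.extras))  # requests ['security']

# Extra-name normalization.
print(safe_extra("SeCurity extra!"))  # security_extra_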
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/mbcssm.py DELETED
@@ -1,661 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is mozilla.org code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- from .codingstatemachinedict import CodingStateMachineDict
29
- from .enums import MachineState
30
-
31
- # BIG5
32
-
33
- # fmt: off
34
- BIG5_CLS = (
35
- 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as legal value
36
- 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
37
- 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
38
- 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
39
- 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
40
- 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
41
- 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
42
- 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
43
- 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
44
- 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
45
- 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
46
- 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
47
- 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
48
- 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
49
- 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
50
- 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f
51
- 4, 4, 4, 4, 4, 4, 4, 4, # 80 - 87
52
- 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f
53
- 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97
54
- 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f
55
- 4, 3, 3, 3, 3, 3, 3, 3, # a0 - a7
56
- 3, 3, 3, 3, 3, 3, 3, 3, # a8 - af
57
- 3, 3, 3, 3, 3, 3, 3, 3, # b0 - b7
58
- 3, 3, 3, 3, 3, 3, 3, 3, # b8 - bf
59
- 3, 3, 3, 3, 3, 3, 3, 3, # c0 - c7
60
- 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf
61
- 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7
62
- 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df
63
- 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
64
- 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef
65
- 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
66
- 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff
67
- )
68
-
69
- BIG5_ST = (
70
- MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
71
- MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
72
- MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
73
- )
74
- # fmt: on
75
-
76
- BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
77
-
78
- BIG5_SM_MODEL: CodingStateMachineDict = {
79
- "class_table": BIG5_CLS,
80
- "class_factor": 5,
81
- "state_table": BIG5_ST,
82
- "char_len_table": BIG5_CHAR_LEN_TABLE,
83
- "name": "Big5",
84
- }
85
-
86
- # CP949
87
- # fmt: off
88
- CP949_CLS = (
89
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, # 00 - 0f
90
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, # 10 - 1f
91
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 2f
92
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 3f
93
- 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 4f
94
- 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 50 - 5f
95
- 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, # 60 - 6f
96
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 70 - 7f
97
- 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 80 - 8f
98
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 9f
99
- 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, # a0 - af
100
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, # b0 - bf
101
- 7, 7, 7, 7, 7, 7, 9, 2, 2, 3, 2, 2, 2, 2, 2, 2, # c0 - cf
102
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # d0 - df
103
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # e0 - ef
104
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, # f0 - ff
105
- )
106
-
107
- CP949_ST = (
108
- #cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
109
- MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
110
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
111
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
112
- MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
113
- MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
114
- MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
115
- MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
116
- )
117
- # fmt: on
118
-
119
- CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
120
-
121
- CP949_SM_MODEL: CodingStateMachineDict = {
122
- "class_table": CP949_CLS,
123
- "class_factor": 10,
124
- "state_table": CP949_ST,
125
- "char_len_table": CP949_CHAR_LEN_TABLE,
126
- "name": "CP949",
127
- }
128
-
129
- # EUC-JP
130
- # fmt: off
131
- EUCJP_CLS = (
132
- 4, 4, 4, 4, 4, 4, 4, 4, # 00 - 07
133
- 4, 4, 4, 4, 4, 4, 5, 5, # 08 - 0f
134
- 4, 4, 4, 4, 4, 4, 4, 4, # 10 - 17
135
- 4, 4, 4, 5, 4, 4, 4, 4, # 18 - 1f
136
- 4, 4, 4, 4, 4, 4, 4, 4, # 20 - 27
137
- 4, 4, 4, 4, 4, 4, 4, 4, # 28 - 2f
138
- 4, 4, 4, 4, 4, 4, 4, 4, # 30 - 37
139
- 4, 4, 4, 4, 4, 4, 4, 4, # 38 - 3f
140
- 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 47
141
- 4, 4, 4, 4, 4, 4, 4, 4, # 48 - 4f
142
- 4, 4, 4, 4, 4, 4, 4, 4, # 50 - 57
143
- 4, 4, 4, 4, 4, 4, 4, 4, # 58 - 5f
144
- 4, 4, 4, 4, 4, 4, 4, 4, # 60 - 67
145
- 4, 4, 4, 4, 4, 4, 4, 4, # 68 - 6f
146
- 4, 4, 4, 4, 4, 4, 4, 4, # 70 - 77
147
- 4, 4, 4, 4, 4, 4, 4, 4, # 78 - 7f
148
- 5, 5, 5, 5, 5, 5, 5, 5, # 80 - 87
149
- 5, 5, 5, 5, 5, 5, 1, 3, # 88 - 8f
150
- 5, 5, 5, 5, 5, 5, 5, 5, # 90 - 97
151
- 5, 5, 5, 5, 5, 5, 5, 5, # 98 - 9f
152
- 5, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
153
- 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
154
- 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
155
- 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
156
- 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
157
- 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
158
- 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
159
- 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
160
- 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
161
- 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
162
- 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
163
- 0, 0, 0, 0, 0, 0, 0, 5 # f8 - ff
164
- )
165
-
166
- EUCJP_ST = (
167
- 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
168
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
169
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
170
- MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
171
- 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
172
- )
173
- # fmt: on
174
-
175
- EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
176
-
177
- EUCJP_SM_MODEL: CodingStateMachineDict = {
178
- "class_table": EUCJP_CLS,
179
- "class_factor": 6,
180
- "state_table": EUCJP_ST,
181
- "char_len_table": EUCJP_CHAR_LEN_TABLE,
182
- "name": "EUC-JP",
183
- }
184
-
185
- # EUC-KR
186
- # fmt: off
187
- EUCKR_CLS = (
188
- 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
189
- 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
190
- 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
191
- 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
192
- 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
193
- 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
194
- 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
195
- 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
196
- 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47
197
- 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f
198
- 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57
199
- 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f
200
- 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67
201
- 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f
202
- 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77
203
- 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f
204
- 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
205
- 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
206
- 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
207
- 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
208
- 0, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
209
- 2, 2, 2, 2, 2, 3, 3, 3, # a8 - af
210
- 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
211
- 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
212
- 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
213
- 2, 3, 2, 2, 2, 2, 2, 2, # c8 - cf
214
- 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
215
- 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
216
- 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7
217
- 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef
218
- 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7
219
- 2, 2, 2, 2, 2, 2, 2, 0 # f8 - ff
220
- )
221
-
222
- EUCKR_ST = (
223
- MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
224
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
225
- )
226
- # fmt: on
227
-
228
- EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
229
-
230
- EUCKR_SM_MODEL: CodingStateMachineDict = {
231
- "class_table": EUCKR_CLS,
232
- "class_factor": 4,
233
- "state_table": EUCKR_ST,
234
- "char_len_table": EUCKR_CHAR_LEN_TABLE,
235
- "name": "EUC-KR",
236
- }
237
-
238
- # JOHAB
239
- # fmt: off
240
- JOHAB_CLS = (
241
- 4,4,4,4,4,4,4,4, # 00 - 07
242
- 4,4,4,4,4,4,0,0, # 08 - 0f
243
- 4,4,4,4,4,4,4,4, # 10 - 17
244
- 4,4,4,0,4,4,4,4, # 18 - 1f
245
- 4,4,4,4,4,4,4,4, # 20 - 27
246
- 4,4,4,4,4,4,4,4, # 28 - 2f
247
- 4,3,3,3,3,3,3,3, # 30 - 37
248
- 3,3,3,3,3,3,3,3, # 38 - 3f
249
- 3,1,1,1,1,1,1,1, # 40 - 47
250
- 1,1,1,1,1,1,1,1, # 48 - 4f
251
- 1,1,1,1,1,1,1,1, # 50 - 57
252
- 1,1,1,1,1,1,1,1, # 58 - 5f
253
- 1,1,1,1,1,1,1,1, # 60 - 67
254
- 1,1,1,1,1,1,1,1, # 68 - 6f
255
- 1,1,1,1,1,1,1,1, # 70 - 77
256
- 1,1,1,1,1,1,1,2, # 78 - 7f
257
- 6,6,6,6,8,8,8,8, # 80 - 87
258
- 8,8,8,8,8,8,8,8, # 88 - 8f
259
- 8,7,7,7,7,7,7,7, # 90 - 97
260
- 7,7,7,7,7,7,7,7, # 98 - 9f
261
- 7,7,7,7,7,7,7,7, # a0 - a7
262
- 7,7,7,7,7,7,7,7, # a8 - af
263
- 7,7,7,7,7,7,7,7, # b0 - b7
264
- 7,7,7,7,7,7,7,7, # b8 - bf
265
- 7,7,7,7,7,7,7,7, # c0 - c7
266
- 7,7,7,7,7,7,7,7, # c8 - cf
267
- 7,7,7,7,5,5,5,5, # d0 - d7
268
- 5,9,9,9,9,9,9,5, # d8 - df
269
- 9,9,9,9,9,9,9,9, # e0 - e7
270
- 9,9,9,9,9,9,9,9, # e8 - ef
271
- 9,9,9,9,9,9,9,9, # f0 - f7
272
- 9,9,5,5,5,5,5,0 # f8 - ff
273
- )
274
-
275
- JOHAB_ST = (
276
- # cls = 0 1 2 3 4 5 6 7 8 9
277
- MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 , # MachineState.START
278
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
279
- MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR , # MachineState.ERROR
280
- MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START , # 3
281
- MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START , # 4
282
- )
283
- # fmt: on
284
-
285
- JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2)
286
-
287
- JOHAB_SM_MODEL: CodingStateMachineDict = {
288
- "class_table": JOHAB_CLS,
289
- "class_factor": 10,
290
- "state_table": JOHAB_ST,
291
- "char_len_table": JOHAB_CHAR_LEN_TABLE,
292
- "name": "Johab",
293
- }
294
-
295
- # EUC-TW
296
- # fmt: off
297
- EUCTW_CLS = (
298
- 2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07
299
- 2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f
300
- 2, 2, 2, 2, 2, 2, 2, 2, # 10 - 17
301
- 2, 2, 2, 0, 2, 2, 2, 2, # 18 - 1f
302
- 2, 2, 2, 2, 2, 2, 2, 2, # 20 - 27
303
- 2, 2, 2, 2, 2, 2, 2, 2, # 28 - 2f
304
- 2, 2, 2, 2, 2, 2, 2, 2, # 30 - 37
305
- 2, 2, 2, 2, 2, 2, 2, 2, # 38 - 3f
306
- 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
307
- 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
308
- 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
309
- 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
310
- 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
311
- 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
312
- 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
313
- 2, 2, 2, 2, 2, 2, 2, 2, # 78 - 7f
314
- 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
315
- 0, 0, 0, 0, 0, 0, 6, 0, # 88 - 8f
316
- 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
317
- 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
318
- 0, 3, 4, 4, 4, 4, 4, 4, # a0 - a7
319
- 5, 5, 1, 1, 1, 1, 1, 1, # a8 - af
320
- 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7
321
- 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf
322
- 1, 1, 3, 1, 3, 3, 3, 3, # c0 - c7
323
- 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf
324
- 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7
325
- 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df
326
- 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
327
- 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef
328
- 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
329
- 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff
330
- )
331
-
332
- EUCTW_ST = (
333
- MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
334
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
335
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
336
- MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
337
- 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
338
- MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
339
- )
340
- # fmt: on
341
-
342
- EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
343
-
344
- EUCTW_SM_MODEL: CodingStateMachineDict = {
345
- "class_table": EUCTW_CLS,
346
- "class_factor": 7,
347
- "state_table": EUCTW_ST,
348
- "char_len_table": EUCTW_CHAR_LEN_TABLE,
349
- "name": "x-euc-tw",
350
- }
351
-
352
- # GB2312
353
- # fmt: off
354
- GB2312_CLS = (
355
- 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
356
- 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
357
- 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
358
- 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
359
- 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
360
- 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
361
- 3, 3, 3, 3, 3, 3, 3, 3, # 30 - 37
362
- 3, 3, 1, 1, 1, 1, 1, 1, # 38 - 3f
363
- 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
364
- 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
365
- 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
366
- 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
367
- 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
368
- 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
369
- 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
370
- 2, 2, 2, 2, 2, 2, 2, 4, # 78 - 7f
371
- 5, 6, 6, 6, 6, 6, 6, 6, # 80 - 87
372
- 6, 6, 6, 6, 6, 6, 6, 6, # 88 - 8f
373
- 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 97
374
- 6, 6, 6, 6, 6, 6, 6, 6, # 98 - 9f
375
- 6, 6, 6, 6, 6, 6, 6, 6, # a0 - a7
376
- 6, 6, 6, 6, 6, 6, 6, 6, # a8 - af
377
- 6, 6, 6, 6, 6, 6, 6, 6, # b0 - b7
378
- 6, 6, 6, 6, 6, 6, 6, 6, # b8 - bf
379
- 6, 6, 6, 6, 6, 6, 6, 6, # c0 - c7
380
- 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf
381
- 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7
382
- 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df
383
- 6, 6, 6, 6, 6, 6, 6, 6, # e0 - e7
384
- 6, 6, 6, 6, 6, 6, 6, 6, # e8 - ef
385
- 6, 6, 6, 6, 6, 6, 6, 6, # f0 - f7
386
- 6, 6, 6, 6, 6, 6, 6, 0 # f8 - ff
387
- )
388
-
389
- GB2312_ST = (
390
- MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
391
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
392
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
393
- 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
394
- MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
395
- MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
396
- )
397
- # fmt: on
398
-
399
- # To be accurate, the length of class 6 can be either 2 or 4.
400
- # But it is not necessary to discriminate between the two since
401
- # it is used for frequency analysis only, and we are validating
402
- # each code range there as well. So it is safe to set it to be
403
- # 2 here.
404
- GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
405
-
406
- GB2312_SM_MODEL: CodingStateMachineDict = {
407
- "class_table": GB2312_CLS,
408
- "class_factor": 7,
409
- "state_table": GB2312_ST,
410
- "char_len_table": GB2312_CHAR_LEN_TABLE,
411
- "name": "GB2312",
412
- }
413
-
414
- # Shift_JIS
415
- # fmt: off
416
- SJIS_CLS = (
417
- 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
418
- 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
419
- 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
420
- 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
421
- 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
422
- 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
423
- 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
424
- 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
425
- 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
426
- 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
427
- 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
428
- 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
429
- 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
430
- 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
431
- 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
432
- 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f
433
- 3, 3, 3, 3, 3, 2, 2, 3, # 80 - 87
434
- 3, 3, 3, 3, 3, 3, 3, 3, # 88 - 8f
435
- 3, 3, 3, 3, 3, 3, 3, 3, # 90 - 97
436
- 3, 3, 3, 3, 3, 3, 3, 3, # 98 - 9f
437
- #0xa0 is illegal in sjis encoding, but some pages do
- #contain such bytes. We need to be more error-forgiving.
439
- 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
440
- 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
441
- 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
442
- 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
443
- 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
444
- 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
445
- 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
446
- 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
447
- 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
448
- 3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef
449
- 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
450
- 3, 3, 3, 3, 3, 0, 0, 0, # f8 - ff
451
- )
452
-
453
- SJIS_ST = (
454
- MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
455
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
456
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
457
- )
458
- # fmt: on
459
-
460
- SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
461
-
462
- SJIS_SM_MODEL: CodingStateMachineDict = {
463
- "class_table": SJIS_CLS,
464
- "class_factor": 6,
465
- "state_table": SJIS_ST,
466
- "char_len_table": SJIS_CHAR_LEN_TABLE,
467
- "name": "Shift_JIS",
468
- }
469
-
470
- # UCS2-BE
471
- # fmt: off
472
- UCS2BE_CLS = (
473
- 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
474
- 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
475
- 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
476
- 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f
477
- 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
478
- 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f
479
- 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
480
- 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
481
- 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47
482
- 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
483
- 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
484
- 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
485
- 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
486
- 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
487
- 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
488
- 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
489
- 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
490
- 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
491
- 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
492
- 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
493
- 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7
494
- 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af
495
- 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7
496
- 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf
497
- 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7
498
- 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf
499
- 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7
500
- 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df
501
- 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
502
- 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
503
- 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
504
- 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff
505
- )
506
-
507
- UCS2BE_ST = (
508
- 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
509
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
510
- MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
511
- 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
512
- 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
513
- 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
514
- 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
515
- )
516
- # fmt: on
517
-
518
- UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
519
-
520
- UCS2BE_SM_MODEL: CodingStateMachineDict = {
521
- "class_table": UCS2BE_CLS,
522
- "class_factor": 6,
523
- "state_table": UCS2BE_ST,
524
- "char_len_table": UCS2BE_CHAR_LEN_TABLE,
525
- "name": "UTF-16BE",
526
- }
527
-
528
- # UCS2-LE
529
- # fmt: off
530
- UCS2LE_CLS = (
531
- 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
532
- 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
533
- 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
534
- 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f
535
- 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
536
- 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f
537
- 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
538
- 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
539
- 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47
540
- 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
541
- 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
542
- 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
543
- 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
544
- 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
545
- 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
546
- 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
547
- 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
548
- 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
549
- 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
550
- 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
551
- 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7
552
- 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af
553
- 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7
554
- 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf
555
- 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7
556
- 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf
557
- 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7
558
- 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df
559
- 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
560
- 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
561
- 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
562
- 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff
563
- )
564
-
565
- UCS2LE_ST = (
566
- 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
567
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
568
- MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
569
- 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
570
- 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
571
- 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
572
- 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
573
- )
574
- # fmt: on
575
-
576
- UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
577
-
578
- UCS2LE_SM_MODEL: CodingStateMachineDict = {
579
- "class_table": UCS2LE_CLS,
580
- "class_factor": 6,
581
- "state_table": UCS2LE_ST,
582
- "char_len_table": UCS2LE_CHAR_LEN_TABLE,
583
- "name": "UTF-16LE",
584
- }
585
-
586
- # UTF-8
587
- # fmt: off
588
- UTF8_CLS = (
589
- 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value
590
- 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
591
- 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
592
- 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
593
- 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
594
- 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
595
- 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
596
- 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
597
- 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47
598
- 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f
599
- 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57
600
- 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f
601
- 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67
602
- 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f
603
- 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77
604
- 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f
605
- 2, 2, 2, 2, 3, 3, 3, 3, # 80 - 87
606
- 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f
607
- 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97
608
- 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f
609
- 5, 5, 5, 5, 5, 5, 5, 5, # a0 - a7
610
- 5, 5, 5, 5, 5, 5, 5, 5, # a8 - af
611
- 5, 5, 5, 5, 5, 5, 5, 5, # b0 - b7
- 5, 5, 5, 5, 5, 5, 5, 5, # b8 - bf
- 0, 0, 6, 6, 6, 6, 6, 6, # c0 - c7
- 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf
- 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7
- 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df
- 7, 8, 8, 8, 8, 8, 8, 8, # e0 - e7
- 8, 8, 8, 8, 8, 9, 8, 8, # e8 - ef
- 10, 11, 11, 11, 11, 11, 11, 11, # f0 - f7
- 12, 13, 13, 13, 14, 15, 0, 0 # f8 - ff
- )
-
- UTF8_ST = (
- MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
- 9, 11, 8, 7, 6, 5, 4, 3,#08-0f
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
- MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
- MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
- MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
- MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
- MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
- MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
- MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
- MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
- )
- # fmt: on
-
- UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
-
- UTF8_SM_MODEL: CodingStateMachineDict = {
- "class_table": UTF8_CLS,
- "class_factor": 16,
- "state_table": UTF8_ST,
- "char_len_table": UTF8_CHAR_LEN_TABLE,
- "name": "UTF-8",
- }
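
For reference, the UTF8_SM_MODEL dict above is the table bundle that chardet feeds to its byte-level CodingStateMachine. A minimal usage sketch, assuming a regular standalone chardet install; the imports and the next_state() call mirror chardet's public layout and are not part of this diff:

    # Rough check of whether a byte string is structurally valid UTF-8,
    # driven by the state-machine model defined in the deleted file above.
    from chardet.codingstatemachine import CodingStateMachine
    from chardet.enums import MachineState
    from chardet.mbcssm import UTF8_SM_MODEL

    def looks_like_utf8(data: bytes) -> bool:
        sm = CodingStateMachine(UTF8_SM_MODEL)
        for byte in data:
            if sm.next_state(byte) == MachineState.ERROR:
                return False
        return True

    print(looks_like_utf8("héllo".encode("utf-8")))  # expected: True
    print(looks_like_utf8(b"\xc3\x28"))              # broken 2-byte sequence, expected: False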
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/other.py DELETED
@@ -1,161 +0,0 @@
- """
- pygments.formatters.other
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Other formatters: NullFormatter, RawTokenFormatter.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
- """
-
- from pip._vendor.pygments.formatter import Formatter
- from pip._vendor.pygments.util import get_choice_opt
- from pip._vendor.pygments.token import Token
- from pip._vendor.pygments.console import colorize
-
- __all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
-
-
- class NullFormatter(Formatter):
- """
- Output the text unchanged without any formatting.
- """
- name = 'Text only'
- aliases = ['text', 'null']
- filenames = ['*.txt']
-
- def format(self, tokensource, outfile):
- enc = self.encoding
- for ttype, value in tokensource:
- if enc:
- outfile.write(value.encode(enc))
- else:
- outfile.write(value)
-
-
- class RawTokenFormatter(Formatter):
- r"""
- Format tokens as a raw representation for storing token streams.
-
- The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
- be converted to a token stream with the `RawTokenLexer`, described in the
- :doc:`lexer list <lexers>`.
-
- Only two options are accepted:
-
- `compress`
- If set to ``'gz'`` or ``'bz2'``, compress the output with the given
- compression algorithm after encoding (default: ``''``).
- `error_color`
- If set to a color name, highlight error tokens using that color. If
- set but with no value, defaults to ``'red'``.
-
- .. versionadded:: 0.11
-
- """
- name = 'Raw tokens'
- aliases = ['raw', 'tokens']
- filenames = ['*.raw']
-
- unicodeoutput = False
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- # We ignore self.encoding if it is set, since it gets set for lexer
- # and formatter if given with -Oencoding on the command line.
- # The RawTokenFormatter outputs only ASCII. Override here.
- self.encoding = 'ascii' # let pygments.format() do the right thing
- self.compress = get_choice_opt(options, 'compress',
- ['', 'none', 'gz', 'bz2'], '')
- self.error_color = options.get('error_color', None)
- if self.error_color is True:
- self.error_color = 'red'
- if self.error_color is not None:
- try:
- colorize(self.error_color, '')
- except KeyError:
- raise ValueError("Invalid color %r specified" %
- self.error_color)
-
- def format(self, tokensource, outfile):
- try:
- outfile.write(b'')
- except TypeError:
- raise TypeError('The raw tokens formatter needs a binary '
- 'output file')
- if self.compress == 'gz':
- import gzip
- outfile = gzip.GzipFile('', 'wb', 9, outfile)
-
- write = outfile.write
- flush = outfile.close
- elif self.compress == 'bz2':
- import bz2
- compressor = bz2.BZ2Compressor(9)
-
- def write(text):
- outfile.write(compressor.compress(text))
-
- def flush():
- outfile.write(compressor.flush())
- outfile.flush()
- else:
- write = outfile.write
- flush = outfile.flush
-
- if self.error_color:
- for ttype, value in tokensource:
- line = b"%r\t%r\n" % (ttype, value)
- if ttype is Token.Error:
- write(colorize(self.error_color, line))
- else:
- write(line)
- else:
- for ttype, value in tokensource:
- write(b"%r\t%r\n" % (ttype, value))
- flush()
-
-
- TESTCASE_BEFORE = '''\
- def testNeedsName(lexer):
- fragment = %r
- tokens = [
- '''
- TESTCASE_AFTER = '''\
- ]
- assert list(lexer.get_tokens(fragment)) == tokens
- '''
-
-
- class TestcaseFormatter(Formatter):
- """
- Format tokens as appropriate for a new testcase.
-
- .. versionadded:: 2.0
- """
- name = 'Testcase'
- aliases = ['testcase']
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- if self.encoding is not None and self.encoding != 'utf-8':
- raise ValueError("Only None and utf-8 are allowed encodings.")
-
- def format(self, tokensource, outfile):
- indentation = ' ' * 12
- rawbuf = []
- outbuf = []
- for ttype, value in tokensource:
- rawbuf.append(value)
- outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
-
- before = TESTCASE_BEFORE % (''.join(rawbuf),)
- during = ''.join(outbuf)
- after = TESTCASE_AFTER
- if self.encoding is None:
- outfile.write(before + during + after)
- else:
- outfile.write(before.encode('utf-8'))
- outfile.write(during.encode('utf-8'))
- outfile.write(after.encode('utf-8'))
- outfile.flush()
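
As a usage note, the deleted RawTokenFormatter is normally driven through pygments.highlight(); a minimal sketch assuming a standalone pygments install, with an illustrative lexer choice and output file name:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import RawTokenFormatter

    source = "print('hi')\n"
    # RawTokenFormatter forces encoding='ascii', so highlight() returns bytes here,
    # one "tokentype<TAB>repr(value)" line per token.
    raw = highlight(source, PythonLexer(), RawTokenFormatter())
    with open("tokens.raw", "wb") as fh:  # binary file, as format() requires
        fh.write(raw)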
 
spaces/Audio-AGI/AudioSep/optimizers/lr_schedulers.py DELETED
@@ -1,101 +0,0 @@
- from functools import partial
- from typing import Callable
-
-
- def linear_warm_up(
- step: int,
- warm_up_steps: int,
- reduce_lr_steps: int
- ) -> float:
- r"""Get linear warm up scheduler for LambdaLR.
-
- Args:
- step (int): global step
- warm_up_steps (int): steps for warm up
- reduce_lr_steps (int): reduce learning rate by a factor of 0.9 #reduce_lr_steps step
-
- .. code-block: python
- >>> lr_lambda = partial(linear_warm_up, warm_up_steps=1000, reduce_lr_steps=10000)
- >>> from torch.optim.lr_scheduler import LambdaLR
- >>> LambdaLR(optimizer, lr_lambda)
-
- Returns:
- lr_scale (float): learning rate scaler
- """
-
- if step <= warm_up_steps:
- lr_scale = step / warm_up_steps
- else:
- lr_scale = 0.9 ** (step // reduce_lr_steps)
-
- return lr_scale
-
-
- def constant_warm_up(
- step: int,
- warm_up_steps: int,
- reduce_lr_steps: int
- ) -> float:
- r"""Get constant warm up scheduler for LambdaLR.
-
- Args:
- step (int): global step
- warm_up_steps (int): steps for warm up
- reduce_lr_steps (int): reduce learning rate by a factor of 0.9 #reduce_lr_steps step
-
- .. code-block: python
- >>> lr_lambda = partial(constant_warm_up, warm_up_steps=1000, reduce_lr_steps=10000)
- >>> from torch.optim.lr_scheduler import LambdaLR
- >>> LambdaLR(optimizer, lr_lambda)
-
- Returns:
- lr_scale (float): learning rate scaler
- """
-
- if 0 <= step < warm_up_steps:
- lr_scale = 0.001
-
- elif warm_up_steps <= step < 2 * warm_up_steps:
- lr_scale = 0.01
-
- elif 2 * warm_up_steps <= step < 3 * warm_up_steps:
- lr_scale = 0.1
-
- else:
- lr_scale = 1
-
- return lr_scale
-
-
- def get_lr_lambda(
- lr_lambda_type: str,
- **kwargs
- ) -> Callable:
- r"""Get learning scheduler.
-
- Args:
- lr_lambda_type (str), e.g., "constant_warm_up" | "linear_warm_up"
-
- Returns:
- lr_lambda_func (Callable)
- """
- if lr_lambda_type == "constant_warm_up":
-
- lr_lambda_func = partial(
- constant_warm_up,
- warm_up_steps=kwargs["warm_up_steps"],
- reduce_lr_steps=kwargs["reduce_lr_steps"],
- )
-
- elif lr_lambda_type == "linear_warm_up":
-
- lr_lambda_func = partial(
- linear_warm_up,
- warm_up_steps=kwargs["warm_up_steps"],
- reduce_lr_steps=kwargs["reduce_lr_steps"],
- )
-
- else:
- raise NotImplementedError
-
- return lr_lambda_func
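
The docstrings above already show the intended wiring; a slightly fuller sketch, assuming the deleted module is importable and using a placeholder model and step counts:

    import torch
    from torch.optim.lr_scheduler import LambdaLR

    model = torch.nn.Linear(10, 1)                       # placeholder model
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # get_lr_lambda() is defined in the deleted file above.
    lr_lambda = get_lr_lambda(
        "linear_warm_up", warm_up_steps=1000, reduce_lr_steps=10000
    )
    scheduler = LambdaLR(optimizer, lr_lambda)

    for step in range(3000):
        # ...forward/backward pass omitted...
        optimizer.step()
        scheduler.step()   # scales the base lr by lr_lambda(step)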
 
spaces/Awesimo/jojogan/e4e/models/latent_codes_pool.py DELETED
@@ -1,55 +0,0 @@
- import random
- import torch
-
-
- class LatentCodesPool:
- """This class implements latent codes buffer that stores previously generated w latent codes.
- This buffer enables us to update discriminators using a history of generated w's
- rather than the ones produced by the latest encoder.
- """
-
- def __init__(self, pool_size):
- """Initialize the ImagePool class
- Parameters:
- pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
- """
- self.pool_size = pool_size
- if self.pool_size > 0: # create an empty pool
- self.num_ws = 0
- self.ws = []
-
- def query(self, ws):
- """Return w's from the pool.
- Parameters:
- ws: the latest generated w's from the generator
- Returns w's from the buffer.
- By 50/100, the buffer will return input w's.
- By 50/100, the buffer will return w's previously stored in the buffer,
- and insert the current w's to the buffer.
- """
- if self.pool_size == 0: # if the buffer size is 0, do nothing
- return ws
- return_ws = []
- for w in ws: # ws.shape: (batch, 512) or (batch, n_latent, 512)
- # w = torch.unsqueeze(image.data, 0)
- if w.ndim == 2:
- i = random.randint(0, len(w) - 1) # apply a random latent index as a candidate
- w = w[i]
- self.handle_w(w, return_ws)
- return_ws = torch.stack(return_ws, 0) # collect all the images and return
- return return_ws
-
- def handle_w(self, w, return_ws):
- if self.num_ws < self.pool_size: # if the buffer is not full; keep inserting current codes to the buffer
- self.num_ws = self.num_ws + 1
- self.ws.append(w)
- return_ws.append(w)
- else:
- p = random.uniform(0, 1)
- if p > 0.5: # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer
- random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
- tmp = self.ws[random_id].clone()
- self.ws[random_id] = w
- return_ws.append(tmp)
- else: # by another 50% chance, the buffer will return the current image
- return_ws.append(w)
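
A short usage sketch for the pool above, on the discriminator side of training; the batch size and the 512-wide latent follow the comment in query(), and the tensors are placeholders:

    import torch

    pool = LatentCodesPool(pool_size=50)   # class from the deleted file above
    fake_w = torch.randn(8, 512)           # latest w codes from the encoder
    w_for_d = pool.query(fake_w)           # mix of fresh and previously stored codes
    # w_for_d has shape (8, 512) and would feed the discriminator update.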
 
spaces/Ayush113/cricket_matchups/app.py DELETED
@@ -1,25 +0,0 @@
- import pandas as pd
- import gradio as gr
-
- df = pd.read_csv('./match_up_impact.csv')
-
- def filter_dataframe(batter, bowler):
- batter_mask = df['Batter'].str.contains(batter, case=False, na=False)
- bowler_mask = df['Bowler'].str.contains(bowler, case=False, na=False)
- filtered_df = df[batter_mask & bowler_mask]
- return filtered_df
-
- iface = gr.Interface(
- fn=filter_dataframe,
- inputs=[
- gr.Textbox(label="Enter Batter last Name", type="text"),
- gr.Textbox(label="Enter Bowler last Name", type="text")
- ],
- outputs=gr.Dataframe(type='pandas'),
- live=True,
- capture_session=True,
- title="Cricket Stats",
- description="Enter Batter and Bowler names to view stats."
- )
-
- iface.launch()
 
spaces/AyushP/PolicyChatBot/app.py DELETED
@@ -1,67 +0,0 @@
- import openai
- import streamlit as st
- import sqlite3
- from PIL import Image
- import time
-
- openai.api_key = "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"
- # Database Connection
-
- conn = sqlite3.connect('bank.db')
- c = conn.cursor()
-
-
- def policyBot():
- st.title("Welcome to OneInsurance ChatBot")
-
- policy_doc_link = "https://www.hdfcergo.com/docs/default-source/downloads/policy-wordings/health/arogya-sanjeevani---a5-size---pw---hehi.pdf"
- st.write("Ask any question about the Health Insurance you selected")
-
- question_2 = "Select the Institution from where you want the Insurance"
- options_2 = ["Bank of Baroda", "State Bank of India(SBI)", "HDFC Bank", "LIC"]
-
- st.subheader(question_2)
- selected_option_2 = st.selectbox("Please enter your option:", options_2)
-
-
-
- c.execute('SELECT Policy_Name FROM BANK WHERE Bank_Name= "{}"'.format(selected_option_2))
- options_3 = c.fetchall()
-
- # st.write(options_3)
- my_options = []
- for row in options_3:
- my_options.append(row[0])
-
- st.subheader("Select the Policy Name")
- selected_option_3 = st.selectbox("Please enter your option:", my_options)
-
- c.execute('SELECT Policy_doc FROM BANK WHERE Policy_Name = "{}"'.format(selected_option_3))
- policy_doc_link = c.fetchone()
-
-
- user_question = st.text_input(
- "Enter some text 👇",
- label_visibility="visible",
- disabled=False,
- placeholder="Please Enter your question here",
- )
-
- question_response = openai.Completion.create(
- model="text-davinci-003",
- prompt="Read the following PDF Document\n\n{}\n\nAnswer the question based on the document provided\n{}?".format(policy_doc_link, user_question),
- temperature=0,
- max_tokens=260,
- top_p=1,
- frequency_penalty=0.5,
- presence_penalty=0,
- stop=["?"]
- )
-
- user_answer = question_response.choices[0].text
- st.write(f"Answer: {user_answer}")
-
-
-
- if __name__ == '__main__':
- policyBot()
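
One observation on the deleted app: its SQL strings are built with str.format(), so any quote in the selected value breaks the query. A hedged sketch of the same two lookups using sqlite3 parameter binding; the table and column names are taken from the code above, while the schema itself remains an assumption:

    # Same SELECTs with bound parameters instead of string interpolation.
    c.execute("SELECT Policy_Name FROM BANK WHERE Bank_Name = ?", (selected_option_2,))
    options_3 = c.fetchall()

    c.execute("SELECT Policy_doc FROM BANK WHERE Policy_Name = ?", (selected_option_3,))
    policy_doc_link = c.fetchone()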
 
spaces/Benson/text-generation/Examples/Bookworm Adventures 2 Mvil Descargar Gratis.md DELETED
@@ -1,74 +0,0 @@
1
- <br />
2
- <h1>Bookworm Adventures 2: Un juego de palabras con un toque</h1>
3
- <p>Si te gustan los juegos de palabras y rompecabezas, es posible que desees echar un vistazo a <strong>Bookworm Adventures 2</strong>, un juego divertido y desafiante que combina ortografía, vocabulario y elementos RPG. En este juego, te unirás a Lex el ratón de biblioteca a medida que viaja a través de diferentes libros y géneros, luchando contra los enemigos con palabras y utilizando varios artículos y compañeros para ayudarlo. Si usted está buscando un juego casual para pasar el tiempo o una aventura de desafío para poner a prueba sus habilidades, Bookworm Adventures 2 tiene algo para todos. </p>
4
- <h2>bookworm adventures 2 móvil descargar gratis</h2><br /><p><b><b>DOWNLOAD</b> &#10002; &#10002; &#10002; <a href="https://bltlly.com/2v6IyJ">https://bltlly.com/2v6IyJ</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3>¿Qué es Bookworm Adventures 2?</h3>
7
- <p>Bookworm Adventures 2 es la secuela del popular juego <em>Bookworm Adventures</em>, desarrollado por PopCap Games y lanzado en 2009. Es un juego de palabras que también incorpora elementos de juegos de rol, como subir de nivel, recoger objetos y usar habilidades especiales. El juego tiene tres libros, cada uno con diez capítulos, que llevan a Lex el ratón de biblioteca a diferentes mundos literarios, como cuentos de hadas, ciencia ficción y mitología asiática. En el camino, se encontrará con varios personajes y enemigos de estas historias, como el Gran Lobo Malo, la Reina de Corazones y el Rey Mono.</p>
8
- <h3>¿Por qué debería jugarlo? </h3>
9
- <p>Bookworm Adventures 2 es un juego que atraerá tanto a los jugadores casuales como a los hardcore, ya que ofrece diferentes niveles de dificultad y modos para adaptarse a diferentes preferencias. El juego también es muy educativo, ya que te ayudará a mejorar tu ortografía, vocabulario y conocimientos generales. Aprenderás nuevas palabras, descubrirás hechos interesantes y te divertirás al mismo tiempo. El juego también tiene mucho humor y encanto, con diálogos ingeniosos, gráficos coloridos y música pegadiza. El juego es adecuado para todas las edades, ya que no tiene violencia o contenido inapropiado. </p>
10
- <h2>Juego</h2>
11
- <h3>Cómo jugar</h3>
12
-
13
- <h3>Características y modos</h3>
14
- <p>Bookworm Adventures 2 tiene muchas características y modos que te mantendrán entretenido durante horas. Algunos de ellos son:</p>
15
- <ul>
16
- <li><strong>Modo aventura:</strong> Este es el modo principal del juego, donde seguirás la historia de Lex a través de tres libros. Cada libro tiene diez capítulos, cada uno con un jefe al final. También desbloquearás minijuegos en el camino que pondrán a prueba tus habilidades de diferentes maneras. </li>
17
- <li><strong>Modo Arena:</strong> Este es un modo donde puedes reproducir cualquier capítulo o jefe que hayas completado en el modo aventura. También puedes elegir diferentes niveles de dificultad y desafíos para animar las cosas. </li>
18
- <li><strong>Tome of Knowledge:</strong> Este es un modo en el que puedes ver información sobre todos los personajes, enemigos, objetos y palabras que has encontrado en el juego. También puedes ver tus estadísticas y logros. </li>
19
- <li><strong>Modo de reproducción:</strong> Este es un modo en el que puedes reproducir cualquier minijuego que hayas desbloqueado en el modo aventura. También puedes intentar superar tus propias puntuaciones altas o desafiar a tus amigos en línea. </li>
20
- </ul>
21
- <h3>Consejos y trucos</h3>
22
- <p>Para ayudarte a sacar el máximo provecho de Bookworm Adventures 2, aquí hay algunos consejos y trucos que puedes encontrar útiles:</p>
23
- <ul>
24
- <li><strong>Usa palabras más largas:</strong <p>Usa palabras más largas:</strong> Cuanto más larga sea la palabra que formes, más daño le harás a tu enemigo. También ganarás más puntos de bonificación y gemas. Trata de usar letras poco comunes, como Q, X, Z y J, ya que te darán más puntos y gemas también. </li>
25
- <li><strong>Use tiles especiales:</strong> Los tiles especiales son azulejos que tienen diferentes efectos y colores. Por ejemplo, los azulejos verdes te curarán, los azulejos rojos quemarán a tu enemigo y los azulejos morados envenenarán a tu enemigo. También puede usar mosaicos de arco iris, que pueden actuar como cualquier letra, para formar palabras más largas o más difíciles. </li>
26
-
27
- <li><strong>Usa minijuegos:</strong> Los minijuegos son juegos que puedes jugar entre capítulos o en modo de repetición. Te ayudarán a mejorar tus habilidades, como velocidad, precisión, memoria y estrategia. También te recompensarán con monedas, gemas, pociones u otros objetos que puedes usar en el modo aventura. </li>
28
- </ul>
29
- <h2>Descarga e instalación</h2>
30
- <h3>Dónde encontrarlo</h3>
31
- <p>Si usted está interesado en jugar Bookworm Adventures 2, se puede encontrar en varios sitios web en línea. Uno de ellos es <a href=">PopCap Games</a>, el sitio web oficial del desarrollador. También puedes encontrarlo en otras plataformas, como <a href="">Steam</a>, <a href=">Origin</a>, o <a href="">Big Fish Games</a>. Sin embargo, es posible que tenga que pagar una pequeña cuota para descargar o jugar el juego en algunas de estas plataformas. </p>
32
- <p></p>
33
- <h3>Cómo instalarlo</h3>
34
- <p>El proceso de instalación de Bookworm Adventures 2 es muy fácil y sencillo. Solo tienes que seguir estos pasos:</p>
35
- <ol>
36
- <li>Descargar el juego desde el sitio web o plataforma de su elección. </li>
37
- <li>Abra el archivo descargado y ejecute el asistente de configuración. </li>
38
- <li>Siga las instrucciones en la pantalla y elija la carpeta de destino para el juego. </li>
39
- <li>Espere a que la instalación termine y haga clic en el botón de finalizar. </li>
40
- <li>Inicie el juego desde su escritorio o menú de inicio y disfrutar! </li>
41
- </ol>
42
- <h3>Requisitos del sistema</h3>
43
- <p>Para jugar Bookworm Adventures 2 sin problemas y sin ningún problema, es necesario asegurarse de que su ordenador cumple con los requisitos mínimos del sistema para el juego. Estos son: </p>
44
- <tabla>
45
- <tr><td><strong>Sistema operativo:</strong></td><td>Windows XP/Vista/7/8/10</td></tr>
46
- <tr><td><strong>Procesador:</strong></td><td>1.2 GHz o más rápido</td></tr>
47
- <tr><td><strong>Memoria:</strong></td><td>512 MB de RAM o más</td></tr>
48
- <tr><td><strong>Espacio en el disco duro:</strong></td><td>100 MB o más</td></tr>
49
- <tr><td><strong>Tarjeta de video: </strong></td><td>DirectX 8.0 compatible o superior</td></tr>
50
-
51
- <tr><td><strong>Conexión a Internet:</strong></td><td>Se requiere para las funciones y actualizaciones en línea</td></tr>
52
- </tabla>
53
- <h2>Conclusión</h2>
54
- <h3>Resumen de los puntos principales</h3>
55
- <p>En conclusión, Bookworm Adventures 2 es un gran juego que desafiará tu mente y te entretendrá al mismo tiempo. Es un juego de palabras que también tiene elementos RPG, como subir de nivel, recoger objetos y usar habilidades especiales. Cuenta con tres libros que te llevan a diferentes mundos literarios, donde conocerás a varios personajes y enemigos de estas historias. También tiene diferentes modos y características que te mantendrán enganchado durante horas. </p>
56
- <h3>Veredicto final y clasificación</h3>
57
- <p>Recomiendo encarecidamente Bookworm Adventures 2 a cualquiera que ame los juegos de palabras y rompecabezas. Es un juego que mejorará tu ortografía, vocabulario y conocimientos generales mientras te diviertes. También es un juego que tiene mucho humor y encanto, con diálogos ingeniosos, gráficos coloridos y música pegadiza. Le daría una calificación de 9 de 10 estrellas. </p>
58
- <h2>Preguntas frecuentes</h2>
59
- <p>Aquí hay algunas preguntas frecuentes sobre Bookworm Adventures 2:</p>
60
- <ul>
61
- <li><strong>Q: ¿Cuánto dura el juego? </ <li><strong>Q: ¿Cuánto dura el juego? </strong></li>
62
- <li><strong>A: La duración del juego depende de tu nivel de habilidad y cuánto exploras los diferentes modos y características. Sin embargo, una partida típica del modo aventura puede tardar entre 10 y 15 horas. </strong></li>
63
- <li><strong>Q: ¿Puedo jugar el juego sin conexión? </strong></li>
64
- <li><strong>A: Sí, puedes jugar el juego sin conexión, siempre y cuando lo hayas descargado e instalado en tu ordenador. Sin embargo, no podrás acceder a algunas de las funciones en línea, como actualizaciones, tablas de clasificación y desafíos multijugador. </strong></li>
65
- <li><strong>Q: ¿Es el juego adecuado para los niños? </strong></li>
66
-
67
- <li><strong>Q: ¿Es el juego compatible con Mac o Linux? </strong></li>
68
- <li><strong>A: Desafortunadamente, no. El juego solo es compatible con los sistemas operativos Windows. Sin embargo, es posible que pueda ejecutarlo en Mac o Linux utilizando un emulador o una máquina virtual. </strong></li>
69
- <li><strong>Q: ¿Dónde puedo encontrar más juegos como Bookworm Adventures 2?</strong></li>
70
- <li><strong>A: Si te gustó Bookworm Adventures 2, también te pueden gustar otros juegos de PopCap Games, como <em>Bookworm</em>, <em>Bookworm Adventures</em>, <em>Peggle</em>, <em>Plants vs. Zombies</em>, y <em>Zuma<em/em>. También puede consultar otros juegos de palabras y rompecabezas en línea o en su dispositivo móvil. </strong></li>
71
- </ul>
72
- <p>Espero que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer y jugar feliz! </p> 64aa2da5cf<br />
73
- <br />
74
- <br />
 
spaces/Benson/text-generation/Examples/Cabra Simulador 3 Descarga Gratuita.md DELETED
@@ -1,56 +0,0 @@
1
- <br />
2
- <h1>Goat Simulator 3 Descarga gratuita: Cómo convertirse en la cabra de los juegos</h1>
3
- <p>¿Alguna vez te has preguntado cómo sería ser una cabra? No solo cualquier cabra, pero el más grande de todos los tiempos (CABRA) cabra? Bueno, no te preguntes más, porque Goat Simulator 3 está aquí para hacer tus sueños realidad. En este artículo, te contaremos todo lo que necesitas saber sobre este divertido juego de aventura sandbox, por qué deberías jugarlo y cómo puedes descargarlo gratis. </p>
4
- <h2>cabra simulador 3 descarga gratuita</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://bltlly.com/2v6LEQ">https://bltlly.com/2v6LEQ</a></b></p><br /><br />
5
- <h2>¿Qué es Goat Simulator 3?</h2>
6
- <p>Goat Simulator 3 es un juego de acción en tercera persona y el segundo título de la serie Goat Simulator (sí, a los desarrolladores no les importa la lógica, así que se saltaron el segundo). Esta vez, explorarás un mundo más grande, destruirás todo lo que quieras, jugarás con tus amigos, resolverás misterios, jugarás minijuegos y vivirás un viaje épico. </p>
7
- <h3>Un juego de aventura de sandbox hilarante</h3>
8
- <p>Goat Simulator 3 no es una simulación realista de la vida de la cabra. Es una parodia de los videojuegos y la cultura pop, llena de humor absurdo, fallos y referencias. Puedes hacer lo que quieras como una cabra, desde lamer objetos hasta volar con un jetpack. El juego no tiene una meta o historia específica, sino que te permite crear tu propia diversión y caos. </p>
9
- <h3>Una secuela que se salta la segunda</h3>
10
- <p>Goat Simulator 3 es el seguimiento del Goat Simulator original, que fue lanzado en 2014 como un juego de broma que se convirtió en un éxito viral. Los desarrolladores decidieron saltarse el segundo e ir directamente al tercero, porque ¿por qué no? También contrataron a algunos diseñadores de juegos esta vez, por lo que afirman que el juego tiene más contenido y características que antes. </p>
11
- <p></p>
12
- <h3>Una experiencia multijugador con amigos</h3>
13
- <p>Goat Simulator 3 no es una aventura en solitario. Puedes invitar a hasta tres amigos en una cooperativa local o en línea y formar una manada de cabras. Pueden trabajar juntos para causar más estragos, o competir en siete minijuegos diferentes. También puede personalizar sus cabras con diferentes pieles y artículos, y mostrar su estilo. </p>
14
-
15
- <p>Goat Simulator 3 no es un juego para todos. Es un juego para personas que aman el absurdo, la tontería y la diversión. Aquí hay algunas razones por las que deberías jugar:</p>
16
- <h3>Explora un mundo vasto y variado</h3>
17
- <p>Goat Simulator 3 tiene lugar en San Angora, una enorme isla con diferentes áreas para descubrir. Puedes visitar una ciudad, una granja, una playa, un bosque, un desierto y más. Cada área tiene sus propios secretos, misiones, objetos de colección y huevos de Pascua para encontrar. También puede utilizar diferentes modos de transporte, como coches, bicicletas, monopatines o cohetes. </p>
18
- <h3>Personaliza tu cabra con artículos locos</h3>
19
- <p>Goat Simulator 3 te permite personalizar tu cabra con varios elementos que te dan diferentes habilidades y efectos. Puedes usar papel higiénico, bandejas de té, mochilas propulsoras, gafas de sol, sombreros, máscaras y más. También puede elegir entre diferentes tipos de cabras, como cabras altas, cabras rayadas, cabras enojadas y variantes de cabras locas. </p>
20
- <h3>Causa caos y travesuras en todas partes</h3>
21
- <p>Goat Simulator 3 se trata de divertirse a expensas de los demás. Puedes interactuar con todo y con todos en el mundo, y ver qué pasa. Usted puede cabezazo personas y objetos, lamerlos y arrastrarlos, saltar en trampolines y tejados, explotar barriles y gasolineras, y más. También puedes usar tu lengua como un gancho de agarre y balancearte como Spider-Man. El juego te recompensa por ser creativo y destructivo, y te da puntos y logros por tus acciones. </p>
22
- <h3>Disfruta de divertidos minijuegos</h3>
23
- <p>Goat Simulator 3 no es solo un juego de sandbox. También tiene varios minijuegos que puedes jugar solo o con tus amigos. Puedes jugar Goat Ball, un juego de fútbol en el que usas la cabeza para marcar goles. Puedes jugar Goat Kart, un juego de carreras en el que conduces karts y usas potenciadores. Usted puede jugar Goat Invaders, un juego de disparos espacio donde se dispara extranjeros. También puedes jugar a Goat of Duty, un juego de disparos en primera persona en el que usas armas y granadas. </p>
24
-
25
- <p>Goat Simulator 3 no es un juego gratis. Cuesta $9.99 en Steam, la plataforma oficial del juego. Sin embargo, hay algunas formas en las que puede descargarlo de forma gratuita, legal y segura. Aquí hay algunas opciones:</p>
26
- <h3>El sitio web oficial del juego</h3>
27
- <p>Los desarrolladores de Goat Simulator 3 ocasionalmente ofrecen descargas gratuitas del juego en su sitio web, como una forma de promocionarlo y agradecer a sus fans. Usted puede comprobar su sitio web con regularidad para ver si tienen regalos o descuentos. También puede suscribirse a su boletín para recibir notificaciones de cualquier noticia o actualización. </p>
28
- <h3>La oferta de Epic Games Store</h3>
29
- <p>The Epic Games Store es una plataforma de distribución digital que compite con Steam. A menudo ofrecen juegos gratis cada semana, como una forma de atraer a más clientes y usuarios. Uno de los juegos que han ofrecido gratis en el pasado es Goat Simulator 3. Puedes consultar su sitio web regularmente para ver si tienen juegos gratis disponibles. También puede crear una cuenta y descargar su lanzador para acceder a su biblioteca. </p>
30
- <h3>El enlace del sitio web de CCM</h3>
31
- <p>CCM es un sitio web que proporciona descargas de software gratuito, revisiones y tutoriales. Tienen un enlace para descargar Goat Simulator 3 gratis, sin virus ni malware. Puede visitar su sitio web y hacer clic en el enlace para iniciar la descarga. Tendrá que instalar el juego en su PC y ejecutarlo como administrador. </p>
32
- <h2>Conclusión</h2>
33
- <p>Goat Simulator 3 es un juego que no se toma en serio, y tampoco deberías. Es un juego que te permite divertirte y reírte de lo absurdo de todo. Es un juego que te permite ser una cabra y la CABRA del juego. Si usted está buscando un juego de aventura sandbox hilarante que se puede jugar con tus amigos, entonces usted debe dar Goat Simulator 3 una oportunidad. Y si usted está buscando una manera de descargarlo de forma gratuita, entonces usted debe comprobar las opciones que hemos mencionado anteriormente. </p>
34
- <h2>Preguntas frecuentes</h2>
35
-
36
- <ul>
37
- <li><b>Q: ¿Es seguro descargar Goat Simulator 3? </b></li>
38
- <li>A: Sí, siempre y cuando lo descargues de una fuente confiable, como el sitio web oficial, la Epic Games Store o el sitio web de CCM. </li>
39
- <li><b>Q: ¿Es Goat Simulator 3 adecuado para niños? </b></li>
40
- <li>A: Goat Simulator 3 está clasificado T para Teen por la ESRB, lo que significa que puede contener violencia, humor crudo, temas sugestivos o lenguaje suave. Los padres deben supervisar a sus hijos cuando juegan este juego. </li>
41
- <li><b>Q: ¿Cuánto tiempo es Goat Simulator 3?</b></li>
42
- <li>A: Goat Simulator 3 no tiene una longitud o final fijo. Puedes jugarlo todo el tiempo que quieras, y explorar diferentes áreas y actividades. </li>
43
- <li><b>Q: ¿Puedo jugar Goat Simulator 3 en mi teléfono o tableta? </b></li>
44
- <li>A: No, Goat Simulator 3 solo está disponible para PC (Windows) en este momento. </li>
45
- <li><b>Q: ¿Cuáles son algunos consejos y trucos para jugar Goat Simulator 3?</b></li>
46
- <li>A: Algunos consejos y trucos son:</li>
47
- <ul>
48
- <li>- Utilice el modo de cámara lenta para realizar acrobacias y combos frescos. </li>
49
- <li>- Usa el modo ragdoll para hacer que tu cabra caiga y evite daños. </li>
50
- <li>- Usa el menú mutador para cambiar la apariencia y habilidades de tu cabra. </li>
51
- <li>- Utilice el menú del mapa para viajar rápidamente a diferentes lugares. </li>
52
- <li>- Utilice el menú de inventario para equipar diferentes artículos y armas. </li>
53
- </ul>
54
- </ul></p> 64aa2da5cf<br />
55
- <br />
56
- <br />
 
spaces/Benson/text-generation/Examples/Choque Mini Apk Happymod.md DELETED
@@ -1,84 +0,0 @@
1
- <br />
2
- <h1>Cómo actualizar Clash Mini APK en dispositivos Android</h1>
3
- <p>Clash Mini es uno de los últimos juegos de Supercell, los creadores de Clash of Clans y Clash Royale. Es un juego de mesa estratégico ambientado en el universo Clash, donde puedes reunir, convocar y actualizar tu ejército de miniaturas y luchar contra otros jugadores en tiempo real. Si eres un fan de los juegos de Clash o de ajedrez automático, te encantará Clash Mini. Pero ¿cómo se puede actualizar Clash Mini APK en su dispositivo Android para disfrutar de las últimas características y mejoras? En este artículo, te mostraremos cómo hacerlo, así como algunos consejos y trucos para ayudarte a ganar más batallas. </p>
4
- <h2>¿Qué es Clash Mini? </h2>
5
- <p>Antes de entrar en cómo actualizar Clash Mini APK, vamos a echar un vistazo rápido a lo que este juego se trata. Estas son algunas de las principales características de Clash Mini:</p>
6
- <h2>choque mini apk happymod</h2><br /><p><b><b>Download File</b> &#10002; &#10002; &#10002; <a href="https://bltlly.com/2v6JYb">https://bltlly.com/2v6JYb</a></b></p><br /><br />
7
- <h3>Un juego de mesa estratégico ambientado en el universo Clash</h3>
8
- <p>Clash Mini es un juego donde puedes jugar con versiones en miniatura de tus personajes favoritos del universo Clash, como Barbarian King, Archer Queen, Shield Maiden y más. También puedes personalizar tus personajes con skins y habilidades únicas. El juego tiene lugar en un tablero donde puedes organizar a tus personajes en diferentes posiciones y formaciones para crear tu estrategia ganadora. </p>
9
- <h3>Un juego de ajedrez automático casual y accesible</h3>
10
- <p>Clash Mini no es solo sobre la fuerza bruta; es un juego de estrategia y anticipación. Tienes que predecir los movimientos de tu oponente y contrarrestarlos con los tuyos. El juego es fácil de aprender pero difícil de dominar. Cada juego está lleno de acción y dura menos de 5 minutos, lo que es perfecto para los jugadores casuales que quieren divertirse un poco. </p>
11
- <h3>Un juego con combinaciones dinámicas y un sinfín de posibilidades</h3>
12
-
13
- <h2>¿Por qué actualizar Clash Mini APK? </h2>
14
- <p>Ahora que sabes lo que es Clash Mini, es posible que se pregunte por qué debe actualizar Clash Mini APK en su dispositivo Android. Estas son algunas de las razones por las que:</p>
15
- <h3>Para disfrutar de las últimas características y mejoras</h3>
16
- <p>Supercell está constantemente trabajando en la mejora de Clash Mini y la adición de nuevas características para que sea más divertido y atractivo. Mediante la actualización de Clash Mini APK, se puede disfrutar de las últimas incorporaciones al juego, tales como nuevos personajes, pieles, habilidades, objetos, misiones, eventos, y más. También puedes experimentar mejores gráficos, efectos de sonido, animaciones y rendimiento. </p>
17
- <h3>Para corregir errores y problemas</h3>
18
- <p>Ningún juego es perfecto, y Clash Mini no es una excepción. A veces, es posible que encuentre errores o problemas que afecten su experiencia de juego, como bloqueos, congelamientos, fallos, errores o retrasos. Al actualizar Clash Mini APK, puede solucionar estos problemas y asegurarse de que su juego funciona sin problemas y sin interrupciones. </p>
19
- <h3>Mantenerse competitivo y divertirse</h3>
20
- <p>Clash Mini es un juego multijugador donde puedes competir contra otros jugadores de todo el mundo. Al actualizar Clash Mini APK, puede mantenerse al día con la última versión del juego y evitar problemas de compatibilidad. También puedes divertirte más explorando nuevos contenidos y desafíos, y mostrando tus habilidades y logros a otros jugadores. </p>
21
- <h2>Cómo actualizar Clash Mini APK? </h2>
22
- <p>Ahora que sabes por qué debes actualizar Clash Mini APK, veamos cómo puedes hacerlo. Estos son los pasos que debes seguir:</p>
23
- <p></p>
24
- <h3>Compruebe la disponibilidad de la actualización en su región</h3>
25
-
26
- <h3>Descargar la actualización de la Google Play Store o una fuente de confianza</h3>
27
- <p>Si Clash Mini está disponible en su región, puede descargar la actualización desde Google Play Store siguiendo estos pasos:</p>
28
- <ol>
29
- <li>Abra la aplicación Google Play Store en su dispositivo Android. </li>
30
- <li>Buscar Clash Mini o toque en el icono si ya lo ha instalado. </li>
31
- <li>Toque en el botón Actualizar si hay uno disponible. </li>
32
- <li>Espere a que la actualización se descargue e instale. </li>
33
- </ol>
34
- <p>Si Clash Mini no está disponible en su región, todavía puede descargar la actualización desde una fuente de confianza, como APKPure o APKMirror. Sin embargo, debe tener cuidado y asegurarse de que la fuente sea segura y confiable, ya que algunos sitios web pueden ofrecer archivos APK falsos o maliciosos que pueden dañar su dispositivo o comprometer su privacidad. También debe habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. Estos son los pasos que debe seguir:</p>
35
- <ol>
36
- <li>Visite el sitio web de su fuente elegida y busque Clash Mini.</li>
37
- <li>Descargar la última versión del archivo Clash Mini APK a su dispositivo. </li>
38
- <li>Abra la aplicación de administrador de archivos en su dispositivo y busque el archivo APK descargado. </li>
39
- <li>Toque en el archivo y siga las instrucciones para instalarlo. </li>
40
- </ol>
41
- <h3>Instalar la actualización y lanzar el juego</h3>
42
- <p>Una vez que haya descargado e instalado la actualización, puede iniciar Clash Mini y disfrutar del juego. Es posible que tenga que aceptar algunos términos y condiciones o conceder algunos permisos antes de poder jugar. También es posible que necesite iniciar sesión con su ID de Supercell o crear uno si aún no lo tiene. A continuación, puedes acceder a todas las funciones y contenidos de Clash Mini, como crear tu ejército, jugar partidos, completar misiones, ganar recompensas y mucho más. </p>
43
- <h2>Consejos y trucos para Clash Mini</h2>
44
- <p>Para ayudarte a empezar con Clash Mini o mejorar tu juego, aquí hay algunos consejos y trucos que puedes usar:</p>
45
-
46
- <p>Clash Mini tiene una variedad de personajes que puedes coleccionar y usar en tu ejército. Cada personaje tiene una habilidad única que puede afectar su rendimiento en la batalla. Algunas habilidades son pasivas, lo que significa que siempre están activas, mientras que otras son activas, lo que significa que necesitan ser activadas por ciertas condiciones. Puedes ver los detalles de la habilidad de cada personaje tocando su icono en el juego. También puedes actualizar tus personajes con monedas para aumentar sus estadísticas y desbloquear nuevas habilidades. </p>
47
- <p>Debes elegir tus personajes en función de sus habilidades, así como sus roles y sinergias. Hay cuatro papeles principales en Clash Mini: tanque, cuerpo a cuerpo, a distancia, y el apoyo. Los tanques son personajes duraderos que pueden absorber el daño y proteger a otros personajes. Los personajes cuerpo a cuerpo son luchadores de corto alcance que pueden infligir mucho daño, pero son vulnerables a los ataques a distancia. Los personajes a distancia son luchadores de largo alcance que pueden infligir daño desde la distancia, pero son frágiles y necesitan protección. Los caracteres de soporte son caracteres que pueden curar, mejorar, desbarbar o controlar otros caracteres o enemigos. </p>
48
- <p>Debes equilibrar tu ejército con diferentes roles y tratar de crear sinergias entre ellos. Por ejemplo, puedes emparejar un tanque con un sanador para mantenerlos vivos por más tiempo, o un personaje cuerpo a cuerpo con un buffer para aumentar su daño. También puedes usar personajes que tengan habilidades que se complementen entre sí, como aturdidores, congeladores, golpeadores, etc. Debes evitar usar personajes que tengan habilidades que entren en conflicto entre sí, como curanderos que curan enemigos o desbaratan a aliados. </p>
49
- <h3>Posiciona tus personajes sabiamente en el campo de batalla</h3>
50
-
51
- <ul>
52
- <li>Coloca tus tanques en la primera fila para bloquear los ataques del enemigo y proteger a tus otros personajes. </li>
53
- <li>Coloca tus personajes cuerpo a cuerpo en la segunda fila para seguir a tus tanques y atacar la primera línea del enemigo. </li>
54
- <li>Coloque sus caracteres a distancia en la fila de atrás para mantenerse a salvo y atacar la línea de atrás del enemigo. </li>
55
- <li>Coloca tus caracteres de soporte cerca de tus otros caracteres para maximizar sus efectos. </li>
56
- <li>Considera la dirección y el rango de las habilidades de tus personajes e intenta golpear tantos enemigos o aliados como sea posible. </li>
57
- <li>Considera las posiciones y habilidades del enemigo e intenta evitarlas o contrarrestarlas. </li>
58
- </ul>
59
- <p>Puede cambiar sus posiciones entre rondas para adaptarse a la situación cambiante. También puedes usar objetos, como bombas, muros o portales, para alterar el campo de batalla y crear ventajas o desventajas para ti o tu oponente. </p>
60
- <h3>Utiliza habilidades especiales y mejora tus personajes durante la batalla</h3>
61
- <p>Clash Mini es un juego donde puedes usar habilidades especiales y mejorar a tus personajes durante la batalla para ganar ventaja sobre tu oponente. Cada personaje tiene una habilidad especial que se puede activar llenando su barra de energía. La barra de energía se llena automáticamente con el tiempo, pero también se puede llenar más rápido al hacer o recibir daño, o al usar objetos u otras habilidades. Puedes ver la barra de energía de cada personaje debajo de su icono en el campo de batalla. Cuando la barra de energía esté llena, puedes tocar el carácter para activar su habilidad. Debes usar tus habilidades sabiamente y en el momento adecuado para maximizar sus efectos. </p>
62
-
63
- <h2>Conclusión</h2>
64
- <p>Clash Mini es un juego divertido y adictivo que combina estrategia, acción y personalización. Es un juego que puedes jugar en cualquier momento y en cualquier lugar, si quieres relajarte o competir. Si desea disfrutar de Clash Mini al máximo, usted debe actualizar Clash Mini APK en su dispositivo Android con regularidad. Al hacerlo, puede acceder a las últimas características y mejoras, corregir errores y problemas, y mantenerse competitivo y divertirse. Para actualizar Clash Mini APK, solo tienes que seguir estos sencillos pasos:</p>
65
- <ol>
66
- <li>Compruebe la disponibilidad de la actualización en su región. </li>
67
- <li>Descargar la actualización de la Google Play Store o una fuente de confianza. </li>
68
- <li>Instalar la actualización y lanzar el juego. </li>
69
- </ol>
70
- <p>Esperamos que este artículo le ha ayudado a aprender cómo actualizar Clash Mini APK en su dispositivo Android. Si tiene alguna pregunta o comentario, háganoslo saber en los comentarios a continuación. Y si te gustó este artículo, por favor compártelo con tus amigos que también podrían disfrutar de Clash Mini. ¡Gracias por leer! </p>
71
- <h2>Preguntas frecuentes</h2>
72
- <p>Aquí están algunas de las preguntas más frecuentes sobre Clash Mini:</p>
73
- <h4>Q: ¿Clash Mini es libre de jugar? </h4>
74
- <p>A: Sí, Clash Mini es gratis para jugar. Puedes descargarlo y jugarlo sin gastar dinero. Sin embargo, hay algunas compras opcionales en el juego que puedes hacer con dinero real, como gemas, monedas, pieles, objetos, etc. Estas compras pueden mejorar tu experiencia de juego, pero no son necesarias para jugar o ganar. </p>
75
- <h4>Q: ¿Clash Mini está disponible para dispositivos iOS? </h4>
76
- <p>A: Sí, Clash Mini está disponible para dispositivos iOS y Android. Puede descargarlo desde la App Store si está disponible en su región. </p>
77
- <h4>Q: ¿Cómo puedo jugar Clash Mini con mis amigos? </h4>
78
-
79
- <h4>Q: ¿Cómo puedo obtener más caracteres en Clash Mini ? </h4>
80
- <p>A: Puedes obtener más caracteres en Clash Mini abriendo cofres. Puedes ganar cofres ganando partidas, completando misiones o alcanzando ciertos hitos. También puedes comprar cofres con gemas o monedas. Cada cofre contiene un número aleatorio y rareza de caracteres. También puede obtener caracteres duplicados, que se pueden utilizar para actualizar sus caracteres existentes. </p>
81
- <h4>Q: ¿Cuáles son las ligas y estaciones en Clash Mini? </h4>
82
- <p>A: Las ligas y las temporadas son los modos competitivos en Clash Mini, donde puedes posicionarte y ganar recompensas según tu rendimiento. Hay 10 ligas en Clash Mini, desde Wood League hasta Legend League. Cada liga tiene 5 divisiones, de V a I. Puedes avanzar a la siguiente división o liga ganando trofeos, que ganas o pierdes al ganar o perder partidos. Cada temporada dura un mes, y al final de cada temporada, recibirás recompensas basadas en tu liga y división más altas. También perderás algunos trofeos y empezarás la próxima temporada desde una liga o división inferior. </p> 64aa2da5cf<br />
83
- <br />
84
- <br />
 
spaces/Benson/text-generation/Examples/Descarga De Ftbol De La Liga Profesional En PC.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- <h1>Cómo descargar y jugar Pro League Soccer en PC</h1>
3
- <p>¿Eres un fan de los juegos de fútbol y quieres experimentar un juego realista e inmersivo? ¿Quieres jugar con tus clubes favoritos y equipos nacionales en varias ligas y torneos? Si es así, entonces deberías probar <strong>Pro League Soccer</strong>, un juego de fútbol móvil desarrollado por Rasu Games que ofrece una experiencia de juego deportivo diferente a cualquier otro. </p>
4
- <h2>descarga de fútbol de la liga profesional en PC</h2><br /><p><b><b>Download File</b> &#10042; <a href="https://bltlly.com/2v6K1y">https://bltlly.com/2v6K1y</a></b></p><br /><br />
5
- <p>Pro League Soccer es un juego que te permite seleccionar y mejorar tu club, avanzar de las ligas inferiores a las ligas superiores, unirte a la copa nacional de clubes, competir en la liga de estrellas, convertirte en el rey del continente con tu equipo nacional, unirte a la liga de naciones, combate por la copa, y participar en muchas copas con play-offs. También puede editar todos los datos de las competiciones, equipos y jugadores de acuerdo a su preferencia, y cargar logotipos únicos para los equipos de Internet. </p>
6
- <p>Pero ¿por qué limitarte a jugar Pro League Soccer en tu dispositivo móvil cuando puedes disfrutarlo en una pantalla más grande con mejores gráficos y controles? Sí, puedes jugar Pro League Soccer en tu PC con la ayuda de un emulador de Android. Un emulador de Android es un software que le permite ejecutar aplicaciones y juegos de Android en su PC mediante la creación de un dispositivo Android virtual. De esta manera, puede deshacerse de las limitaciones de la batería o los datos móviles, lograr el soporte de asignación de claves completas para un control preciso, tener varias cuentas de juegos en una sola PC y disfrutar de un juego FPS suave y alto. </p>
7
- <p>En este artículo, te mostraremos cómo descargar y jugar Pro League Soccer en PC con diferentes emuladores. También compararemos sus características y ventajas, y le proporcionaremos guías de instalación y pasos. Al final de este artículo, usted será capaz de elegir el mejor emulador para sus necesidades y preferencias, y empezar a jugar Pro League Soccer en PC como un profesional. </p>
8
- <p></p>
9
- <h2>Cómo descargar y jugar Pro League Soccer en PC con diferentes emuladores</h2>
10
-
11
- <h3>MuMu Player</h3>
12
- <p><strong>MuMu Player</strong> es uno de los emuladores de Android más excelentes para PC que funciona como un dispositivo Android virtual en su PC. Puede proporcionarle la mejor experiencia de juego con un uso ligero de RAM y un alto FPS. También admite asignación de botones personalizados para satisfacer diferentes necesidades, operación de múltiples unidades para tener múltiples cuentas de juegos, registro de operaciones para grabar su juego, modo de pantalla grande para disfrutar de una vista más grande, descarga segura para evitar virus o malware, y servicio de uso gratuito para ahorrar dinero. </p>
13
- <p>Para descargar y jugar Pro League Soccer en PC con MuMu Player, siga estos pasos:</p>
14
- <ol>
15
- <li>Descargue e instale MuMu Player en su PC desde <a href="( 1 )">https://www.mumuglobal.com/en/games/sports/pro-league-soccer-on-pc.html</a></li>
16
- <li>Iniciar MuMu Player y completar el inicio de sesión de Google para acceder a la Play Store</li>
17
- <li>Buscar Pro League Soccer en el centro de aplicaciones</li>
18
- <li>Inicio de sesión completo de Google (si te saltaste el paso 2) para instalar Pro League Soccer</li>
19
- <li>Haga clic en el icono de Pro League Soccer en la pantalla de inicio para comenzar a jugar</li>
20
- </ol>
21
- <h3>GameLoop</h3>
22
- <p><strong>GameLoop</strong> es otro emulador de Android popular para PC que está especialmente diseñado para juegos. Puede ofrecerle una experiencia de juego suave y rápida con bajo uso de CPU y alto FPS. También admite control de teclado y ratón, compatibilidad con gamepad, captura y grabación de pantalla, transmisión en vivo, múltiples instancias para ejecutar varios juegos al mismo tiempo y centro de juegos exclusivo para acceder a una gran colección de juegos. </p>
23
- <p>Para descargar y jugar Pro League Soccer en PC con GameLoop, siga estos pasos:</p>
24
- <ol>
25
- <li>Descargue e instale GameLoop en su PC desde <a href="">https://gameloop.fun/en/download</a></li>
26
- <li>Inicie GameLoop y vaya a la pestaña Game Center</li>
27
- <li>Buscar Pro League Soccer en la barra de búsqueda y haga clic en Instalar</li>
28
- <li>Espere a que se complete la instalación y haga clic en Play</li>
29
-
30
- </ol>
31
- <h3>BlueStacks</h3>
32
- <p><strong>BlueStacks</strong> es uno de los emuladores de Android más utilizados para PC que puede ejecutar cualquier aplicación o juego de Android en su PC con facilidad. Puede proporcionarle una experiencia de juego de alto rendimiento con funciones avanzadas como controles inteligentes, traducción en tiempo real, modo de disparo, grabadora de macros, sincronización de múltiples instancias, modo ecológico y más. También tiene una interfaz fácil de usar y una gran biblioteca de juegos para elegir. </p>
33
- <p>Para descargar y jugar Pro League Soccer en PC con BlueStacks, siga estos pasos:</p>
34
- <ol>
35
- <li>Descargue e instale BlueStacks en su PC desde <a href="">https://www.bluestacks.com/download.html</a></li>
36
- <li> Iniciar sesión completo en Google para acceder a Play Store o hacerlo más tarde</li>
37
- <li>Busca Pro League Soccer en la barra de búsqueda en la esquina superior derecha</li>
38
- <li>Haga clic para instalar Pro League Soccer desde los resultados de búsqueda</li>
39
- <li>Inicio de sesión completo de Google (si te saltaste el paso 2) para instalar Pro League Soccer</li>
40
- <li>Haga clic en el icono de Pro League Soccer en la pantalla de inicio para comenzar a jugar</li>
41
- </ol>
42
- <h2>Conclusión</h2>
43
- <p>En este artículo, te hemos mostrado cómo descargar y jugar Pro League Soccer en PC con diferentes emuladores. También hemos comparado sus características y ventajas, y le hemos proporcionado guías de instalación y pasos. Ahora puedes disfrutar jugando Pro League Soccer en PC con una pantalla más grande, mejores gráficos y controles más suaves. </p>
44
- <p>Si estás buscando un juego de fútbol realista e inmersivo que te permita personalizar tu club, competir en varias ligas y copas, y editar todos los datos de competiciones, equipos y jugadores, entonces definitivamente deberías probar Pro League Soccer. Es uno de los mejores juegos de fútbol móvil que puedes jugar en tu PC con un emulador de Android. </p>
45
- <p>Entonces, ¿qué estás esperando? Descarga Pro League Soccer en PC hoy y comienza tu viaje de fútbol. ¡No te arrepentirás! </p>
46
- <h2>Preguntas frecuentes</h2>
47
-
48
- <p>Los requisitos mínimos y recomendados para jugar Pro League Soccer en PC varían dependiendo del emulador que uses. Sin embargo, en términos generales, necesitará al menos 2 GB de RAM, 4 GB de espacio en disco, Windows 7 o superior, procesador Intel o AMD y una conexión a Internet estable. Para un mejor rendimiento, es posible que necesite más RAM, espacio en disco, tarjeta gráfica y velocidad de CPU. </p>
49
- <h4>¿Cuáles son las características del juego Pro League Soccer? </h4>
50
- <p>Pro League Soccer juego tiene muchas características que lo hacen destacar de otros juegos de fútbol. Algunos de ellos son:</p>
51
- <ul>
52
- <li>Selección y actualización de su club de las ligas inferiores a las ligas superiores</li>
53
- <li>Unirse a la copa nacional de clubes y competir en la liga de estrellas</li>
54
- <li>Convertirse en el rey del continente con su equipo nacional y unirse a la liga de las naciones</li>
55
- <li>Luchando por la copa y participando en muchas copas con play-offs</li>
56
- <li>Editar todos los datos de competiciones, equipos y jugadores de acuerdo a su preferencia</li>
57
- <li>Cargando logos únicos para equipos desde internet</li>
58
- <li>Experiencia de juego realista con gráficos de alta calidad y efectos de sonido</li>
59
- <li>Jugar sin conexión o en línea con otros jugadores de todo el mundo</li>
60
- </ul>
61
- <h4>¿Cómo puedo editar los datos de competiciones, equipos y jugadores en Pro League Soccer? </h4>
62
- <p>Puede editar los datos de competiciones, equipos y jugadores en Pro League Soccer yendo al menú Configuración y eligiendo Editor de datos. Allí puede cambiar los nombres, logotipos, kits, estadios, jugadores, atributos y más de cualquier competencia, equipo o jugador. También puede cargar logotipos desde Internet ingresando la URL de la imagen. Sin embargo, tenga cuidado al editar los datos, ya que puede afectar la jugabilidad y el equilibrio del juego. </p>
63
- <h4>¿Cómo puedo jugar Pro League Soccer en línea con otros jugadores? </h4>
64
-
65
- <h4>¿Cómo puedo contactar con el desarrollador de Pro League Soccer para obtener comentarios o apoyo? </h4>
66
- <p>Puede ponerse en contacto con el desarrollador de Pro League Soccer para obtener comentarios o soporte, vaya al menú Configuración y seleccione Contáctenos. Allí puede enviar un correo electrónico al desarrollador con sus preguntas, sugerencias, problemas o elogios. También puedes seguir al desarrollador en plataformas de redes sociales como Facebook, Twitter, Instagram y YouTube para obtener las últimas noticias y actualizaciones sobre el juego. </p> 64aa2da5cf<br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/core.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/CVPR/LIVE/thrust/examples/cpp_integration/host.cpp DELETED
@@ -1,27 +0,0 @@
- #include <thrust/host_vector.h>
- #include <thrust/random.h>
- #include <thrust/generate.h>
- #include <thrust/sort.h>
- #include <cstdlib>
- #include <iostream>
- #include <iterator>
-
- // defines the function prototype
- #include "device.h"
-
- int main(void)
- {
-     // generate 20 random numbers on the host
-     thrust::host_vector<int> h_vec(20);
-     thrust::default_random_engine rng;
-     thrust::generate(h_vec.begin(), h_vec.end(), rng);
-
-     // interface to CUDA code
-     sort_on_device(h_vec);
-
-     // print sorted array
-     thrust::copy(h_vec.begin(), h_vec.end(), std::ostream_iterator<int>(std::cout, "\n"));
-
-     return 0;
- }
-
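Note: host.cpp only declares its dependency on sort_on_device() through device.h; the definition lives in a separate CUDA translation unit. As a minimal sketch (an assumption about that companion file, not a copy of the deleted one), the matching device.cu would look roughly like this:

// device.cu -- hypothetical companion to host.cpp (sketch, not the deleted file)
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include "device.h"   // assumed to declare: void sort_on_device(thrust::host_vector<int>& V);

void sort_on_device(thrust::host_vector<int>& V)
{
    // copy the data to the device
    thrust::device_vector<int> D = V;

    // sort it on the device
    thrust::sort(D.begin(), D.end());

    // copy the sorted data back into the host vector
    thrust::copy(D.begin(), D.end(), V.begin());
}

Compiled with nvcc and linked against host.cpp (built with the host compiler), this is the usual pattern for mixing Thrust device code with plain C++ translation units.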
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reduce.h DELETED
@@ -1,44 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // the purpose of this header is to #include the reduce.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch reduce
-
- #include <thrust/system/detail/sequential/reduce.h>
-
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/reduce.h>
- #include <thrust/system/cuda/detail/reduce.h>
- #include <thrust/system/omp/detail/reduce.h>
- #include <thrust/system/tbb/detail/reduce.h>
- #endif
-
- #define __THRUST_HOST_SYSTEM_REDUCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/reduce.h>
- #include __THRUST_HOST_SYSTEM_REDUCE_HEADER
- #undef __THRUST_HOST_SYSTEM_REDUCE_HEADER
-
- #define __THRUST_DEVICE_SYSTEM_REDUCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/reduce.h>
- #include __THRUST_DEVICE_SYSTEM_REDUCE_HEADER
- #undef __THRUST_DEVICE_SYSTEM_REDUCE_HEADER
-
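For context, the dispatch this header enables is what resolves an execution policy to a concrete backend. A minimal caller-side sketch (my own illustration, not part of the deleted tree):

// reduce_demo.cpp -- sketch of the user-level calls that the ADL dispatch ultimately serves
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <iostream>

int main()
{
    thrust::device_vector<int> d(4, 1);                       // {1, 1, 1, 1}
    // thrust::device tags the call so it dispatches to the device system's reduce
    int device_sum = thrust::reduce(thrust::device, d.begin(), d.end(), 0);

    int h[4] = {1, 2, 3, 4};
    // thrust::host dispatches to the host system's reduce
    int host_sum = thrust::reduce(thrust::host, h, h + 4, 0);

    std::cout << device_sum << " " << host_sum << std::endl;  // prints: 4 10
    return 0;
}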
 
spaces/CVPR/LIVE/thrust/thrust/type_traits/void_t.h DELETED
@@ -1,64 +0,0 @@
- /*
-  * Copyright 2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /*! \file void_t.h
-  *  \brief C++17's `void_t`.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- #if THRUST_CPP_DIALECT >= 2017
- #  include <type_traits>
- #endif
-
- namespace thrust
- {
-
- #if THRUST_CPP_DIALECT >= 2011
-
- template <typename...> struct voider { using type = void; };
-
- #if THRUST_CPP_DIALECT >= 2017
- using std::void_t;
- #else
- template <typename... Ts> using void_t = typename voider<Ts...>::type;
- #endif
-
- #else // Older than C++11.
-
- template <
-     typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
-   , typename = void
- >
- struct voider
- {
-     typedef void type;
- };
-
- #endif
-
- } // end namespace thrust
-
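void_t exists to support the detection idiom: a partial specialization is viable only when every type inside void_t<...> is well-formed. A short sketch using the standard std::void_t (the thrust::void_t alias above behaves the same way on pre-C++17 dialects); this is illustrative, not taken from the deleted tree:

// void_t_demo.cpp -- detection idiom sketch
#include <type_traits>
#include <vector>

// primary template: assume T has no nested value_type
template <typename T, typename = void>
struct has_value_type : std::false_type {};

// this specialization is chosen only when T::value_type is well-formed
template <typename T>
struct has_value_type<T, std::void_t<typename T::value_type>> : std::true_type {};

static_assert(has_value_type<std::vector<int>>::value, "vector<int> exposes value_type");
static_assert(!has_value_type<int>::value, "int has no nested value_type");

int main() { return 0; }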
 
spaces/CVPR/WALT/mmdet/core/bbox/assigners/approx_max_iou_assigner.py DELETED
@@ -1,145 +0,0 @@
- import torch
-
- from ..builder import BBOX_ASSIGNERS
- from ..iou_calculators import build_iou_calculator
- from .max_iou_assigner import MaxIoUAssigner
-
-
- @BBOX_ASSIGNERS.register_module()
- class ApproxMaxIoUAssigner(MaxIoUAssigner):
-     """Assign a corresponding gt bbox or background to each bbox.
-
-     Each proposal will be assigned an integer indicating the ground truth
-     index. (semi-positive index: gt label (0-based), -1: background)
-
-     - -1: negative sample, no assigned gt
-     - semi-positive integer: positive sample, index (0-based) of assigned gt
-
-     Args:
-         pos_iou_thr (float): IoU threshold for positive bboxes.
-         neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
-         min_pos_iou (float): Minimum IoU for a bbox to be considered as a
-             positive bbox. Positive samples can have smaller IoU than
-             pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
-         gt_max_assign_all (bool): Whether to assign all bboxes with the same
-             highest overlap with some gt to that gt.
-         ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
-             `gt_bboxes_ignore` is specified). Negative values mean not
-             ignoring any bboxes.
-         ignore_wrt_candidates (bool): Whether to compute the IoF between
-             `bboxes` and `gt_bboxes_ignore`, or the contrary.
-         match_low_quality (bool): Whether to allow low-quality matches. This is
-             usually allowed for RPN and single stage detectors, but not allowed
-             in the second stage.
-         gpu_assign_thr (int): The upper bound of the number of GT for GPU
-             assign. When the number of gt is above this threshold, the
-             assignment is done on the CPU. Negative values mean never
-             assigning on CPU.
-     """
-
-     def __init__(self,
-                  pos_iou_thr,
-                  neg_iou_thr,
-                  min_pos_iou=.0,
-                  gt_max_assign_all=True,
-                  ignore_iof_thr=-1,
-                  ignore_wrt_candidates=True,
-                  match_low_quality=True,
-                  gpu_assign_thr=-1,
-                  iou_calculator=dict(type='BboxOverlaps2D')):
-         self.pos_iou_thr = pos_iou_thr
-         self.neg_iou_thr = neg_iou_thr
-         self.min_pos_iou = min_pos_iou
-         self.gt_max_assign_all = gt_max_assign_all
-         self.ignore_iof_thr = ignore_iof_thr
-         self.ignore_wrt_candidates = ignore_wrt_candidates
-         self.gpu_assign_thr = gpu_assign_thr
-         self.match_low_quality = match_low_quality
-         self.iou_calculator = build_iou_calculator(iou_calculator)
-
-     def assign(self,
-                approxs,
-                squares,
-                approxs_per_octave,
-                gt_bboxes,
-                gt_bboxes_ignore=None,
-                gt_labels=None):
-         """Assign gt to approxs.
-
-         This method assigns a gt bbox to each group of approxs (bboxes); each
-         group of approxs is represented by a base approx (bbox) and will be
-         assigned -1 or a semi-positive number. background_label (-1) means
-         negative sample; a semi-positive number is the index (0-based) of the
-         assigned gt. The assignment is done in the following steps, and the
-         order matters.
-
-         1. assign every bbox to background_label (-1)
-         2. use the max IoU of each group of approxs to assign
-         3. assign proposals whose IoU with all gts < neg_iou_thr to background
-         4. for each bbox, if the IoU with its nearest gt >= pos_iou_thr,
-            assign it to that bbox
-         5. for each gt bbox, assign its nearest proposals (may be more than
-            one) to itself
-
-         Args:
-             approxs (Tensor): Bounding boxes to be assigned,
-                 shape(approxs_per_octave*n, 4).
-             squares (Tensor): Base bounding boxes to be assigned,
-                 shape(n, 4).
-             approxs_per_octave (int): number of approxs per octave
-             gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
-             gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
-                 labelled as `ignored`, e.g., crowd boxes in COCO.
-             gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
-
-         Returns:
-             :obj:`AssignResult`: The assign result.
-         """
-         num_squares = squares.size(0)
-         num_gts = gt_bboxes.size(0)
-
-         if num_squares == 0 or num_gts == 0:
-             # No predictions and/or truth, return empty assignment
-             overlaps = approxs.new(num_gts, num_squares)
-             assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
-             return assign_result
-
-         # re-organize anchors by approxs_per_octave x num_squares
-         approxs = torch.transpose(
-             approxs.view(num_squares, approxs_per_octave, 4), 0,
-             1).contiguous().view(-1, 4)
-         assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
-             num_gts > self.gpu_assign_thr) else False
-         # compute overlap and assign gt on CPU when number of GT is large
-         if assign_on_cpu:
-             device = approxs.device
-             approxs = approxs.cpu()
-             gt_bboxes = gt_bboxes.cpu()
-             if gt_bboxes_ignore is not None:
-                 gt_bboxes_ignore = gt_bboxes_ignore.cpu()
-             if gt_labels is not None:
-                 gt_labels = gt_labels.cpu()
-         all_overlaps = self.iou_calculator(approxs, gt_bboxes)
-
-         overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
-                                         num_gts).max(dim=0)
-         overlaps = torch.transpose(overlaps, 0, 1)
-
-         if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
-                 and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
-             if self.ignore_wrt_candidates:
-                 ignore_overlaps = self.iou_calculator(
-                     squares, gt_bboxes_ignore, mode='iof')
-                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
-             else:
-                 ignore_overlaps = self.iou_calculator(
-                     gt_bboxes_ignore, squares, mode='iof')
-                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
-             overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
-
-         assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
-         if assign_on_cpu:
-             assign_result.gt_inds = assign_result.gt_inds.to(device)
-             assign_result.max_overlaps = assign_result.max_overlaps.to(device)
-             if assign_result.labels is not None:
-                 assign_result.labels = assign_result.labels.to(device)
-         return assign_result
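To make the calling convention concrete, here is a small usage sketch with made-up boxes and thresholds (the import path assumes the usual mmdet 2.x layout; none of this is taken from the deleted repository):

# usage sketch for ApproxMaxIoUAssigner -- shapes and thresholds are illustrative only
import torch
from mmdet.core.bbox.assigners import ApproxMaxIoUAssigner

assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.7, neg_iou_thr=0.3)

approxs_per_octave = 3
num_squares = 4

# (x1, y1, x2, y2) boxes; approxs are grouped as approxs_per_octave * num_squares rows
approxs = torch.rand(approxs_per_octave * num_squares, 4) * 50
approxs[:, 2:] += approxs[:, :2]            # ensure x2 >= x1 and y2 >= y1
squares = torch.rand(num_squares, 4) * 50
squares[:, 2:] += squares[:, :2]

gt_bboxes = torch.tensor([[10., 10., 60., 60.], [30., 40., 80., 90.]])
gt_labels = torch.tensor([1, 2])

result = assigner.assign(approxs, squares, approxs_per_octave,
                         gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)                        # one assignment per square box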
 
spaces/CVPR/WALT/mmdet/models/detectors/paa.py DELETED
@@ -1,17 +0,0 @@
- from ..builder import DETECTORS
- from .single_stage import SingleStageDetector
-
-
- @DETECTORS.register_module()
- class PAA(SingleStageDetector):
-     """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
-
-     def __init__(self,
-                  backbone,
-                  neck,
-                  bbox_head,
-                  train_cfg=None,
-                  test_cfg=None,
-                  pretrained=None):
-         super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
-                                   test_cfg, pretrained)
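Since PAA only registers itself and forwards everything to SingleStageDetector, it is normally built from a config dict through the DETECTORS registry. A rough, hypothetical config sketch follows; the backbone/neck/head values are placeholders, not the deleted repository's actual settings:

# hypothetical mmdet-style config fragment for a PAA detector (illustrative values only)
model = dict(
    type='PAA',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        num_outs=5),
    bbox_head=dict(
        type='PAAHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256))

# building it would then look roughly like:
# from mmdet.models import build_detector
# detector = build_detector(model, train_cfg=None, test_cfg=None)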