parquet-converter committed on
Commit 838ac2e · 1 Parent(s): 802be5e

Update parquet files (step 123 of 249)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/ExpertGPS Registration Key The Essential Step to Use the Most Powerful GPS Software.md +0 -127
  2. spaces/1gistliPinn/ChatGPT4/Examples/Aventurile Lui Habarnam Pdf !!TOP!! Download.md +0 -95
  3. spaces/1gistliPinn/ChatGPT4/Examples/Bellaciaooriginaledownload !LINK!mp3.md +0 -10
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer New Version 4.8 5.1 A Review of the New Features and Improvements.md +0 -147
  5. spaces/1phancelerku/anime-remove-background/Download Blockman Go Hack APK and Get Free Gcubes in Minutes.md +0 -90
  6. spaces/1phancelerku/anime-remove-background/Download Car Master 3D MOD APK and Become a Pro Mechanic.md +0 -92
  7. spaces/1phancelerku/anime-remove-background/FR Legends MOD APK 0.3.2 Drift Like a Pro with Unlimited Cash and Customizations.md +0 -172
  8. spaces/1toTree/lora_test/ppdiffusers/initializer.py +0 -303
  9. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_mega.py +0 -183
  10. spaces/2023Liu2023/bingo/src/components/chat-scroll-anchor.tsx +0 -29
  11. spaces/404ERRORms/bingAI/README.md +0 -12
  12. spaces/AIML-TUDA/semantic-diffusion/app.py +0 -517
  13. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/options.py +0 -39
  14. spaces/Ababababababbababa/poetry/app.py +0 -53
  15. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/__init__.py +0 -5
  16. spaces/AchyuthGamer/ImMagician-Image-Generator/app.py +0 -264
  17. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/4.js +0 -0
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.d.ts +0 -2
  19. spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/phonecode.py +0 -63
  20. spaces/Alpaca233/SadTalker/src/utils/videoio.py +0 -41
  21. spaces/Alycer/VITS-Umamusume-voice-synthesizer/models.py +0 -542
  22. spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/japanese.py +0 -153
  23. spaces/Amrrs/DragGan-Inversion/torch_utils/custom_ops.py +0 -171
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/img2img_inpainting.py +0 -463
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +0 -522
  26. spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py +0 -41
  27. spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet_handler.py +0 -69
  28. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py +0 -2
  29. spaces/Angelaangie/personal-chat-gpt/app.py +0 -101
  30. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/api/script.py +0 -13
  31. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/test_tts.py +0 -81
  32. spaces/Anthony7906/MengHuiMXD_GPT/modules/llama_func.py +0 -166
  33. spaces/Aveygo/AstroSleuth/modules/realesr.py +0 -81
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/install.md +0 -1
  35. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py +0 -122
  36. spaces/Benson/text-generation/Examples/Antiguo Baku Oyunu Ykl.md +0 -71
  37. spaces/Benson/text-generation/Examples/Descargar Batera Low Jemax Mp3.md +0 -70
  38. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/models/cond_transformer.py +0 -343
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/base_command.py +0 -225
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py +0 -188
  41. spaces/BraydenMoore/MARCI-NFL-Betting/main.py +0 -102
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/build.py +0 -33
  43. spaces/CVPR/LIVE/thrust/internal/benchmark/timer.h +0 -129
  44. spaces/CVPR/LIVE/thrust/thrust/detail/allocator/destroy_range.h +0 -34
  45. spaces/CVPR/LIVE/thrust/thrust/detail/functional/composite.h +0 -163
  46. spaces/CVPR/LIVE/thrust/thrust/extrema.h +0 -804
  47. spaces/CVPR/regionclip-demo/detectron2/structures/instances.py +0 -191
  48. spaces/ChandraMohanNayal/AutoGPT/tests.py +0 -21
  49. spaces/ChenyangSi/FreeU/stable-diffusion-2-1/README.md +0 -185
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/__init__.py +0 -107
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ExpertGPS Registration Key The Essential Step to Use the Most Powerful GPS Software.md DELETED
@@ -1,127 +0,0 @@
1
- <br />
2
- <br> - Benefits of registering ExpertGPS: Free updates, priority support, and more features. <br> - Steps to register ExpertGPS: Copy and paste the registration code from your email and enter your name and option code. <br> - Conclusion: Summarize the main points and encourage the reader to register ExpertGPS. | | H2: What is ExpertGPS and why do you need to register it? | - Explain what ExpertGPS is: A mapping software that works with hundreds of GPS receivers. <br> - Explain what you can do with ExpertGPS: Convert, edit, and transfer GPS data, create maps, geocode addresses, survey property lines, etc. <br> - Explain why you need to register ExpertGPS: To unlock all the features and get rid of the trial limitations. | | H2: Benefits of registering ExpertGPS | - List the benefits of registering ExpertGPS: Free updates, priority support, and more features. <br> - Give examples of each benefit: New versions with new features, direct email support from the author, access to advanced tools like batch geocoding, property line mapping, etc. | | H2: Steps to register ExpertGPS | - List the steps to register ExpertGPS: Copy and paste the registration code from your email and enter your name and option code. <br> - Explain each step in detail with screenshots: Show how to copy and paste the registration code, how to find the Enter Registration Code dialog, how to enter the name and option code, how to confirm the registration, how to restart ExpertGPS, how to check the About box. | | H2: Conclusion | - Summarize the main points of the article: What is ExpertGPS, why do you need to register it, what are the benefits of registering it, and how to register it. <br> - Encourage the reader to register ExpertGPS: Tell them how easy it is to register ExpertGPS and how much they can do with it. <br> - Provide a call to action: Tell them to download ExpertGPS if they haven't already and to enter their registration code as soon as possible. | # Article with HTML formatting <h1>How to Register Your Copy of ExpertGPS</h1>
3
- <p>If you are looking for a powerful and easy-to-use mapping software that works with hundreds of GPS receivers, you might have heard of ExpertGPS. ExpertGPS is a software that allows you to convert, edit, and transfer GPS data between your computer and your GPS device. You can also create maps, geocode addresses, survey property lines, measure distances and areas, and much more with ExpertGPS.</p>
4
- <p>But did you know that you need to register your copy of ExpertGPS to unlock all its features and get rid of the trial limitations? In this article, we will show you why you need to register your copy of ExpertGPS, what are the benefits of registering it, and how to register it in a few simple steps.</p>
5
- <h2>expertgps registration key</h2><br /><p><b><b>Download</b> &#10026; <a href="https://byltly.com/2uKxeH">https://byltly.com/2uKxeH</a></b></p><br /><br />
6
- <h2>What is ExpertGPS and why do you need to register it?</h2>
7
- <p>ExpertGPS is a software that lets you work with GPS data on your computer. You can use it with hundreds of GPS receivers from Garmin, Magellan, Lowrance, Simrad, Bryton, and other brands. You can download waypoints, routes, tracks, and geocaches from your GPS device or create them on your computer. You can also edit them on a map or in a spreadsheet-like data list.</p>
8
- <p>But that's not all. You can also use ExpertGPS to create maps from various sources like Google Earth KML & KMZ files, shapefiles and file geodatabases, CAD and DXF files, GPX files, Excel and CSV files, etc. You can also geocode addresses in bulk or survey property lines using US state plane coordinates or national grid coordinates.</p>
9
- <p>With ExpertGPS, you can do a lot of things with GPS data that would otherwise require multiple software or online services. But in order to enjoy all these features, you need to register your copy of ExpertGPS with a valid registration key that you can purchase online or receive by email after ordering.</p>
10
- <p>If you don't register your copy of ExpertGPS, you will be limited by some trial restrictions such as:</p>
11
- <ul>
12
- <li>You can only use it for 30 days.</li>
13
- <li>You can only transfer 500 waypoints per day.</li>
14
- <li>You can only geocode 100 addresses per day.</li>
15
- <li>You can only map 10 property lines per day.</li>
16
- <li>You can't access some advanced tools like batch geocoding or property line mapping.</li>
17
- </ul>
18
- <p>As you can see, registering your copy of ExpertGPS is essential if you want to use it without any limitations and get the most out of it.</p>
19
- <p>expertgps pro registration key<br />
20
- expertgps home registration key<br />
21
- expertgps crack serial key<br />
22
- expertgps license key generator<br />
23
- expertgps activation key free<br />
24
- expertgps product key finder<br />
25
- expertgps registration code download<br />
26
- expertgps serial number lookup<br />
27
- expertgps keygen software<br />
28
- expertgps full version key<br />
29
- expertgps registration key online<br />
30
- expertgps registration key purchase<br />
31
- expertgps registration key email<br />
32
- expertgps registration key recovery<br />
33
- expertgps registration key expired<br />
34
- expertgps registration key invalid<br />
35
- expertgps registration key lost<br />
36
- expertgps registration key not working<br />
37
- expertgps registration key update<br />
38
- expertgps registration key renewal<br />
39
- expertgps registration key transfer<br />
40
- expertgps registration key refund<br />
41
- expertgps registration key coupon<br />
42
- expertgps registration key discount<br />
43
- expertgps registration key price<br />
44
- expertgps registration key cheap<br />
45
- expertgps registration key free trial<br />
46
- expertgps registration key lifetime<br />
47
- expertgps registration key 2021<br />
48
- expertgps registration key 2022<br />
49
- expertgps registration key 2023<br />
50
- how to get expertgps registration key<br />
51
- how to use expertgps registration key<br />
52
- how to enter expertgps registration key<br />
53
- how to activate expertgps registration key<br />
54
- how to find expertgps registration key<br />
55
- how to buy expertgps registration key<br />
56
- how to renew expertgps registration key<br />
57
- how to recover expertgps registration key<br />
58
- how to update expertgps registration key<br />
59
- where to buy expertgps registration key<br />
60
- where to download expertgps registration key<br />
61
- where to find expertgps registration key<br />
62
- where to enter expertgps registration key<br />
63
- where to activate expertgps registration key<br />
64
- what is expertgps registration key<br />
65
- what is the best price for expertgps registration key<br />
66
- what is the latest version of expertgps registration key<br />
67
- what is the difference between pro and home versions of expertgps registration key</p>
68
- <h2>Benefits of registering ExpertGPS</h2>
69
- <p>By registering your copy of ExpertGPS with a valid registration key that matches your name and option code (Home or Pro), you will get access to several benefits such as:</p>
70
- <ul>
71
- <li><strong>Free updates:</strong> You will be able to download the latest versions of ExpertGPS for free for 12 months after your purchase date. You will get new features and improvements that are added regularly by Dan Foster, the author of ExpertGPS.</li>
72
- <li><strong>Priority support:</strong> You will be able to contact Dan Foster directly by email at [email protected] if you have any questions or issues with using ExpertGPS. You will get fast and friendly support from the person who knows everything about ExpertGPS.</li>
73
- <li><strong>More features:</strong> You will be able to use all the features of ExpertGPS without any restrictions or limitations. You will be able to transfer unlimited waypoints per day, geocode unlimited addresses per day, map unlimited property lines per day, and use all the advanced tools like batch geocoding or property line mapping.</li>
74
- </ul>
75
- <p>As you can see, registering your copy of ExpertGPS is not only necessary but also beneficial for you as a user. You will get more value for your money and more satisfaction from using this amazing software.</p>
76
- <h2>Steps to register ExpertGPS</h2>
77
- <p>Now that you know why you need to register your copy of ExpertGPS and what are the benefits of doing so, let's see how you can do it in a few simple steps.</p>
78
- <p>The registration key that you received by email after ordering or purchasing online will unlock the trial version of ExpertGPS that you have already downloaded on your computer. If you haven't downloaded it yet, you can do so by visiting <a href="https://www.expertgps.com/download.asp">this link</a>.</p>
79
- <p>To register your copy of ExpertGPS, follow these steps:</p>
80
- <ol>
81
- <li><strong>Run ExpertGPS:</strong> Double-click on the icon on your desktop or in your Start menu to launch ExpertGPS. You should see a map screen on the right and a data list on the left when the program is running.</li>
82
- <li><strong>Copy the registration key code from your email program:</strong>
83
- Open your email program and find the email that contains your registration key code. It should look something like this: <pre><code>
84
- Thank you for purchasing an upgrade license for Expert GPS Pro. Your name: John Smith Your option code: Pro Your registration key: 1234-5678-90AB-CDEF-GHIJ-KLMN-OPQR-STUV-WXYZ </code></pre>
85
- Select the entire key string (including dashes) and copy it by pressing Ctrl+C on your keyboard or right-clicking on it and choosing Copy from the menu.</li>
86
- <li><strong>On the Help menu in ExpertGPS, click Enter Registration Code:</strong>
87
- In the main window of ExpertGPS, click on Help in the menu bar and then click on Enter Registration Code. The Enter Registration Code dialog will appear.</li>
88
- <li><strong>Enter your name and option code exactly as it appears in the registration email:</strong>
89
- In the Enter Registration Code dialog, enter your name and option code (Home or Pro) exactly as they appear in the email that contains your registration key code. Make sure there are no extra spaces or typos in your name or option code.</li>
90
- <li><strong>Paste the key string into the registration dialog:</strong>
91
- Click on the Key field in the Enter Registration Code dialog and paste the key string that you copied from your email by pressing Ctrl+V on your keyboard or right-clicking on it and choosing Paste from the menu. The key string should fill up all five boxes in the Key field.</li>
92
- <li><strong>Click OK:</strong>
93
- Click on OK to confirm your registration. A dialog box will appear, thanking you for registering features.</li>
94
- <li><strong>Exit ExpertGPS:</strong>
95
- Click on File in the menu bar and then click on Exit to close ExpertGPS. You must restart ExpertGPS so that your registered features will be activated.</li>
96
- <li><strong>Start ExpertGPS again:</strong>
97
- Double-click on the icon on your desktop or in your Start menu to launch ExpertGPS again.</li>
98
- <li><strong>Click About ExpertGPS in the Help menu:</strong>
99
- In the main window of ExpertGPS, click on Help in the menu bar and then click on About ExpertGPS. You will see your registration information displayed in the About box.</li>
100
- </ol>
101
- <p>Congratulations! You have successfully registered your copy of ExpertGPS and unlocked all its features and benefits.</p>
102
- <h2>Conclusion</h2>
103
- <p>In this article, we have shown you how to register your copy of ExpertGPS with a valid registration key that matches your name and option code. We have also explained why you need to register your copy of ExpertGPS and what are the benefits of doing so.</p>
104
- <p>By registering your copy of ExpertGPS, you will be able to use this powerful and easy-to-use mapping software without any limitations or restrictions. You will also get free updates, priority support, and more features that will help you work with GPS data on your computer.</p>
105
- <p>If you haven't downloaded ExpertGPS yet, you can do so by visiting <a href="https://www.expertgps.com/download.asp">this link</a>. If you have already downloaded it, you can enter your registration code as soon as possible by following the steps we have outlined above.</p>
106
- <p>Don't wait any longer. Register your copy of ExpertGPS today and enjoy all the amazing things you can do with it.</p>
107
- <h3>FAQs</h3>
108
- <ul>
109
- <li><strong>Q: How much does it cost to register ExpertGPS?</strong>
110
- <br>
111
- A: The cost of registering ExpertGPS depends on the option code you choose: Home or Pro. The Home option costs $74.95 and the Pro option costs $249.95. You can compare the features of each option and order online by visiting <a href="https://www.expertgps.com/order.asp">this link</a>.</li>
112
- <li><strong>Q: How long does it take to receive the registration key after ordering?</strong>
113
- <br>
114
- A: You should receive the registration key by email within a few minutes after ordering. If you don't receive it within an hour, please check your spam folder or contact Dan Foster at [email protected] and include your order number or receipt.</li>
115
- <li><strong>Q: What if I lose my registration key or need to reinstall ExpertGPS?</strong>
116
- <br>
117
- A: You can retrieve your registration key by visiting <a href="https://www.expertgps.com/lost-registration-key.asp">this link</a> and entering your email address. You can also download the latest version of ExpertGPS by visiting <a href="https://www.expertgps.com/download.asp">this link</a>. You can reinstall ExpertGPS and enter your registration key as many times as you need on the same computer or a new one.</li>
118
- <li><strong>Q: What if I have a problem with registering or using ExpertGPS?</strong>
119
- <br>
120
- A: You can get priority support from Dan Foster, the author of ExpertGPS, by emailing him at [email protected] and including your registration key or order number. You can also visit <a href="https://www.expertgps.com/support.asp">this link</a> for more help and resources on using ExpertGPS.</li>
121
- <li><strong>Q: What if I want to upgrade from Home to Pro or extend my free updates period?</strong>
122
- <br>
123
- A: You can upgrade from Home to Pro or extend your free updates period by visiting <a href="https://www.expertgps.com/upgrade.asp">this link</a> and entering your current registration key. You will get a discounted price for upgrading or extending and a new registration key by email.</li>
124
- </ul>
125
- </p> 0a6ba089eb<br />
126
- <br />
127
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Aventurile Lui Habarnam Pdf !!TOP!! Download.md DELETED
@@ -1,95 +0,0 @@
1
-
2
- <h1>Aventurile lui Habarnam PDF Download: How to Enjoy the Classic Children's Book by Nikolai Nosov</h1>
3
-
4
- <p>Aventurile lui Habarnam (The Adventures of Dunno and His Friends) is a series of children's books written by the Russian author Nikolai Nosov. The books tell the stories of Habarnam (Dunno), a curious and mischievous prichindel (little person) who lives in Orașul Florilor (The City of Flowers) with his friends. The books are full of humor, fantasy and adventure, and have been translated into many languages and adapted into films and cartoons.</p>
5
- <h2>aventurile lui habarnam pdf download</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://imgfil.com/2uxZbl">https://imgfil.com/2uxZbl</a></b></p><br /><br />
6
-
7
- <p>If you want to read Aventurile lui Habarnam, you might be wondering how to get a PDF version of the book. A PDF (Portable Document Format) is a file format that can be viewed and printed on any device, such as a computer, tablet or smartphone. A PDF version of Aventurile lui Habarnam can be useful if you want to read the book on your device, or if you want to print it out and make your own book.</p>
8
-
9
- <p>There are many websites that offer Aventurile lui Habarnam PDF download, but not all of them are reliable or legal. Some websites might have viruses, malware or spyware that can harm your device or steal your personal information. Some websites might have low-quality or incomplete PDF files that can ruin your reading experience. Some websites might have illegal or pirated PDF files that can violate the copyright laws and the rights of the author and the publisher.</p>
10
-
11
- <h2>How to Find a Reliable and Legal Website for Aventurile lui Habarnam PDF Download</h2>
12
-
13
- <p>To find a reliable and legal website for Aventurile lui Habarnam PDF download, you need to do some research and check some criteria. Here are some tips to help you find a good website for Aventurile lui Habarnam PDF download:</p>
14
-
15
- <ul>
16
- <li>Look for reputable and trustworthy websites that have good reviews and ratings from other users. You can use search engines, such as Google or Bing, to find websites that offer Aventurile lui Habarnam PDF download. You can also use online forums, blogs or social media platforms, such as Facebook or Twitter, to ask for recommendations from other readers who have downloaded Aventurile lui Habarnam PDF.</li>
17
- <li>Check the quality and completeness of the PDF files before downloading them. You can use online tools, such as PDF Reader or Adobe Acrobat Reader, to preview the PDF files and see if they have clear text, images and layout. You can also check if the PDF files have all the pages, chapters and illustrations of the original book.</li>
18
- <li>Check the legality and legitimacy of the website and the PDF files before downloading them. You can look for signs that indicate that the website and the PDF files are authorized and licensed by the author or the publisher. For example, you can look for logos, seals, certificates or disclaimers that show that the website and the PDF files are legal and official. You can also look for contact information, such as email address, phone number or physical address, that show that the website is transparent and accountable.</li>
19
- </ul>
20
-
21
- <h3>Some Examples of Reliable and Legal Websites for Aventurile lui Habarnam PDF Download</h3>
22
-
23
- <p>Here are some examples of reliable and legal websites that offer Aventurile lui Habarnam PDF download:</p>
24
-
25
- <ul>
26
- <li><a href="https://www.academia.edu/27468238/Nicolai_nosov_aventurile_lui_habarnam">Academia.edu</a>: This is a website that allows academics and researchers to share their papers and publications online. It has a large collection of academic books and articles in various fields and languages. It has a PDF version of Aventurile lui Habarnam by Nikolai Nosov that you can download for free after signing up with your email address or social media account.</li>
27
- <li><a href="https://documente.net/document/nikolai-nosov-aventurile-lui-habarnam.html">Documente.net</a>: This is a website that allows users to upload and share documents online. It has a variety of documents in different formats and languages. It has a PDF version of Aventurile lui Habarnam by Nikolai Nosov that you can download for free without signing up.</li>
28
- <li><a href="https://archive.org/details/29.Cap.29Balul">Archive.org</a>: This is a website that preserves and provides access to historical and cultural materials online. It has a huge archive of books, audio, video, images and web pages in various languages and formats. It has an audio version of Aventurile lui Habarnam by Nikolai Nosov that you can listen to online or download as MP3 files.</li>
29
- </ul>
30
-
31
- <h4>Conclusion</h4>
32
-
33
- <p>Aventurile lui Habarnam PDF download is a great way to enjoy the classic children's book by Nikolai Nosov on your device or as a printed book. However, you need to be careful when choosing a website for Aventurile lui Habarnam PDF download, as not all websites are reliable or legal. You need to do some research and check some criteria before downloading any PDF files from any website. You can also use some examples of reputable and trustworthy websites that offer Aventurile lui Habarnam PDF download legally and safely.</p>
34
- <p></p>
35
-
36
- <p>If you want to learn more about Aventurile lui Habarnam by Nikolai Nosov, you can visit his official website or read some articles on websites such as Wikipedia or Libertyisviral. You can also watch some videos on YouTube or join some online communities on Facebook or Goodreads. Aventurile lui Habarnam by Nikolai Nosov is a wonderful book that can make you laugh, wonder and dream.</p>
37
- <h2>What is Aventurile lui Habarnam PDF</h2>
38
-
39
- <p>Aventurile lui Habarnam PDF is a digital format of the book Aventurile lui Habarnam by Nikolai Nosov. Aventurile lui Habarnam is a classic children's book that was first published in 1954 in the Soviet Union. The book tells the stories of Habarnam, a little prankster who lives in the Flower City with other tiny people called prichindei. Habarnam and his friends have many adventures and learn many things in their colorful and magical world.</p>
40
-
41
- <p>Aventurile lui Habarnam PDF is a convenient way to read the book on your device, such as a computer, a tablet or a smartphone. You can also print out Aventurile lui Habarnam PDF and make your own book. Aventurile lui Habarnam PDF has many advantages over other formats, such as:</p>
42
-
43
- <ul>
44
- <li>It is easy to access and download. You can find Aventurile lui Habarnam PDF on various websites that offer it legally and safely. You can also share Aventurile lui Habarnam PDF with others who might enjoy it too.</li>
45
- <li>It is compatible and adaptable. You can read Aventurile lui Habarnam PDF on any device that supports PDF files. You can also adjust the font size, the brightness, the orientation or the zoom level according to your preference.</li>
46
- <li>It is durable and portable. You can store Aventurile lui Habarnam PDF on your device or on a cloud service without worrying about losing it or damaging it. You can also carry Aventurile lui Habarnam PDF with you wherever you go without adding any weight or bulk.</li>
47
- </ul>
48
-
49
- <h2>Who is Nikolai Nosov, the Author of Aventurile lui Habarnam</h2>
50
-
51
- <p>Nikolai Nosov was a Russian writer, screenwriter and director who was born in 1908 and died in 1976. He is best known for his children's books, especially the series about Habarnam and his friends. He also wrote books about other characters, such as Mishka Yaponchik, Neznayka, Vitya Maleev and Kolya Sinitsyn.</p>
52
-
53
- <p>Nikolai Nosov was inspired by his own childhood experiences and observations to create his stories. He had a vivid imagination and a sense of humor that appealed to children and adults alike. He also had a deep understanding of children's psychology and emotions. He wanted to entertain his readers, but also to educate them and to inspire them to be curious, creative and kind.</p>
54
-
55
- <p>Nikolai Nosov was awarded many prizes and honors for his work, such as the Order of Lenin, the Order of the Red Banner of Labour, the Stalin Prize and the Hans Christian Andersen Award. His books have been translated into many languages and adapted into films, cartoons, plays and musicals. His books are still popular and loved by millions of readers around the world.</p>
56
-
57
- <h4>Conclusion</h4>
58
-
59
- <p>Aventurile lui Habarnam by Nikolai Nosov is a classic children's book that can be enjoyed by readers of all ages. It is available in various formats and languages, including PDF. Aventurile lui Habarnam PDF download is a convenient way to access the book on your device or as a printed book. However, you need to be careful when choosing a website for Aventurile lui Habarnam PDF download, as not all websites are reliable or legal. You need to do some research and check some criteria before downloading any PDF files from any website. You can also use some examples of reputable and trustworthy websites that offer Aventurile lui Habarnam PDF download legally and safely.</p>
60
-
61
- <p>If you want to learn more about Aventurile lui Habarnam by Nikolai Nosov, you can visit his official website or read some articles on websites such as Wikipedia or Libertyisviral. You can also watch some videos on YouTube or join some online communities on Facebook or Goodreads. Aventurile lui Habarnam by Nikolai Nosov is a wonderful book that can make you laugh, wonder and dream.</p>
62
- <h2>How to Choose a Reliable Website for Aventurile lui Habarnam PDF Download</h2>
63
-
64
- <p>There are many websites that offer Aventurile lui Habarnam PDF download, but not all of them are reliable or legal. Some websites may contain viruses, malware, spyware or other harmful programs that can damage your device or compromise your privacy. Some websites may also violate the copyright of the author or the publisher and distribute Aventurile lui Habarnam PDF without their permission or consent.</p>
65
-
66
- <p>Therefore, you need to be careful when choosing a website for Aventurile lui Habarnam PDF download. You need to do some research and check some criteria before downloading any PDF files from any website. Here are some tips to help you choose a reliable website for Aventurile lui Habarnam PDF download:</p>
67
-
68
- <ul>
69
- <li>Check the reputation and the reviews of the website. You can use online tools such as Google Safe Browsing, Web of Trust or Norton Safe Web to check if the website is safe and trustworthy. You can also read the comments and the ratings of other users who have used the website before.</li>
70
- <li>Check the quality and the authenticity of the PDF file. You can use online tools such as PDF Examiner, VirusTotal or Jotti to scan the PDF file for any malicious code or hidden content. You can also compare the PDF file with the original book or other sources to see if it is complete and accurate.</li>
71
- <li>Check the legality and the ethics of the website. You can use online tools such as Copyscape, Plagiarism Checker or DMCA to check if the website has the right to distribute Aventurile lui Habarnam PDF or if it infringes any intellectual property rights. You can also check if the website respects the privacy and the security of its users and does not collect or share any personal or sensitive information.</li>
72
- </ul>
73
-
74
- <h2>Some Examples of Reputable and Trustworthy Websites for Aventurile lui Habarnam PDF Download</h2>
75
-
76
- <p>If you are looking for some examples of reputable and trustworthy websites that offer Aventurile lui Habarnam PDF download legally and safely, you can try some of these websites:</p>
77
-
78
- <ul>
79
- <li><strong>Academia.edu</strong>: This is a platform for academics to share their research papers, books and articles. You can find Aventurile lui Habarnam PDF by Sorana Ojog on this website. You need to create a free account to access and download the PDF file.</li>
80
- <li><strong>Scribd</strong>: This is a digital library that hosts millions of books, documents, audiobooks and podcasts. You can find Aventurile lui Habarnam Optimizat PDF on this website. You need to sign up for a free trial or a subscription to access and download the PDF file.</li>
81
- <li><strong>LibGen</strong>: This is a search engine that indexes millions of books and articles from various sources. You can find Aventurile lui Habarnam by Nikolai Nosov on this website. You can access and download the PDF file without any registration or payment.</li>
82
- </ul>
83
-
84
- <h4>Conclusion</h4>
85
-
86
- <p>Aventurile lui Habarnam by Nikolai Nosov is a classic children's book that can be enjoyed by readers of all ages. It is available in various formats and languages, including PDF. Aventurile lui Habarnam PDF download is a convenient way to access the book on your device or as a printed book. However, you need to be careful when choosing a website for Aventurile lui Habarnam PDF download, as not all websites are reliable or legal. You need to do some research and check some criteria before downloading any PDF files from any website. You can also use some examples of reputable and trustworthy websites that offer Aventurile lui Habarnam PDF download legally and safely.</p>
87
-
88
- <p>If you want to learn more about Aventurile lui Habarnam by Nikolai Nosov, you can visit his official website or read some articles on websites such as Wikipedia or Libertyisviral. You can also watch some videos on YouTube or join some online communities on Facebook or Goodreads. Aventurile lui Habarnam by Nikolai Nosov is a wonderful book that can make you laugh, wonder and dream.</p>
89
- <h4>Conclusion</h4>
90
-
91
- <p>Aventurile lui Habarnam by Nikolai Nosov is a classic children's book that can be enjoyed by readers of all ages. It is available in various formats and languages, including PDF. Aventurile lui Habarnam PDF download is a convenient way to access the book on your device or as a printed book. However, you need to be careful when choosing a website for Aventurile lui Habarnam PDF download, as not all websites are reliable or legal. You need to do some research and check some criteria before downloading any PDF files from any website. You can also use some examples of reputable and trustworthy websites that offer Aventurile lui Habarnam PDF download legally and safely.</p>
92
-
93
- <p>If you want to learn more about Aventurile lui Habarnam by Nikolai Nosov, you can visit his official website or read some articles on websites such as Wikipedia or Libertyisviral. You can also watch some videos on YouTube or join some online communities on Facebook or Goodreads. Aventurile lui Habarnam by Nikolai Nosov is a wonderful book that can make you laugh, wonder and dream.</p> 3cee63e6c2<br />
94
- <br />
95
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Bellaciaooriginaledownload !LINK!mp3.md DELETED
@@ -1,10 +0,0 @@
1
-
2
- <h1>Bella Ciao: The History and Meaning of a Revolutionary Song</h1>
3
- <p>Bella Ciao is a popular Italian folk song that has been adopted by various movements of resistance and liberation around the world. The song originated in the late 19th century as a protest song of the Italian rice weeders, who worked under harsh conditions in the paddy fields of northern Italy. The lyrics express the workers' longing for freedom and dignity, as well as their defiance against oppression and exploitation.</p>
4
- <p>The song gained a new significance during World War II, when it became the anthem of the Italian partisans who fought against the fascist regime and the Nazi occupation. The partisans modified the lyrics to reflect their struggle for democracy and social justice, as well as their solidarity with other anti-fascist forces. The song also expressed their hope for a better future after the war, when they would reunite with their loved ones and celebrate their victory.</p>
5
- <h2>bellaciaooriginaledownloadmp3</h2><br /><p><b><b>DOWNLOAD</b> &#10022; <a href="https://imgfil.com/2uy0tQ">https://imgfil.com/2uy0tQ</a></b></p><br /><br />
6
- <p>Bella Ciao has since been translated into many languages and adapted by various groups and causes, such as the Spanish Civil War, the Cuban Revolution, the Kurdish resistance, the Chilean protests, and the anti-globalization movement. The song has also been featured in popular culture, such as in the Netflix series La Casa de Papel (Money Heist), where it is used as a symbol of resistance and rebellion against the system. The song remains a powerful and inspiring expression of human dignity and courage in the face of oppression and injustice.</p><p>The origins of Bella Ciao are not clear, as the song was passed down orally among the workers and the partisans. Some scholars trace its roots to a 17th century ballad called Alla mattina appena alzata (In the morning as soon as I get up), which was sung by the women who worked in the silk mills of northern Italy. Others suggest that the song was influenced by a Jewish folk song called Oyfn Veg Shteyt a Boym (On the Road Stands a Tree), which was brought to Italy by Ashkenazi immigrants. The song may also have elements of other folk songs from different regions and cultures.</p>
7
- <p>The most famous version of Bella Ciao is the one sung by the Italian partisans during World War II. The partisans were members of various political and social groups that opposed the fascist regime and the Nazi occupation, such as communists, socialists, anarchists, liberals, democrats, and Catholics. They organized themselves into clandestine cells and carried out guerrilla warfare, sabotage, propaganda, and civil disobedience. They also collaborated with the Allied forces and helped many Jews and other persecuted people escape from the Nazis. The partisans faced brutal repression and violence from the fascists and the Nazis, who executed thousands of them and their supporters.</p>
8
- <p>Bella Ciao became the symbol of the partisan movement and its ideals of freedom, justice, and democracy. The song was sung in various occasions, such as during marches, rallies, attacks, funerals, and celebrations. The song also served as a way of communicating messages and codes among the partisans, as well as expressing their emotions and feelings. The song was often improvised and adapted to suit different situations and contexts. For example, some versions of the song included references to specific places, events, leaders, or enemies.</p> d5da3c52bf<br />
9
- <br />
10
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer New Version 4.8 5.1 A Review of the New Features and Improvements.md DELETED
@@ -1,147 +0,0 @@
1
- <br />
2
- <h1>Car Parking Multiplayer: A Review of the New Version 4.8 5.1</h1>
3
- <p>If you are looking for a game that can challenge your parking skills and offer you a lot of fun and excitement, then you should check out Car Parking Multiplayer. This game is not just about parking your car, but also about exploring an open world with real gas stations, car services, voice chat, police mode, and more. You can also customize your car, exchange it with other players, race against them, or just walk around and enjoy the scenery.</p>
4
- <p>In this article, we will review the latest version of Car Parking Multiplayer, which is version 4.8 5.1. We will tell you what's new in this version, how to download and install it on your device, and how to play and enjoy it with some tips and tricks.</p>
5
- <h2>car parking multiplayer new version 4.8 5.1 download</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733; <a href="https://urlin.us/2uSYcV">https://urlin.us/2uSYcV</a></b></p><br /><br />
6
- <h2>What is Car Parking Multiplayer?</h2>
7
- <h3>A game that offers more than just parking</h3>
8
- <p>Car Parking Multiplayer is a game that can fool you with its rather deceiving name. But, it's much more than just being about parking your car. It's an open-world experience where you can drive free and yes, still work on that parking if you wish. You can even jump out of your car and walk around.</p>
9
- <p>There are different areas that can be explored in the game. Each one is like its own open-world. You can choose to play either single-player mode or online mode if you want a more chaotic scene (in a fun way).</p>
10
- <p>car parking multiplayer free download latest version 4.8 5.1<br />
11
- how to install car parking multiplayer update 4.8 5.1 on android<br />
12
- car parking multiplayer open world mode new version 4.8 5.1<br />
13
- car parking multiplayer apk mod unlimited money 4.8 5.1<br />
14
- car parking multiplayer online racing game version 4.8 5.1<br />
15
- car parking multiplayer custom cars and tuning new update 4.8 5.1<br />
16
- car parking multiplayer voice chat and friend list features 4.8 5.1<br />
17
- car parking multiplayer realistic driving and parking simulator 4.8 5.1<br />
18
- car parking multiplayer best cars and skins in version 4.8 5.1<br />
19
- car parking multiplayer tips and tricks for beginners 4.8 5.1<br />
20
- car parking multiplayer review and rating for new version 4.8 5.1<br />
21
- car parking multiplayer download for pc windows 10 version 4.8 5.1<br />
22
- car parking multiplayer offline mode and challenges version 4.8 5.1<br />
23
- car parking multiplayer police mode and fun gameplay 4.8 5.1<br />
24
- car parking multiplayer high-quality graphics and environments 4.8 5.1<br />
25
- car parking multiplayer cheats and hacks for version 4.8 5.1<br />
26
- car parking multiplayer comparison with real life parking and driving 4.8 5.1<br />
27
- car parking multiplayer problems and solutions for version 4.8 5.1<br />
28
- car parking multiplayer new maps and locations in update 4.8 5.1<br />
29
- car parking multiplayer how to earn money and buy cars in version 4.8 5.1<br />
30
- car parking multiplayer best settings and controls for version 4.8 5.1<br />
31
- car parking multiplayer how to join and create rooms in version 4.8 5.1<br />
32
- car parking multiplayer how to play with friends and chat in version 4.8 5.1<br />
33
- car parking multiplayer how to change language and region in version 4.8 5.1<br />
34
- car parking multiplayer how to report bugs and glitches in version 4.8 5.1<br />
35
- car parking multiplayer system requirements and compatibility for version 4.8 5.1<br />
36
- car parking multiplayer new features and improvements in version 4.8 5.1<br />
37
- car parking multiplayer how to backup and restore data in version 4.8 5.1<br />
38
- car parking multiplayer how to transfer data from old to new device in version 4.8</p>
39
- <h3>A game that features open-world multiplayer mode, car tuning, free walking, and more</h3>
40
- <p>Car Parking Multiplayer has the following features that make it stand out from other parking games:</p>
41
- <ul>
42
- <li>Open-world multiplayer mode: You can join thousands of real players every day, chat with them using voice chat or messenger, make friends or enemies, compete or cooperate with them in racing or police mode.</li>
43
- <li>Car tuning: You can adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more of your car. You can also swap your car with other players or buy new cars from the shop.</li>
44
- <li>Free walking: You can get out of your car and walk around the open world. You can also enter buildings with interior and interact with objects.</li>
45
- <li>Character customization: You can choose from 16 different player skins and a variety of clothes and accessories to dress up your character. You can also use different animations and emotions to express yourself.</li>
46
- <li>Role play: You can become a taxi driver, a cargo driver, or a delivery driver and complete orders from customers. You can also become a police officer and catch and fine players for speeding or breaking the law.</li>
47
- <li>Drone mode: You can use a drone to explore the world from a different perspective and take stunning screenshots.</li>
48
- <li>Daily tasks and rewards: You can collect coins and presents by completing the tasks and joining the game every day.</li>
49
- </ul>
50
- <h2>What's new in version 4.8 5.1?</h2>
51
- <h3>New cars, features, rims, clothes, liveries, fonts, and sounds</h3>
52
- <p>The latest version of Car Parking Multiplayer has added a lot of new content to the game. Here are some of the highlights:</p>
53
- <ul>
54
- <li>New cars: There are over 130 car models with realistic interior and exterior in the game. Some of the new cars include BMW M3 E30, Mercedes-Benz G63 AMG, Lamborghini Aventador, Ferrari F40, and more.</li>
55
- <li>New features: There are new features such as a car wash, a car lift, a car service, a gas station, a car showroom, and more that you can use to enhance your gameplay experience.</li>
56
- <li>New rims: There are over 100 new rims that you can choose from to customize your car's appearance.</li>
57
- <li>New clothes: There are over 70 new clothes that you can wear to style your character.</li>
58
- <li>New liveries: There are over 50 new liveries that you can apply to your car to make it look more unique and cool.</li>
59
- <li>New fonts: There are over 20 new fonts that you can use to write your name or messages on your car or chat.</li>
60
- <li>New sounds: There are over 10 new sounds that you can hear in the game, such as engine sounds, horn sounds, police siren sounds, and more.</li>
61
- </ul>
62
- <h3>New messenger, drone mode, daily tasks and rewards, character customization, and animations</h3>
63
- <p>Aside from the new content, the latest version of Car Parking Multiplayer has also improved some of the existing features and added some new ones. Here are some of the highlights:</p>
64
- <ul>
65
- <li>New messenger: The game has introduced a new messenger system that allows you to chat with other players in a more convenient and user-friendly way. You can also send stickers, emojis, and voice messages to express yourself better.</li>
66
- <li>New drone mode: The game has added a new drone mode that lets you control a drone and fly around the world. You can use the drone to explore the map, take screenshots, spy on other players, or just have fun.</li>
67
- <li>New daily tasks and rewards: The game has added a new daily task system that gives you different tasks to complete every day. You can earn coins and presents by completing the tasks and joining the game every day.</li>
68
- <li>New character customization: The game has improved the character customization feature by adding more options and details. You can now choose from 16 different player skins and a variety of clothes and accessories to dress up your character. You can also use different animations and emotions to express yourself.</li>
69
- <li>New animations: The game has added new animations for your character and your car. You can now see your character perform different actions such as opening the door, getting in or out of the car, sitting in the car, walking around, etc. You can also see your car perform different actions such as turning on or off the lights, opening or closing the hood or trunk, etc.</li>
70
- </ul>
71
- <h2>How to download and install version 4.8 5.1?</h2>
72
- <h3>For Android devices</h3>
73
- <p>If you have an Android device, you can download and install version 4.8 5.1 of Car Parking Multiplayer by following these steps:</p>
74
- <ol>
75
- <li>Go to the Google Play Store and search for Car Parking Multiplayer or click on this link.</li>
76
- <li>Tap on the Install button and wait for the download to finish.</li>
77
- <li>Once the download is done, tap on the Open button and enjoy the game.</li>
78
- </ol>
79
- <h3>For iOS devices</h3>
80
- <p>If you have an iOS device, you can download and install version 4.8 5.1 of Car Parking Multiplayer by following these steps:</p>
81
- <ol>
82
- <li>Go to the App Store and search for Car Parking Multiplayer or click on this link.</li>
83
- <li>Tap on the Get button and wait for the download to finish.</li>
84
- <li>Once the download is done, tap on the Open button and enjoy the game.</li>
85
- </ol>
86
- <h3>For PC devices</h3>
87
- <p>If you have a PC device, you can download and install version 4.8 5.1 of Car Parking Multiplayer by following these steps:</p>
88
- <ol>
89
- <li>Go to this website and click on the Download button.</li>
90
- <li>Choose the version that suits your PC (Windows or Mac) and wait for the download to finish.</li>
91
- <li>Once the download is done, open the file and follow the instructions to install the game.</li>
92
- <li>Once the installation is done, launch the game and enjoy it.</li>
93
- </ol>
94
- <h2>How to play and enjoy version 4.8 5.1?</h2>
95
- <h3>Tips and tricks for beginners</h3>
96
- <p>If you are new to Car Parking Multiplayer, here are some tips and tricks that can help you play and enjoy version 4.8 5 .1:</p>
97
- <ul>
98
- <li>Start with the single-player mode and practice your parking skills in different scenarios and levels. You can choose from different difficulty levels and car models to suit your preference.</li>
99
- <li>Learn the basic controls and functions of your car, such as steering, braking, accelerating, reversing, changing gears, turning on or off the lights, etc. You can also adjust the camera angle and view to get a better perspective of your surroundings.</li>
100
- <li>Follow the arrows and indicators on the screen to guide you to your parking spot. Try to avoid hitting any obstacles or other cars, as this will reduce your score and damage your car.</li>
101
- <li>Use the map and the GPS to navigate the open world and find different locations and features. You can also use the teleport function to quickly move to a different area.</li>
102
- <li>Explore the different modes and features of the game, such as racing, police, taxi, cargo, delivery, car wash, car service, gas station, car showroom, etc. You can also interact with other players and objects in the world.</li>
103
- </ul>
104
- <h3>Tips and tricks for advanced players</h3>
105
- <p>If you are already familiar with Car Parking Multiplayer, here are some tips and tricks that can help you play and enjoy version 4.8 5.1 even more:</p>
106
- <ul>
107
- <li>Join the online mode and challenge yourself with thousands of real players every day. You can chat with them using voice chat or messenger, make friends or enemies, compete or cooperate with them in racing or police mode.</li>
108
- <li>Customize your car and your character to make them look more unique and cool. You can adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more of your car. You can also swap your car with other players or buy new cars from the shop. You can also choose from 16 different player skins and a variety of clothes and accessories to dress up your character. You can also use different animations and emotions to express yourself.</li>
109
- <li>Use the drone mode to explore the world from a different perspective and take stunning screenshots. You can use the drone to fly around the map, spy on other players, or just have fun.</li>
110
- <li>Complete the daily tasks and collect coins and presents by joining the game every day. You can use the coins to buy new cars, clothes, rims, liveries, fonts, sounds, etc. You can also use the presents to get random rewards such as coins, cars, clothes, etc.</li>
111
- <li>Role play as a taxi driver, a cargo driver, or a delivery driver and complete orders from customers. You can also role play as a police officer and catch and fine players for speeding or breaking the law.</li>
112
- </ul>
113
- <h2>Conclusion</h2>
114
- <p>Car Parking Multiplayer is a game that offers more than just parking your car. It is an open-world multiplayer game that features car tuning, free walking, character customization, role play, drone mode, daily tasks and rewards, and more. It is a game that can challenge your parking skills and offer you a lot of fun and excitement.</p>
115
- <p>The latest version of Car Parking Multiplayer is version 4.8 5.1. It has added a lot of new content and improved some of the existing features of the game. It has added new cars, features, rims, clothes, liveries, fonts and sounds. It has also improved the messenger system, the drone mode, the daily task system, the character customization feature, and the animations.</p>
116
- <p>If you want to download and install version 4.8 5.1 of Car Parking Multiplayer, you can follow the steps that we have provided for Android, iOS, and PC devices. If you want to play and enjoy version 4.8 5.1 of Car Parking Multiplayer, you can follow the tips and tricks that we have provided for beginners and advanced players.</p>
117
- <p>We hope that this article has helped you learn more about Car Parking Multiplayer and its latest version. We also hope that you have fun playing this game and exploring its amazing features.</p>
118
- <h2>FAQs</h2>
119
- <p>Here are some of the frequently asked questions about Car Parking Multiplayer and its latest version:</p>
120
- <h3>Q: Is Car Parking Multiplayer free to play?</h3>
121
- <p>A: Yes, Car Parking Multiplayer is free to play. However, it does have some in-app purchases that can enhance your gameplay experience. You can buy coins, cars, clothes, rims, liveries, fonts, sounds, etc. with real money. You can also watch ads to get some free coins or presents.</p>
122
- <h3>Q: Is Car Parking Multiplayer safe to play?</h3>
123
- <p>A: Yes, Car Parking Multiplayer is safe to play. It does not contain any harmful or malicious content that can harm your device or your personal information. However, you should be careful when interacting with other players online, as they may use inappropriate language or behavior. You can report or block any players that are bothering you or violating the game rules.</p>
124
- <h3>Q: How can I update Car Parking Multiplayer to version 4.8 5.1?</h3>
125
- <p>A: If you already have Car Parking Multiplayer installed on your device, you can update it to version 4.8 5.1 by following these steps:</p>
126
- <ol>
127
- <li>Go to the Google Play Store or the App Store and search for Car Parking Multiplayer or click on this link.</li>
128
- <li>Tap on the Update button and wait for the download to finish.</li>
129
- <li>Once the download is done, tap on the Open button and enjoy the game.</li>
130
- </ol>
131
- <h3>Q: How can I contact the developers of Car Parking Multiplayer?</h3>
132
- <p>A: If you have any questions, feedback, suggestions, or issues about Car Parking Multiplayer, you can contact the developers of the game by using one of these methods:</p>
133
- <ul>
134
- <li>Email: [email protected]</li>
135
- <li>Facebook: https://www.facebook.com/olzhassgames</li>
136
- <li>Instagram: https://www.instagram.com/olzhassgames</li>
137
- <li>YouTube: https://www.youtube.com/channel/UCRQYHtF_7Yl0fOKea24lVGA</li>
138
- </ul>
139
- <h3>Q: How can I support the developers of Car Parking Multiplayer?</h3>
140
- <p>A: If you like Car Parking Multiplayer and want to support the developers of the game, you can do one of these things:</p>
141
- <ul>
142
- <li>Rate and review the game on the Google Play Store or the App Store.</li>
143
- <li>Share the game with your friends and family.</li>
144
- <li>Buy some in-app purchases to support the development of the game.</li>
145
- </ul></p> 197e85843d<br />
146
- <br />
147
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Blockman Go Hack APK and Get Free Gcubes in Minutes.md DELETED
@@ -1,90 +0,0 @@
1
-
2
- <h1>Download Hack Blockman Go Free GCubes APK: Is It Safe and Legal?</h1>
3
- <p>Blockman Go is a popular sandbox game that allows you to play various mini-games with your friends or other players from around the world. You can also customize your avatar, chat with others, and create your own games. But to enjoy all these features, you need GCubes, the in-game currency of Blockman Go.</p>
4
- <h2>download hack blockman go free gcubes.apk</h2><br /><p><b><b>Download Zip</b> &#10042;&#10042;&#10042; <a href="https://jinyurl.com/2uNQzB">https://jinyurl.com/2uNQzB</a></b></p><br /><br />
5
- <p>GCubes are used to buy items, accessories, skins, and VIP memberships in Blockman Go. You can earn GCubes by playing games, completing tasks, or watching ads. However, some players may find these methods too slow or tedious, and they may want to get more GCubes for free. That's why some people search for hack blockman go free gcubes.apk, a modded version of the game that claims to give you unlimited GCubes.</p>
6
- <p>But is it safe and legal to download hack blockman go free gcubes.apk? What are the risks and benefits of using it? How can you download and install it on your device? And are there any alternatives to hack blockman go free gcubes.apk? In this article, we will answer these questions and more. Read on to find out more.</p>
7
- <h2>What is Blockman Go and GCubes?</h2>
8
- <h3>Blockman Go: A Sandbox Game with Multiple Mini-Games</h3>
9
- <p>Blockman Go is a free-to-play sandbox game developed by Blockman GO Studio. It was released in 2017 and has since attracted millions of players from all over the world. The game has a blocky style that resembles Minecraft, but it offers more than just building and crafting. You can also play various mini-games with different genres, such as action, adventure, role-playing, strategy, and more. Some of the most popular mini-games are Bed Wars, Egg Wars, Sky Block, Free City RP, Anime Fighting Simulator, and more.</p>
10
- <p>Blockman Go also has a social aspect that allows you to chat with other players, make friends, join parties, and create clans. You can also customize your avatar with hundreds of items, accessories, skins, and hairstyles. You can even create your own games using the built-in editor and share them with others.</p>
11
- <h3>GCubes: The In-Game Currency of Blockman Go</h3>
12
- <p>GCubes are the premium currency of Blockman Go. They are used to buy various things in the game, such as:</p>
13
- <ul>
14
- <li>Items: You can buy weapons, tools, blocks, furniture, pets, mounts, and more with GCubes.</li>
15
- <li>Accessories: You can buy hats, glasses, masks, backpacks, wings, tails, and more with GCubes.</li>
16
- <li>Skins: You can buy different outfits for your avatar with GCubes.</li>
17
- <li>VIP memberships: You can buy different levels of VIP memberships with GCubes. VIP members get extra benefits such as daily rewards, exclusive items, discounts, and more.</li>
18
- </ul>
19
- <p>You can earn GCubes by playing games, completing tasks, or watching ads. However, these methods may not give you enough GCubes to buy everything you want. That's why some players may want to get more GCubes for free by using hack blockman go free gcubes.apk.</p>
20
- <h2>Why Do People Want to Hack Blockman Go for Free GCubes?</h2>
21
- <h3>The Benefits of Having More GCubes</h3>
22
- <p>Having more GCubes can give you some advantages in Blockman Go. For example:</p>
23
- <ul>
- <li>You can buy more items, accessories, skins, and VIP memberships that can enhance your gameplay and appearance.</li>
24
- <li>You can unlock more mini-games and features that may not be available for free players.</li>
25
- <li>You can have more fun and enjoyment in the game without worrying about running out of GCubes.</li>
26
- </ul>
27
- <p>These are some of the benefits of having more GCubes in Blockman Go. However, they come with a price. And we are not talking about the real money that you have to spend to buy GCubes. We are talking about the risks of using hack blockman go free gcubes.apk.</p>
28
- <h3>The Risks of Using Hack Blockman Go Free GCubes APK</h3>
29
- <p>Hack blockman go free gcubes.apk is a modded version of the game that claims to give you unlimited GCubes for free. However, it is not an official app from Blockman GO Studio, and it is not approved by Google Play Store. This means that it may contain malware, viruses, spyware, or other harmful software that can damage your device or steal your personal information.</p>
31
- <p>Moreover, using hack blockman go free gcubes.apk is against the terms of service and the privacy policy of Blockman Go. This means that you are violating the rules and the rights of the game developers and the other players. If you are caught using hack blockman go free gcubes.apk, you may face serious consequences, such as:</p>
32
- <ul>
33
- <li>Your account may be banned permanently from Blockman Go and all its mini-games.</li>
34
- <li>Your device may be blacklisted from accessing Blockman Go and other apps from Blockman GO Studio.</li>
35
- <li>Your data may be deleted or corrupted by the game servers or the hackers.</li>
36
- <li>You may face legal action from Blockman GO Studio or Google Play Store for infringing their intellectual property rights or violating their policies.</li>
37
- </ul>
38
- <p>These are some of the risks of using hack blockman go free gcubes.apk. They are not worth the benefits that you may get from having more GCubes. That's why we do not recommend using hack blockman go free gcubes.apk at all. Instead, we suggest that you use legitimate ways to get more GCubes in Blockman Go.</p>
39
- <h2>How to Download Hack Blockman Go Free GCubes APK?</h2>
40
- <h3>The Steps to Download and Install the APK File</h3>
41
- <p>If you still want to try hack blockman go free gcubes.apk despite the risks, here are the steps to download and install it on your device:</p>
42
- <ol>
43
- <li>Go to a website that offers the hack blockman go free gcubes.apk file. You can search for it on Google or other search engines, but be careful of fake or malicious websites that may harm your device or trick you into downloading unwanted apps or programs.</li>
44
- <li>Download the APK file to your device. Make sure you have enough storage space and a stable internet connection.</li>
45
- <li>Enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
46
- <li>Locate the APK file on your device and tap on it to install it. Follow the instructions on the screen and wait for the installation to finish.</li>
47
- <li>Launch the app and enjoy unlimited GCubes in Blockman Go.</li>
48
- </ol>
49
- <p>These are the steps to download and install hack blockman go free gcubes.apk on your device. However, we remind you again that this is not a safe or legal way to get more GCubes in Blockman Go. You may encounter problems or issues with the app, such as crashes, errors, bugs, or glitches. You may also expose your device and your data to security threats or legal troubles. Therefore, we advise you to use alternatives to hack blockman go free gcubes.apk instead.</p>
50
- <h3>The Alternatives to Hack Blockman Go Free GCubes APK</h3>
51
- <p>If you want to get more GCubes in Blockman Go without using hack blockman go free gcubes.apk, here are some alternatives that you can try:</p>
52
- <ul>
- <li>Buy GCubes with real money. This is the official and legal way to get more GCubes in Blockman Go. You can buy GCubes with different payment methods, such as credit cards, PayPal, Google Play gift cards, and more. You can also get discounts or bonuses when you buy GCubes in bulk or during special events.</li>
53
- <li>Earn GCubes by playing games, completing tasks, or watching ads. This is the free and legitimate way to get more GCubes in Blockman Go. You can earn GCubes by playing different mini-games, completing daily or weekly tasks, or watching short ads. You can also get GCubes by participating in events, contests, or giveaways.</li>
54
- <li>Use online generators or tools that claim to give you free GCubes. This is a risky and dubious way to get more GCubes in Blockman Go. There are some websites or apps that claim to generate free GCubes for you by using hacks, cheats, or exploits. However, these are not reliable or trustworthy sources, and they may not work at all. They may also require you to complete surveys, download apps, or provide personal information that may be used for phishing, spamming, or scamming.</li>
55
- </ul>
56
- <p>These are some of the alternatives to hack blockman go free gcubes.apk that you can try. However, we recommend using the first two options only, as they are the safest and most ethical ways to get more GCubes in Blockman Go. The third option is not recommended, as it may cause more harm than good.</p>
57
- <h2>Conclusion</h2>
58
- <h3>Summary of the Main Points</h3>
59
- <p>In this article, we have discussed the topic of download hack blockman go free gcubes.apk. We have explained what Blockman Go and GCubes are, why people want to hack Blockman Go for free GCubes, how to download hack blockman go free gcubes.apk, and what the alternatives to hack blockman go free gcubes.apk are. We have also highlighted the benefits and risks of using hack blockman go free gcubes.apk.</p>
60
- <h3>Recommendations for Blockman Go Players</h3>
61
- <p>Based on our analysis, we have some recommendations for Blockman Go players who want to get more GCubes in the game:</p>
62
- <ul>
63
- <li>Do not use hack blockman go free gcubes.apk at all. It is not safe or legal to use it, and it may damage your device or your account.</li>
64
- <li>Buy GCubes with real money if you can afford it. This is the best way to support the game developers and enjoy all the features of the game.</li>
65
- <li>Earn GCubes by playing games, completing tasks, or watching ads if you want to save money. This is a fun and fair way to get more GCubes in the game.</li>
66
- <li>Avoid online generators or tools that claim to give you free GCubes. They are not reliable or trustworthy sources, and they may expose you to security threats or legal troubles.</li>
67
- </ul>
68
- <p>We hope this article has been helpful and informative for you. Thank you for reading and happy gaming!</p>
69
- <h2>Frequently Asked Questions</h2>
70
- <p>Here are some of the most common questions that people ask about download hack blockman go free gcubes.apk:</p>
71
- <ol>
72
- <li><b>What is hack blockman go free gcubes.apk?</b></li>
73
- <p>Hack blockman go free gcubes.apk is a modded version of the game that claims to give you unlimited GCubes for free.</p>
74
- <li><b>Is it safe and legal to use hack blockman go free gcubes.apk?</b></li>
75
- <p>No, it is not safe or legal to use hack blockman go free gcubes.apk. It may contain malware, viruses, spyware, or other harmful software that can damage your device or steal your personal information. It may also violate the terms of service and the privacy policy of Blockman Go and Google Play Store. If you are caught using hack blockman go free gcubes.apk, you may face serious consequences such as account ban, device blacklist, data deletion or corruption, or legal action.</p>
76
- <li><b>How can I download hack blockman go free gcubes.apk?</b></li>
77
- <p>If you still want to try hack blockman go free gcubes.apk despite the risks, you can download it from a website that offers it. However, be careful of fake or malicious websites that may harm your device or trick you into downloading unwanted apps or programs. You also need to enable unknown sources on your device settings and install the APK file on your device.</p>
78
- <li><b>What are the alternatives to hack blockman go free gcubes.apk?</b></li>
79
- <p>The alternatives to hack blockman go free gcubes.apk are to buy GCubes with real money, earn GCubes by playing games, completing tasks, or watching ads, or use online generators or tools that claim to give you free GCubes. However, we recommend you to use the first two options only, as they are the safest and most ethical ways to get more GCubes in Blockman Go. The third option is not recommended, as it may cause more harm than good.</p>
80
- <li><b>How can I get more GCubes in Blockman Go without using hack blockman go free gcubes.apk?</b></li>
81
- <p>You can get more GCubes in Blockman Go without using hack blockman go free gcubes.apk by following these tips:</p>
82
- <ul>
83
- <li>Play more mini-games and win more rewards. You can earn GCubes by playing different mini-games and winning coins, gems, or other prizes. You can also join events, contests, or giveaways that may offer GCubes as rewards.</li>
84
- <li>Complete more tasks and watch more ads. You can earn GCubes by completing daily or weekly tasks that may require you to play certain games, invite friends, or rate the game. You can also watch short ads that may give you GCubes or other bonuses.</li>
85
- <li>Invite more friends and join more clans. You can earn GCubes by inviting your friends to play Blockman Go and getting referral bonuses. You can also join clans and get clan rewards that may include GCubes or other items.</li>
86
- </ul>
87
- <p>These are some of the ways to get more GCubes in Blockman Go without using hack blockman go free gcubes.apk. They are fun and fair ways to enjoy the game and support the game developers.</p>
88
- </ol>
 
spaces/1phancelerku/anime-remove-background/Download Car Master 3D MOD APK and Become a Pro Mechanic.md DELETED
@@ -1,92 +0,0 @@
1
- <br />
2
- <h1>Car Master 3D Mod APK: A Fun and Creative Game for Car Lovers</h1>
3
- <p>Do you love cars and want to show off your skills as a mechanic? Do you enjoy fixing, customizing, and selling cars for a profit? If you answered yes to any of these questions, then you should try Car Master 3D, a fun and creative game that lets you run your own car workshop. And if you want to make the game even more enjoyable, you should download Car Master 3D Mod APK, which gives you unlimited money, no ads, and easy installation. In this article, we will tell you everything you need to know about this amazing game and how to get the modded version.</p>
4
- <h2>car master 3d mod apk</h2><br /><p><b><b>Download File</b> >>> <a href="https://jinyurl.com/2uNNX9">https://jinyurl.com/2uNNX9</a></b></p><br /><br />
5
- <h2>What is Car Master 3D?</h2>
6
- <h3>A game where you can fix and customize cars</h3>
7
- <p>Car Master 3D is a game where you can unleash your inner mechanic and car designer. You will have a garage full of old, rusty, dirty, or even non-functioning vehicles that need your attention. You will have to use various tools and parts to fix them up, such as hammers, wrenches, spray cans, wheels, spoilers, bumpers, and more. You can also customize the appearance of your cars by changing their colors, styles, stickers, and accessories. You can make them look as cool or as crazy as you want.</p>
8
- <h3>A game where you can earn money and unlock new features</h3>
9
- <p>Car Master 3D is not only a game where you can have fun with cars, but also a game where you can make money. After you finish working on a car, you can sell it for a profit or keep it for yourself. The more cars you sell, the more money you will earn. You can use your money to buy new tools, parts, and cars. You can also unlock new features, such as new garages, new locations, new customers, and new challenges. The game has many levels and missions that will keep you entertained for hours.</p>
10
- <h2>Why should you download Car Master 3D Mod APK?</h2>
11
- <h3>Unlimited money to spend on your cars</h3>
12
- <p>One of the reasons why you should download Car Master 3D Mod APK is that it gives you unlimited money to spend on your cars. You don't have to worry about running out of cash or saving up for expensive items. You can buy whatever you want and upgrade your cars as much as you like. You can also buy more cars and expand your collection. With unlimited money, you can enjoy the game without any limitations.</p>
13
- <h3>No ads to interrupt your gameplay</h3>
14
- <p>Another reason why you should download Car Master 3D Mod APK is that it removes all the ads from the game. You don't have to watch annoying videos or banners that pop up every few minutes. You don't have to wait for timers or countdowns to resume your gameplay. You can play the game smoothly and without any distractions. No ads means more fun and less frustration.</p>
55
- <h3>Easy to install and play</h3>
56
- <p>The final reason why you should download Car Master 3D Mod APK is that it is very easy to install and play. You don't need to root your device or go through complicated steps. You just need to download the APK file from a trusted source, enable unknown sources on your device, install the file, and launch the game. You can start playing right away and enjoy all the modded features. You don't need any special skills or knowledge to play this game. It is suitable for anyone who loves cars and games.</p>
57
- <h2>How to download and install Car Master 3D Mod APK?</h2>
58
- <h3>Step 1: Download the APK file from a trusted source</h3>
59
- <p>The first step to download and install Car Master 3D Mod APK is to find a reliable source that offers the latest version of the modded file. You can search online for websites that provide free and safe downloads of Car Master 3D Mod APK. Make sure to check the reviews and ratings of the websites before downloading anything. You can also ask your friends or other gamers for recommendations. Once you find a good source, click on the download button and save the file on your device.</p>
60
- <h3>Step 2: Enable unknown sources on your device</h3>
61
- <p>The second step to download and install Car Master 3D Mod APK is to enable unknown sources on your device. This is necessary because the modded file is not from the official Google Play Store and your device might block it by default. To enable unknown sources, go to your device settings, security, and toggle on the option that allows installation of apps from unknown sources. This will allow you to install Car Master 3D Mod APK without any problems.</p>
62
- <h3>Step 3: Install the APK file and launch the game</h3>
63
- <p>The third and final step to download and install Car Master 3D Mod APK is to install the APK file and launch the game. To do this, locate the downloaded file on your device, tap on it, and follow the instructions on the screen. The installation process should take only a few seconds. After that, you can open the game and start playing with unlimited money, no ads, and easy installation.</p>
64
- <h2>How to play Car Master 3D?</h2>
65
- <h3>Choose a car to work on from the garage</h3>
66
- <p>The first thing you need to do when you play Car Master 3D is to choose a car to work on from the garage. You will have a variety of cars available, such as sedans, coupes, trucks, vans, sports cars, and more. Each car has its own condition, value, and potential. You can see these details by tapping on the car. You can also rotate and zoom in on the car to inspect it more closely. Once you decide which car you want to work on, tap on the start button and move it to your workshop.</p>
67
- <h3>Use various tools and parts to fix and upgrade the car</h3>
68
- <p>The next thing you need to do when you play Car Master 3D is to use various tools and parts to fix and upgrade the car. You will have a toolbox with different tools that you can use for different purposes, such as repairing, cleaning, painting, polishing, etc. You will also have a shop where you can buy new parts for your car, such as wheels, spoilers, bumpers, lights, etc. You can drag and drop the tools and parts on the car to apply them. You can also undo or redo your actions if you make a mistake or change your mind.</p>
69
- <h3>Sell the car for a profit or keep it for yourself</h3>
70
- <p>The last thing you need to do when you play Car Master 3D is to sell the car for a profit or keep it for yourself. After you finish working on the car, you can see how much it has improved in terms of condition, value, and potential. You can also compare it with its original state by tapping on the before/after button. If you are satisfied with your work, you can sell the car for a profit by tapping on the sell button. You will get money based on how well you fixed and customized the car. You can use this money to buy more tools, parts, and cars. Alternatively, if you really like the car you worked on, you can keep it for yourself by tapping on the keep button. You can add it to your collection and show it off to your friends.</p>
71
- <h2>Tips and tricks for playing Car Master 3D</h2>
72
- <h3>Experiment with different colors and styles for your cars</h3>
73
- <p>One of the tips for playing Car Master 3D is to experiment with different colors and styles for your cars. You can make your cars look unique and attractive by using different spray cans and stickers. You can also mix and match different parts and accessories to create your own style. You can make your cars look realistic or cartoonish, elegant or funky, simple or complex. The choice is yours. You can also use the color wheel to find the perfect shade for your car.</p>
74
- <h3>Complete missions and challenges to earn extra rewards</h3>
75
- <p>Another tip for playing Car Master 3D is to complete missions and challenges to earn extra rewards. You can find these missions and challenges by tapping on the icons on the top of the screen. They will give you specific tasks to do, such as fixing a certain number of cars, using a certain tool, buying a certain part, etc. If you complete these tasks, you will get bonus coins, gems, or other prizes. These rewards will help you progress faster in the game and buy more items.</p>
76
- <h3>Watch videos to get free coins and gems</h3>
77
- <p>The final tip for playing Car Master 3D is to watch videos to get free coins and gems. You can find these videos by tapping on the icons on the bottom of the screen. They will offer you to watch a short video in exchange for some coins or gems. You can watch as many videos as you want and get unlimited free currency. This is a great way to get more money without spending any real money.</p>
78
- <h2>Conclusion</h2>
79
- <p>Car Master 3D is a fun and creative game for car lovers who want to fix and customize cars. You can download Car Master 3D Mod APK to get unlimited money, no ads, and easy installation. You can also follow our tips and tricks to play the game better and enjoy it more. Car Master 3D is a game that will keep you entertained for hours and let you express your personality through your cars. Download it now and start your car workshop adventure.</p>
80
- <h2>FAQs</h2>
81
- <h4>What are the requirements to play Car Master 3D Mod APK?</h4>
82
- <p>To play Car Master 3D Mod APK, you need an Android device with version 5.0 or higher, at least 100 MB of free storage space, and an internet connection.</p>
83
- <h4>Is Car Master 3D Mod APK safe to download and install?</h4>
84
- <p>Yes, Car Master 3D Mod APK is safe to download and install if you get it from a trusted source. However, you should always be careful when downloading any modded file from the internet and scan it for viruses or malware before installing it.</p>
85
- <h4>Can I play Car Master 3D Mod APK offline?</h4>
86
- <p>No, you cannot play Car Master 3D Mod APK offline. You need an internet connection to access the game features and content.</p>
87
- <h4>Can I play Car Master 3D Mod APK with my friends?</h4>
88
- <p>No, you cannot play Car Master 3D Mod APK with your friends. The game does not have a multiplayer mode or a social network feature. However, you can share your cars and achievements with your friends through screenshots or videos.</p>
89
- <h4>How can I contact the developers of Car Master 3D Mod APK?</h4>
90
- <p>You can contact the developers of Car Master 3D Mod APK by sending them an email at [email protected] or by visiting their website at https://saygames.by/.</p>
 
spaces/1phancelerku/anime-remove-background/FR Legends MOD APK 0.3.2 Drift Like a Pro with Unlimited Cash and Customizations.md DELETED
@@ -1,172 +0,0 @@
1
-
2
- <h1>Download FR Legends Mod APK Versi 0.3 2: The Ultimate Drift Racing Game for Android</h1>
3
- <p>If you are a fan of car racing games, especially drift racing games, then you must have heard of <strong>FR Legends</strong>. This is one of the most popular and realistic drift racing games for Android devices, where you can experience the thrill and excitement of drifting on various tracks with different cars. In this article, we will tell you everything you need to know about <strong>FR Legends</strong>, including how to download and install <strong>FR Legends Mod APK Versi 0.3 2</strong>, which is a modified version of the game that offers many amazing features that are not available in the original version. So, without further ado, let's get started!</p>
4
- <h2>download fr legends mod apk versi 0.3 2</h2><br /><p><b><b>Download Zip</b> ---> <a href="https://jinyurl.com/2uNSzt">https://jinyurl.com/2uNSzt</a></b></p><br /><br />
5
- <h2>Introduction: What is FR Legends and why you should download it</h2>
6
- <p><strong>FR Legends</strong> is a drift racing game that was developed by <em>Feng Li</em> and released in October 2021 for Android devices. The game has received over 10 million downloads and has an average rating of 4.5 out of 5 stars on Google Play Store. The game is praised for its realistic physics, graphics, sound effects, and gameplay, as well as its customization options, online mode, and variety of cars and tracks.</p>
7
- <p>The game is based on the concept of <em>FR</em>, which stands for the <em>Front-engine</em>, <em>Rear-wheel-drive</em> layout, the ideal configuration for drift racing. Drift racing is a type of car racing where the driver intentionally oversteers the car to make it slide sideways through corners. The game allows you to control your car's throttle, brake, steering, handbrake, and clutch, as well as adjust your car's suspension, tire pressure, camber, and gear ratio. You can also customize your car's appearance, such as the color, body kit, spoiler, wheels, stickers, and more.</p>
8
- <p>The game has two main modes: <em>Career Mode</em> and <em>Online Mode</em>. In Career Mode, you can compete in various events and challenges, such as time attack, tandem battle, gymkhana, and more. You can earn money and reputation by completing these events and use them to buy new cars or upgrade your existing ones. In Online Mode, you can join or create a room and race with other players from around the world. You can also chat with them and share your drifting skills and tips.</p>
9
- <p><strong>FR Legends</strong> is a game that will keep you hooked for hours with its addictive and immersive gameplay. You will never get bored of drifting on different tracks with different cars and challenging yourself or other players. You will also learn a lot about car mechanics and drifting techniques as you play the game. If you are looking for a drift racing game that is fun, realistic, and customizable, then <strong>FR Legends</strong> is the game for you.</p>
10
- <h2>How to download and install FR Legends Mod APK Versi 0.3 2</h2>
11
- <p>As we mentioned earlier, <strong>FR Legends Mod APK Versi 0.3 2</strong> is a modified version of the game that offers many amazing features that are not available in the original version. Some of these features are:</p>
61
- <ul>
62
- <li><strong>Unlimited money:</strong> You can get unlimited money in the game without having to complete any events or challenges. You can use this money to buy any car or upgrade you want.</li>
63
- <li><strong>New cars:</strong> You can access new cars that are not available in the original version, such as the Nissan Skyline GT-R R34, Toyota Supra MK4, Mazda RX-7 FD3S, and more.</li>
64
- <li><strong>New maps:</strong> You can explore new maps that are not available in the original version, such as the Tokyo Drift Park, the Mountain Pass, the Desert Highway, and more.</li>
65
- <li><strong>New accessories:</strong> You can customize your car with new accessories that are not available in the original version, such as neon lights, smoke effects, exhaust sounds, and more.</li>
66
- <li><strong>New designs:</strong> You can change your car's design with new designs that are not available in the original version, such as anime characters, graffiti art, logos, and more.</li>
67
- </ul>
68
- <p>To download and install <strong>FR Legends Mod APK Versi 0.3 2</strong>, you need to follow these simple steps:</p>
69
- <ol>
70
- <li>Go to this link: <a href="">https://fr-legends-mod-apk-versi-0-3-2.com/download</a></li>
71
- <li>Click on the download button and wait for the file to be downloaded on your device.</li>
72
- <li>Go to your device's settings and enable the installation of apps from unknown sources.</li>
73
- <li>Locate the downloaded file and tap on it to start the installation process.</li>
74
- <li>Follow the instructions on the screen and wait for the installation to be completed.</li>
75
- <li>Launch the game and enjoy!</li>
76
- </ol>
77
- <p>Note: Before you download and install <strong>FR Legends Mod APK Versi 0.3 2</strong>, make sure that you have enough storage space on your device and that your device meets the minimum requirements of the game. Also, be aware that downloading modded apps from unknown sources may pose some risks and dangers to your device and data. We are not responsible for any damage or loss that may occur as a result of downloading or using <strong>FR Legends Mod APK Versi 0.3 2</strong>. Download and use it at your own risk.</p>
78
- <h2>Features of FR Legends Mod APK Versi 0.3 2</h2>
79
- <p>We have already mentioned some of the features of <strong>FR Legends Mod APK Versi 0.3 2</strong>, but let's take a closer look at them and see how they enhance the gaming experience and performance.</p>
80
- <table>
81
- <tr>
82
- <th>Feature</th>
83
- <th>Description</th>
84
- <th>Difference from original version</th>
85
- </tr>
86
- <tr>
87
- <td><strong>Unlimited money</strong></td>
- <td>You can get unlimited money in the game without having to complete any events or challenges. You can use this money to buy any car or upgrade you want.</td>
88
- <td>You have to earn money by completing events or challenges in the original version. You have limited options to buy or upgrade cars.</td>
89
- </tr>
90
- <tr>
91
- <td><strong>New cars</strong></td>
92
- <td>You can access new cars that are not available in the original version, such as the Nissan Skyline GT-R R34, Toyota Supra MK4, Mazda RX-7 FD3S, and more. These cars have different specifications and performance levels.</td>
93
- <td>You have to unlock cars by earning reputation or money in the original version. You have fewer options to choose from.</td>
94
- </tr>
95
- <tr>
96
- <td><strong>New maps</strong></td>
97
- <td>You can explore new maps that are not available in the original version, such as the Tokyo Drift Park, the Mountain Pass, the Desert Highway, and more. These maps have different layouts and environments.</td>
98
- <td>You have to unlock maps by earning reputation or money in the original version. You have fewer options to choose from.</td>
99
- </tr>
100
- <tr>
101
- <td><strong>New accessories</strong></td>
102
- <td>You can customize your car with new accessories that are not available in the original version, such as neon lights, smoke effects, exhaust sounds, and more. These accessories add more style and flair to your car.</td>
103
- <td>You have to unlock accessories by earning reputation or money in the original version. You have fewer options to choose from.</td>
104
- </tr>
105
- <tr>
106
- <td><strong>New designs</strong></td>
107
- <td>You can change your car's design with new designs that are not available in the original version, such as anime characters, graffiti art, logos, and more. These designs add more personality and uniqueness to your car.</td>
108
- <td>You have to unlock designs by earning reputation or money in the original version. You have fewer options to choose from.</td>
109
- </tr>
110
- </table>
111
- <p>As you can see, the features of <strong>FR Legends Mod APK Versi 0.3 2</strong> make the game more fun, diverse, and customizable. You can enjoy more freedom and creativity in creating your own drift racing experience. You can also save time and effort in unlocking and upgrading your cars and maps. You can also impress your friends and rivals with your cool and awesome cars and designs.</p>
112
- <h2>Tips and tricks to master FR Legends</h2>
113
- <p>Now that you have downloaded and installed <strong>FR Legends Mod APK Versi 0.3 2</strong>, you might be wondering how to master the game and become a drift racing legend. Well, don't worry, we have some useful tips and tricks for you that will help you improve your drifting skills, score more points, win more races, customize your cars, and more. Here they are:</p>
114
- <ul>
115
- <li><strong>Practice makes perfect:</strong> The best way to master FR Legends is to practice a lot. The game has a <em>Free Mode</em> where you can practice drifting on any track with any car without any pressure or competition. You can also adjust the difficulty level of the game according to your preference and skill level. The more you practice, the more you will learn how to control your car's speed, angle, direction, and balance while drifting.</li>
116
- <li><strong>Use the handbrake wisely:</strong> The handbrake is a very important tool for drifting in FR Legends. You can use it to initiate a drift, maintain a drift, or correct a drift. However, you should not use it too much or too little, as it can affect your car's stability and momentum. You should use it only when necessary and release it as soon as possible. You should also avoid using it when you are going straight or when you are already drifting at a high angle.</li>
117
- <li><strong>Choose the right car for the right track:</strong> FR Legends has many different cars and tracks that have different characteristics and requirements. You should choose the car that suits the track best based on its power, weight, handling, grip, and style. For example, if you are racing on a tight and twisty track, you should choose a light and agile car that can maneuver easily through corners. If you are racing on a wide and open track, you should choose a powerful and fast car that can accelerate quickly on straightaways.</li>
118
- <li><strong>Customize your car according to your preference:</strong> FR Legends allows you to customize your car's appearance and performance according to your preference and style. You can change your car's color, body kit, spoiler, wheels, stickers, and more. You can also adjust your car's suspension, tire pressure, camber, and gear ratio. You should experiment with different combinations of these settings until you find the one that works best for you and your car. You can also save your custom settings for future use.</li>
119
- <li><strong>Watch and learn from other players:</strong> FR Legends has an online mode where you can race with other players from around the world. You can also chat with them and share your drifting skills and tips. You can learn a lot from watching and observing how other players drift, such as their techniques, strategies, mistakes, and corrections. You can also challenge them to a friendly or competitive race and see how you compare to them.</li>
120
- </ul>
121
- <p>These are some of the tips and tricks that will help you master FR Legends and become a drift racing legend. Of course, there are more tips and tricks that you can discover and learn as you play the game. The most important thing is to have fun and enjoy the game.</p>
122
- <h2>Conclusion: Why FR Legends is the best drift racing game for Android</h2>
123
- <p>We have reached the end of this article, and we hope that you have learned a lot about FR Legends and how to download and install FR Legends Mod APK Versi 0.3 2. We have also shared with you some of the features, benefits, and tips of playing FR Legends and how it is the best drift racing game for Android devices.</p>
124
- <p>FR Legends is a game that will satisfy your passion and curiosity for drift racing. It will challenge your skills, creativity, and style as you drift on various tracks with different cars. It will also entertain you with its realistic physics, graphics, sound effects, and gameplay. It will also allow you to customize your car's appearance and performance according to your preference and style. It will also connect you with other players from around the world who share your love for drift racing.</p>
125
- <p>If you are looking for a drift racing game that is fun, realistic, and customizable, then FR Legends is the game for you. You can download and install FR Legends Mod APK Versi 0.3 2 from the link below and enjoy all the amazing features that it offers. You will not regret it.</p>
126
- <p><a href="">Download FR Legends Mod APK Versi 0.3 2 here</a></p>
127
- <h4>FAQs</h4>
128
- <p>Here are some of the frequently asked questions related to FR Legends and FR Legends Mod APK Versi 0.3 2:</p>
129
- <ul>
130
- <li><strong>What is the difference between FR Legends and other drift racing games?</strong></li>
131
- <p>FR Legends is different from other drift racing games in many ways, such as:</p>
132
- <ul>
133
- <li>It focuses on the concept of FR, which is the ideal layout for drift racing.</li>
134
- <li>It allows you to control your car's throttle, brake, steering, handbrake, and clutch, as well as adjust your car's suspension, tire pressure, camber, and gear ratio.</li>
135
- <li>It offers a realistic and immersive drifting experience with its physics, graphics, sound effects, and gameplay.</li>
136
- <li>It provides a variety of cars and tracks that have different characteristics and requirements.</li>
137
- <li>It enables you to customize your car's appearance and performance according to your preference and style.</li>
138
- <li>It connects you with other players from around the world who share your love for drift racing.</li>
139
- </ul>
140
- <li><strong>Is FR Legends Mod APK Versi 0.3 2 safe and secure to download and use?</strong></li>
141
- <p>FR Legends Mod APK Versi 0.3 2 is safe and secure to download and use as long as you download it from a reliable source like the one we have provided in this article. However, you should be aware that downloading modded apps from unknown sources may pose some risks and dangers to your device and data. We are not responsible for any damage or loss that may occur as a result of downloading or using FR Legends Mod APK Versi 0.3 2. Download and use it at your own risk.</p>
142
- <li><strong>How can I update FR Legends Mod APK Versi 0.3 2 to the latest version?</strong></li>
143
- <p>To update FR Legends Mod APK Versi 0.3 2 to the latest version, you need to follow these steps:</p>
144
- <ol>
145
- <li>Go to the same link where you downloaded FR Legends Mod APK Versi 0.3 2 from: <a href="">https://fr-legends-mod-apk-versi-0-3-2.com/download</a></li>
146
- <li>Check if there is a new version available and click on the download button if there is.</li>
147
- <li>Uninstall the previous version of FR Legends Mod APK Versi 0.3 2 from your device.</li>
148
- <li>Install the new version of FR Legends Mod APK Versi 0.3 2 following the same steps as before.</li>
149
- <li>Launch the game and enjoy the new features and improvements.</li>
150
- </ol>
151
- <p>Note: You should always update FR Legends Mod APK Versi 0.3 2 to the latest version to avoid any bugs, glitches, or compatibility issues with the game.</p>
152
- <li><strong>How can I play FR Legends online with other players?</strong></li>
153
- <p>To play FR Legends online with other players, you need to follow these steps:</p>
154
- <ol>
155
- <li>Launch the game and tap on the <em>Online Mode</em> button on the main menu.</li>
156
- <li>Select a region and a room that you want to join or create your own room by tapping on the <em>Create Room</em> button.</li>
157
- <li>Wait for other players to join or invite your friends by tapping on the <em>Invite Friends</em> button.</li>
158
- <li>Choose a car and a track that you want to race on and tap on the <em>Ready</em> button.</li>
159
- <li>Start the race and enjoy!</li>
160
- </ol>
161
- <p>Note: You need a stable internet connection to play FR Legends online with other players. You can also chat with them and share your drifting skills and tips by tapping on the <em>Chat</em> button.</p>
162
- <li><strong>How can I contact the developers or support team of FR Legends?</strong></li>
163
- <p>To contact the developers or support team of FR Legends, you can use one of these methods:</p>
164
- <ul>
165
- <li>Email them at <a href="mailto:[email protected]">[email protected]</a></li>
166
- <li>Follow them on Instagram at <a href="https://www.instagram.com/frlegendsgame/">https://www.instagram.com/frlegendsgame/</a></li>
167
- <li>Like their Facebook page at <a href="https://www.facebook.com/FRLEGENDS/">https://www.facebook.com/FRLEGENDS/</a></li>
168
- <li>Join their Discord server at <a href="https://discord.gg/frlegends">https://discord.gg/frlegends</a></li>
169
- </ul>
170
- <p>You can also leave a review or feedback on Google Play Store or App Store and rate the game according to your experience.</p>
 
spaces/1toTree/lora_test/ppdiffusers/initializer.py DELETED
@@ -1,303 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- This code is based on https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
17
- The copyright of pytorch/pytorch is covered by a BSD-style license, as found in the LICENSE file.
18
- """
19
-
20
- import math
21
-
22
- import numpy as np
23
- import paddle
24
- import paddle.nn as nn
25
-
26
- __all__ = [
27
- "uniform_",
28
- "normal_",
29
- "constant_",
30
- "ones_",
31
- "zeros_",
32
- "xavier_uniform_",
33
- "xavier_normal_",
34
- "kaiming_uniform_",
35
- "kaiming_normal_",
36
- "linear_init_",
37
- "conv_init_",
38
- "reset_initialized_parameter",
39
- ]
40
-
41
-
42
- def _no_grad_uniform_(tensor, a, b):
43
- with paddle.no_grad():
44
- tensor.set_value(paddle.uniform(shape=tensor.shape, dtype=tensor.dtype, min=a, max=b))
45
- return tensor
46
-
47
-
48
- def _no_grad_normal_(tensor, mean=0.0, std=1.0):
49
- with paddle.no_grad():
50
- tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape))
51
- return tensor
52
-
53
-
54
- def _no_grad_fill_(tensor, value=0.0):
55
- with paddle.no_grad():
56
- tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype))
57
- return tensor
58
-
59
-
60
- def uniform_(tensor, a, b):
61
- """
62
- Modify the tensor in place using uniform_
63
- Args:
64
- tensor (paddle.Tensor): paddle Tensor
65
- a (float|int): min value.
66
- b (float|int): max value.
67
- Return:
68
- tensor
69
- """
70
- return _no_grad_uniform_(tensor, a, b)
71
-
72
-
73
- def normal_(tensor, mean=0.0, std=1.0):
74
- """
75
- Modify the tensor in place using normal_
76
- Args:
77
- tensor (paddle.Tensor): paddle Tensor
78
- mean (float|int): mean value.
79
- std (float|int): std value.
80
- Return:
81
- tensor
82
- """
83
- return _no_grad_normal_(tensor, mean, std)
84
-
85
-
86
- def constant_(tensor, value=0.0):
87
- """
88
- Modify the tensor in place using constant_
89
- Args:
90
- tensor (paddle.Tensor): paddle Tensor
91
- value (float|int): value to fill tensor.
92
- Return:
93
- tensor
94
- """
95
- return _no_grad_fill_(tensor, value)
96
-
97
-
98
- def ones_(tensor):
99
- """
100
- Modify the tensor in place using ones_
101
- Args:
102
- tensor (paddle.Tensor): paddle Tensor
103
- Return:
104
- tensor
105
- """
106
- return _no_grad_fill_(tensor, 1)
107
-
108
-
109
- def zeros_(tensor):
110
- """
111
- Modified tensor inspace using zeros_
112
- Args:
113
- tensor (paddle.Tensor): paddle Tensor
114
- Return:
115
- tensor
116
- """
117
- return _no_grad_fill_(tensor, 0)
118
-
119
-
120
- def vector_(tensor, vector):
121
- with paddle.no_grad():
122
- tensor.set_value(paddle.to_tensor(vector, dtype=tensor.dtype))
123
- return tensor
124
-
125
-
126
- def _calculate_fan_in_and_fan_out(tensor, reverse=False):
127
- """
128
- Calculate (fan_in, fan_out) for tensor
129
- Args:
130
- tensor (Tensor): paddle.Tensor
131
- reverse (bool, optional): tensor data format order, False by default as [fout, fin, ...]. E.g. conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] is True.
132
- Return:
133
- Tuple[fan_in, fan_out]
134
- """
135
- if tensor.ndim < 2:
136
- raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
137
-
138
- if reverse:
139
- num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1]
140
- else:
141
- num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0]
142
-
143
- receptive_field_size = 1
144
- if tensor.ndim > 2:
145
- receptive_field_size = np.prod(tensor.shape[2:])
146
-
147
- fan_in = num_input_fmaps * receptive_field_size
148
- fan_out = num_output_fmaps * receptive_field_size
149
-
150
- return fan_in, fan_out
151
-
152
-
153
- def xavier_uniform_(tensor, gain=1.0, reverse=False):
154
- """
155
- Modify the tensor in place using xavier_uniform_
156
- Args:
157
- tensor (paddle.Tensor): paddle Tensor
158
- gain (float): scaling factor, 1.0 by default.
159
- reverse (bool, optional): tensor data format order, False by default as [fout, fin, ...].
160
- Return:
161
- tensor
162
- """
163
- fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse)
164
- std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
165
- k = math.sqrt(3.0) * std
166
- return _no_grad_uniform_(tensor, -k, k)
167
-
168
-
169
- def xavier_normal_(tensor, gain=1.0, reverse=False):
170
- """
171
- Modify the tensor in place using xavier_normal_
172
- Args:
173
- tensor (paddle.Tensor): paddle Tensor
174
- gain (float): scaling factor, 1.0 by default.
175
- reverse (bool, optional): tensor data format order, False by default as [fout, fin, ...].
176
- Return:
177
- tensor
178
- """
179
- fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse)
180
- std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
181
- return _no_grad_normal_(tensor, 0, std)
182
-
183
-
184
- # reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html
185
- def _calculate_correct_fan(tensor, mode, reverse=False):
186
- mode = mode.lower()
187
- valid_modes = ["fan_in", "fan_out"]
188
- if mode not in valid_modes:
189
- raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
190
-
191
- fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse)
192
-
193
- return fan_in if mode == "fan_in" else fan_out
194
-
195
-
196
- def _calculate_gain(nonlinearity, param=None):
197
- linear_fns = ["linear", "conv1d", "conv2d", "conv3d", "conv_transpose1d", "conv_transpose2d", "conv_transpose3d"]
198
- if nonlinearity in linear_fns or nonlinearity == "sigmoid":
199
- return 1
200
- elif nonlinearity == "tanh":
201
- return 5.0 / 3
202
- elif nonlinearity == "relu":
203
- return math.sqrt(2.0)
204
- elif nonlinearity == "leaky_relu":
205
- if param is None:
206
- negative_slope = 0.01
207
- elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
208
- # True/False are instances of int, hence check above
209
- negative_slope = param
210
- else:
211
- raise ValueError("negative_slope {} not a valid number".format(param))
212
- return math.sqrt(2.0 / (1 + negative_slope**2))
213
- elif nonlinearity == "selu":
214
- return 3.0 / 4
215
- else:
216
- raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
217
-
218
-
219
- def kaiming_uniform_(tensor, a=0, mode="fan_in", nonlinearity="leaky_relu", reverse=False):
220
- """
221
- Modify the tensor in place using the kaiming_uniform_ method
222
- Args:
223
- tensor (paddle.Tensor): paddle Tensor
224
- mode (str): one of ['fan_in', 'fan_out'], 'fan_in' by default
225
- nonlinearity (str): nonlinearity method name
226
- reverse (bool, optional): tensor data format order, False by default as [fout, fin, ...].
227
- Return:
228
- tensor
229
- """
230
- fan = _calculate_correct_fan(tensor, mode, reverse)
231
- gain = _calculate_gain(nonlinearity, a)
232
- std = gain / math.sqrt(fan)
233
- k = math.sqrt(3.0) * std
234
- return _no_grad_uniform_(tensor, -k, k)
235
-
236
-
237
- def kaiming_normal_(tensor, a=0, mode="fan_in", nonlinearity="leaky_relu", reverse=False):
238
- """
239
- Modify the tensor in place using kaiming_normal_
240
- Args:
241
- tensor (paddle.Tensor): paddle Tensor
242
- mode (str): one of ['fan_in', 'fan_out'], 'fan_in' by default
243
- nonlinearity (str): nonlinearity method name
244
- reverse (bool, optional): tensor data format order, False by default as [fout, fin, ...].
245
- Return:
246
- tensor
247
- """
248
- fan = _calculate_correct_fan(tensor, mode, reverse)
249
- gain = _calculate_gain(nonlinearity, a)
250
- std = gain / math.sqrt(fan)
251
- return _no_grad_normal_(tensor, 0, std)
252
-
253
-
254
- def linear_init_(module):
255
- bound = 1 / math.sqrt(module.weight.shape[0])
256
- uniform_(module.weight, -bound, bound)
257
- uniform_(module.bias, -bound, bound)
258
-
259
-
260
- def conv_init_(module):
261
- bound = 1 / np.sqrt(np.prod(module.weight.shape[1:]))
262
- uniform_(module.weight, -bound, bound)
263
- if module.bias is not None:
264
- uniform_(module.bias, -bound, bound)
265
-
266
-
267
- def bias_init_with_prob(prior_prob=0.01):
268
- """initialize conv/fc bias value according to a given probability value."""
269
- bias_init = float(-np.log((1 - prior_prob) / prior_prob))
270
- return bias_init
271
-
272
-
273
- @paddle.no_grad()
274
- def reset_initialized_parameter(model, include_self=True):
275
- """
276
- Reset the initialized parameters using the following methods for [conv, linear, embedding, bn] layers
277
- Args:
278
- model (paddle.Layer): paddle Layer
279
- include_self (bool: False): include_self for Layer.named_sublayers method. Indicate whether including itself
280
- Return:
281
- None
282
- """
283
- for _, m in model.named_sublayers(include_self=include_self):
284
- if isinstance(m, nn.Conv2D):
285
- k = float(m._groups) / (m._in_channels * m._kernel_size[0] * m._kernel_size[1])
286
- k = math.sqrt(k)
287
- _no_grad_uniform_(m.weight, -k, k)
288
- if hasattr(m, "bias") and getattr(m, "bias") is not None:
289
- _no_grad_uniform_(m.bias, -k, k)
290
-
291
- elif isinstance(m, nn.Linear):
292
- k = math.sqrt(1.0 / m.weight.shape[0])
293
- _no_grad_uniform_(m.weight, -k, k)
294
- if hasattr(m, "bias") and getattr(m, "bias") is not None:
295
- _no_grad_uniform_(m.bias, -k, k)
296
-
297
- elif isinstance(m, nn.Embedding):
298
- _no_grad_normal_(m.weight, mean=0.0, std=1.0)
299
-
300
- elif isinstance(m, (nn.BatchNorm2D, nn.LayerNorm)):
301
- _no_grad_fill_(m.weight, 1.0)
302
- if hasattr(m, "bias") and getattr(m, "bias") is not None:
303
- _no_grad_fill_(m.bias, 0)
 
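A minimal usage sketch of these helpers, assuming the full initializer.py (with `_no_grad_uniform_`, `uniform_`, and friends) is importable as `ppdiffusers.initializer` from this repo's layout and PaddlePaddle is installed; the layer shapes are illustrative only:

import paddle.nn as nn

# Import path assumed from the repo layout shown in this commit.
from ppdiffusers.initializer import (
    kaiming_uniform_, linear_init_, reset_initialized_parameter)

conv = nn.Conv2D(3, 16, kernel_size=3)
fc = nn.Linear(16, 10)

# Kaiming uniform on a single weight: bound = sqrt(3) * gain / sqrt(fan).
kaiming_uniform_(conv.weight, nonlinearity="relu")

# Linear layers can use the fan-in uniform rule directly.
linear_init_(fc)

# Or re-run the default scheme over every conv/linear/embedding/bn sublayer at once.
model = nn.Sequential(conv, fc)
reset_initialized_parameter(model)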
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_mega.py DELETED
@@ -1,183 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import Callable, List, Optional, Union
-
- import numpy as np
- import PIL.Image
-
- from ...utils import logging
- from .pipeline_stable_diffusion import StableDiffusionPipeline
- from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
- from .pipeline_stable_diffusion_inpaint_legacy import (
-     StableDiffusionInpaintPipelineLegacy,
- )
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- class StableDiffusionMegaPipeline(StableDiffusionPipeline):
-     r"""
-     Pipeline for generation using Stable Diffusion.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
-             or [`DPMSolverMultistepScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPFeatureExtractor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __call__(self, *args, **kwargs):
-         return self.text2img(*args, **kwargs)
-
-     def text2img(
-         self,
-         prompt: Union[str, List[str]],
-         height: Optional[int] = 512,
-         width: Optional[int] = 512,
-         num_inference_steps: Optional[int] = 50,
-         guidance_scale: Optional[float] = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: Optional[float] = 0.0,
-         generator: Optional[np.random.RandomState] = None,
-         latents: Optional[np.ndarray] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
-         callback_steps: Optional[int] = 1,
-     ):
-         expected_components = inspect.signature(StableDiffusionPipeline.__init__).parameters.keys()
-         components = {name: component for name, component in self.components.items() if name in expected_components}
-         temp_pipeline = StableDiffusionPipeline(
-             **components, requires_safety_checker=self.config.requires_safety_checker
-         )
-         output = temp_pipeline(
-             prompt=prompt,
-             height=height,
-             width=width,
-             num_inference_steps=num_inference_steps,
-             guidance_scale=guidance_scale,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             eta=eta,
-             generator=generator,
-             latents=latents,
-             output_type=output_type,
-             return_dict=return_dict,
-             callback=callback,
-             callback_steps=callback_steps,
-         )
-         return output
-
-     def img2img(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[np.ndarray, PIL.Image.Image],
-         strength: float = 0.8,
-         num_inference_steps: Optional[int] = 50,
-         guidance_scale: Optional[float] = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: Optional[float] = 0.0,
-         generator: Optional[np.random.RandomState] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
-         callback_steps: Optional[int] = 1,
-     ):
-         expected_components = inspect.signature(StableDiffusionImg2ImgPipeline.__init__).parameters.keys()
-         components = {name: component for name, component in self.components.items() if name in expected_components}
-         temp_pipeline = StableDiffusionImg2ImgPipeline(
-             **components, requires_safety_checker=self.config.requires_safety_checker
-         )
-         output = temp_pipeline(
-             prompt=prompt,
-             image=image,
-             strength=strength,
-             num_inference_steps=num_inference_steps,
-             guidance_scale=guidance_scale,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             eta=eta,
-             generator=generator,
-             output_type=output_type,
-             return_dict=return_dict,
-             callback=callback,
-             callback_steps=callback_steps,
-         )
-
-         return output
-
-     def inpaint_legacy(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[np.ndarray, PIL.Image.Image],
-         mask_image: Union[np.ndarray, PIL.Image.Image],
-         strength: float = 0.8,
-         num_inference_steps: Optional[int] = 50,
-         guidance_scale: Optional[float] = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: Optional[float] = 0.0,
-         generator: Optional[np.random.RandomState] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
-         callback_steps: Optional[int] = 1,
-     ):
-         expected_components = inspect.signature(StableDiffusionInpaintPipelineLegacy.__init__).parameters.keys()
-         components = {name: component for name, component in self.components.items() if name in expected_components}
-         temp_pipeline = StableDiffusionInpaintPipelineLegacy(
-             **components, requires_safety_checker=self.config.requires_safety_checker
-         )
-         output = temp_pipeline(
-             prompt=prompt,
-             image=image,
-             mask_image=mask_image,
-             strength=strength,
-             num_inference_steps=num_inference_steps,
-             guidance_scale=guidance_scale,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             eta=eta,
-             generator=generator,
-             output_type=output_type,
-             return_dict=return_dict,
-             callback=callback,
-             callback_steps=callback_steps,
-         )
-
-         return output
 
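A hedged usage sketch of the mega pipeline: one object dispatching to all three tasks. It assumes the ppdiffusers package from this repo is installed and that `from_pretrained` (inherited from the pipeline base class) accepts the checkpoint id referenced in the docstring above:

from ppdiffusers import StableDiffusionMegaPipeline  # import path assumed from this repo's layout

pipe = StableDiffusionMegaPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# __call__ dispatches to text2img.
image = pipe("a photo of an astronaut riding a horse").images[0]

# The same components are reused for img2img via a temporary pipeline.
init = image.resize((512, 512))
edited = pipe.img2img(prompt="the same scene at sunset", image=init, strength=0.8).images[0]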
spaces/2023Liu2023/bingo/src/components/chat-scroll-anchor.tsx DELETED
@@ -1,29 +0,0 @@
- 'use client'
-
- import * as React from 'react'
- import { useInView } from 'react-intersection-observer'
-
- import { useAtBottom } from '@/lib/hooks/use-at-bottom'
-
- interface ChatScrollAnchorProps {
-   trackVisibility?: boolean
- }
-
- export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) {
-   const isAtBottom = useAtBottom()
-   const { ref, entry, inView } = useInView({
-     trackVisibility,
-     delay: 100,
-     rootMargin: '0px 0px -150px 0px'
-   })
-
-   React.useEffect(() => {
-     if (isAtBottom && trackVisibility && !inView) {
-       entry?.target.scrollIntoView({
-         block: 'start'
-       })
-     }
-   }, [inView, entry, isAtBottom, trackVisibility])
-
-   return <div ref={ref} className="h-px w-full" />
- }
 
spaces/404ERRORms/bingAI/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: BingAI
- emoji: 🚀
- colorFrom: blue
- colorTo: red
- sdk: docker
- pinned: false
- license: mit
- app_port: 8080
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIML-TUDA/semantic-diffusion/app.py DELETED
@@ -1,517 +0,0 @@
- from contextlib import nullcontext
- import gradio as gr
- import torch
- from torch import autocast
- from diffusers import SemanticStableDiffusionPipeline
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
- pipe = pipe.to(device)
- gen = torch.Generator(device=device)
-
- # Sometimes the nsfw checker is confused by the Pokémon images, you can disable
- # it at your own risk here
- disable_safety = False
-
- if disable_safety:
-     def null_safety(images, **kwargs):
-         return images, False
-     pipe.safety_checker = null_safety
-
-
- style_embeddings = {
-     'Concept Art': torch.load('embeddings/concept_art.pt'),
-     'Animation': torch.load('embeddings/animation.pt'),
-     'Character Design': torch.load('embeddings/character_design.pt'),
-     'Portrait Photo': torch.load('embeddings/portrait_photo.pt'),
-     'Architecture': torch.load('embeddings/architecture.pt'),
- }
-
-
- def infer(prompt, steps, scale, seed,
-           editing_prompt_1=None, reverse_editing_direction_1=False, edit_warmup_steps_1=10, edit_guidance_scale_1=5, edit_threshold_1=0.95,
-           editing_prompt_2=None, reverse_editing_direction_2=False, edit_warmup_steps_2=10, edit_guidance_scale_2=5, edit_threshold_2=0.95,
-           edit_style=None,
-           reverse_editing_direction_style=False, edit_warmup_steps_style=5, edit_guidance_scale_style=7, edit_threshold_style=0.8,
-           edit_momentum_scale=0.5, edit_mom_beta=0.6):
-
-     gen.manual_seed(seed)
-     images = pipe(prompt, guidance_scale=scale, num_inference_steps=steps, generator=gen).images
-
-     editing_prompt = [editing_prompt_1, editing_prompt_2]
-     reverse_editing_direction = [reverse_editing_direction_1, reverse_editing_direction_2]
-     edit_warmup_steps = [edit_warmup_steps_1, edit_warmup_steps_2]
-     edit_guidance_scale = [edit_guidance_scale_1, edit_guidance_scale_2]
-     edit_threshold = [edit_threshold_1, edit_threshold_2]
-
-     indices = [ind for ind, val in enumerate(editing_prompt) if val is None or len(val) <= 1]
-
-     for index in sorted(indices, reverse=True):
-         del editing_prompt[index]
-         del reverse_editing_direction[index]
-         del edit_warmup_steps[index]
-         del edit_guidance_scale[index]
-         del edit_threshold[index]
-     editing_prompt_embeddings = None
-
-     out_label = 'SEGA'
-     if edit_style is not None and isinstance(edit_style, str) and edit_style in style_embeddings.keys():
-         editing_prompt = None
-         reverse_editing_direction = reverse_editing_direction_style
-         edit_warmup_steps = edit_warmup_steps_style
-         edit_guidance_scale = edit_guidance_scale_style
-         edit_threshold = edit_threshold_style
-         editing_prompt_embeddings = style_embeddings[edit_style]
-         out_label = edit_style
-
-     gen.manual_seed(seed)
-     images.extend(pipe(prompt, guidance_scale=scale, num_inference_steps=steps, generator=gen,
-                        editing_prompt=editing_prompt, editing_prompt_embeddings=editing_prompt_embeddings,
-                        reverse_editing_direction=reverse_editing_direction, edit_warmup_steps=edit_warmup_steps,
-                        edit_guidance_scale=edit_guidance_scale, edit_threshold=edit_threshold,
-                        edit_momentum_scale=edit_momentum_scale, edit_mom_beta=edit_mom_beta
-                        ).images)
-
-     return zip(images, ['Original', out_label])
-
-
- def reset_style():
-     radio = gr.Radio(label='Style', choices=['Concept Art', 'Animation', 'Character Design', 'Portrait Photo', 'Architecture'])
-     return radio
-
-
- def reset_text():
-     text_1 = gr.Textbox(
-         label="Edit Prompt 1",
-         show_label=False,
-         max_lines=1,
-         placeholder="Enter your 1st edit prompt",
-     ).style(
-         border=(True, False, True, True),
-         rounded=(True, False, False, True),
-         container=False,
-     )
-     text_2 = gr.Textbox(
-         label="Edit Prompt 2",
-         show_label=False,
-         max_lines=1,
-         placeholder="Enter your 2nd edit prompt",
-     ).style(
-         border=(True, False, True, True),
-         rounded=(True, False, False, True),
-         container=False,
-     )
-     return text_1, text_2
-
-
- css = """
-         a {
-             color: inherit;
-             text-decoration: underline;
-         }
-         .gradio-container {
-             font-family: 'IBM Plex Sans', sans-serif;
-         }
-         .gr-button {
-             color: white;
-             border-color: #9d66e5;
-             background: #9d66e5;
-         }
-         input[type='range'] {
-             accent-color: #9d66e5;
-         }
-         .dark input[type='range'] {
-             accent-color: #dfdfdf;
-         }
-         .container {
-             max-width: 730px;
-             margin: auto;
-             padding-top: 1.5rem;
-         }
-         #gallery {
-             min-height: 22rem;
-             margin-bottom: 15px;
-             margin-left: auto;
-             margin-right: auto;
-             border-bottom-right-radius: .5rem !important;
-             border-bottom-left-radius: .5rem !important;
-         }
-         #gallery>div>.h-full {
-             min-height: 20rem;
-         }
-         .details:hover {
-             text-decoration: underline;
-         }
-         .gr-button {
-             white-space: nowrap;
-         }
-         .gr-button:focus {
-             border-color: rgb(147 197 253 / var(--tw-border-opacity));
-             outline: none;
-             box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
-             --tw-border-opacity: 1;
-             --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
-             --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
-             --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
-             --tw-ring-opacity: .5;
-         }
-         #advanced-options {
-             margin-bottom: 20px;
-         }
-         .footer {
-             margin-bottom: 45px;
-             margin-top: 35px;
-             text-align: center;
-             border-bottom: 1px solid #e5e5e5;
-         }
-         .footer>p {
-             font-size: .8rem;
-             display: inline-block;
-             padding: 0 10px;
-             transform: translateY(10px);
-             background: white;
-         }
-
-         .dark .footer {
-             border-color: #303030;
-         }
-         .dark .footer>p {
-             background: #0b0f19;
-         }
-         .acknowledgments h4{
-             margin: 1.25em 0 .25em 0;
-             font-weight: bold;
-             font-size: 115%;
-         }
- """
-
- block = gr.Blocks(css=css)
-
- # Each example holds: prompt, steps, scale, seed,
- # then edit prompt 1 params, edit prompt 2 params, and style params.
- examples = [
-     ['a photo of a cat', 50, 7, 3,
-      'sunglasses', False, 10, 5, 0.95,
-      '', False, 10, 5, 0.95,
-      '', False, 5, 7, 0.8],
-     ['an image of a crowded boulevard, realistic, 4k', 50, 7, 9,
-      'crowd, crowded, people', True, 10, 8.3, 0.9,
-      '', False, 10, 5, 0.95,
-      '', False, 5, 7, 0.8],
-     ['a castle next to a river', 50, 7, 48,
-      'boat on a river', False, 15, 6, 0.9,
-      'monet, impression, sunrise', False, 18, 6, 0.8,
-      '', False, 5, 7, 0.8],
-     ['a portrait of a king, full body shot, 8k', 50, 7, 33,
-      'male', True, 5, 5, 0.9,
-      'female', False, 5, 5, 0.9,
-      '', False, 5, 7, 0.8],
-     ['a photo of a flowerpot', 50, 7, 2,
-      'glasses', False, 12, 5, 0.975,
-      '', False, 10, 5, 0.95,
-      '', False, 5, 7, 0.8],
-     ['a photo of the face of a woman', 50, 7, 21,
-      'smiling, smile', False, 15, 3, 0.99,
-      'curls, wavy hair, curly hair', False, 13, 3, 0.925,
-      '', False, 5, 7, 0.8],
-     ['temple in ruines, forest, stairs, columns', 50, 7, 11,
-      '', False, 10, 5, 0.95,
-      '', False, 10, 5, 0.95,
-      'Animation', False, 5, 7, 0.8],
-     ['city made out of glass', 50, 7, 16,
-      '', False, 10, 5, 0.95,
-      '', False, 10, 5, 0.95,
-      'Concept Art', False, 10, 8, 0.8],
-     ['a man riding a horse', 50, 7, 11,
-      '', False, 10, 5, 0.95,
-      '', False, 10, 5, 0.95,
-      'Character Design', False, 11, 8, 0.9],
- ]
-
-
- with block:
-     gr.HTML(
-         """
-             <div style="text-align: center; max-width: 750px; margin: 0 auto;">
-               <div>
-                 <img class="logo" src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1666181274838-62fa1d95e8c9c532aa75331c.png" alt="AIML Logo"
-                     style="margin: auto; max-width: 7rem;">
-                 <h1 style="font-weight: 900; font-size: 3rem;">
-                   Semantic Guidance for Diffusion
-                 </h1>
-               </div>
-               <p style="margin-bottom: 10px; font-size: 94%">
-                 Interact with semantic concepts during the diffusion process. Details can be found in the paper <a href="https://arxiv.org/abs/2301.12247" style="text-decoration: underline;" target="_blank">SEGA: Instructing Diffusion using Semantic Dimensions</a>. <br/> Simply use the edit prompts to make arbitrary changes to the generation.
-               </p>
-             </div>
-         """
-     )
-     gr.HTML("""
-     <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
-     <br/>
-     <a href="https://huggingface.co/spaces/AIML-TUDA/semantic-diffusion?duplicate=true">
-     <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-     </p>""")
-     with gr.Group():
-         with gr.Box():
-             with gr.Row().style(mobile_collapse=False, equal_height=True):
-                 text = gr.Textbox(
-                     label="Enter your prompt",
-                     show_label=False,
-                     max_lines=1,
-                     placeholder="Enter your prompt",
-                 ).style(
-                     border=(True, False, True, True),
-                     rounded=(True, False, False, True),
-                     container=False,
-                 )
-                 btn = gr.Button("Generate image").style(
-                     margin=False,
-                     rounded=(False, True, True, False),
-                 )
-         with gr.Tabs() as tabs:
-             with gr.TabItem('Text Guidance', id=0):
-                 with gr.Row().style(mobile_collapse=False, equal_height=True):
-                     edit_1 = gr.Textbox(
-                         label="Edit Prompt 1",
-                         show_label=False,
-                         max_lines=1,
-                         placeholder="Enter your 1st edit prompt",
-                     ).style(
-                         border=(True, False, True, True),
-                         rounded=(True, False, False, True),
-                         container=False,
-                     )
-                 with gr.Group():
-                     with gr.Row().style(mobile_collapse=False, equal_height=True):
-                         rev_1 = gr.Checkbox(label='Negative Guidance')
-                         warmup_1 = gr.Slider(label='Warmup', minimum=0, maximum=50, value=10, step=1, interactive=True)
-                         scale_1 = gr.Slider(label='Scale', minimum=1, maximum=10, value=5, step=0.25, interactive=True)
-                         threshold_1 = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, value=0.95, step=0.01, interactive=True)
-                 with gr.Row().style(mobile_collapse=False, equal_height=True):
-                     edit_2 = gr.Textbox(
-                         label="Edit Prompt 2",
-                         show_label=False,
-                         max_lines=1,
-                         placeholder="Enter your 2nd edit prompt",
-                     ).style(
-                         border=(True, False, True, True),
-                         rounded=(True, False, False, True),
-                         container=False,
-                     )
-                 with gr.Group():
-                     with gr.Row().style(mobile_collapse=False, equal_height=True):
-                         rev_2 = gr.Checkbox(label='Negative Guidance')
-                         warmup_2 = gr.Slider(label='Warmup', minimum=0, maximum=50, value=10, step=1, interactive=True)
-                         scale_2 = gr.Slider(label='Scale', minimum=1, maximum=10, value=5, step=0.25, interactive=True)
-                         threshold_2 = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, value=0.95, step=0.01, interactive=True)
-             with gr.TabItem("Style Guidance", id=1):
-                 with gr.Row().style(mobile_collapse=False, equal_height=True):
-                     style = gr.Radio(label='Style', choices=['Concept Art', 'Animation', 'Character Design', 'Portrait Photo', 'Architecture'], interactive=True)
-                 with gr.Group():
-                     with gr.Row().style(mobile_collapse=False, equal_height=True):
-                         rev_style = gr.Checkbox(label='Negative Guidance', interactive=False)
-                         warmup_style = gr.Slider(label='Warmup', minimum=0, maximum=50, value=5, step=1, interactive=True)
-                         scale_style = gr.Slider(label='Scale', minimum=1, maximum=10, value=7, step=0.25, interactive=True)
-                         threshold_style = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, value=0.8, step=0.01, interactive=True)
-
-     gallery = gr.Gallery(
-         label="Generated images", show_label=False, elem_id="gallery"
-     ).style(grid=[2], height="auto")
-
-     with gr.Row(elem_id="advanced-options"):
-         scale = gr.Slider(label="Scale", minimum=3, maximum=15, value=7, step=1)
-         steps = gr.Slider(label="Steps", minimum=5, maximum=50, value=50, step=5, interactive=False)
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=2147483647,
-             step=1,
-             # randomize=True,
-         )
-
-     ex = gr.Examples(examples=examples, fn=infer,
-                      inputs=[text, steps, scale, seed, edit_1, rev_1, warmup_1, scale_1, threshold_1,
-                              edit_2, rev_2, warmup_2, scale_2, threshold_2,
-                              style, rev_style, warmup_style, scale_style, threshold_style],
-                      outputs=gallery, cache_examples=True)
-     ex.dataset.headers = ['Prompt', 'Steps', 'Scale', 'Seed',
-                           'Edit Prompt 1', 'Negation 1', 'Warmup 1', 'Scale 1', 'Threshold 1',
-                           'Edit Prompt 2', 'Negation 2', 'Warmup 2', 'Scale 2', 'Threshold 2',
-                           'Style', 'Style Negation', 'Style Warmup', 'Style Scale', 'Style Threshold']
-
-     text.submit(infer, inputs=[text, steps, scale, seed, edit_1, rev_1, warmup_1, scale_1, threshold_1,
-                                edit_2, rev_2, warmup_2, scale_2, threshold_2,
-                                style, rev_style, warmup_style, scale_style, threshold_style], outputs=gallery)
-     btn.click(infer, inputs=[text, steps, scale, seed, edit_1, rev_1, warmup_1, scale_1, threshold_1,
-                              edit_2, rev_2, warmup_2, scale_2, threshold_2,
-                              style, rev_style, warmup_style, scale_style, threshold_style], outputs=gallery)
-     # btn.click(change_tab, None, tabs)
-
-     edit_1.change(reset_style, outputs=style)
-     edit_2.change(reset_style, outputs=style)
-
-     rev_1.change(reset_style, outputs=style)
-     rev_2.change(reset_style, outputs=style)
-
-     warmup_1.change(reset_style, outputs=style)
-     warmup_2.change(reset_style, outputs=style)
-
-     threshold_1.change(reset_style, outputs=style)
-     threshold_2.change(reset_style, outputs=style)
-     # style.change(reset_text, outputs=[edit_1, edit_2])
-
-     gr.HTML(
-         """
-             <div class="footer">
-                 <p> Gradio Demo by AIML@TU Darmstadt and 🤗 Hugging Face
-                 </p>
-             </div>
-             <div class="acknowledgments">
-                 <p>Created by <a href="https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/">Manuel Brack</a> and <a href="justinpinkney.com">Patrick Schramowski</a> at <a href="https://www.aiml.informatik.tu-darmstadt.de">AIML Lab</a>.</p>
-             </div>
-         """
-     )
-
- block.launch()
 
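A minimal sketch of the SEGA call at the core of `infer`, using the keyword arguments and example values that appear above (the "smiling" face example); it assumes diffusers' `SemanticStableDiffusionPipeline` and the same checkpoint:

import torch
from diffusers import SemanticStableDiffusionPipeline

pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
gen = torch.Generator().manual_seed(21)

# Guide the generation toward "smiling" without changing the prompt itself.
out = pipe(
    "a photo of the face of a woman",
    num_inference_steps=50,
    guidance_scale=7,
    generator=gen,
    editing_prompt=["smiling, smile"],
    reverse_editing_direction=[False],
    edit_warmup_steps=[15],
    edit_guidance_scale=[3],
    edit_threshold=[0.99],
    edit_momentum_scale=0.5,
    edit_mom_beta=0.6,
)
image = out.images[0]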
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/options.py DELETED
@@ -1,39 +0,0 @@
- """
- Types of the various choices used during training.
- """
- from enum import Enum
-
-
- class AttentionType(Enum):
-     """Type of attention used during training"""
-
-     LocationSensitive = 1
-     Content_Based = 2
-     MultiHead = 3
-
-
- class LearningRateType(Enum):
-     """Type of learning rate schedule used during training"""
-
-     Learning_Rate_Decay = 1
-     Cosine_Scheduler = 2
-     SquareRoot_Scheduler = 3
-
-
- class OptimizerType(Enum):
-     """Type of optimizer used during training"""
-
-     Adam = 1
-     SGD = 2
-     AdamW = 3
-
-
- class LossType(Enum):
-     """Type of loss function used during training"""
-
-     L1_LOSS = 1
-     MSE_LOSS = 2
-     L1_LOSS_MASKED = 3
-     MSE_LOSS_MASKED = 4
-     BOTH = 5
-     BOTH_MASKED = 6
 
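A short sketch of how option enums like these are typically consumed, resolving a config string to an optimizer. The import path follows the repo layout shown above, and torch is assumed as the training framework here; the builder function itself is illustrative, not part of the original module:

import torch

# Import path assumed from the repo layout.
from poetry_diacritizer.options import OptimizerType


def build_optimizer(params, name: str, lr: float = 1e-3):
    """Resolve a config string like 'AdamW' through the enum before use."""
    opt_type = OptimizerType[name]  # raises KeyError on unknown names
    if opt_type is OptimizerType.Adam:
        return torch.optim.Adam(params, lr=lr)
    if opt_type is OptimizerType.SGD:
        return torch.optim.SGD(params, lr=lr)
    return torch.optim.AdamW(params, lr=lr)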
spaces/Ababababababbababa/poetry/app.py DELETED
@@ -1,53 +0,0 @@
- import gc
- import gradio as gr
- from transformers import pipeline, set_seed
-
- pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
- #gc.collect()
- samples = [
-     ['أنت', 1.0, 50, 1.0, 1.0, 114],
-     ['هل غادر', 1.0, 50, 1.0, 1.0, 114],
-     ['ألا ليت', 1.0, 50, 1.0, 1.0, 114],
-     ['يا قدس', 1.0, 50, 1.0, 1.0, 114],
-     ['عيد بأية حال', 1.0, 50, 1.0, 1.0, 114],
-     ['لكل شيء إذا ما', 1.0, 50, 1.0, 1.0, 114],
-     ['.', 1.0, 50, 1.0, 1.0, 114],
- ]
-
- notes = """
- - Enter a short prompt or select (click) one of the examples and click SEND
- - Adjust parameters (temperature, top k, top p and penalty) through the sliders (keep close to default values).
- - For the same seed (randomness), the same output is regenerated if other parameters are fixed. Seed should be 0 or more (not empty)
- - Clear and enter a new prompt, or select another example, and SEND to regenerate
- - The '.' means start a new line from no prompt (your prompt need not be long)
- - Be patient: this runs on CPU (free tier)
- - Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
- - Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
- """
-
- def sayPoetry(prompt, temp=1.0, topk=50, topp=1.0, penalty=1.0, seed=114):
-     seed = int(seed)
-     if seed < 0:
-         seed = 114
-     set_seed(seed)
-     gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp,
-                repetition_penalty=penalty, min_length=64, no_repeat_ngram_size=3,
-                return_full_text=True, num_beams=5, num_return_sequences=1)[0]["generated_text"]
-     poetry = ""
-     for line in gen.split('.')[:-1]:
-         poetry += line  # + "\n"
-     return poetry
-
- poetry = gr.Interface(fn=sayPoetry,
-     inputs=[
-         gr.Textbox(label="Enter short prompt or select from examples:"),
-         gr.Slider(0.70, 1.2, step=0.01, value=1.0, label='control temperature'),
-         gr.Slider(25, 100, step=1, value=50, label='control top k'),
-         gr.Slider(0.80, 1.0, step=0.01, value=1.0, label='control top p'),
-         gr.Slider(0.90, 1.50, step=0.01, value=1.0, label='control penalty'),
-         gr.Number(value=139750, precision=0, label='Seed'),
-     ],
-     outputs=[gr.Textbox(label="Generated Poetry:")],
-     allow_flagging='never',
-     title='Arabic Poetry Generation Demo (updated Jan. 2023)',
-     description="A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)",
-     examples=samples,
-     cache_examples=False,
-     article=notes)
- poetry.launch()
 
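With the Gradio wrapper stripped away, the generation call itself reduces to a few lines; a sketch assuming the akhooli/ap2023 checkpoint is downloadable:

from transformers import pipeline, set_seed

pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')

set_seed(114)  # a fixed seed gives reproducible verses for the same parameters
out = pipe('يا قدس', max_length=96, do_sample=True, temperature=1.0, top_k=50,
           top_p=1.0, repetition_penalty=1.0, min_length=64,
           no_repeat_ngram_size=3, num_beams=5, num_return_sequences=1)
print(out[0]["generated_text"])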
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/__init__.py DELETED
@@ -1,5 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
 
spaces/AchyuthGamer/ImMagician-Image-Generator/app.py DELETED
@@ -1,264 +0,0 @@
- import os
- import random
- import gradio as gr
- import numpy as np
- import PIL.Image
- import torch
- from typing import List
- from diffusers.utils import numpy_to_pil
- from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline
- from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
- from previewer.modules import Previewer
-
- os.environ['TOKENIZERS_PARALLELISM'] = 'false'
-
- DESCRIPTION = "ImMagician🪄"
- DESCRIPTION += "\n<p style=\"text-align: center\"><a href='https://huggingface.co/warp-ai/wuerstchen' target='_blank'>ImMagician🪄</a> is a new fast and efficient high resolution text-to-image architecture and model</p>"
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
-
- MAX_SEED = np.iinfo(np.int32).max
- CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
- USE_TORCH_COMPILE = False
- ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
- PREVIEW_IMAGES = True
-
- dtype = torch.float16
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- if torch.cuda.is_available():
-     prior_pipeline = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=dtype)
-     decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=dtype)
-     if ENABLE_CPU_OFFLOAD:
-         prior_pipeline.enable_model_cpu_offload()
-         decoder_pipeline.enable_model_cpu_offload()
-     else:
-         prior_pipeline.to(device)
-         decoder_pipeline.to(device)
-
-     if USE_TORCH_COMPILE:
-         prior_pipeline.prior = torch.compile(prior_pipeline.prior, mode="reduce-overhead", fullgraph=True)
-         decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="reduce-overhead", fullgraph=True)
-
-     if PREVIEW_IMAGES:
-         previewer = Previewer()
-         previewer.load_state_dict(torch.load("previewer/text2img_wurstchen_b_v1_previewer_100k.pt")["state_dict"])
-         previewer.eval().requires_grad_(False).to(device).to(dtype)
-
-         def callback_prior(i, t, latents):
-             output = previewer(latents)
-             output = numpy_to_pil(output.clamp(0, 1).permute(0, 2, 3, 1).cpu().numpy())
-             return output
-     else:
-         previewer = None
-         callback_prior = None
- else:
-     prior_pipeline = None
-     decoder_pipeline = None
-
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
-
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
-     prior_num_inference_steps: int = 60,
-     # prior_timesteps: List[float] = None,
-     prior_guidance_scale: float = 4.0,
-     decoder_num_inference_steps: int = 12,
-     # decoder_timesteps: List[float] = None,
-     decoder_guidance_scale: float = 0.0,
-     num_images_per_prompt: int = 2,
- ) -> PIL.Image.Image:
-     generator = torch.Generator().manual_seed(seed)
-
-     prior_output = prior_pipeline(
-         prompt=prompt,
-         height=height,
-         width=width,
-         timesteps=DEFAULT_STAGE_C_TIMESTEPS,
-         negative_prompt=negative_prompt,
-         guidance_scale=prior_guidance_scale,
-         num_images_per_prompt=num_images_per_prompt,
-         generator=generator,
-         callback=callback_prior,
-     )
-
-     if PREVIEW_IMAGES:
-         for _ in range(len(DEFAULT_STAGE_C_TIMESTEPS)):
-             r = next(prior_output)
-             if isinstance(r, list):
-                 yield r
-         prior_output = r
-
-     decoder_output = decoder_pipeline(
-         image_embeddings=prior_output.image_embeddings,
-         prompt=prompt,
-         num_inference_steps=decoder_num_inference_steps,
-         # timesteps=decoder_timesteps,
-         guidance_scale=decoder_guidance_scale,
-         negative_prompt=negative_prompt,
-         generator=generator,
-         output_type="pil",
-     ).images
-     yield decoder_output
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
- ]
-
- with gr.Blocks(css="style.css") as demo:
-     gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(
-         value="Duplicate Space for private use",
-         elem_id="duplicate-button",
-         visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-     )
-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(label="Result", show_label=False)
-     with gr.Accordion("Advanced options", open=False):
-         negative_prompt = gr.Text(
-             label="Negative prompt",
-             max_lines=1,
-             placeholder="Enter a Negative Prompt",
-         )
-
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row():
-             width = gr.Slider(
-                 label="Width",
-                 minimum=1024,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=512,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=1024,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=512,
-                 value=1024,
-             )
-             num_images_per_prompt = gr.Slider(
-                 label="Number of Images",
-                 minimum=1,
-                 maximum=6,
-                 step=1,
-                 value=2,
-             )
-         with gr.Row():
-             prior_guidance_scale = gr.Slider(
-                 label="Prior Guidance Scale",
-                 minimum=0,
-                 maximum=20,
-                 step=0.1,
-                 value=4.0,
-             )
-             prior_num_inference_steps = gr.Slider(
-                 label="Prior Inference Steps",
-                 minimum=30,
-                 maximum=30,
-                 step=1,
-                 value=30,
-             )
-
-             decoder_guidance_scale = gr.Slider(
-                 label="Decoder Guidance Scale",
-                 minimum=0,
-                 maximum=0,
-                 step=0.1,
-                 value=0.0,
-             )
-             decoder_num_inference_steps = gr.Slider(
-                 label="Decoder Inference Steps",
-                 minimum=4,
-                 maximum=12,
-                 step=1,
-                 value=12,
-             )
-
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         outputs=result,
-         fn=generate,
-         cache_examples=CACHE_EXAMPLES,
-     )
-
-     inputs = [
-         prompt,
-         negative_prompt,
-         seed,
-         width,
-         height,
-         prior_num_inference_steps,
-         # prior_timesteps,
-         prior_guidance_scale,
-         decoder_num_inference_steps,
-         # decoder_timesteps,
-         decoder_guidance_scale,
-         num_images_per_prompt,
-     ]
-     prompt.submit(
-         fn=randomize_seed_fn,
-         inputs=[seed, randomize_seed],
-         outputs=seed,
-         queue=False,
-         api_name=False,
-     ).then(
-         fn=generate,
-         inputs=inputs,
-         outputs=result,
-         api_name="run",
-     )
-     negative_prompt.submit(
-         fn=randomize_seed_fn,
-         inputs=[seed, randomize_seed],
-         outputs=seed,
-         queue=False,
-         api_name=False,
-     ).then(
-         fn=generate,
-         inputs=inputs,
-         outputs=result,
-         api_name=False,
-     )
-     run_button.click(
-         fn=randomize_seed_fn,
-         inputs=[seed, randomize_seed],
-         outputs=seed,
-         queue=False,
-         api_name=False,
-     ).then(
-         fn=generate,
-         inputs=inputs,
-         outputs=result,
-         api_name=False,
-     )
-
- if __name__ == "__main__":
-     demo.queue(max_size=20).launch()
 
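Stripped of the Gradio plumbing, the two-stage Würstchen flow in `generate` is just prior (stage C) then decoder (stages B/A); a sketch assuming a CUDA device and the diffusers Würstchen pipelines used above:

import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline
from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS

prior = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=torch.float16).to("cuda")
decoder = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16).to("cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
prior_output = prior(prompt=prompt, height=1024, width=1024,
                     timesteps=DEFAULT_STAGE_C_TIMESTEPS, guidance_scale=4.0)

# Decode the image embeddings produced by the prior into pixels.
images = decoder(image_embeddings=prior_output.image_embeddings, prompt=prompt,
                 num_inference_steps=12, guidance_scale=0.0, output_type="pil").images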
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/4.js DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.d.ts DELETED
@@ -1,2 +0,0 @@
- import Base from '../base/Base';
- export default class Radio extends Base { }
 
spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/phonecode.py DELETED
@@ -1,63 +0,0 @@
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import re
-
- from .num import verbalize_digit
-
- # Normalize landline / mobile phone numbers.
- # Mobile prefixes (see http://www.jihaoba.com/news/show/13680):
- # China Mobile: 139, 138, 137, 136, 135, 134, 159, 158, 157, 150, 151, 152, 188, 187, 182, 183, 184, 178, 198
- # China Unicom: 130, 131, 132, 156, 155, 186, 185, 176
- # China Telecom: 133, 153, 189, 180, 181, 177
- RE_MOBILE_PHONE = re.compile(
-     r"(?<!\d)((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})(?!\d)")
- RE_TELEPHONE = re.compile(
-     r"(?<!\d)((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{7,8})(?!\d)")
-
- # Nationwide unified service numbers start with 400.
- RE_NATIONAL_UNIFORM_NUMBER = re.compile(r"(400)(-)?\d{3}(-)?\d{4}")
-
-
- def phone2str(phone_string: str, mobile=True) -> str:
-     if mobile:
-         sp_parts = phone_string.strip('+').split()
-         result = ','.join(
-             [verbalize_digit(part, alt_one=True) for part in sp_parts])
-         return result
-     else:
-         sil_parts = phone_string.split('-')
-         result = ','.join(
-             [verbalize_digit(part, alt_one=True) for part in sil_parts])
-         return result
-
-
- def replace_phone(match) -> str:
-     """
-     Args:
-         match (re.Match)
-     Returns:
-         str
-     """
-     return phone2str(match.group(0), mobile=False)
-
-
- def replace_mobile(match) -> str:
-     """
-     Args:
-         match (re.Match)
-     Returns:
-         str
-     """
-     return phone2str(match.group(0))
 
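A sketch of how these replacers plug into `re.sub` during text normalization, assuming the sibling num.py (providing `verbalize_digit`) is importable and that the module path follows the repo layout above:

import re

# Import path assumed from the repo layout.
from text.frontend.zh_normalization.phonecode import (
    RE_MOBILE_PHONE, RE_TELEPHONE, replace_mobile, replace_phone)

text = "客服电话 010-12345678,手机 13912345678"
text = RE_MOBILE_PHONE.sub(replace_mobile, text)  # mobile numbers -> spoken digits
text = RE_TELEPHONE.sub(replace_phone, text)      # landlines -> spoken digits
print(text)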
spaces/Alpaca233/SadTalker/src/utils/videoio.py DELETED
@@ -1,41 +0,0 @@
- import os
- import shutil
- import uuid
-
- import cv2
-
-
- def load_video_to_cv2(input_path):
-     video_stream = cv2.VideoCapture(input_path)
-     fps = video_stream.get(cv2.CAP_PROP_FPS)
-     full_frames = []
-     while True:
-         still_reading, frame = video_stream.read()
-         if not still_reading:
-             video_stream.release()
-             break
-         full_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-     return full_frames
-
-
- def save_video_with_watermark(video, audio, save_path, watermark=False):
-     temp_file = str(uuid.uuid4()) + '.mp4'
-     # mux the audio track into the video without re-encoding
-     cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file)
-     os.system(cmd)
-
-     if not watermark:
-         shutil.move(temp_file, save_path)
-     else:
-         # overlay the SadTalker logo as a watermark
-         try:
-             # check whether we are running inside stable-diffusion-webui
-             import webui  # noqa: F401
-             from modules import paths
-             watermark_path = paths.script_path + "/extensions/SadTalker/docs/sadtalker_logo.png"
-         except ImportError:
-             # otherwise resolve the logo relative to the SadTalker root
-             dir_path = os.path.dirname(os.path.realpath(__file__))
-             watermark_path = dir_path + "/../../docs/sadtalker_logo.png"
-
-         cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -filter_complex "[1]scale=100:-1[wm];[0][wm]overlay=(main_w-overlay_w)-10:10" "%s"' % (temp_file, watermark_path, save_path)
-         os.system(cmd)
-         os.remove(temp_file)
 
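A short usage sketch, assuming ffmpeg is on PATH, the import path follows the repo layout, and the input file names are hypothetical:

# Import path assumed from the repo layout.
from src.utils.videoio import load_video_to_cv2, save_video_with_watermark

frames = load_video_to_cv2("result.mp4")  # list of RGB numpy arrays
print(len(frames), frames[0].shape)

# Mux the generated audio back in; watermark=True overlays the logo.
save_video_with_watermark("result.mp4", "speech.wav", "final.mp4", watermark=False)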
spaces/Alycer/VITS-Umamusume-voice-synthesizer/models.py DELETED
@@ -1,542 +0,0 @@
- import math
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- import commons
- import modules
- import attentions
-
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
- from commons import init_weights, get_padding
-
-
- class StochasticDurationPredictor(nn.Module):
-     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-         super().__init__()
-         filter_channels = in_channels  # it needs to be removed in a future version
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.n_flows = n_flows
-         self.gin_channels = gin_channels
-
-         self.log_flow = modules.Log()
-         self.flows = nn.ModuleList()
-         self.flows.append(modules.ElementwiseAffine(2))
-         for i in range(n_flows):
-             self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-             self.flows.append(modules.Flip())
-
-         self.post_pre = nn.Conv1d(1, filter_channels, 1)
-         self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
-         self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-         self.post_flows = nn.ModuleList()
-         self.post_flows.append(modules.ElementwiseAffine(2))
-         for i in range(4):
-             self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-             self.post_flows.append(modules.Flip())
-
-         self.pre = nn.Conv1d(in_channels, filter_channels, 1)
-         self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
-         self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-         if gin_channels != 0:
-             self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
-     def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
-         x = torch.detach(x)
-         x = self.pre(x)
-         if g is not None:
-             g = torch.detach(g)
-             x = x + self.cond(g)
-         x = self.convs(x, x_mask)
-         x = self.proj(x) * x_mask
-
-         if not reverse:
-             flows = self.flows
-             assert w is not None
-
-             logdet_tot_q = 0
-             h_w = self.post_pre(w)
-             h_w = self.post_convs(h_w, x_mask)
-             h_w = self.post_proj(h_w) * x_mask
-             e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
-             z_q = e_q
-             for flow in self.post_flows:
-                 z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
-                 logdet_tot_q += logdet_q
-             z_u, z1 = torch.split(z_q, [1, 1], 1)
-             u = torch.sigmoid(z_u) * x_mask
-             z0 = (w - u) * x_mask
-             logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
-             logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q
-
-             logdet_tot = 0
-             z0, logdet = self.log_flow(z0, x_mask)
-             logdet_tot += logdet
-             z = torch.cat([z0, z1], 1)
-             for flow in flows:
-                 z, logdet = flow(z, x_mask, g=x, reverse=reverse)
-                 logdet_tot = logdet_tot + logdet
-             nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) - logdet_tot
-             return nll + logq  # [b]
-         else:
-             flows = list(reversed(self.flows))
-             flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
-             z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
-             for flow in flows:
-                 z = flow(z, x_mask, g=x, reverse=reverse)
-             z0, z1 = torch.split(z, [1, 1], 1)
-             logw = z0
-             return logw
-
-
- class DurationPredictor(nn.Module):
-     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
-         super().__init__()
-
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.gin_channels = gin_channels
-
-         self.drop = nn.Dropout(p_dropout)
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-         self.norm_1 = modules.LayerNorm(filter_channels)
-         self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
-         self.norm_2 = modules.LayerNorm(filter_channels)
-         self.proj = nn.Conv1d(filter_channels, 1, 1)
-
-         if gin_channels != 0:
-             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
-     def forward(self, x, x_mask, g=None):
-         x = torch.detach(x)
-         if g is not None:
-             g = torch.detach(g)
-             x = x + self.cond(g)
-         x = self.conv_1(x * x_mask)
-         x = torch.relu(x)
-         x = self.norm_1(x)
-         x = self.drop(x)
-         x = self.conv_2(x * x_mask)
-         x = torch.relu(x)
-         x = self.norm_2(x)
-         x = self.drop(x)
-         x = self.proj(x * x_mask)
-         return x * x_mask
-
-
- class TextEncoder(nn.Module):
-     def __init__(self,
-                  n_vocab,
-                  out_channels,
-                  hidden_channels,
-                  filter_channels,
-                  n_heads,
-                  n_layers,
-                  kernel_size,
-                  p_dropout,
-                  emotion_embedding):
-         super().__init__()
-         self.n_vocab = n_vocab
-         self.out_channels = out_channels
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.emotion_embedding = emotion_embedding
-
-         if self.n_vocab != 0:
-             self.emb = nn.Embedding(n_vocab, hidden_channels)
-             if emotion_embedding:
-                 self.emotion_emb = nn.Linear(1024, hidden_channels)
-             nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
-         self.encoder = attentions.Encoder(
-             hidden_channels,
-             filter_channels,
-             n_heads,
-             n_layers,
-             kernel_size,
-             p_dropout)
-         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-     def forward(self, x, x_lengths, emotion_embedding=None):
-         if self.n_vocab != 0:
-             x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
-         if emotion_embedding is not None:
-             x = x + self.emotion_emb(emotion_embedding.unsqueeze(1))
-         x = torch.transpose(x, 1, -1)  # [b, h, t]
-         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-         x = self.encoder(x * x_mask, x_mask)
-         stats = self.proj(x) * x_mask
-
-         m, logs = torch.split(stats, self.out_channels, dim=1)
-         return x, m, logs, x_mask
-
-
- class ResidualCouplingBlock(nn.Module):
-     def __init__(self,
-                  channels,
-                  hidden_channels,
-                  kernel_size,
-                  dilation_rate,
-                  n_layers,
-                  n_flows=4,
-                  gin_channels=0):
-         super().__init__()
-         self.channels = channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.n_flows = n_flows
-         self.gin_channels = gin_channels
-
-         self.flows = nn.ModuleList()
-         for i in range(n_flows):
-             self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
-             self.flows.append(modules.Flip())
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         if not reverse:
-             for flow in self.flows:
-                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
-         else:
-             for flow in reversed(self.flows):
-                 x = flow(x, x_mask, g=g, reverse=reverse)
-         return x
-
-
- class PosteriorEncoder(nn.Module):
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  hidden_channels,
-                  kernel_size,
-                  dilation_rate,
-                  n_layers,
-                  gin_channels=0):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.gin_channels = gin_channels
-
-         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
-         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
-         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-     def forward(self, x, x_lengths, g=None):
-         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-         x = self.pre(x) * x_mask
-         x = self.enc(x, x_mask, g=g)
-         stats = self.proj(x) * x_mask
-         m, logs = torch.split(stats, self.out_channels, dim=1)
-         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-         return z, m, logs, x_mask
-
-
- class Generator(torch.nn.Module):
-     def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
-         super(Generator, self).__init__()
-         self.num_kernels = len(resblock_kernel_sizes)
-         self.num_upsamples = len(upsample_rates)
-         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
-         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
-         self.ups = nn.ModuleList()
-         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-             self.ups.append(weight_norm(
-                 ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2**(i + 1)),
-                                 k, u, padding=(k - u) // 2)))
-
-         self.resblocks = nn.ModuleList()
-         for i in range(len(self.ups)):
-             ch = upsample_initial_channel // (2**(i + 1))
-             for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
-                 self.resblocks.append(resblock(ch, k, d))
-
-         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-         self.ups.apply(init_weights)
-
-         if gin_channels != 0:
-             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-     def forward(self, x, g=None):
-         x = self.conv_pre(x)
-         if g is not None:
-             x = x + self.cond(g)
-
-         for i in range(self.num_upsamples):
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             x = self.ups[i](x)
-             xs = None
-             for j in range(self.num_kernels):
-                 if xs is None:
-                     xs = self.resblocks[i * self.num_kernels + j](x)
-                 else:
-                     xs += self.resblocks[i * self.num_kernels + j](x)
-             x = xs / self.num_kernels
-         x = F.leaky_relu(x)
-         x = self.conv_post(x)
-         x = torch.tanh(x)
-
-         return x
-
-     def remove_weight_norm(self):
-         print('Removing weight norm...')
-         for l in self.ups:
-             remove_weight_norm(l)
-         for l in self.resblocks:
-             l.remove_weight_norm()
-
-
- class DiscriminatorP(torch.nn.Module):
-     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-         super(DiscriminatorP, self).__init__()
-         self.period = period
-         self.use_spectral_norm = use_spectral_norm
-         norm_f = weight_norm if not use_spectral_norm else spectral_norm
-         self.convs = nn.ModuleList([
-             norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-             norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
-         ])
-         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-     def forward(self, x):
-         fmap = []
-
-         # 1d to 2d
-         b, c, t = x.shape
-         if t % self.period != 0:  # pad first
-             n_pad = self.period - (t % self.period)
-             x = F.pad(x, (0, n_pad), "reflect")
-             t = t + n_pad
-         x = x.view(b, c, t // self.period, self.period)
-
-         for l in self.convs:
-             x = l(x)
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             fmap.append(x)
-         x = self.conv_post(x)
-         fmap.append(x)
-         x = torch.flatten(x, 1, -1)
-
-         return x, fmap
-
-
- class DiscriminatorS(torch.nn.Module):
-     def __init__(self, use_spectral_norm=False):
-         super(DiscriminatorS, self).__init__()
-         norm_f = weight_norm if not use_spectral_norm else spectral_norm
-         self.convs = nn.ModuleList([
-             norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-             norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-             norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-             norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-             norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-             norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-         ])
-         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-     def forward(self, x):
-         fmap = []
-
-         for l in self.convs:
-             x = l(x)
-             x = F.leaky_relu(x, modules.LRELU_SLOPE)
-             fmap.append(x)
-         x = self.conv_post(x)
-         fmap.append(x)
-         x = torch.flatten(x, 1, -1)
-
-         return x, fmap
-
-
- class MultiPeriodDiscriminator(torch.nn.Module):
-     def __init__(self, use_spectral_norm=False):
-         super(MultiPeriodDiscriminator, self).__init__()
-         periods = [2, 3, 5, 7, 11]
-
-         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-         discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
-         self.discriminators = nn.ModuleList(discs)
-
-     def forward(self, y, y_hat):
-         y_d_rs = []
-         y_d_gs = []
-         fmap_rs = []
-         fmap_gs = []
-         for i, d in enumerate(self.discriminators):
-             y_d_r, fmap_r = d(y)
-             y_d_g, fmap_g = d(y_hat)
-             y_d_rs.append(y_d_r)
-             y_d_gs.append(y_d_g)
-             fmap_rs.append(fmap_r)
-             fmap_gs.append(fmap_g)
-
-         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
- class SynthesizerTrn(nn.Module):
-     """
-     Synthesizer for Training
-     """
-
-     def __init__(self,
-                  n_vocab,
-                  spec_channels,
-                  segment_size,
-                  inter_channels,
-                  hidden_channels,
-                  filter_channels,
-                  n_heads,
-                  n_layers,
-                  kernel_size,
-                  p_dropout,
-                  resblock,
-                  resblock_kernel_sizes,
-                  resblock_dilation_sizes,
-                  upsample_rates,
-                  upsample_initial_channel,
-                  upsample_kernel_sizes,
-                  n_speakers=0,
-                  gin_channels=0,
-                  use_sdp=True,
-                  emotion_embedding=False,
-                  **kwargs):
-
-         super().__init__()
-         self.n_vocab = n_vocab
-         self.spec_channels = spec_channels
-         self.inter_channels = inter_channels
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.resblock = resblock
-         self.resblock_kernel_sizes = resblock_kernel_sizes
-         self.resblock_dilation_sizes = resblock_dilation_sizes
-         self.upsample_rates = upsample_rates
-         self.upsample_initial_channel = upsample_initial_channel
-         self.upsample_kernel_sizes = upsample_kernel_sizes
-         self.segment_size = segment_size
-         self.n_speakers = n_speakers
-         self.gin_channels = gin_channels
-
-         self.use_sdp = use_sdp
-
-         self.enc_p = TextEncoder(n_vocab,
-                                  inter_channels,
-                                  hidden_channels,
-                                  filter_channels,
-                                  n_heads,
-                                  n_layers,
-                                  kernel_size,
-                                  p_dropout,
-                                  emotion_embedding)
-         self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
-         self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
-         self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
-         if use_sdp:
-             self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
461
- else:
462
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
463
-
464
- if n_speakers > 1:
465
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
466
-
467
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
468
-
469
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
470
- if self.n_speakers > 0:
471
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
472
- else:
473
- g = None
474
-
475
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
476
- z_p = self.flow(z, y_mask, g=g)
477
-
478
- with torch.no_grad():
479
- # negative cross-entropy
480
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
481
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
482
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
483
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
484
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
485
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
486
-
487
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
488
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
489
-
490
- w = attn.sum(2)
491
- if self.use_sdp:
492
- l_length = self.dp(x, x_mask, w, g=g)
493
- l_length = l_length / torch.sum(x_mask)
494
- else:
495
- logw_ = torch.log(w + 1e-6) * x_mask
496
- logw = self.dp(x, x_mask, g=g)
497
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
498
-
499
- # expand prior
500
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
501
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
502
-
503
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
504
- o = self.dec(z_slice, g=g)
505
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
506
-
507
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None):
508
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
509
- if self.n_speakers > 0:
510
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
511
- else:
512
- g = None
513
-
514
- if self.use_sdp:
515
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
516
- else:
517
- logw = self.dp(x, x_mask, g=g)
518
- w = torch.exp(logw) * x_mask * length_scale
519
- w_ceil = torch.ceil(w)
520
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
521
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
522
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
523
- attn = commons.generate_path(w_ceil, attn_mask)
524
-
525
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
526
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
527
-
528
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
529
- z = self.flow(z_p, y_mask, g=g, reverse=True)
530
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
531
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
532
-
533
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
534
- assert self.n_speakers > 0, "n_speakers have to be larger than 0."
535
- g_src = self.emb_g(sid_src).unsqueeze(-1)
536
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
537
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
538
- z_p = self.flow(z, y_mask, g=g_src)
539
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
540
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
541
- return o_hat, y_mask, (z, z_p, z_hat)
542
-
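
The deleted classes above follow the HiFi-GAN/VITS layout: one `DiscriminatorS` on the raw waveform plus one `DiscriminatorP` per prime period, bundled by `MultiPeriodDiscriminator`. The following is a minimal smoke test, not part of the original file; the import path and waveform shapes are assumptions for illustration:

```python
import torch

# Hypothetical import path; the classes are defined in the models.py shown above.
from models import MultiPeriodDiscriminator

mpd = MultiPeriodDiscriminator()
y = torch.randn(2, 1, 8192)      # "real" waveform batch: [batch, 1, samples]
y_hat = torch.randn(2, 1, 8192)  # "generated" waveform batch
y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
# One entry per sub-discriminator: 1 DiscriminatorS + 5 DiscriminatorP (periods 2, 3, 5, 7, 11)
print(len(y_d_rs), len(fmap_rs))  # 6 6
```
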
spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/japanese.py DELETED
@@ -1,153 +0,0 @@
- import re
- from unidecode import unidecode
- import pyopenjtalk
-
-
- # Regular expression matching Japanese without punctuation marks:
- _japanese_characters = re.compile(
-     r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # Regular expression matching non-Japanese characters or punctuation marks:
- _japanese_marks = re.compile(
-     r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # List of (symbol, Japanese) pairs for marks:
- _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('%', 'パーセント')
- ]]
-
- # List of (romaji, ipa) pairs for marks:
- _romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('ts', 'ʦ'),
-     ('u', 'ɯ'),
-     ('j', 'ʥ'),
-     ('y', 'j'),
-     ('ni', 'n^i'),
-     ('nj', 'n^'),
-     ('hi', 'çi'),
-     ('hj', 'ç'),
-     ('f', 'ɸ'),
-     ('I', 'i*'),
-     ('U', 'ɯ*'),
-     ('r', 'ɾ')
- ]]
-
- # List of (romaji, ipa2) pairs for marks:
- _romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('u', 'ɯ'),
-     ('ʧ', 'tʃ'),
-     ('j', 'dʑ'),
-     ('y', 'j'),
-     ('ni', 'n^i'),
-     ('nj', 'n^'),
-     ('hi', 'çi'),
-     ('hj', 'ç'),
-     ('f', 'ɸ'),
-     ('I', 'i*'),
-     ('U', 'ɯ*'),
-     ('r', 'ɾ')
- ]]
-
- # List of (consonant, sokuon) pairs:
- _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
-     (r'Q([↑↓]*[kg])', r'k#\1'),
-     (r'Q([↑↓]*[tdjʧ])', r't#\1'),
-     (r'Q([↑↓]*[sʃ])', r's\1'),
-     (r'Q([↑↓]*[pb])', r'p#\1')
- ]]
-
- # List of (consonant, hatsuon) pairs:
- _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
-     (r'N([↑↓]*[pbm])', r'm\1'),
-     (r'N([↑↓]*[ʧʥj])', r'n^\1'),
-     (r'N([↑↓]*[tdn])', r'n\1'),
-     (r'N([↑↓]*[kg])', r'ŋ\1')
- ]]
-
-
- def symbols_to_japanese(text):
-     for regex, replacement in _symbols_to_japanese:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def japanese_to_romaji_with_accent(text):
-     '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
-     text = symbols_to_japanese(text)
-     sentences = re.split(_japanese_marks, text)
-     marks = re.findall(_japanese_marks, text)
-     text = ''
-     for i, sentence in enumerate(sentences):
-         if re.match(_japanese_characters, sentence):
-             if text != '':
-                 text += ' '
-             labels = pyopenjtalk.extract_fullcontext(sentence)
-             for n, label in enumerate(labels):
-                 phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
-                 if phoneme not in ['sil', 'pau']:
-                     text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q')
-                 else:
-                     continue
-                 # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
-                 a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
-                 a2 = int(re.search(r"\+(\d+)\+", label).group(1))
-                 a3 = int(re.search(r"\+(\d+)/", label).group(1))
-                 if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
-                     a2_next = -1
-                 else:
-                     a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
-                 # Accent phrase boundary
-                 if a3 == 1 and a2_next == 1:
-                     text += ' '
-                 # Falling
-                 elif a1 == 0 and a2_next == a2 + 1:
-                     text += '↓'
-                 # Rising
-                 elif a2 == 1 and a2_next == 2:
-                     text += '↑'
-         if i < len(marks):
-             text += unidecode(marks[i]).replace(' ', '')
-     return text
-
-
- def get_real_sokuon(text):
-     for regex, replacement in _real_sokuon:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def get_real_hatsuon(text):
-     for regex, replacement in _real_hatsuon:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def japanese_to_ipa(text):
-     text = japanese_to_romaji_with_accent(text).replace('...', '…')
-     text = re.sub(
-         r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
-     text = get_real_sokuon(text)
-     text = get_real_hatsuon(text)
-     for regex, replacement in _romaji_to_ipa:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def japanese_to_ipa2(text):
-     text = japanese_to_romaji_with_accent(text).replace('...', '…')
-     text = get_real_sokuon(text)
-     text = get_real_hatsuon(text)
-     for regex, replacement in _romaji_to_ipa2:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def japanese_to_ipa3(text):
-     text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
-         'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
-     text = re.sub(
-         r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
-     text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
-     return text
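
For reference, a short usage sketch (not part of the original file; it assumes `pyopenjtalk` and `unidecode` are installed, and the import path is a guess based on the file's location under `text/`):

```python
# Hedged example: exact output depends on the installed pyopenjtalk dictionary.
from text import japanese  # hypothetical import path for the module above

s = 'こんにちは。'
print(japanese.japanese_to_romaji_with_accent(s))  # romaji with ↑/↓ pitch-accent marks
print(japanese.japanese_to_ipa(s))                 # IPA after sokuon/hatsuon rewriting
```
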
spaces/Amrrs/DragGan-Inversion/torch_utils/custom_ops.py DELETED
@@ -1,171 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- import glob
- import hashlib
- import importlib
- import os
- import re
- import shutil
- import uuid
-
- import torch
- import torch.utils.cpp_extension
- from torch.utils.file_baton import FileBaton
-
- # ----------------------------------------------------------------------------
- # Global options.
-
- verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
-
- # ----------------------------------------------------------------------------
- # Internal helper funcs.
-
-
- def _find_compiler_bindir():
-     patterns = [
-         'C:/Program Files*/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
-         'C:/Program Files*/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
-         'C:/Program Files*/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
-         'C:/Program Files*/Microsoft Visual Studio */vc/bin',
-     ]
-     for pattern in patterns:
-         matches = sorted(glob.glob(pattern))
-         if len(matches):
-             return matches[-1]
-     return None
-
- # ----------------------------------------------------------------------------
-
-
- def _get_mangled_gpu_name():
-     name = torch.cuda.get_device_name().lower()
-     out = []
-     for c in name:
-         if re.match('[a-z0-9_-]+', c):
-             out.append(c)
-         else:
-             out.append('-')
-     return ''.join(out)
-
- # ----------------------------------------------------------------------------
- # Main entry point for compiling and loading C++/CUDA plugins.
-
-
- _cached_plugins = dict()
-
-
- def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
-     assert verbosity in ['none', 'brief', 'full']
-     if headers is None:
-         headers = []
-     if source_dir is not None:
-         sources = [os.path.join(source_dir, fname) for fname in sources]
-         headers = [os.path.join(source_dir, fname) for fname in headers]
-
-     # Already cached?
-     if module_name in _cached_plugins:
-         return _cached_plugins[module_name]
-
-     # Print status.
-     if verbosity == 'full':
-         print(f'Setting up PyTorch plugin "{module_name}"...')
-     elif verbosity == 'brief':
-         print(
-             f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
-     verbose_build = (verbosity == 'full')
-
-     # Compile and load.
-     try: # pylint: disable=too-many-nested-blocks
-         # Make sure we can find the necessary compiler binaries.
-         if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
-             compiler_bindir = _find_compiler_bindir()
-             if compiler_bindir is None:
-                 raise RuntimeError(
-                     f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
-             os.environ['PATH'] += ';' + compiler_bindir
-
-         # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
-         # break the build or unnecessarily restrict what's available to nvcc.
-         # Unset it to let nvcc decide based on what's available on the
-         # machine.
-         os.environ['TORCH_CUDA_ARCH_LIST'] = ''
-
-         # Incremental build md5sum trickery. Copies all the input source files
-         # into a cached build directory under a combined md5 digest of the input
-         # source files. Copying is done only if the combined digest has changed.
-         # This keeps input file timestamps and filenames the same as in previous
-         # extension builds, allowing for fast incremental rebuilds.
-         #
-         # This optimization is done only in case all the source files reside in
-         # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
-         # environment variable is set (we take this as a signal that the user
-         # actually cares about this.)
-         #
-         # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
-         # around the *.cu dependency bug in ninja config.
-         #
-         all_source_files = sorted(sources + headers)
-         all_source_dirs = set(os.path.dirname(fname)
-                               for fname in all_source_files)
-         # and ('TORCH_EXTENSIONS_DIR' in os.environ):
-         if len(all_source_dirs) == 1:
-
-             # Compute combined hash digest for all source files.
-             hash_md5 = hashlib.md5()
-             for src in all_source_files:
-                 with open(src, 'rb') as f:
-                     hash_md5.update(f.read())
-
-             # Select cached build directory name.
-             source_digest = hash_md5.hexdigest()
-             build_top_dir = torch.utils.cpp_extension._get_build_directory(
-                 module_name, verbose=verbose_build) # pylint: disable=protected-access
-             cached_build_dir = os.path.join(
-                 build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
-
-             if not os.path.isdir(cached_build_dir):
-                 tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
-                 os.makedirs(tmpdir)
-                 for src in all_source_files:
-                     shutil.copyfile(src, os.path.join(
-                         tmpdir, os.path.basename(src)))
-                 try:
-                     os.replace(tmpdir, cached_build_dir) # atomic
-                 except OSError:
-                     # source directory already exists, delete tmpdir and its contents.
-                     shutil.rmtree(tmpdir)
-                     if not os.path.isdir(cached_build_dir):
-                         raise
-
-             # Compile.
-             cached_sources = [os.path.join(
-                 cached_build_dir, os.path.basename(fname)) for fname in sources]
-             torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
-                                            verbose=verbose_build, sources=cached_sources, **build_kwargs)
-         else:
-             torch.utils.cpp_extension.load(
-                 name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
-
-         # Load.
-         module = importlib.import_module(module_name)
-
-     except:
-         if verbosity == 'brief':
-             print('Failed!')
-         raise
-
-     # Print status and add to cache dict.
-     if verbosity == 'full':
-         print(f'Done setting up PyTorch plugin "{module_name}".')
-     elif verbosity == 'brief':
-         print('Done.')
-     _cached_plugins[module_name] = module
-     return module
-
- # ----------------------------------------------------------------------------
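
`get_plugin` wraps `torch.utils.cpp_extension.load` with an MD5-keyed source cache so repeated runs reuse the compiled extension. A hedged call sketch (the module name, file names, and paths here are illustrative placeholders, not taken from the original repo):

```python
import custom_ops  # the module shown above

plugin = custom_ops.get_plugin(
    module_name='bias_act_plugin',            # name the compiled module is imported under
    sources=['bias_act.cpp', 'bias_act.cu'],  # C++/CUDA sources, resolved against source_dir
    headers=['bias_act.h'],                   # headers only contribute to the cache digest
    source_dir='/path/to/torch_utils/ops',
    extra_cuda_cflags=['--use_fast_math'],    # forwarded to torch.utils.cpp_extension.load
)
```
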
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/img2img_inpainting.py DELETED
@@ -1,463 +0,0 @@
- import inspect
- from typing import Callable, List, Optional, Tuple, Union
-
- import numpy as np
- import PIL
- import torch
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
- from diffusers import DiffusionPipeline
- from diffusers.configuration_utils import FrozenDict
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
- from diffusers.utils import deprecate, logging
-
-
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
- def prepare_mask_and_masked_image(image, mask):
-     image = np.array(image.convert("RGB"))
-     image = image[None].transpose(0, 3, 1, 2)
-     image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
-     mask = np.array(mask.convert("L"))
-     mask = mask.astype(np.float32) / 255.0
-     mask = mask[None, None]
-     mask[mask < 0.5] = 0
-     mask[mask >= 0.5] = 1
-     mask = torch.from_numpy(mask)
-
-     masked_image = image * (mask < 0.5)
-
-     return mask, masked_image
-
-
- def check_size(image, height, width):
-     if isinstance(image, PIL.Image.Image):
-         w, h = image.size
-     elif isinstance(image, torch.Tensor):
-         *_, h, w = image.shape
-
-     if h != height or w != width:
-         raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
-
-
- def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
-     inner_image = inner_image.convert("RGBA")
-     image = image.convert("RGB")
-
-     image.paste(inner_image, paste_offset, inner_image)
-     image = image.convert("RGB")
-
-     return image
-
-
- class ImageToImageInpaintingPipeline(DiffusionPipeline):
-     r"""
-     Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPImageProcessor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
-         r"""
-         Enable sliced attention computation.
-
-         When this option is enabled, the attention module will split the input tensor in slices, to compute attention
-         in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
-         Args:
-             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
-                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
-                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
-                 `attention_head_dim` must be a multiple of `slice_size`.
-         """
-         if slice_size == "auto":
-             # half the attention head size is usually a good trade-off between
-             # speed and memory
-             slice_size = self.unet.config.attention_head_dim // 2
-         self.unet.set_attention_slice(slice_size)
-
-     def disable_attention_slicing(self):
-         r"""
-         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
-         back to computing attention in one step.
-         """
-         # set slice_size = `None` to disable `attention slicing`
-         self.enable_attention_slicing(None)
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[torch.FloatTensor, PIL.Image.Image],
-         inner_image: Union[torch.FloatTensor, PIL.Image.Image],
-         mask_image: Union[torch.FloatTensor, PIL.Image.Image],
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[torch.Generator] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         **kwargs,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             image (`torch.Tensor` or `PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
-                 be masked out with `mask_image` and repainted according to `prompt`.
-             inner_image (`torch.Tensor` or `PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch which will be overlaid onto `image`. Non-transparent
-                 regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
-                 the last channel representing the alpha channel, which will be used to blend `inner_image` with
-                 `image`. If not provided, it will be forcibly cast to RGBA.
-             mask_image (`PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
-                 repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
-                 to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
-                 instead of 3, so the expected shape would be `(B, H, W, 1)`.
-             height (`int`, *optional*, defaults to 512):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to 512):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`], will be ignored for others.
-             generator (`torch.Generator`, *optional*):
-                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                 deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that will be called every `callback_steps` steps during inference. The function will be
-                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function will be called. If not specified, the callback will be
-                 called at every step.
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
-             When returning a tuple, the first element is a list with the generated images, and the second element is a
-             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-             (nsfw) content, according to the `safety_checker`.
-         """
-
-         if isinstance(prompt, str):
-             batch_size = 1
-         elif isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-         # check if input sizes are correct
-         check_size(image, height, width)
-         check_size(inner_image, height, width)
-         check_size(mask_image, height, width)
-
-         # get prompt text embeddings
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             return_tensors="pt",
-         )
-         text_input_ids = text_inputs.input_ids
-
-         if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
-             removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-             text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-         text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
-
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         bs_embed, seq_len, _ = text_embeddings.shape
-         text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
-         text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""]
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
-             uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
-         # get the initial random noise unless the user supplied it
-         # Unlike in other pipelines, latents need to be generated in the target device
-         # for 1-to-1 results reproducibility with the CompVis implementation.
-         # However this currently doesn't work in `mps`.
-         num_channels_latents = self.vae.config.latent_channels
-         latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
-         latents_dtype = text_embeddings.dtype
-         if latents is None:
-             if self.device.type == "mps":
-                 # randn does not exist on mps
-                 latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
-                     self.device
-                 )
-             else:
-                 latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
-         else:
-             if latents.shape != latents_shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-             latents = latents.to(self.device)
-
-         # overlay the inner image
-         image = overlay_inner_image(image, inner_image)
-
-         # prepare mask and masked_image
-         mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
-         mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
-         masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
-
-         # resize the mask to latents shape as we concatenate the mask to the latents
-         mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
-
-         # encode the mask image into latents space so we can concatenate it to the latents
-         masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
-         masked_image_latents = 0.18215 * masked_image_latents
-
-         # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
-         mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
-         masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
-
-         mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
-         masked_image_latents = (
-             torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
-         )
-
-         num_channels_mask = mask.shape[1]
-         num_channels_masked_image = masked_image_latents.shape[1]
-
-         if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
-             raise ValueError(
-                 f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
-                 f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
-                 f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
-                 f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
-                 " `pipeline.unet` or your `mask_image` or `image` input."
-             )
-
-         # set timesteps
-         self.scheduler.set_timesteps(num_inference_steps)
-
-         # Some schedulers like PNDM have timesteps as arrays
-         # It's more optimized to move all timesteps to correct device beforehand
-         timesteps_tensor = self.scheduler.timesteps.to(self.device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         for i, t in enumerate(self.progress_bar(timesteps_tensor)):
-             # expand the latents if we are doing classifier free guidance
-             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
-             # concat latents, mask, masked_image_latents in the channel dimension
-             latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
-
-             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-             # predict the noise residual
-             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
-             # perform guidance
-             if do_classifier_free_guidance:
-                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-             # compute the previous noisy sample x_t -> x_t-1
-             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
-             # call the callback, if provided
-             if callback is not None and i % callback_steps == 0:
-                 callback(i, t, latents)
-
-         latents = 1 / 0.18215 * latents
-         image = self.vae.decode(latents).sample
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
-                 self.device
-             )
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
-             )
-         else:
-             has_nsfw_concept = None
-
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image, has_nsfw_concept)
-
-         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
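
Since this file lives under `examples/community`, it is normally loaded through diffusers' `custom_pipeline` mechanism rather than imported directly. A usage sketch (the model id and file names are placeholders; the checkpoint must be an inpainting-style UNet whose `in_channels` matches latents + mask + masked-image latents):

```python
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # inpainting checkpoint (9-channel UNet)
    custom_pipeline="img2img_inpainting",    # resolves to the community file above
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("background.png").convert("RGB")      # 512x512 base image
inner_image = Image.open("overlay.png").convert("RGBA")  # alpha channel drives blending
mask = Image.open("mask.png").convert("L")               # white = repaint, black = keep

result = pipe(
    prompt="a mecha robot sitting on a bench",
    image=image,
    inner_image=inner_image,
    mask_image=mask,
).images[0]
result.save("output.png")
```
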
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py DELETED
@@ -1,522 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import Callable, List, Optional, Union
-
- import numpy as np
- import PIL
- import torch
- from PIL import Image
- from transformers import (
-     XLMRobertaTokenizer,
- )
-
- from ...models import UNet2DConditionModel, VQModel
- from ...schedulers import DDIMScheduler
- from ...utils import (
-     is_accelerate_available,
-     is_accelerate_version,
-     logging,
-     randn_tensor,
-     replace_example_docstring,
- )
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
- from .text_encoder import MultilingualCLIP
-
-
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
-         >>> from diffusers.utils import load_image
-         >>> import torch
-
-         >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(
-         ...     "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
-         ... )
-         >>> pipe_prior.to("cuda")
-
-         >>> prompt = "A red cartoon frog, 4k"
-         >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
-
-         >>> pipe = KandinskyImg2ImgPipeline.from_pretrained(
-         ...     "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
-         ... )
-         >>> pipe.to("cuda")
-
-         >>> init_image = load_image(
-         ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-         ...     "/kandinsky/frog.png"
-         ... )
-
-         >>> image = pipe(
-         ...     prompt,
-         ...     image=init_image,
-         ...     image_embeds=image_emb,
-         ...     negative_image_embeds=zero_image_emb,
-         ...     height=768,
-         ...     width=768,
-         ...     num_inference_steps=100,
-         ...     strength=0.2,
-         ... ).images
-
-         >>> image[0].save("red_frog.png")
-         ```
- """
-
-
- def get_new_h_w(h, w, scale_factor=8):
-     new_h = h // scale_factor**2
-     if h % scale_factor**2 != 0:
-         new_h += 1
-     new_w = w // scale_factor**2
-     if w % scale_factor**2 != 0:
-         new_w += 1
-     return new_h * scale_factor, new_w * scale_factor
-
-
- def prepare_image(pil_image, w=512, h=512):
-     pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
-     arr = np.array(pil_image.convert("RGB"))
-     arr = arr.astype(np.float32) / 127.5 - 1
-     arr = np.transpose(arr, [2, 0, 1])
-     image = torch.from_numpy(arr).unsqueeze(0)
-     return image
-
-
- class KandinskyImg2ImgPipeline(DiffusionPipeline):
-     """
-     Pipeline for image-to-image generation using Kandinsky
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         text_encoder ([`MultilingualCLIP`]):
-             Frozen text-encoder.
-         tokenizer ([`XLMRobertaTokenizer`]):
-             Tokenizer of class
-         scheduler ([`DDIMScheduler`]):
-             A scheduler to be used in combination with `unet` to generate image latents.
-         unet ([`UNet2DConditionModel`]):
-             Conditional U-Net architecture to denoise the image embedding.
-         movq ([`VQModel`]):
-             MoVQ image encoder and decoder
-     """
-
-     def __init__(
-         self,
-         text_encoder: MultilingualCLIP,
-         movq: VQModel,
-         tokenizer: XLMRobertaTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: DDIMScheduler,
-     ):
-         super().__init__()
-
-         self.register_modules(
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             movq=movq,
-         )
-         self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
-
-     def get_timesteps(self, num_inference_steps, strength, device):
-         # get the original timestep using init_timestep
-         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
-         t_start = max(num_inference_steps - init_timestep, 0)
-         timesteps = self.scheduler.timesteps[t_start:]
-
-         return timesteps, num_inference_steps - t_start
-
-     def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler):
-         if latents is None:
-             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             if latents.shape != shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
-             latents = latents.to(device)
-
-         latents = latents * scheduler.init_noise_sigma
-
-         shape = latents.shape
-         noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
-         latents = self.add_noise(latents, noise, latent_timestep)
-         return latents
-
-     def _encode_prompt(
-         self,
-         prompt,
-         device,
-         num_images_per_prompt,
-         do_classifier_free_guidance,
-         negative_prompt=None,
-     ):
-         batch_size = len(prompt) if isinstance(prompt, list) else 1
-         # get prompt text embeddings
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=77,
-             truncation=True,
-             return_attention_mask=True,
-             add_special_tokens=True,
-             return_tensors="pt",
-         )
-
-         text_input_ids = text_inputs.input_ids
-         untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
-             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-
-         text_input_ids = text_input_ids.to(device)
-         text_mask = text_inputs.attention_mask.to(device)
-
-         prompt_embeds, text_encoder_hidden_states = self.text_encoder(
-             input_ids=text_input_ids, attention_mask=text_mask
-         )
-
-         prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
-         text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
-         text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=77,
-                 truncation=True,
-                 return_attention_mask=True,
-                 add_special_tokens=True,
-                 return_tensors="pt",
-             )
-             uncond_text_input_ids = uncond_input.input_ids.to(device)
-             uncond_text_mask = uncond_input.attention_mask.to(device)
-
-             negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
-                 input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
-             )
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
-             seq_len = negative_prompt_embeds.shape[1]
-             negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
-             negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
-
-             seq_len = uncond_text_encoder_hidden_states.shape[1]
-             uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
-             uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
-                 batch_size * num_images_per_prompt, seq_len, -1
-             )
-             uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
-             # done duplicates
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-             text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
-
-             text_mask = torch.cat([uncond_text_mask, text_mask])
-
-         return prompt_embeds, text_encoder_hidden_states, text_mask
-
-     def enable_model_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
-         to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-         method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
-         `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-             from accelerate import cpu_offload_with_hook
-         else:
-             raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
-         hook = None
-         for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
-             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-         # We'll offload the last model manually.
-         self.final_offload_hook = hook
-
-     # add_noise method to overwrite the one in the scheduler, because it uses a different beta schedule for adding noise vs. sampling
-     def add_noise(
-         self,
-         original_samples: torch.FloatTensor,
-         noise: torch.FloatTensor,
-         timesteps: torch.IntTensor,
-     ) -> torch.FloatTensor:
-         betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
-         alphas = 1.0 - betas
-         alphas_cumprod = torch.cumprod(alphas, dim=0)
-         alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
-         timesteps = timesteps.to(original_samples.device)
-
-         sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
-         sqrt_alpha_prod = sqrt_alpha_prod.flatten()
-         while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
-             sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
-
-         sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
-         sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
-         while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
-             sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
-
-         noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
-
-         return noisy_samples
-
-     @torch.no_grad()
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
-         image_embeds: torch.FloatTensor,
-         negative_image_embeds: torch.FloatTensor,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 100,
-         strength: float = 0.3,
-         guidance_scale: float = 7.0,
-         num_images_per_prompt: int = 1,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         output_type: Optional[str] = "pil",
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         return_dict: bool = True,
-     ):
-         """
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             image (`torch.FloatTensor`, `PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch, that will be used as the starting point for the
-                 process.
-             image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
-                 The clip image embeddings for text prompt, that will be used to condition the image generation.
-             negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
-                 The clip image embeddings for negative text prompt, will be used to condition the image generation.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-             height (`int`, *optional*, defaults to 512):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to 512):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 100):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             strength (`float`, *optional*, defaults to 0.3):
-                 Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
-                 will be used as a starting point, adding more noise to it the larger the `strength`. The number of
-                 denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
-                 be maximum and the denoising process will run for the full number of iterations specified in
-                 `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
-             guidance_scale (`float`, *optional*, defaults to 7.0):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
373
- usually at the expense of lower image quality.
374
- num_images_per_prompt (`int`, *optional*, defaults to 1):
375
- The number of images to generate per prompt.
376
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
377
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
378
- to make generation deterministic.
379
- output_type (`str`, *optional*, defaults to `"pil"`):
380
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
381
- (`np.array`) or `"pt"` (`torch.Tensor`).
382
- callback (`Callable`, *optional*):
383
- A function that calls every `callback_steps` steps during inference. The function is called with the
384
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
385
- callback_steps (`int`, *optional*, defaults to 1):
386
- The frequency at which the `callback` function is called. If not specified, the callback is called at
387
- every step.
388
- return_dict (`bool`, *optional*, defaults to `True`):
389
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
390
-
391
- Examples:
392
-
393
- Returns:
394
- [`~pipelines.ImagePipelineOutput`] or `tuple`
395
- """
396
- # 1. Define call parameters
397
- if isinstance(prompt, str):
398
- batch_size = 1
399
- elif isinstance(prompt, list):
400
- batch_size = len(prompt)
401
- else:
402
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
403
-
404
- device = self._execution_device
405
-
406
- batch_size = batch_size * num_images_per_prompt
407
-
408
- do_classifier_free_guidance = guidance_scale > 1.0
409
-
410
- # 2. get text and image embeddings
411
- prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
412
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
413
- )
414
-
415
- if isinstance(image_embeds, list):
416
- image_embeds = torch.cat(image_embeds, dim=0)
417
- if isinstance(negative_image_embeds, list):
418
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
419
-
420
- if do_classifier_free_guidance:
421
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
422
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
423
-
424
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
425
- dtype=prompt_embeds.dtype, device=device
426
- )
427
-
428
- # 3. pre-processing initial image
429
- if not isinstance(image, list):
430
- image = [image]
431
- if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
432
- raise ValueError(
433
- f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
434
- )
435
-
436
- image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
437
- image = image.to(dtype=prompt_embeds.dtype, device=device)
438
-
439
- latents = self.movq.encode(image)["latents"]
440
- latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
441
-
442
- # 4. set timesteps
443
- self.scheduler.set_timesteps(num_inference_steps, device=device)
444
-
445
- timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
446
-
447
- # the formular to calculate timestep for add_noise is taken from the original kandinsky repo
448
- latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2
449
-
450
- latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device)
451
-
452
- num_channels_latents = self.unet.config.in_channels
453
-
454
- height, width = get_new_h_w(height, width, self.movq_scale_factor)
455
-
456
- # 5. Create initial latent
457
- latents = self.prepare_latents(
458
- latents,
459
- latent_timestep,
460
- (batch_size, num_channels_latents, height, width),
461
- text_encoder_hidden_states.dtype,
462
- device,
463
- generator,
464
- self.scheduler,
465
- )
466
-
467
- # 6. Denoising loop
468
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
469
- # expand the latents if we are doing classifier free guidance
470
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
471
-
472
- added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
473
- noise_pred = self.unet(
474
- sample=latent_model_input,
475
- timestep=t,
476
- encoder_hidden_states=text_encoder_hidden_states,
477
- added_cond_kwargs=added_cond_kwargs,
478
- return_dict=False,
479
- )[0]
480
-
481
- if do_classifier_free_guidance:
482
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
483
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
484
- _, variance_pred_text = variance_pred.chunk(2)
485
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
486
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
487
-
488
- if not (
489
- hasattr(self.scheduler.config, "variance_type")
490
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
491
- ):
492
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
493
-
494
- # compute the previous noisy sample x_t -> x_t-1
495
- latents = self.scheduler.step(
496
- noise_pred,
497
- t,
498
- latents,
499
- generator=generator,
500
- ).prev_sample
501
-
502
- if callback is not None and i % callback_steps == 0:
503
- callback(i, t, latents)
504
-
505
- # 7. post-processing
506
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
507
-
508
- if output_type not in ["pt", "np", "pil"]:
509
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
510
-
511
- if output_type in ["np", "pil"]:
512
- image = image * 0.5 + 0.5
513
- image = image.clamp(0, 1)
514
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
515
-
516
- if output_type == "pil":
517
- image = self.numpy_to_pil(image)
518
-
519
- if not return_dict:
520
- return (image,)
521
-
522
- return ImagePipelineOutput(images=image)
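
For orientation, a minimal sketch of how a pipeline exposing this `__call__` signature is typically driven. The two-stage prior/decoder split, the model IDs, and the variable names below are illustrative assumptions, not details fixed by this diff:

import torch
from PIL import Image
from diffusers import KandinskyPriorPipeline, KandinskyImg2ImgPipeline

# Stage 1: a prior pipeline turns the text prompt into CLIP image embeddings.
prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
image_embeds, negative_image_embeds = prior("a fantasy landscape, matte painting").to_tuple()

# Stage 2: the img2img decoder denoises from a noised encoding of init_image.
pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
init_image = Image.open("sketch.png").convert("RGB")  # hypothetical input file
out = pipe(
    "a fantasy landscape, matte painting",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.3,  # larger values add more noise and stray further from init_image
)
out.images[0].save("result.png")
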
spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py DELETED
@@ -1,41 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
- # model settings
- model = dict(
-     neck=[
-         dict(
-             type='FPN',
-             in_channels=[256, 512, 1024, 2048],
-             out_channels=256,
-             num_outs=5),
-         dict(
-             type='BFP',
-             in_channels=256,
-             num_levels=5,
-             refine_level=2,
-             refine_type='non_local')
-     ],
-     roi_head=dict(
-         bbox_head=dict(
-             loss_bbox=dict(
-                 _delete_=True,
-                 type='BalancedL1Loss',
-                 alpha=0.5,
-                 gamma=1.5,
-                 beta=1.0,
-                 loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1),
-         rcnn=dict(
-             sampler=dict(
-                 _delete_=True,
-                 type='CombinedSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 add_gt_as_proposals=True,
-                 pos_sampler=dict(type='InstanceBalancedPosSampler'),
-                 neg_sampler=dict(
-                     type='IoUBalancedNegSampler',
-                     floor_thr=-1,
-                     floor_fraction=0,
-                     num_bins=3)))))
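
Configs like this one are thin deltas over a `_base_` file; mmcv's `Config` resolves the inheritance (including `_delete_=True` keys) at load time. A small sketch of inspecting the composed result, assuming an MMDetection checkout where this path exists:

from mmcv import Config

cfg = Config.fromfile("configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py")
print(cfg.model.neck)                         # FPN followed by the BFP refinement step
print(cfg.model.train_cfg.rcnn.sampler.type)  # CombinedSampler
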
spaces/Andy1621/uniformer_image_detection/tools/deployment/mmdet_handler.py DELETED
@@ -1,69 +0,0 @@
- import base64
- import os
-
- import mmcv
- import torch
- from ts.torch_handler.base_handler import BaseHandler
-
- from mmdet.apis import inference_detector, init_detector
-
-
- class MMdetHandler(BaseHandler):
-     threshold = 0.5
-
-     def initialize(self, context):
-         properties = context.system_properties
-         self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
-         self.device = torch.device(
-             self.map_location + ':' + str(properties.get('gpu_id'))
-             if torch.cuda.is_available() else self.map_location)
-         self.manifest = context.manifest
-
-         model_dir = properties.get('model_dir')
-         serialized_file = self.manifest['model']['serializedFile']
-         checkpoint = os.path.join(model_dir, serialized_file)
-         self.config_file = os.path.join(model_dir, 'config.py')
-
-         self.model = init_detector(self.config_file, checkpoint, self.device)
-         self.initialized = True
-
-     def preprocess(self, data):
-         images = []
-
-         for row in data:
-             image = row.get('data') or row.get('body')
-             if isinstance(image, str):
-                 image = base64.b64decode(image)
-             image = mmcv.imfrombytes(image)
-             images.append(image)
-
-         return images
-
-     def inference(self, data, *args, **kwargs):
-         results = inference_detector(self.model, data)
-         return results
-
-     def postprocess(self, data):
-         # Format output following the example ObjectDetectionHandler format
-         output = []
-         for image_index, image_result in enumerate(data):
-             output.append([])
-             if isinstance(image_result, tuple):
-                 bbox_result, segm_result = image_result
-                 if isinstance(segm_result, tuple):
-                     segm_result = segm_result[0]  # ms rcnn
-             else:
-                 bbox_result, segm_result = image_result, None
-
-             for class_index, class_result in enumerate(bbox_result):
-                 class_name = self.model.CLASSES[class_index]
-                 for bbox in class_result:
-                     bbox_coords = bbox[:-1].tolist()
-                     score = float(bbox[-1])
-                     if score >= self.threshold:
-                         output[image_index].append({
-                             class_name: bbox_coords,
-                             'score': score
-                         })
-
-         return output
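
Once packaged into a `.mar` archive and registered with TorchServe, the handler above is exercised over HTTP. A hedged client sketch; the endpoint name "mmdet" is whatever the model was registered as, an assumption here:

import requests

with open("demo.jpg", "rb") as f:
    resp = requests.post("http://127.0.0.1:8080/predictions/mmdet", data=f.read())
# postprocess() returns one list per image, e.g.
# [[{"person": [x1, y1, x2, y2], "score": 0.97}, ...]]
print(resp.json())
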
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './gcnet_r50-d8_512x1024_40k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Angelaangie/personal-chat-gpt/app.py DELETED
@@ -1,101 +0,0 @@
- import os
- import openai
- import gradio as gr
-
- # if you have your OpenAI API key as an environment variable, enable the line below
- # openai.api_key = os.getenv("OPENAI_API_KEY")
-
- # if you have your OpenAI API key as a string, enable the line below
- openai.api_key = "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"
-
- start_sequence = "\nAI:"
- restart_sequence = "\nHuman: "
-
- prompt = "The following is a conversation with an AI assistant. Some questions you can ask are: Who is Angela Busheska?, What is Angela Busheska passionate about? \nHuman: "
-
-
- def openai_create(prompt):
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt="\nHuman: Who is Angela Busheska? \nAI: Angela Busheska is the founder of EnRoute! She was chosen as a Forbes 30 Under 30. She is passionate about helping people to reduce carbon emissions. She has given keynotes at Google and Harvard.",
-         temperature=0.9,
-         max_tokens=150,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0.6,
-         stop=[" Human:", " AI:"]
-     )
-     return response.choices[0].text
-
-
- prompt1 = "What is Angela Busheska passionate about?. "
-
-
- def openai_create1(prompt1):
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt="\nAI: Angela Busheska is passionate about saving the environment. She aspires to help people reduce carbon emissions from shopping and transport activities.",
-         temperature=0.9,
-         max_tokens=150,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0.6,
-         stop=[" Human:", " AI:"]
-     )
-     return response.choices[0].text
-
-
- prompt2 = "What is Angela Busheska studying?. "
-
-
- def openai_create2(prompt2):
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt="\nAI: Angela Busheska is studying computer science and electrical engineering. Her goal is to utilize technology to solve the greatest problems with climate change. ",
-         temperature=0.9,
-         max_tokens=150,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0.6,
-         stop=[" Human:", " AI:"]
-     )
-     return response.choices[0].text
-
-
- prompt3 = "What did Angela Busheska discover?. "
-
-
- def openai_create3(prompt3):
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt="\nAI: Angela Busheska created EnRoute to help people reduce their carbon footprint from daily activities. She mobilized over 60.000 people to fight for climate justice. ",
-         temperature=0.9,
-         max_tokens=150,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0.6,
-         stop=[" Human:", " AI:"]
-     )
-     return response.choices[0].text
-
-
- def chatgpt_clone(input, history):
-     history = history or []
-     s = list(sum(history, ()))  # flatten [(user, bot), ...] pairs into one list
-     s.append(input)
-     inp = ' '.join(s)
-     output = openai_create(inp)
-     history.append((input, output))
-     return history, history
-
-
- block = gr.Blocks()
-
-
- with block:
-     gr.Markdown("""<h1><center>Learn More About Me!</center></h1>
-     """)
-     chatbot = gr.Chatbot()
-     message = gr.Textbox(placeholder=prompt)
-     state = gr.State()
-     submit = gr.Button("SEND")
-     submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
-
- block.launch(debug=True, share=False)
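
The `sum(history, ())` idiom in `chatgpt_clone` is worth spelling out: summing a list of tuples onto an empty tuple concatenates them, flattening the (user, bot) pairs into one sequence:

history = [("hi", "hello"), ("how are you?", "fine")]
s = list(sum(history, ()))
print(s)  # ['hi', 'hello', 'how are you?', 'fine']
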
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/api/script.py DELETED
@@ -1,13 +0,0 @@
- import time
-
- import extensions.api.blocking_api as blocking_api
- import extensions.api.streaming_api as streaming_api
- from modules import shared
-
-
- def setup():
-     blocking_api.start_server(shared.args.api_blocking_port, share=shared.args.public_api, tunnel_id=shared.args.public_api_id)
-     if shared.args.public_api:
-         time.sleep(5)
-
-     streaming_api.start_server(shared.args.api_streaming_port, share=shared.args.public_api, tunnel_id=shared.args.public_api_id)
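
For context, a hedged sketch of how the blocking endpoint this extension starts is usually called; text-generation-webui's blocking API defaults to port 5000, so adjust if configured differently:

import requests

resp = requests.post(
    "http://127.0.0.1:5000/api/v1/generate",
    json={"prompt": "Hello,", "max_new_tokens": 20},
)
print(resp.json()["results"][0]["text"])
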
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/test_tts.py DELETED
@@ -1,81 +0,0 @@
- import time
- from pathlib import Path
-
- import torch
- import tts_preprocessor
-
- torch._C._jit_set_profiling_mode(False)
-
-
- params = {
-     'activate': True,
-     'speaker': 'en_49',
-     'language': 'en',
-     'model_id': 'v3_en',
-     'sample_rate': 48000,
-     'device': 'cpu',
-     'show_text': True,
-     'autoplay': True,
-     'voice_pitch': 'medium',
-     'voice_speed': 'medium',
- }
-
- current_params = params.copy()
- voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
- voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
- voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
-
- # Used for making text xml compatible, needed for voice pitch and speed control
- table = str.maketrans({
-     "<": "&lt;",
-     ">": "&gt;",
-     "&": "&amp;",
-     "'": "&apos;",
-     '"': "&quot;",
- })
-
-
- def xmlesc(txt):
-     return txt.translate(table)
-
-
- def load_model():
-     model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
-     model.to(params['device'])
-     return model
-
-
- model = load_model()
-
-
- def output_modifier(string):
-     """
-     This function is applied to the model outputs.
-     """
-
-     global model, current_params
-
-     original_string = string
-     string = tts_preprocessor.preprocess(string)
-     processed_string = string
-
-     if string == '':
-         string = '*Empty reply, try regenerating*'
-     else:
-         output_file = Path(f'extensions/silero_tts/outputs/test_{int(time.time())}.wav')
-         prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
-         silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
-         model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
-
-         autoplay = 'autoplay' if params['autoplay'] else ''
-         string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
-
-     if params['show_text']:
-         string += f'\n\n{original_string}\n\nProcessed:\n{processed_string}'
-
-     print(string)
-
-
- if __name__ == '__main__':
-     import sys
-     output_modifier(sys.argv[1])
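
The guard at the bottom makes this runnable from a shell, e.g. `python test_tts.py "This is a test."`; the same path can be exercised from a REPL with a direct call, assuming the module imported cleanly:

output_modifier("This is a test sentence.")  # writes a .wav and prints the audio tag
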
spaces/Anthony7906/MengHuiMXD_GPT/modules/llama_func.py DELETED
@@ -1,166 +0,0 @@
- import os
- import logging
-
- from llama_index import download_loader
- from llama_index import (
-     Document,
-     LLMPredictor,
-     PromptHelper,
-     QuestionAnswerPrompt,
-     RefinePrompt,
- )
- import colorama
- import PyPDF2
- from tqdm import tqdm
-
- from modules.presets import *
- from modules.utils import *
- from modules.config import local_embedding
-
-
- def get_index_name(file_src):
-     file_paths = [x.name for x in file_src]
-     file_paths.sort(key=lambda x: os.path.basename(x))
-
-     md5_hash = hashlib.md5()
-     for file_path in file_paths:
-         with open(file_path, "rb") as f:
-             while chunk := f.read(8192):
-                 md5_hash.update(chunk)
-
-     return md5_hash.hexdigest()
-
-
- def block_split(text):
-     blocks = []
-     while len(text) > 0:
-         blocks.append(Document(text[:1000]))
-         text = text[1000:]
-     return blocks
-
-
- def get_documents(file_src):
-     documents = []
-     logging.debug("Loading documents...")
-     logging.debug(f"file_src: {file_src}")
-     for file in file_src:
-         filepath = file.name
-         filename = os.path.basename(filepath)
-         file_type = os.path.splitext(filepath)[1]
-         logging.info(f"loading file: {filename}")
-         try:
-             if file_type == ".pdf":
-                 logging.debug("Loading PDF...")
-                 try:
-                     from modules.pdf_func import parse_pdf
-                     from modules.config import advance_docs
-
-                     two_column = advance_docs["pdf"].get("two_column", False)
-                     pdftext = parse_pdf(filepath, two_column).text
-                 except:
-                     pdftext = ""
-                     with open(filepath, "rb") as pdfFileObj:
-                         pdfReader = PyPDF2.PdfReader(pdfFileObj)
-                         for page in tqdm(pdfReader.pages):
-                             pdftext += page.extract_text()
-                 text_raw = pdftext
-             elif file_type == ".docx":
-                 logging.debug("Loading Word...")
-                 DocxReader = download_loader("DocxReader")
-                 loader = DocxReader()
-                 text_raw = loader.load_data(file=filepath)[0].text
-             elif file_type == ".epub":
-                 logging.debug("Loading EPUB...")
-                 EpubReader = download_loader("EpubReader")
-                 loader = EpubReader()
-                 text_raw = loader.load_data(file=filepath)[0].text
-             elif file_type == ".xlsx":
-                 logging.debug("Loading Excel...")
-                 text_list = excel_to_string(filepath)
-                 for elem in text_list:
-                     documents.append(Document(elem))
-                 continue
-             else:
-                 logging.debug("Loading text file...")
-                 with open(filepath, "r", encoding="utf-8") as f:
-                     text_raw = f.read()
-         except Exception as e:
-             logging.error(f"Error loading file {filename}: {e}")
-             continue  # skip this file rather than use an undefined text_raw
-         text = add_space(text_raw)
-         # text = block_split(text)
-         # documents += text
-         documents += [Document(text)]
-     logging.debug("Documents loaded.")
-     return documents
-
-
- def construct_index(
-     api_key,
-     file_src,
-     max_input_size=4096,
-     num_outputs=5,
-     max_chunk_overlap=20,
-     chunk_size_limit=600,
-     embedding_limit=None,
-     separator=" ",
- ):
-     from langchain.chat_models import ChatOpenAI
-     from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-     from llama_index import GPTSimpleVectorIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding
-
-     if api_key:
-         os.environ["OPENAI_API_KEY"] = api_key
-     else:
-         # Because of a poorly designed dependency, an API key has to be present here
-         os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
-     chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
-     embedding_limit = None if embedding_limit == 0 else embedding_limit
-     separator = " " if separator == "" else separator
-
-     prompt_helper = PromptHelper(
-         max_input_size=max_input_size,
-         num_output=num_outputs,
-         max_chunk_overlap=max_chunk_overlap,
-         embedding_limit=embedding_limit,
-         chunk_size_limit=600,
-         separator=separator,
-     )
-     index_name = get_index_name(file_src)
-     if os.path.exists(f"./index/{index_name}.json"):
-         logging.info("Found a cached index file, loading...")
-         return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
-     else:
-         try:
-             documents = get_documents(file_src)
-             if local_embedding:
-                 embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="sentence-transformers/distiluse-base-multilingual-cased-v2"))
-             else:
-                 embed_model = OpenAIEmbedding()
-             logging.info("Building index...")
-             with retrieve_proxy():
-                 service_context = ServiceContext.from_defaults(
-                     prompt_helper=prompt_helper,
-                     chunk_size_limit=chunk_size_limit,
-                     embed_model=embed_model,
-                 )
-                 index = GPTSimpleVectorIndex.from_documents(
-                     documents, service_context=service_context
-                 )
-             logging.debug("Index built!")
-             os.makedirs("./index", exist_ok=True)
-             index.save_to_disk(f"./index/{index_name}.json")
-             logging.debug("Index saved locally!")
-             return index
-
-         except Exception as e:
-             logging.error("Failed to build the index! %s", e)
-             print(e)
-             return None
-
-
- def add_space(text):
-     punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
-     for cn_punc, en_punc in punctuations.items():
-         text = text.replace(cn_punc, en_punc)
-     return text
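
A minimal sketch of driving `construct_index` outside Gradio: it only needs objects exposing a `.name` attribute (as Gradio upload objects do), so a stand-in wrapper works. The file names and the key below are placeholders:

from types import SimpleNamespace

files = [SimpleNamespace(name="notes.pdf"), SimpleNamespace(name="report.docx")]
index = construct_index(api_key="sk-...", file_src=files)  # placeholder key
if index is not None:
    print("index cached under ./index/")
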
spaces/Aveygo/AstroSleuth/modules/realesr.py DELETED
@@ -1,81 +0,0 @@
- from torch.nn import functional as F
- from torch import nn as nn
- import torch
-
-
- class ResidualDenseBlock(nn.Module):
-     def __init__(self, num_feat=64, num_grow_ch=32):
-         super(ResidualDenseBlock, self).__init__()
-         self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
-         self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
-         self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
-         self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
-         self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
-
-         self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-
-     def forward(self, x):
-         x1 = self.lrelu(self.conv1(x))
-         x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
-         x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
-         x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
-         x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
-         return x5 * 0.2 + x
-
-
- class RRDB(nn.Module):
-     def __init__(self, num_feat, num_grow_ch=32):
-         super(RRDB, self).__init__()
-         self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
-         self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
-         self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)
-
-     def forward(self, x):
-         out = self.rdb1(x)
-         out = self.rdb2(out)
-         out = self.rdb3(out)
-         return out * 0.2 + x
-
-
- def make_layer(basic_block, num_basic_block, **kwarg):
-     layers = []
-     for _ in range(num_basic_block):
-         layers.append(basic_block(**kwarg))
-     return nn.Sequential(*layers)
-
-
- def pixel_unshuffle(x, scale):
-     b, c, hh, hw = x.size()
-     out_channel = c * (scale**2)
-     assert hh % scale == 0 and hw % scale == 0
-     h = hh // scale
-     w = hw // scale
-     x_view = x.view(b, c, h, scale, w, scale)
-     return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
-
-
- class Network(nn.Module):
-     def __init__(self, num_in_ch=3, num_out_ch=3, scale=4, num_feat=64, num_block=6, num_grow_ch=32):
-         super(Network, self).__init__()
-         self.scale = scale
-         if scale == 2:
-             num_in_ch = num_in_ch * 4
-         elif scale == 1:
-             num_in_ch = num_in_ch * 16
-         self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
-         self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
-         self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
-         self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
-         self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
-         self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
-         self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
-         self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-
-     def forward(self, x):
-         if self.scale == 2:
-             feat = pixel_unshuffle(x, scale=2)
-         elif self.scale == 1:
-             feat = pixel_unshuffle(x, scale=4)
-         else:
-             feat = x
-         feat = self.conv_first(feat)
-         body_feat = self.conv_body(self.body(feat))
-         feat = feat + body_feat
-         feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
-         feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
-         out = self.conv_last(self.lrelu(self.conv_hr(feat)))
-         return out
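
A quick shape check of the network as defined: with the default `scale=4`, the two nearest-neighbor upsampling stages turn a 64x64 input into a 256x256 output:

import torch

net = Network(num_in_ch=3, num_out_ch=3, scale=4)
x = torch.rand(1, 3, 64, 64)
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 3, 256, 256])
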
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/install.md DELETED
@@ -1 +0,0 @@
- ../../INSTALL.md
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py DELETED
@@ -1,122 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from . import layers_123821KB as layers
-
-
- class BaseASPPNet(nn.Module):
-     def __init__(self, nin, ch, dilations=(4, 8, 16)):
-         super(BaseASPPNet, self).__init__()
-         self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-         self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-         self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-         self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-         self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-         self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-         self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-         self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-         self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-     def __call__(self, x):
-         h, e1 = self.enc1(x)
-         h, e2 = self.enc2(h)
-         h, e3 = self.enc3(h)
-         h, e4 = self.enc4(h)
-
-         h = self.aspp(h)
-
-         h = self.dec4(h, e4)
-         h = self.dec3(h, e3)
-         h = self.dec2(h, e2)
-         h = self.dec1(h, e1)
-
-         return h
-
-
- class CascadedASPPNet(nn.Module):
-     def __init__(self, n_fft):
-         super(CascadedASPPNet, self).__init__()
-         self.stg1_low_band_net = BaseASPPNet(2, 32)
-         self.stg1_high_band_net = BaseASPPNet(2, 32)
-
-         self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
-         self.stg2_full_band_net = BaseASPPNet(16, 32)
-
-         self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-         self.stg3_full_band_net = BaseASPPNet(32, 64)
-
-         self.out = nn.Conv2d(64, 2, 1, bias=False)
-         self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
-         self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
-         self.max_bin = n_fft // 2
-         self.output_bin = n_fft // 2 + 1
-
-         self.offset = 128
-
-     def forward(self, x, aggressiveness=None):
-         mix = x.detach()
-         x = x.clone()
-
-         x = x[:, :, : self.max_bin]
-
-         bandw = x.size()[2] // 2
-         aux1 = torch.cat(
-             [
-                 self.stg1_low_band_net(x[:, :, :bandw]),
-                 self.stg1_high_band_net(x[:, :, bandw:]),
-             ],
-             dim=2,
-         )
-
-         h = torch.cat([x, aux1], dim=1)
-         aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-         h = torch.cat([x, aux1, aux2], dim=1)
-         h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-         mask = torch.sigmoid(self.out(h))
-         mask = F.pad(
-             input=mask,
-             pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-             mode="replicate",
-         )
-
-         if self.training:
-             aux1 = torch.sigmoid(self.aux1_out(aux1))
-             aux1 = F.pad(
-                 input=aux1,
-                 pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                 mode="replicate",
-             )
-             aux2 = torch.sigmoid(self.aux2_out(aux2))
-             aux2 = F.pad(
-                 input=aux2,
-                 pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                 mode="replicate",
-             )
-             return mask * mix, aux1 * mix, aux2 * mix
-         else:
-             if aggressiveness:
-                 mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                     mask[:, :, : aggressiveness["split_bin"]],
-                     1 + aggressiveness["value"] / 3,
-                 )
-                 mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                     mask[:, :, aggressiveness["split_bin"] :],
-                     1 + aggressiveness["value"],
-                 )
-
-             return mask * mix
-
-     def predict(self, x_mag, aggressiveness=None):
-         h = self.forward(x_mag, aggressiveness)
-
-         if self.offset > 0:
-             h = h[:, :, :, self.offset : -self.offset]
-             assert h.size()[3] > 0
-
-         return h
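
A hedged shape check (it assumes the companion `layers_123821KB` module from the same package is importable). Input is a batch of stereo magnitude spectrograms, `(batch, 2, freq_bins, time)` with `freq_bins = n_fft // 2 + 1`:

import torch

model = CascadedASPPNet(n_fft=2048)
model.eval()
x = torch.rand(1, 2, 1025, 512)
with torch.no_grad():
    mask = model.predict(x)  # predict() trims self.offset frames from each side
print(mask.shape)  # torch.Size([1, 2, 1025, 256])
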
spaces/Benson/text-generation/Examples/Antiguo Baku Oyunu Ykl.md DELETED
@@ -1,71 +0,0 @@
- <br />
- <h1>Old Baku Game: A Fun and Challenging Puzzle Game</h1>
- <p>If you are looking for a puzzle game that is fun and challenging, you may want to try the old Baku game. This is a game that was developed by Sega in 1995 for arcades, the Saturn, Game Gear, Master System, and Windows. The game involves matching animal heads with their corresponding foods, such as bones for dogs, bananas for monkeys, and carrots for rabbits. The game has colorful graphics and cute characters that appeal to children and adults alike.</p>
- <h2>antiguo baku oyunu yüklə</h2><br /><p><b><b>DOWNLOAD</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://bltlly.com/2v6JXI">https://bltlly.com/2v6JXI</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about the old Baku game, including how to play it, some tips and tricks, some of its characters and modes, why you should play it, and its history and legacy.</p>
- <h2>How to Play the Old Baku Game</h2>
- <p>The basic rules of the old Baku game are simple: you have a playing field where animal heads and food items fall from the top of the screen. You can move and rotate the blocks as they fall, and you can also speed up their descent by pressing a button. Your goal is to match the animal heads with the same type of food, which makes them disappear and earns you points. For example, if you match a dog head with a bone, both will disappear and you will get some points. However, if you match a dog head with a banana, they will not disappear and will pile up on the playing field. If the blocks reach the top of the screen, you lose the game.</p>
- <h3>Tips and Tricks for the Old Baku Game</h3>
- <p>If you want to improve your skills and performance in the old Baku game, here are some tips and tricks you can follow:</p>
- <ul>
- <li>Plan ahead: Try to anticipate what kind of animal heads and food items will fall next, and place them accordingly on the playing field. You can see the next two blocks in the upper right corner of the screen.</li>
- <li>Stack wisely: Try to stack animal heads and food items of the same type together so you can create combos more easily. Avoid stacking different types of blocks together, as they will clutter your playing field.</li>
- <li>Use power-ups: Do not ignore the power-ups that appear on some blocks, as they can help you clear more blocks and earn more points. For example, bombs can blow up nearby blocks, stars can match any type of food, hearts can give you extra lives, and clocks can slow down the falling speed of the blocks.</li>
- <li>Avoid traps: Watch out for the traps that appear on some blocks, as they can ruin your game. For example, skulls cannot be matched with anything, locks prevent you from moving or rotating the blocks, and ice freezes blocks in place.</li>
- </ul>
- <h3>Characters and Modes of the Old Baku Game</h3>
- <p>The old Baku game is not only fun and challenging but also varied and diverse. You can play with different characters and modes that add more flavor and excitement to the game.</p>
- <p></p>
- <p>Some of the characters you can play with are:</p>
- <ul>
- <li>Polly: She is a zookeeper who loves animals and wants to feed them well. She is the main character of the game and the default choice for arcade mode.</li>
- <li>Master Piggy: He is a wizard who uses magic to create animal heads and food. He is Polly's rival and the final boss of arcade mode.</li>
- <li>Angela: She is a robot that was built by Master Piggy to help him with his experiments. She is very intelligent and efficient, but also very cold and emotionless.</li>
- </ul>
- <p>Some of the modes you can play are:</p>
- <ul>
- <li>Arcade mode: This is the main mode of the game, where you have to clear a series of levels of increasing difficulty. You can choose between three difficulty levels: easy, normal, or hard. You face different opponents at each level, such as monkeys, dogs, rabbits, lions, or Master Piggy himself.</li>
- <li>Versus mode: This is a mode where you can play against another human player or against the computer. You can choose between two types of versus mode: normal or baku baku. In normal mode, you have to clear more blocks than your opponent before time runs out. In baku baku mode, you have to send garbage blocks to your opponent's playing field by creating combos.</li>
- </ul>
- <h3>Why You Should Play the Old Baku Game</h3>
- <p>If you are still not convinced that the old Baku game is worth playing, here are some reasons why you should give it a try:</p>
- <ul>
- <li>It has colorful graphics and cute characters that make it appealing to children and adults.</li>
- <li>It has catchy music and sound effects that enhance the gaming experience.</li>
- <li>It has challenging puzzles that test your reflexes, logic, and strategy.</li>
- <li>It has varied characters and modes that add more diversity and replay value to the game.</li>
- </ul>
- <h2>Old Baku Game History and Legacy</h2>
- <p>The old Baku game is not only a puzzle game but also a cultural phenomenon. The game has a rich history and legacy that spans different countries and platforms.</p>
- <h3>The Old Baku Game in Japan</h3>
- <p>The origin of the old Baku game can be traced back to Japan, where it was developed by Sega AM3 in 1995 for arcades. The game was originally called Baku Baku Animal, which means "eating animals" in Japanese. The game was inspired by the Japanese folklore of the baku, a mythical creature that devours dreams and nightmares. The game was also influenced by other popular puzzle games of the time, such as Tetris and Columns.</p>
- <h3>The Old Baku Game in Europe and America</h3>
- <p>The popularity of the old Baku game soon spread to other regions, such as Europe and America. The game was released for the Saturn, Game Gear, Master System, and Windows in these regions under various names, such as Baku Baku or Baku Baku Animal Master. The game was mostly unchanged from the original arcade version, except for some minor differences in graphics, sound, and difficulty.</p>
- <p>The game was also well received by European and American audiences, who praised its fun and innovative gameplay, its charming and humorous presentation, and its high replay value. The game was especially popular among children, who loved its adorable and funny characters, its simple and intuitive controls, and its educational value. The game also appealed to adults, who found it relaxing and entertaining.</p>
- <h3>Influence of the Old Baku Game on Other Puzzle Games</h3>
- <p>The legacy of the old Baku game can be seen in many other puzzle games that were inspired by it or are similar to it. Some of these games are:</p>
- <ul>
- <li>Zoop: This is a puzzle game that was released in 1995 for several platforms, such as the SNES, Genesis, PlayStation, and PC. The game involves shooting colored shapes into a grid of shapes that move toward the center of the screen. The game has gameplay mechanics similar to the old Baku game, such as matching shapes of the same color, creating combos, and using power-ups.</li>
- <li>Puyo Puyo: This is a series of puzzle games that started in 1991 on several platforms, such as arcade, NES, Genesis, Game Boy, and PC. The games involve dropping colored blobs called puyos onto a grid of puyos that can be matched by color and shape. The games have gameplay mechanics similar to the old Baku game, such as matching four or more puyos of the same color, creating chains, and sending garbage puyos to the opponent.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, the old Baku game is a fun and challenging puzzle game that was developed by Sega in 1995 for arcades, the Saturn, Game Gear, Master System, and Windows. The game involves matching animal heads with their corresponding foods, such as bones for dogs, bananas for monkeys, and carrots for rabbits. The game has colorful graphics and cute characters that appeal to children and adults alike.</p>
- <p>The game also has varied characters and modes that add more diversity and replay value. It also has a rich history and legacy that spans different countries and platforms, and it influenced many other puzzle games that were inspired by it or are similar to it.</p>
- <p>If you are looking for a puzzle game that is fun and challenging, you may want to try the old Baku game. You won't regret it!</p>
- <h4>FAQ</h4>
- <p>Here are some frequently asked questions about the old Baku game:</p>
- <ol>
- <li>Q: Where can I play the old Baku game?</li>
- <li>A: You can play the old Baku game on various platforms, such as arcade, Saturn, Game Gear, Master System, and Windows. You can also find online versions of the game on some websites.</li>
- <li>Q: What are the differences between the arcade and home versions of the old Baku game?</li>
- <li>A: The arcade version of the old Baku game has more levels, more characters, more modes, and more difficulty options than the home versions. The home versions also have some minor changes in graphics, sound, and gameplay.</li>
- <li>Q: What are the meanings of the characters' names in the old Baku game?</li>
- <li>A: The names of the characters in the old Baku game are based on their personalities or roles. For example, Polly is short for pollywog, which means a tadpole or young frog. Master Piggy is a pun on master and piggy, meaning a wizard and a pig. Angela is a reference to angel, meaning a heavenly being or a robot.</li>
- <li>Q: Is there a hidden character in the old Baku game?</li>
- <li>A: Sonic the Hedgehog is a hidden character in the old Baku game that can be unlocked by entering a secret code. The code is different for each platform, but it usually involves pressing certain buttons or keys in a certain order or combination.</li>
- <li>Q: Is the old Baku game related to Bakugan?</li>
- <li>A: No, the old Baku game and Bakugan are not related. Bakugan is a franchise involving toys, games, anime, and manga that features creatures called Bakugan that can transform into balls. The old Baku game is a puzzle game featuring animals and foods that can be matched and cleared.</li>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Batera Low Jemax Mp3.md DELETED
@@ -1,70 +0,0 @@
-
- <h1>Download Battery Low Jemax Mp3: A Guide to Enjoying Zambian Hip Hop</h1>
- <p>If you are a fan of Zambian hip hop, you have probably heard of Jemax, one of the most talented and popular rappers in the country. His latest song, Battery Low, featuring Xaven, is a catchy and energetic track that will make you want to dance and sing along. But how can you download Battery Low Jemax Mp3 and enjoy it on your device? In this article, we will tell you everything you need to know about Jemax, his song Battery Low, and how to download it for free. Let's get started!</p>
- <h2>Who Is Jemax?</h2>
- <p>Jemax is a Zambian rapper, songwriter, and hip hop artist who rose to fame after the release of his hit song Pressure Free in 2019. He signed with Alpha Ent Studios and Kopala Swag, two of Zambia's leading music labels. He is known for his versatile and creative style, blending the rap, dancehall, afrobeat, and R&B genres. He has collaborated with many other Zambian artists, such as Chef 187, Yo Maps, Jae Cash, Drimz, and more.</p>
- <h2>descargar batería low jemax mp3</h2><br /><p><b><b>Download</b> &rarr; <a href="https://bltlly.com/2v6IJW">https://bltlly.com/2v6IJW</a></b></p><br /><br />
- <h3>Biography and Career</h3>
- <p>Jemax's real name is James Kawele Kavimba. He was born in Kabwe, a town in Zambia's Central Province. He started rapping at an early age, inspired by his older brother, who was also a rapper. He recorded his first song in 2010, titled Ndelwishikanafye Na Life. He then joined Classic Music Records, a local music group that helped him develop his skills and exposure. He released several songs under this label, such as Ichilaka, Tata, Masaka, and more.</p>
- <h3>Popular Songs and Albums</h3>
- <p>Jemax has released many songs and albums that have gained popularity and acclaim among fans and critics. Some of his most popular songs are:</p>
- <ul>
- <li>Battery Low with Xaven</li>
- <li>Pressure Free</li>
- <li>Fipangusule with Yo Maps</li>
- <li>Wamupola with Y-Celeb</li>
- <li>Mapalo with Yo Maps</li>
- <li>Keka Keka with Yo Maps</li>
- <li>Panda</li>
- <li>Naishiba Impiya with Zim zim & Yugine</li>
- <li>Masaka</li>
- <li>Right Now with Jazzy Boy</li>
- </ul>
- <p>Some of his most popular albums are:</p>
- <ul>
- <li>Battery Low (feat. Xaven) - Single</li>
- <li>Ndaluba (feat. Puri4000) - Single</li>
- <li>Chabota (feat. Rich Pro) - Single</li>
- <li>Petro Sichone - Single</li>
- <li>Rich People Have a Poor Mind</li>
- </ul>
- <h2>What Is Battery Low?</h2>
- <p>Battery Low is the latest song by Jemax, featuring Xaven, a singer and songwriter. The song was released on June 16, 2021, and has already received more than 100,000 views on YouTube. The song is produced by Mzenga Man, a renowned Zambian music producer and DJ. The song is part of Jemax's upcoming album, which is expected to be released later this year.</p>
- <h3>Song Features and Production</h3>
- <p>Battery Low is a hip hop song that showcases Jemax's rap skills and Xaven's vocal abilities. The song has a catchy chorus and a lively beat that will make you want to dance. The song also has some dancehall and afrobeat elements, giving it a unique and fresh sound. The song is mixed and mastered by Mzenga Man, who has worked with many other Zambian artists, such as Chef 187, Macky 2, Slapdee, Bobby East, and more. The song is also accompanied by a colorful and vibrant music video, directed by Stanch Rite Media. The video shows Jemax and Xaven performing the song in various locations, such as a car wash, a barbershop, a club, and a street. The video also shows some of Zambia's culture and fashion.</p>
- <p></p>
- <h3>Song Lyrics and Meaning</h3>
- <p>Xaven sings the chorus, which repeats the phrase "battery low" several times. She also sings about how she misses her boyfriend who lives in another city, and how she longs for his touch and his voice. She also complains about the high cost of airtime and data bundles, which makes it hard for her to call or message him. She says she feels like her battery is low, meaning she feels lonely and sad in the relationship.</p>
- <p>The song reflects the common struggles many couples face when they are separated by distance. It also shows how technology can be both a blessing and a curse for long-distance relationships. The song appeals to anyone who has experienced or can relate to this situation.</p>
- <h2>How to Download Battery Low Jemax Mp3?</h2>
- <p>If you like Battery Low by Jemax and Xaven, you may want to download it to your device so you can listen to it anytime and anywhere. But how can you do that? There are many ways to download Battery Low Jemax Mp3, but not all of them are legal or safe. In this section, we will show you some of the best sites to download the song legally and safely.</p>
- <h3>The Best Sites to Download the Song</h3>
- <p>One of the best sites to download Battery Low Jemax Mp3 is ZedMusic, a Zambian music platform that offers free downloads of various Zambian songs and albums. You can find Battery Low by Jemax and Xaven on this site, along with other songs by Jemax and other Zambian artists. To download the song from this site, you just need to click the download button below the song title and then choose the quality and format you want. You can also stream the song online or watch the music video on this site.</p>
- <p>A third option for downloading Battery Low Jemax Mp3 is YouTube, a global video-sharing platform that hosts millions of videos, including music videos. You can find Battery Low by Jemax and Xaven on YouTube, along with other songs by Jemax and other Zambian artists. To download the song from YouTube, you will need to use a third-party tool or app that can convert YouTube videos to MP3 files. There are many tools and apps available online, but you should be careful and choose a reliable and safe one. Some of the most popular and trusted ones are 4K Video Downloader, Y2Mate, YouTube to MP3 Converter, and more. To download the song from YouTube using these tools or apps, you just need to copy the video URL, paste it into the tool or app, and then choose the quality and format you want. You can then save the MP3 file to your device.</p>
- <h3>Tips and Tricks for Downloading the Song for Free</h3>
- <p>Downloading Battery Low Jemax Mp3 is easy and free, but there are some tips and tricks that can help you get the most out of it. Here are some of them:</p>
- <ul>
- <li>Check the quality and size of the MP3 file before downloading it. You want to make sure the file is clear and not corrupted, and that it does not take up too much space on your device. You can usually see the quality and size of the file on the download page or in the tool or app you are using.</li>
- <li>Use a download manager or accelerator to speed up the download process. A download manager or accelerator is software or an app that helps you download files faster and more efficiently. It can also resume interrupted downloads, pause and resume downloads, schedule downloads, and manage multiple downloads at once. You can find many free or paid download managers or accelerators online, but you should be careful and choose a compatible and safe one.</li>
- </ul>
- <h2>How to Enjoy Battery Low Jemax Mp3?</h2>
- <p>Now that you have downloaded Battery Low Jemax Mp3 to your device, you can enjoy it anytime and anywhere. But how can you make the most of it? Here are some suggestions:</p>
- <h3>Play It on Your Favorite Device</h3>
- <p>You can play Battery Low Jemax Mp3 on any device that supports MP3 files, such as your smartphone, tablet, laptop, desktop, MP3 player, smart speaker, or car stereo. You can also use headphones, earphones, speakers, or sound systems to improve the sound quality and volume of the song. You can also adjust the settings of your device or your music player app to customize playback options such as shuffle, repeat, equalizer, bass boost, and more.</p>
- <h3>Share It with Your Friends and Family</h3>
- <p>You can also share Battery Low Jemax Mp3 with your friends and family who love Zambian hip hop or might be interested in it. You can send them the MP3 file by email, messaging apps, social media platforms, cloud storage services, Bluetooth, Wi-Fi Direct, NFC, QR codes, and more. You can also play the song for them on your device or on their devices. You can also invite them to watch the music video on YouTube or other video-sharing platforms. You can also discuss the song with them, including its features, lyrics, meaning, production, video, and more.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, Battery Low by Jemax featuring Xaven is a catchy and energetic Zambian hip hop song that you can download for free and enjoy on any device, anytime and anywhere.</p>
- <h3>FAQ</h3>
- <p>Here are some frequently asked questions about Battery Low by Jemax featuring Xaven:</p>
- <ul>
- <li>Q: Where can I find more songs by Jemax?<br>A: You can find more songs by Jemax on his YouTube channel (https://www.youtube.com/channel/UC0w4Jf8X1a9Q6xZ9g7d9i8A), his Facebook page (https://www.facebook.com/JemaxOfficial), his Instagram account (https://instagram.com/jemaxofficial), and his Twitter account (https://twitter.com/JemaxOfficial). You can also find his songs on various music platforms, such as ZedMusic, AfroFire, Mvesesani, Zamusic, and more.</li>
- <li>Q: Where can I find more songs by Xaven?<br>A: You can find more songs by Xaven on her YouTube channel (https://www.youtube.com/channel/UCn1c6L4y1w3X0xY2J7Xf9jg), her Facebook page (https://www.facebook.com/xavenmusic), her Instagram account (https://instagram.com/xavenmusic), and her Twitter account (https://twitter.com/xavenmusic). You can also find her songs on various music platforms, such as ZedMusic, AfroFire, Mvesesani, Zamusic, and more.</li>
- <li>Q: Where can I find more songs by Mzenga Man?<br>A: You can find more songs by Mzenga Man on his YouTube channel (https://www.youtube.com/channel/UC5wM8sZ0yUqYKfW9nZJ4c5g), his Facebook page (https://www.facebook.com/mzengaman), his Instagram account (https://instagram.com/mzengaman), and his Twitter account (https://twitter.com/mzengaman). You can also find his songs on various music platforms, such as ZedMusic, AfroFire, Mvesesani, Zamusic, and more.</li>
- <li>Q: How can I support Jemax, Xaven, and Mzenga Man?<br>A: You can support Jemax, Xaven, and Mzenga Man by following them on their social media accounts, subscribing to their YouTube channels, liking and commenting on their posts and videos, sharing their songs and videos with your friends and family, buying their merchandise or tickets to their shows, and donating to their causes or projects.</li>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/models/cond_transformer.py DELETED
@@ -1,343 +0,0 @@
1
- import os, math
2
- import torch
3
- import torch.nn.functional as F
4
- import pytorch_lightning as pl
5
-
6
- from main import instantiate_from_config
7
- from taming.modules.util import SOSProvider
8
-
9
-
10
- def disabled_train(self, mode=True):
11
- """Overwrite model.train with this function to make sure train/eval mode
12
- does not change anymore."""
13
- return self
14
-
15
-
16
- class Net2NetTransformer(pl.LightningModule):
17
- def __init__(self,
18
- transformer_config,
19
- first_stage_config,
20
- cond_stage_config,
21
- permuter_config=None,
22
- ckpt_path=None,
23
- ignore_keys=[],
24
- first_stage_key="image",
25
- cond_stage_key="depth",
26
- downsample_cond_size=-1,
27
- pkeep=1.0,
28
- sos_token=0,
29
- unconditional=False,
30
- ):
31
- super().__init__()
32
- self.be_unconditional = unconditional
33
- self.sos_token = sos_token
34
- self.first_stage_key = first_stage_key
35
- self.cond_stage_key = cond_stage_key
36
- self.init_first_stage_from_ckpt(first_stage_config)
37
- self.init_cond_stage_from_ckpt(cond_stage_config)
38
- if permuter_config is None:
39
- permuter_config = {"target": "taming.modules.transformer.permuter.Identity"}
40
- self.permuter = instantiate_from_config(config=permuter_config)
41
- self.transformer = instantiate_from_config(config=transformer_config)
42
-
43
- if ckpt_path is not None:
44
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
45
- self.downsample_cond_size = downsample_cond_size
46
- self.pkeep = pkeep
47
-
48
- def init_from_ckpt(self, path, ignore_keys=list()):
49
- sd = torch.load(path, map_location="cpu")["state_dict"]
50
- for k in sd.keys():
51
- for ik in ignore_keys:
52
- if k.startswith(ik):
53
- self.print("Deleting key {} from state_dict.".format(k))
54
- del sd[k]
55
- self.load_state_dict(sd, strict=False)
56
- print(f"Restored from {path}")
57
-
58
- def init_first_stage_from_ckpt(self, config):
59
- model = instantiate_from_config(config)
60
- model = model.eval()
61
- model.train = disabled_train
62
- self.first_stage_model = model
63
-
64
- def init_cond_stage_from_ckpt(self, config):
65
- if config == "__is_first_stage__":
66
- print("Using first stage also as cond stage.")
67
- self.cond_stage_model = self.first_stage_model
68
- elif config == "__is_unconditional__" or self.be_unconditional:
69
- print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
70
- f"Prepending {self.sos_token} as a sos token.")
71
- self.be_unconditional = True
72
- self.cond_stage_key = self.first_stage_key
73
- self.cond_stage_model = SOSProvider(self.sos_token)
74
- else:
75
- model = instantiate_from_config(config)
76
- model = model.eval()
77
- model.train = disabled_train
78
- self.cond_stage_model = model
79
-
80
- def forward(self, x, c):
81
- # one step to produce the logits
82
- _, z_indices = self.encode_to_z(x)
83
- _, c_indices = self.encode_to_c(c)
84
-
85
- if self.training and self.pkeep < 1.0:
86
- mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape,
87
- device=z_indices.device))
88
- mask = mask.round().to(dtype=torch.int64)
89
- r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
90
- a_indices = mask*z_indices+(1-mask)*r_indices
91
- else:
92
- a_indices = z_indices
93
-
94
- cz_indices = torch.cat((c_indices, a_indices), dim=1)
95
-
96
- # target includes all sequence elements (no need to handle first one
97
- # differently because we are conditioning)
98
- target = z_indices
99
- # make the prediction
100
- logits, _ = self.transformer(cz_indices[:, :-1])
101
- # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
102
- logits = logits[:, c_indices.shape[1]-1:]
103
-
104
- return logits, target
105
-
106
- def top_k_logits(self, logits, k):
107
- v, ix = torch.topk(logits, k)
108
- out = logits.clone()
109
- out[out < v[..., [-1]]] = -float('Inf')
110
- return out
111
-
112
- @torch.no_grad()
113
- def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
114
- callback=lambda k: None):
115
- x = torch.cat((c,x),dim=1)
116
- block_size = self.transformer.get_block_size()
117
- assert not self.transformer.training
118
- if self.pkeep <= 0.0:
119
- # one pass suffices since input is pure noise anyway
120
- assert len(x.shape)==2
121
- noise_shape = (x.shape[0], steps-1)
122
- #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
123
- noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
124
- x = torch.cat((x,noise),dim=1)
125
- logits, _ = self.transformer(x)
126
- # take all logits for now and scale by temp
127
- logits = logits / temperature
128
- # optionally crop probabilities to only the top k options
129
- if top_k is not None:
130
- logits = self.top_k_logits(logits, top_k)
131
- # apply softmax to convert to probabilities
132
- probs = F.softmax(logits, dim=-1)
133
- # sample from the distribution or take the most likely
134
- if sample:
135
- shape = probs.shape
136
- probs = probs.reshape(shape[0]*shape[1],shape[2])
137
- ix = torch.multinomial(probs, num_samples=1)
138
- probs = probs.reshape(shape[0],shape[1],shape[2])
139
- ix = ix.reshape(shape[0],shape[1])
140
- else:
141
- _, ix = torch.topk(probs, k=1, dim=-1)
142
- # cut off conditioning
143
- x = ix[:, c.shape[1]-1:]
144
- else:
145
- for k in range(steps):
146
- callback(k)
147
- assert x.size(1) <= block_size # make sure model can see conditioning
148
- x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
149
- logits, _ = self.transformer(x_cond)
150
- # pluck the logits at the final step and scale by temperature
151
- logits = logits[:, -1, :] / temperature
152
- # optionally crop probabilities to only the top k options
153
- if top_k is not None:
154
- logits = self.top_k_logits(logits, top_k)
155
- # apply softmax to convert to probabilities
156
- probs = F.softmax(logits, dim=-1)
157
- # sample from the distribution or take the most likely
158
- if sample:
159
- ix = torch.multinomial(probs, num_samples=1)
160
- else:
161
- _, ix = torch.topk(probs, k=1, dim=-1)
162
- # append to the sequence and continue
163
- x = torch.cat((x, ix), dim=1)
164
- # cut off conditioning
165
- x = x[:, c.shape[1]:]
166
- return x
167
-
168
- @torch.no_grad()
169
- def encode_to_z(self, x):
170
- quant_z, _, info = self.first_stage_model.encode(x)
171
- indices = info[2].view(quant_z.shape[0], -1)
172
- indices = self.permuter(indices)
173
- return quant_z, indices
174
-
175
- @torch.no_grad()
176
- def encode_to_c(self, c):
177
- if self.downsample_cond_size > -1:
178
- c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
179
- quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
180
- if len(indices.shape) > 2:
181
- indices = indices.view(c.shape[0], -1)
182
- return quant_c, indices
183
-
184
- @torch.no_grad()
185
- def decode_to_img(self, index, zshape):
186
- index = self.permuter(index, reverse=True)
187
- bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
188
- quant_z = self.first_stage_model.quantize.get_codebook_entry(
189
- index.reshape(-1), shape=bhwc)
190
- x = self.first_stage_model.decode(quant_z)
191
- return x
192
-
193
- @torch.no_grad()
194
- def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
195
- log = dict()
196
-
197
- N = 4
198
- if lr_interface:
199
- x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
200
- else:
201
- x, c = self.get_xc(batch, N)
202
- x = x.to(device=self.device)
203
- c = c.to(device=self.device)
204
-
205
- quant_z, z_indices = self.encode_to_z(x)
206
- quant_c, c_indices = self.encode_to_c(c)
207
-
208
- # create a "half" sample
209
- z_start_indices = z_indices[:,:z_indices.shape[1]//2]
210
- index_sample = self.sample(z_start_indices, c_indices,
211
- steps=z_indices.shape[1]-z_start_indices.shape[1],
212
- temperature=temperature if temperature is not None else 1.0,
213
- sample=True,
214
- top_k=top_k if top_k is not None else 100,
215
- callback=callback if callback is not None else lambda k: None)
216
- x_sample = self.decode_to_img(index_sample, quant_z.shape)
217
-
218
- # sample
219
- z_start_indices = z_indices[:, :0]
220
- index_sample = self.sample(z_start_indices, c_indices,
221
- steps=z_indices.shape[1],
222
- temperature=temperature if temperature is not None else 1.0,
223
- sample=True,
224
- top_k=top_k if top_k is not None else 100,
225
- callback=callback if callback is not None else lambda k: None)
226
- x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape)
227
-
228
- # det sample
229
- z_start_indices = z_indices[:, :0]
230
- index_sample = self.sample(z_start_indices, c_indices,
231
- steps=z_indices.shape[1],
232
- sample=False,
233
- callback=callback if callback is not None else lambda k: None)
234
- x_sample_det = self.decode_to_img(index_sample, quant_z.shape)
235
-
236
- # reconstruction
237
- x_rec = self.decode_to_img(z_indices, quant_z.shape)
238
-
239
- log["inputs"] = x
240
- log["reconstructions"] = x_rec
241
-
242
- if self.cond_stage_key != "image":
243
- cond_rec = self.cond_stage_model.decode(quant_c)
244
- if self.cond_stage_key == "segmentation":
245
- # get image from segmentation mask
246
- num_classes = cond_rec.shape[1]
247
-
248
- c = torch.argmax(c, dim=1, keepdim=True)
249
- c = F.one_hot(c, num_classes=num_classes)
250
- c = c.squeeze(1).permute(0, 3, 1, 2).float()
251
- c = self.cond_stage_model.to_rgb(c)
252
-
253
- cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True)
254
- cond_rec = F.one_hot(cond_rec, num_classes=num_classes)
255
- cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float()
256
- cond_rec = self.cond_stage_model.to_rgb(cond_rec)
257
- log["conditioning_rec"] = cond_rec
258
- log["conditioning"] = c
259
-
260
- log["samples_half"] = x_sample
261
- log["samples_nopix"] = x_sample_nopix
262
- log["samples_det"] = x_sample_det
263
- return log
264
-
265
- def get_input(self, key, batch):
266
- x = batch[key]
267
- if len(x.shape) == 3:
268
- x = x[..., None]
269
- if len(x.shape) == 4:
270
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
271
- if x.dtype == torch.double:
272
- x = x.float()
273
- return x
274
-
275
- def get_xc(self, batch, N=None):
276
- x = self.get_input(self.first_stage_key, batch)
277
- c = self.get_input(self.cond_stage_key, batch)
278
- if N is not None:
279
- x = x[:N]
280
- c = c[:N]
281
- return x, c
282
-
283
- def shared_step(self, batch, batch_idx):
284
- x, c = self.get_xc(batch)
285
- logits, target = self(x, c)
286
- loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
287
- return loss
288
-
289
- def training_step(self, batch, batch_idx):
290
- loss = self.shared_step(batch, batch_idx)
291
- self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
292
- return loss
293
-
294
- def validation_step(self, batch, batch_idx):
295
- loss = self.shared_step(batch, batch_idx)
296
- self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
297
- return loss
298
-
299
- def configure_optimizers(self):
300
- """
301
- Following minGPT:
302
- This long function is unfortunately doing something very simple and is being very defensive:
303
- We are separating out all parameters of the model into two buckets: those that will experience
304
- weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
305
- We are then returning the PyTorch optimizer object.
306
- """
307
- # separate out all parameters to those that will and won't experience regularizing weight decay
308
- decay = set()
309
- no_decay = set()
310
- whitelist_weight_modules = (torch.nn.Linear, )
311
- blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
312
- for mn, m in self.transformer.named_modules():
313
- for pn, p in m.named_parameters():
314
- fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
315
-
316
- if pn.endswith('bias'):
317
- # all biases will not be decayed
318
- no_decay.add(fpn)
319
- elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
320
- # weights of whitelist modules will be weight decayed
321
- decay.add(fpn)
322
- elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
323
- # weights of blacklist modules will NOT be weight decayed
324
- no_decay.add(fpn)
325
-
326
- # special case the position embedding parameter in the root GPT module as not decayed
327
- no_decay.add('pos_emb')
328
-
329
- # validate that we considered every parameter
330
- param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
331
- inter_params = decay & no_decay
332
- union_params = decay | no_decay
333
- assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
334
- assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
335
- % (str(param_dict.keys() - union_params), )
336
-
337
- # create the pytorch optimizer object
338
- optim_groups = [
339
- {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
340
- {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
341
- ]
342
- optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
343
- return optimizer
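The sampling path above boils down to top-k filtering followed by multinomial sampling. As a standalone sketch (assuming only PyTorch; the logits below are made up), the two steps look like this:

import torch
import torch.nn.functional as F

def top_k_logits(logits: torch.Tensor, k: int) -> torch.Tensor:
    # Keep the k largest logits and push everything else to -inf,
    # mirroring Net2NetTransformer.top_k_logits above.
    v, _ = torch.topk(logits, k)
    out = logits.clone()
    out[out < v[..., [-1]]] = -float("Inf")
    return out

# Made-up logits for a batch of 2 over a vocabulary of 8 tokens.
logits = torch.randn(2, 8)
filtered = top_k_logits(logits / 0.9, k=3)   # temperature 0.9, top-k 3
probs = F.softmax(filtered, dim=-1)          # probability mass only on the top k
next_token = torch.multinomial(probs, num_samples=1)
print(next_token.shape)                      # torch.Size([2, 1])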
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/base_command.py DELETED
@@ -1,225 +0,0 @@
1
- """Base Command class, and related routines"""
2
-
3
- import functools
4
- import logging
5
- import logging.config
6
- import optparse
7
- import os
8
- import sys
9
- import traceback
10
- from optparse import Values
11
- from typing import Any, Callable, List, Optional, Tuple
12
-
13
- from pip._vendor.rich import traceback as rich_traceback
14
-
15
- from pip._internal.cli import cmdoptions
16
- from pip._internal.cli.command_context import CommandContextMixIn
17
- from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
18
- from pip._internal.cli.status_codes import (
19
- ERROR,
20
- PREVIOUS_BUILD_DIR_ERROR,
21
- UNKNOWN_ERROR,
22
- VIRTUALENV_NOT_FOUND,
23
- )
24
- from pip._internal.exceptions import (
25
- BadCommand,
26
- CommandError,
27
- DiagnosticPipError,
28
- InstallationError,
29
- NetworkConnectionError,
30
- PreviousBuildDirError,
31
- UninstallationError,
32
- )
33
- from pip._internal.utils.filesystem import check_path_owner
34
- from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
35
- from pip._internal.utils.misc import get_prog, normalize_path
36
- from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry
37
- from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry
38
- from pip._internal.utils.virtualenv import running_under_virtualenv
39
-
40
- __all__ = ["Command"]
41
-
42
- logger = logging.getLogger(__name__)
43
-
44
-
45
- class Command(CommandContextMixIn):
46
- usage: str = ""
47
- ignore_require_venv: bool = False
48
-
49
- def __init__(self, name: str, summary: str, isolated: bool = False) -> None:
50
- super().__init__()
51
-
52
- self.name = name
53
- self.summary = summary
54
- self.parser = ConfigOptionParser(
55
- usage=self.usage,
56
- prog=f"{get_prog()} {name}",
57
- formatter=UpdatingDefaultsHelpFormatter(),
58
- add_help_option=False,
59
- name=name,
60
- description=self.__doc__,
61
- isolated=isolated,
62
- )
63
-
64
- self.tempdir_registry: Optional[TempDirRegistry] = None
65
-
66
- # Commands should add options to this option group
67
- optgroup_name = f"{self.name.capitalize()} Options"
68
- self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
69
-
70
- # Add the general options
71
- gen_opts = cmdoptions.make_option_group(
72
- cmdoptions.general_group,
73
- self.parser,
74
- )
75
- self.parser.add_option_group(gen_opts)
76
-
77
- self.add_options()
78
-
79
- def add_options(self) -> None:
80
- pass
81
-
82
- def handle_pip_version_check(self, options: Values) -> None:
83
- """
84
- This is a no-op so that commands by default do not do the pip version
85
- check.
86
- """
87
- # Make sure we do the pip version check if the index_group options
88
- # are present.
89
- assert not hasattr(options, "no_index")
90
-
91
- def run(self, options: Values, args: List[str]) -> int:
92
- raise NotImplementedError
93
-
94
- def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]:
95
- # factored out for testability
96
- return self.parser.parse_args(args)
97
-
98
- def main(self, args: List[str]) -> int:
99
- try:
100
- with self.main_context():
101
- return self._main(args)
102
- finally:
103
- logging.shutdown()
104
-
105
- def _main(self, args: List[str]) -> int:
106
- # We must initialize this before the tempdir manager, otherwise the
107
- # configuration would not be accessible by the time we clean up the
108
- # tempdir manager.
109
- self.tempdir_registry = self.enter_context(tempdir_registry())
110
- # Intentionally set as early as possible so globally-managed temporary
111
- # directories are available to the rest of the code.
112
- self.enter_context(global_tempdir_manager())
113
-
114
- options, args = self.parse_args(args)
115
-
116
- # Set verbosity so that it can be used elsewhere.
117
- self.verbosity = options.verbose - options.quiet
118
-
119
- level_number = setup_logging(
120
- verbosity=self.verbosity,
121
- no_color=options.no_color,
122
- user_log_file=options.log,
123
- )
124
-
125
- always_enabled_features = set(options.features_enabled) & set(
126
- cmdoptions.ALWAYS_ENABLED_FEATURES
127
- )
128
- if always_enabled_features:
129
- logger.warning(
130
- "The following features are always enabled: %s. ",
131
- ", ".join(sorted(always_enabled_features)),
132
- )
133
-
134
- # TODO: Try to get these passing down from the command?
135
- # without resorting to os.environ to hold these.
136
- # This also affects isolated builds and it should.
137
-
138
- if options.no_input:
139
- os.environ["PIP_NO_INPUT"] = "1"
140
-
141
- if options.exists_action:
142
- os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action)
143
-
144
- if options.require_venv and not self.ignore_require_venv:
145
- # If a venv is required check if it can really be found
146
- if not running_under_virtualenv():
147
- logger.critical("Could not find an activated virtualenv (required).")
148
- sys.exit(VIRTUALENV_NOT_FOUND)
149
-
150
- if options.cache_dir:
151
- options.cache_dir = normalize_path(options.cache_dir)
152
- if not check_path_owner(options.cache_dir):
153
- logger.warning(
154
- "The directory '%s' or its parent directory is not owned "
155
- "or is not writable by the current user. The cache "
156
- "has been disabled. Check the permissions and owner of "
157
- "that directory. If executing pip with sudo, you should "
158
- "use sudo's -H flag.",
159
- options.cache_dir,
160
- )
161
- options.cache_dir = None
162
-
163
- def intercepts_unhandled_exc(
164
- run_func: Callable[..., int]
165
- ) -> Callable[..., int]:
166
- @functools.wraps(run_func)
167
- def exc_logging_wrapper(*args: Any) -> int:
168
- try:
169
- status = run_func(*args)
170
- assert isinstance(status, int)
171
- return status
172
- except DiagnosticPipError as exc:
173
- logger.error("[present-rich] %s", exc)
174
- logger.debug("Exception information:", exc_info=True)
175
-
176
- return ERROR
177
- except PreviousBuildDirError as exc:
178
- logger.critical(str(exc))
179
- logger.debug("Exception information:", exc_info=True)
180
-
181
- return PREVIOUS_BUILD_DIR_ERROR
182
- except (
183
- InstallationError,
184
- UninstallationError,
185
- BadCommand,
186
- NetworkConnectionError,
187
- ) as exc:
188
- logger.critical(str(exc))
189
- logger.debug("Exception information:", exc_info=True)
190
-
191
- return ERROR
192
- except CommandError as exc:
193
- logger.critical("%s", exc)
194
- logger.debug("Exception information:", exc_info=True)
195
-
196
- return ERROR
197
- except BrokenStdoutLoggingError:
198
- # Bypass our logger and write any remaining messages to
199
- # stderr because stdout no longer works.
200
- print("ERROR: Pipe to stdout was broken", file=sys.stderr)
201
- if level_number <= logging.DEBUG:
202
- traceback.print_exc(file=sys.stderr)
203
-
204
- return ERROR
205
- except KeyboardInterrupt:
206
- logger.critical("Operation cancelled by user")
207
- logger.debug("Exception information:", exc_info=True)
208
-
209
- return ERROR
210
- except BaseException:
211
- logger.critical("Exception:", exc_info=True)
212
-
213
- return UNKNOWN_ERROR
214
-
215
- return exc_logging_wrapper
216
-
217
- try:
218
- if not options.debug_mode:
219
- run = intercepts_unhandled_exc(self.run)
220
- else:
221
- run = self.run
222
- rich_traceback.install(show_locals=True)
223
- return run(options, args)
224
- finally:
225
- self.handle_pip_version_check(options)
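The exc_logging_wrapper above follows a reusable pattern: wrap a command's run function so each anticipated failure is logged and mapped to a stable exit code. A stripped-down sketch of the same idea (the status codes and the ValueError stand-in are illustrative, not pip's real values):

import functools
import logging

logger = logging.getLogger(__name__)
ERROR, UNKNOWN_ERROR = 1, 2  # illustrative exit codes, not pip's actual constants

def intercepts_unhandled_exc(run_func):
    @functools.wraps(run_func)
    def exc_logging_wrapper(*args):
        try:
            return run_func(*args)
        except ValueError as exc:  # stand-in for the known error types above
            logger.critical(str(exc))
            return ERROR
        except BaseException:
            logger.critical("Exception:", exc_info=True)
            return UNKNOWN_ERROR
    return exc_logging_wrapper

@intercepts_unhandled_exc
def run():
    raise ValueError("boom")

print(run())  # prints 1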
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_envs.py DELETED
@@ -1,188 +0,0 @@
1
- import functools
2
- import importlib.metadata
3
- import logging
4
- import os
5
- import pathlib
6
- import sys
7
- import zipfile
8
- import zipimport
9
- from typing import Iterator, List, Optional, Sequence, Set, Tuple
10
-
11
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
12
-
13
- from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
14
- from pip._internal.models.wheel import Wheel
15
- from pip._internal.utils.deprecation import deprecated
16
- from pip._internal.utils.filetypes import WHEEL_EXTENSION
17
-
18
- from ._compat import BadMetadata, BasePath, get_dist_name, get_info_location
19
- from ._dists import Distribution
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
-
24
- def _looks_like_wheel(location: str) -> bool:
25
- if not location.endswith(WHEEL_EXTENSION):
26
- return False
27
- if not os.path.isfile(location):
28
- return False
29
- if not Wheel.wheel_file_re.match(os.path.basename(location)):
30
- return False
31
- return zipfile.is_zipfile(location)
32
-
33
-
34
- class _DistributionFinder:
35
- """Finder to locate distributions.
36
-
37
- The main purpose of this class is to memoize found distributions' names, so
38
- only one distribution is returned for each package name. A lot of pip code
39
- assumes this (because it is setuptools's behavior), and not doing the same
40
- can potentially cause a distribution in a lower-precedence path to override a
41
- higher-precedence one if the caller is not careful.
42
-
43
- Eventually we probably want to make it possible to see lower precedence
44
- installations as well. It's a useful feature, after all.
45
- """
46
-
47
- FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
48
-
49
- def __init__(self) -> None:
50
- self._found_names: Set[NormalizedName] = set()
51
-
52
- def _find_impl(self, location: str) -> Iterator[FoundResult]:
53
- """Find distributions in a location."""
54
- # Skip looking inside a wheel. Since a package inside a wheel is not
55
- # always valid (due to .data directories etc.), its .dist-info entry
56
- # should not be considered an installed distribution.
57
- if _looks_like_wheel(location):
58
- return
59
- # To know exactly where we find a distribution, we have to feed in the
60
- # paths one by one, instead of dumping the list to importlib.metadata.
61
- for dist in importlib.metadata.distributions(path=[location]):
62
- info_location = get_info_location(dist)
63
- try:
64
- raw_name = get_dist_name(dist)
65
- except BadMetadata as e:
66
- logger.warning("Skipping %s due to %s", info_location, e.reason)
67
- continue
68
- normalized_name = canonicalize_name(raw_name)
69
- if normalized_name in self._found_names:
70
- continue
71
- self._found_names.add(normalized_name)
72
- yield dist, info_location
73
-
74
- def find(self, location: str) -> Iterator[BaseDistribution]:
75
- """Find distributions in a location.
76
-
77
- The path can be either a directory, or a ZIP archive.
78
- """
79
- for dist, info_location in self._find_impl(location):
80
- if info_location is None:
81
- installed_location: Optional[BasePath] = None
82
- else:
83
- installed_location = info_location.parent
84
- yield Distribution(dist, info_location, installed_location)
85
-
86
- def find_linked(self, location: str) -> Iterator[BaseDistribution]:
87
- """Read location in egg-link files and return distributions in there.
88
-
89
- The path should be a directory; otherwise this returns nothing. This
90
- follows how setuptools does this for compatibility. The first non-empty
91
- line in the egg-link is read as a path (resolved against the egg-link's
92
- containing directory if relative). Distributions found at that linked
93
- location are returned.
94
- """
95
- path = pathlib.Path(location)
96
- if not path.is_dir():
97
- return
98
- for child in path.iterdir():
99
- if child.suffix != ".egg-link":
100
- continue
101
- with child.open() as f:
102
- lines = (line.strip() for line in f)
103
- target_rel = next((line for line in lines if line), "")
104
- if not target_rel:
105
- continue
106
- target_location = str(path.joinpath(target_rel))
107
- for dist, info_location in self._find_impl(target_location):
108
- yield Distribution(dist, info_location, path)
109
-
110
- def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
111
- from pip._vendor.pkg_resources import find_distributions
112
-
113
- from pip._internal.metadata import pkg_resources as legacy
114
-
115
- with os.scandir(location) as it:
116
- for entry in it:
117
- if not entry.name.endswith(".egg"):
118
- continue
119
- for dist in find_distributions(entry.path):
120
- yield legacy.Distribution(dist)
121
-
122
- def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
123
- from pip._vendor.pkg_resources import find_eggs_in_zip
124
-
125
- from pip._internal.metadata import pkg_resources as legacy
126
-
127
- try:
128
- importer = zipimport.zipimporter(location)
129
- except zipimport.ZipImportError:
130
- return
131
- for dist in find_eggs_in_zip(importer, location):
132
- yield legacy.Distribution(dist)
133
-
134
- def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
135
- """Find eggs in a location.
136
-
137
- This actually uses the old *pkg_resources* backend. We likely want to
138
- deprecate this so we can eventually remove the *pkg_resources*
139
- dependency entirely. Before that, this should first emit a deprecation
140
- warning for some versions when using the fallback since importing
141
- *pkg_resources* is slow for those who don't need it.
142
- """
143
- if os.path.isdir(location):
144
- yield from self._find_eggs_in_dir(location)
145
- if zipfile.is_zipfile(location):
146
- yield from self._find_eggs_in_zip(location)
147
-
148
-
149
- @functools.lru_cache(maxsize=None) # Warn a distribution exactly once.
150
- def _emit_egg_deprecation(location: Optional[str]) -> None:
151
- deprecated(
152
- reason=f"Loading egg at {location} is deprecated.",
153
- replacement="to use pip for package installation.",
154
- gone_in=None,
155
- )
156
-
157
-
158
- class Environment(BaseEnvironment):
159
- def __init__(self, paths: Sequence[str]) -> None:
160
- self._paths = paths
161
-
162
- @classmethod
163
- def default(cls) -> BaseEnvironment:
164
- return cls(sys.path)
165
-
166
- @classmethod
167
- def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
168
- if paths is None:
169
- return cls(sys.path)
170
- return cls(paths)
171
-
172
- def _iter_distributions(self) -> Iterator[BaseDistribution]:
173
- finder = _DistributionFinder()
174
- for location in self._paths:
175
- yield from finder.find(location)
176
- for dist in finder.find_eggs(location):
177
- # _emit_egg_deprecation(dist.location) # TODO: Enable this.
178
- yield dist
179
- # This must go last because that's how pkg_resources tie-breaks.
180
- yield from finder.find_linked(location)
181
-
182
- def get_distribution(self, name: str) -> Optional[BaseDistribution]:
183
- matches = (
184
- distribution
185
- for distribution in self.iter_all_distributions()
186
- if distribution.canonical_name == canonicalize_name(name)
187
- )
188
- return next(matches, None)
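For context on what the finder above wraps, the underlying importlib.metadata call can be exercised on its own. A minimal sketch (the search path is only an example directory):

import importlib.metadata

# Example search path: substitute any directory containing *.dist-info folders.
search_path = ["/usr/lib/python3/dist-packages"]

seen = set()
for dist in importlib.metadata.distributions(path=search_path):
    name = dist.metadata["Name"]
    if not name or name.lower() in seen:  # memoize names, like _DistributionFinder
        continue
    seen.add(name.lower())
    print(name, dist.version)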
spaces/BraydenMoore/MARCI-NFL-Betting/main.py DELETED
@@ -1,102 +0,0 @@
1
- from Source.Predict import predict
2
- from flask import Flask, render_template, jsonify, request, session
3
- import requests
4
- import pickle as pkl
5
- import pandas as pd
6
- import numpy as np
7
- pd.set_option('display.max_columns', None)
8
- pd.set_option('display.expand_frame_repr', False)
9
-
10
- import json
11
- with open('Source/Data/record.json','r') as f:
12
- record = json.load(f)
13
- with open('Source/Data/lines.json','r') as f:
14
- lines = json.load(f)
15
-
16
- app = Flask(__name__, template_folder="Templates", static_folder="Static", static_url_path="/Static")
17
- app.config.update(
18
- SESSION_COOKIE_SECURE=True,
19
- SESSION_COOKIE_SAMESITE='None',
20
- )
21
- app.secret_key = 'green-flounder'
22
-
23
- # get week, season
24
- current_week, season = predict.get_week()
25
- current_games = predict.get_games(current_week)[['Date','Away Team','Home Team']]
26
- available_weeks = list(range(current_week+1))[3:]
27
- available_weeks.reverse()
28
-
29
- # load current data by default
30
- @app.route('/')
31
- def index():
32
- print('Current Week', current_week)
33
- session['selected_week'] = current_week
34
-
35
- for week in available_weeks:
36
- session[f'games_week_{week}'] = None
37
-
38
- session[f'games_week_{current_week}'] = current_games.to_json()
39
- return render_template('index.html', **record)
40
-
41
- # send week list to front end
42
- @app.route('/get_weeks')
43
- def get_weeks():
44
- return jsonify(available_weeks)
45
-
46
- # send lines to front end
47
- @app.route('/get_lines')
48
- def get_lines():
49
- try:
50
- return jsonify(lines[str(session.get('selected_week'))])
51
- except KeyError:
52
- return jsonify(lines[str(current_week)])
53
-
54
- # send games of selected week to front end
55
- @app.route('/get_games')
56
- def get_games():
57
- requested_week = int(request.args.get('week'))
58
- session['selected_week'] = requested_week
59
-
60
- # If a new week is selected
61
- if requested_week and requested_week != current_week:
62
- print("Requested Week:", requested_week)
63
- # Check if that week's games are cached
64
- if session.get(f'games_week_{requested_week}'):
65
- print("Using cached games")
66
- print(session.get(f'games_week_{requested_week}'))
67
- games = session.get(f'games_week_{requested_week}')
68
- games = json.loads(games)
69
- return jsonify(games)
70
- else:
71
- games = predict.get_games(requested_week)[['Date','Away Team','Home Team']]
72
- session[f'games_week_{requested_week}'] = games.to_json(orient='records')
73
- return jsonify(games.to_dict(orient='records'))
74
- else:
75
- games = current_games
76
- return jsonify(games.to_dict(orient='records'))
77
-
78
- # make predictions
79
- @app.route('/submit_games', methods=['POST'])
80
- def submit_games():
81
- data = request.json
82
- data = pd.DataFrame(data).replace('', np.nan).dropna()
83
- home_teams = data['HomeTeam'].values
84
- away_teams = data['AwayTeam'].values
85
- ou_lines = data['OverUnderLine'].values
86
- row_indices = data['rowIndex'].values
87
-
88
- moneylines = []
89
- over_unders = []
90
- for row_index,home,away,total in zip(row_indices,home_teams,away_teams,ou_lines):
91
- selected_week = session.get('selected_week')
92
- game_id, moneyline, over_under = predict.predict(home,away,season,selected_week,total)
93
- moneyline['rowIndex'] = int(row_index)
94
- over_under['rowIndex'] = int(row_index)
95
- moneylines.append(moneyline)
96
- over_unders.append(over_under)
97
-
98
- return jsonify({'moneylines': moneylines,
99
- 'over_unders': over_unders})
100
-
101
- if __name__ == '__main__':
102
- app.run(host='0.0.0.0', port='7860', debug=True)
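Assuming the app above is running locally, its JSON endpoints can be exercised with a short client-side sketch (the chosen week is arbitrary):

import requests

BASE = "http://localhost:7860"  # matches the app.run() call above

weeks = requests.get(f"{BASE}/get_weeks").json()  # e.g. [8, 7, 6, 5, 4, 3]
games = requests.get(f"{BASE}/get_games", params={"week": weeks[0]}).json()
for g in games:
    print(g["Date"], g["Away Team"], "at", g["Home Team"])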
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/build.py DELETED
@@ -1,33 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from detectron2.layers import ShapeSpec
3
- from detectron2.utils.registry import Registry
4
-
5
- from .backbone import Backbone
6
-
7
- BACKBONE_REGISTRY = Registry("BACKBONE")
8
- BACKBONE_REGISTRY.__doc__ = """
9
- Registry for backbones, which extract feature maps from images
10
-
11
- The registered object must be a callable that accepts two arguments:
12
-
13
- 1. A :class:`detectron2.config.CfgNode`
14
- 2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification.
15
-
16
- It must return an instance of :class:`Backbone`.
17
- """
18
-
19
-
20
- def build_backbone(cfg, input_shape=None):
21
- """
22
- Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
23
-
24
- Returns:
25
- an instance of :class:`Backbone`
26
- """
27
- if input_shape is None:
28
- input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
29
-
30
- backbone_name = cfg.MODEL.BACKBONE.NAME
31
- backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)
32
- assert isinstance(backbone, Backbone)
33
- return backbone
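The registry indirection above is straightforward to exercise end to end. A minimal sketch with a toy backbone (ToyBackbone is illustrative, not a real detectron2 class):

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, build_backbone

@BACKBONE_REGISTRY.register()
class ToyBackbone(Backbone):  # illustrative stand-in for a real backbone
    def __init__(self, cfg, input_shape):
        super().__init__()
    def forward(self, x):
        return {}  # a real backbone returns {feature_name: tensor}
    def output_shape(self):
        return {}

cfg = get_cfg()
cfg.MODEL.BACKBONE.NAME = "ToyBackbone"  # looked up in BACKBONE_REGISTRY
backbone = build_backbone(cfg, ShapeSpec(channels=3))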
spaces/CVPR/LIVE/thrust/internal/benchmark/timer.h DELETED
@@ -1,129 +0,0 @@
1
- #pragma once
2
-
3
- #include <cassert>
4
-
5
- # define CUDA_SAFE_CALL_NO_SYNC( call) do { \
6
- cudaError err = call; \
7
- if( cudaSuccess != err) { \
8
- fprintf(stderr, "CUDA error in file '%s' in line %i : %s.\n", \
9
- __FILE__, __LINE__, cudaGetErrorString( err) ); \
10
- exit(EXIT_FAILURE); \
11
- } } while (0)
12
-
13
- # define CUDA_SAFE_CALL( call) do { \
14
- CUDA_SAFE_CALL_NO_SYNC(call); \
15
- cudaError err = cudaDeviceSynchronize(); \
16
- if( cudaSuccess != err) { \
17
- fprintf(stderr, "CUDA error in file '%s' in line %i : %s.\n", \
18
- __FILE__, __LINE__, cudaGetErrorString( err) ); \
19
- exit(EXIT_FAILURE); \
20
- } } while (0)
21
-
22
- class cuda_timer
23
- {
24
- cudaEvent_t start_;
25
- cudaEvent_t stop_;
26
-
27
- public:
28
- cuda_timer()
29
- {
30
- CUDA_SAFE_CALL(cudaEventCreate(&start_));
31
- CUDA_SAFE_CALL(cudaEventCreate(&stop_));
32
- }
33
-
34
- ~cuda_timer()
35
- {
36
- CUDA_SAFE_CALL(cudaEventDestroy(start_));
37
- CUDA_SAFE_CALL(cudaEventDestroy(stop_));
38
- }
39
-
40
- void start()
41
- {
42
- CUDA_SAFE_CALL(cudaEventRecord(start_, 0));
43
- }
44
-
45
- void stop()
46
- {
47
- CUDA_SAFE_CALL(cudaEventRecord(stop_, 0));
48
- CUDA_SAFE_CALL(cudaEventSynchronize(stop_));
49
- }
50
-
51
- double milliseconds_elapsed()
52
- {
53
- float elapsed_time;
54
- CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, start_, stop_));
55
- return elapsed_time;
56
- }
57
-
58
- double seconds_elapsed()
59
- {
60
- return milliseconds_elapsed() / 1000.0;
61
- }
62
- };
63
-
64
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)
65
- #include <windows.h>
66
-
67
- class steady_timer
68
- {
69
- LARGE_INTEGER frequency_; // Cached to avoid system calls.
70
- LARGE_INTEGER start_;
71
- LARGE_INTEGER stop_;
72
-
73
- public:
74
- steady_timer() : frequency_(), start_(), stop_()
75
- {
76
- BOOL const r = QueryPerformanceFrequency(&frequency_);
77
- assert(0 != r);
78
- }
79
-
80
- void start()
81
- {
82
- BOOL const r = QueryPerformanceCounter(&start_);
83
- assert(0 != r);
84
- }
85
-
86
- void stop()
87
- {
88
- BOOL const r = QueryPerformanceCounter(&stop_);
89
- assert(0 != r);
90
- }
91
-
92
- double seconds_elapsed()
93
- {
94
- return double(stop_.QuadPart - start_.QuadPart)
95
- / double(frequency_.QuadPart);
96
- }
97
- };
98
- #else
99
- #include <time.h>
100
-
101
- class steady_timer
102
- {
103
- timespec start_;
104
- timespec stop_;
105
-
106
- public:
107
- steady_timer() : start_(), stop_() {}
108
-
109
- void start()
110
- {
111
- int const r = clock_gettime(CLOCK_MONOTONIC, &start_);
112
- assert(0 == r);
113
- }
114
-
115
- void stop()
116
- {
117
- int const r = clock_gettime(CLOCK_MONOTONIC, &stop_);
118
- assert(0 == r);
119
- }
120
-
121
- double seconds_elapsed()
122
- {
123
- return double(stop_.tv_sec - start_.tv_sec)
124
- + double(stop_.tv_nsec - start_.tv_nsec) * 1.0e-9;
125
- }
126
- };
127
- #endif
128
-
129
-
spaces/CVPR/LIVE/thrust/thrust/detail/allocator/destroy_range.h DELETED
@@ -1,34 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- namespace thrust
22
- {
23
- namespace detail
24
- {
25
-
26
- template<typename Allocator, typename Pointer, typename Size>
27
- __host__ __device__
28
- inline void destroy_range(Allocator &a, Pointer p, Size n);
29
-
30
- } // end detail
31
- } // end thrust
32
-
33
- #include <thrust/detail/allocator/destroy_range.inl>
34
-
spaces/CVPR/LIVE/thrust/thrust/detail/functional/composite.h DELETED
@@ -1,163 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- // Portions of this code are derived from
18
- //
19
- // Manjunath Kudlur's Carbon library
20
- //
21
- // and
22
- //
23
- // Based on Boost.Phoenix v1.2
24
- // Copyright (c) 2001-2002 Joel de Guzman
25
-
26
- #pragma once
27
-
28
- #include <thrust/detail/functional/actor.h>
29
- #include <thrust/tuple.h>
30
-
31
- namespace thrust
32
- {
33
- namespace detail
34
- {
35
- namespace functional
36
- {
37
-
38
- // XXX we should just take a single EvalTuple
39
- template<typename Eval0,
40
- typename Eval1 = thrust::null_type,
41
- typename Eval2 = thrust::null_type,
42
- typename Eval3 = thrust::null_type,
43
- typename Eval4 = thrust::null_type,
44
- typename Eval5 = thrust::null_type,
45
- typename Eval6 = thrust::null_type,
46
- typename Eval7 = thrust::null_type,
47
- typename Eval8 = thrust::null_type,
48
- typename Eval9 = thrust::null_type,
49
- typename Eval10 = thrust::null_type>
50
- class composite;
51
-
52
- template<typename Eval0, typename Eval1>
53
- class composite<
54
- Eval0,
55
- Eval1,
56
- thrust::null_type,
57
- thrust::null_type,
58
- thrust::null_type,
59
- thrust::null_type,
60
- thrust::null_type,
61
- thrust::null_type,
62
- thrust::null_type,
63
- thrust::null_type
64
- >
65
- {
66
- public:
67
- template<typename Env>
68
- struct result
69
- {
70
- typedef typename Eval0::template result<
71
- thrust::tuple<
72
- typename Eval1::template result<Env>::type
73
- >
74
- >::type type;
75
- };
76
-
77
- __host__ __device__
78
- composite(const Eval0 &e0, const Eval1 &e1)
79
- : m_eval0(e0),
80
- m_eval1(e1)
81
- {}
82
-
83
- template<typename Env>
84
- __host__ __device__
85
- typename result<Env>::type
86
- eval(const Env &x) const
87
- {
88
- typename Eval1::template result<Env>::type result1 = m_eval1.eval(x);
89
- return m_eval0.eval(thrust::tie(result1));
90
- }
91
-
92
- private:
93
- Eval0 m_eval0;
94
- Eval1 m_eval1;
95
- }; // end composite<Eval0,Eval1>
96
-
97
- template<typename Eval0, typename Eval1, typename Eval2>
98
- class composite<
99
- Eval0,
100
- Eval1,
101
- Eval2,
102
- thrust::null_type,
103
- thrust::null_type,
104
- thrust::null_type,
105
- thrust::null_type,
106
- thrust::null_type,
107
- thrust::null_type,
108
- thrust::null_type
109
- >
110
- {
111
- public:
112
- template<typename Env>
113
- struct result
114
- {
115
- typedef typename Eval0::template result<
116
- thrust::tuple<
117
- typename Eval1::template result<Env>::type,
118
- typename Eval2::template result<Env>::type
119
- >
120
- >::type type;
121
- };
122
-
123
- __host__ __device__
124
- composite(const Eval0 &e0, const Eval1 &e1, const Eval2 &e2)
125
- : m_eval0(e0),
126
- m_eval1(e1),
127
- m_eval2(e2)
128
- {}
129
-
130
- template<typename Env>
131
- __host__ __device__
132
- typename result<Env>::type
133
- eval(const Env &x) const
134
- {
135
- typename Eval1::template result<Env>::type result1 = m_eval1.eval(x);
136
- typename Eval2::template result<Env>::type result2 = m_eval2.eval(x);
137
- return m_eval0.eval(thrust::tie(result1,result2));
138
- }
139
-
140
- private:
141
- Eval0 m_eval0;
142
- Eval1 m_eval1;
143
- Eval2 m_eval2;
144
- }; // end composite<Eval0,Eval1,Eval2>
145
-
146
- template<typename Eval0, typename Eval1>
147
- __host__ __device__
148
- actor<composite<Eval0,Eval1> > compose(const Eval0 &e0, const Eval1 &e1)
149
- {
150
- return actor<composite<Eval0,Eval1> >(composite<Eval0,Eval1>(e0,e1));
151
- }
152
-
153
- template<typename Eval0, typename Eval1, typename Eval2>
154
- __host__ __device__
155
- actor<composite<Eval0,Eval1,Eval2> > compose(const Eval0 &e0, const Eval1 &e1, const Eval2 &e2)
156
- {
157
- return actor<composite<Eval0,Eval1,Eval2> >(composite<Eval0,Eval1,Eval2>(e0,e1,e2));
158
- }
159
-
160
- } // end functional
161
- } // end detail
162
- } // end thrust
163
-
spaces/CVPR/LIVE/thrust/thrust/extrema.h DELETED
@@ -1,804 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file extrema.h
18
- * \brief Functions for computing computing extremal values
19
- */
20
-
21
- #pragma once
22
-
23
- #include <thrust/detail/config.h>
24
- #include <thrust/detail/execution_policy.h>
25
- #include <thrust/pair.h>
26
-
27
- namespace thrust
28
- {
29
-
30
-
31
- /*! This version of \p min returns the smaller of two values, given a comparison operation.
32
- * \param lhs The first value to compare.
33
- * \param rhs The second value to compare.
34
- * \param comp A comparison operation.
35
- * \return The smaller element.
36
- *
37
- * \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type.
38
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>.
39
- *
40
- * The following code snippet demonstrates how to use \p min to compute the smaller of two
41
- * key-value objects.
42
- *
43
- * \code
44
- * #include <thrust/extrema.h>
45
- * ...
46
- * struct key_value
47
- * {
48
- * int key;
49
- * int value;
50
- * };
51
- *
52
- * struct compare_key_value
53
- * {
54
- * __host__ __device__
55
- * bool operator()(key_value lhs, key_value rhs)
56
- * {
57
- * return lhs.key < rhs.key;
58
- * }
59
- * };
60
- *
61
- * ...
62
- * key_value a = {13, 0};
63
- * key_value b = { 7, 1};
64
- *
65
- * key_value smaller = thrust::min(a, b, compare_key_value());
66
- *
67
- * // smaller is {7, 1}
68
- * \endcode
69
- *
70
- * \note Returns the first argument when the arguments are equivalent.
71
- * \see max
72
- */
73
- template<typename T, typename BinaryPredicate>
74
- __host__ __device__
75
- T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
76
-
77
-
78
- /*! This version of \p min returns the smaller of two values.
79
- * \param lhs The first value to compare.
80
- * \param rhs The second value to compare.
81
- * \return The smaller element.
82
- *
83
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
84
- *
85
- * The following code snippet demonstrates how to use \p min to compute the smaller of two
86
- * integers.
87
- *
88
- * \code
89
- * #include <thrust/extrema.h>
90
- * ...
91
- * int a = 13;
92
- * int b = 7;
93
- *
94
- * int smaller = thrust::min(a, b);
95
- *
96
- * // smaller is 7
97
- * \endcode
98
- *
99
- * \note Returns the first argument when the arguments are equivalent.
100
- * \see max
101
- */
102
- template<typename T>
103
- __host__ __device__
104
- T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
105
-
106
-
107
- /*! This version of \p max returns the larger of two values, given a comparison operation.
108
- * \param lhs The first value to compare.
109
- * \param rhs The second value to compare.
110
- * \param comp A comparison operation.
111
- * \return The larger element.
112
- *
113
- * \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type.
114
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">BinaryPredicate</a>.
115
- *
116
- * The following code snippet demonstrates how to use \p max to compute the larger of two
117
- * key-value objects.
118
- *
119
- * \code
120
- * #include <thrust/extrema.h>
121
- * ...
122
- * struct key_value
123
- * {
124
- * int key;
125
- * int value;
126
- * };
127
- *
128
- * struct compare_key_value
129
- * {
130
- * __host__ __device__
131
- * bool operator()(key_value lhs, key_value rhs)
132
- * {
133
- * return lhs.key < rhs.key;
134
- * }
135
- * };
136
- *
137
- * ...
138
- * key_value a = {13, 0};
139
- * key_value b = { 7, 1};
140
- *
141
- * key_value larger = thrust::max(a, b, compare_key_value());
142
- *
143
- * // larger is {13, 0}
144
- * \endcode
145
- *
146
- * \note Returns the first argument when the arguments are equivalent.
147
- * \see min
148
- */
149
- template<typename T, typename BinaryPredicate>
150
- __host__ __device__
151
- T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
152
-
153
-
154
- /*! This version of \p max returns the larger of two values.
155
- * \param lhs The first value to compare.
156
- * \param rhs The second value to compare.
157
- * \return The larger element.
158
- *
159
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
160
- *
161
- * The following code snippet demonstrates how to use \p max to compute the larger of two
162
- * integers.
163
- *
164
- * \code
165
- * #include <thrust/extrema.h>
166
- * ...
167
- * int a = 13;
168
- * int b = 7;
169
- *
170
- * int larger = thrust::max(a, b);
171
- *
172
- * // larger is 13
173
- * \endcode
174
- *
175
- * \note Returns the first argument when the arguments are equivalent.
176
- * \see min
177
- */
178
- template<typename T>
179
- __host__ __device__
180
- T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
181
-
182
-
183
- /*! \addtogroup reductions
184
- * \{
185
- * \addtogroup extrema
186
- * \ingroup reductions
187
- * \{
188
- */
189
-
190
- /*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
191
- * It returns the first iterator \c i in <tt>[first, last)</tt>
192
- * such that no other iterator in <tt>[first, last)</tt> points to a value smaller
193
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
194
- * empty range.
195
- *
196
- * The two versions of \p min_element differ in how they define whether one element is
197
- * less than another. This version compares objects using \c operator<. Specifically,
198
- * this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
199
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*j < *i</tt> is
200
- * \c false.
201
- *
202
- * The algorithm's execution is parallelized as determined by \p exec.
203
- *
204
- * \param exec The execution policy to use for parallelization.
205
- * \param first The beginning of the sequence.
206
- * \param last The end of the sequence.
207
- * \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
208
- * if it is not an empty range; \p last, otherwise.
209
- *
210
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
211
- * and \c ForwardIterator's \c value_type is a model of
212
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
213
- *
214
- * \code
215
- * #include <thrust/extrema.h>
216
- * #include <thrust/execution_policy.h>
217
- * ...
218
- * int data[6] = {1, 0, 2, 2, 1, 3};
219
- * int *result = thrust::min_element(thrust::host, data, data + 6);
220
- *
221
- * // result is data + 1
222
- * // *result is 0
223
- * \endcode
224
- *
225
- * \see http://www.sgi.com/tech/stl/min_element.html
226
- */
227
- template<typename DerivedPolicy, typename ForwardIterator>
228
- __host__ __device__
229
- ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
230
-
231
-
232
- /*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
233
- * It returns the first iterator \c i in <tt>[first, last)</tt>
234
- * such that no other iterator in <tt>[first, last)</tt> points to a value smaller
235
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
236
- * empty range.
237
- *
238
- * The two versions of \p min_element differ in how they define whether one element is
239
- * less than another. This version compares objects using \c operator<. Specifically,
240
- * this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
241
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*j < *i</tt> is
242
- * \c false.
243
- *
244
- * \param first The beginning of the sequence.
245
- * \param last The end of the sequence.
246
- * \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
247
- * if it is not an empty range; \p last, otherwise.
248
- *
249
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
250
- * and \c ForwardIterator's \c value_type is a model of
251
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
252
- *
253
- * \code
254
- * #include <thrust/extrema.h>
255
- * ...
256
- * int data[6] = {1, 0, 2, 2, 1, 3};
257
- * int *result = thrust::min_element(data, data + 6);
258
- *
259
- * // result is data + 1
260
- * // *result is 0
261
- * \endcode
262
- *
263
- * \see http://www.sgi.com/tech/stl/min_element.html
264
- */
265
- template <typename ForwardIterator>
266
- ForwardIterator min_element(ForwardIterator first, ForwardIterator last);
267
-
268
-
269
- /*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value smaller
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p min_element differ in how they define whether one element is
- * less than another. This version compares objects using a function object \p comp.
- * Specifically, this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*j, *i)</tt> is
- * \c false.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p min_element to find the smallest element
- * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization:
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/execution_policy.h>
- * ...
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * key_value *smallest = thrust::min_element(thrust::host, data, data + 4, compare_key_value());
- *
- * // smallest == data + 1
- * // *smallest == {0,7}
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/min_element.html
- */
- template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
- __host__ __device__
- ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
-
-
- /*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value smaller
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p min_element differ in how they define whether one element is
- * less than another. This version compares objects using a function object \p comp.
- * Specifically, this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*j, *i)</tt> is
- * \c false.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p min_element to find the smallest element
- * of a collection of key-value pairs.
- *
- * \code
- * #include <thrust/extrema.h>
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * key_value *smallest = thrust::min_element(data, data + 4, compare_key_value());
- *
- * // smallest == data + 1
- * // *smallest == {0,7}
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/min_element.html
- */
- template <typename ForwardIterator, typename BinaryPredicate>
- ForwardIterator min_element(ForwardIterator first, ForwardIterator last,
-                             BinaryPredicate comp);
-
-
- /*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value larger
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p max_element differ in how they define whether one element is
- * greater than another. This version compares objects using \c operator<. Specifically,
- * this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*i < *j</tt> is
- * \c false.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \c ForwardIterator's \c value_type is a model of
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/execution_policy.h>
- * ...
- * int data[6] = {1, 0, 2, 2, 1, 3};
- * int *result = thrust::max_element(thrust::host, data, data + 6);
- *
- * // result is data + 5
- * // *result == 3
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/max_element.html
- */
- template<typename DerivedPolicy, typename ForwardIterator>
- __host__ __device__
- ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
-
-
- /*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value larger
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p max_element differ in how they define whether one element is
- * greater than another. This version compares objects using \c operator<. Specifically,
- * this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*i < *j</tt> is
- * \c false.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \c ForwardIterator's \c value_type is a model of
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
- *
- * \code
- * #include <thrust/extrema.h>
- * ...
- * int data[6] = {1, 0, 2, 2, 1, 3};
- * int *result = thrust::max_element(data, data + 6);
- *
- * // result is data + 5
- * // *result == 3
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/max_element.html
- */
- template <typename ForwardIterator>
- ForwardIterator max_element(ForwardIterator first, ForwardIterator last);
-
-
- /*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value larger
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p max_element differ in how they define whether one element is
- * greater than another. This version compares objects using a function object \p comp.
- * Specifically, this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*i, *j)</tt> is
- * \c false.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p max_element to find the largest element
- * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization.
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/execution_policy.h>
- * ...
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * key_value *largest = thrust::max_element(thrust::host, data, data + 4, compare_key_value());
- *
- * // largest == data + 3
- * // *largest == {6,1}
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/max_element.html
- */
- template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
- __host__ __device__
- ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
-
-
- /*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
- * It returns the first iterator \c i in <tt>[first, last)</tt>
- * such that no other iterator in <tt>[first, last)</tt> points to a value larger
- * than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
- * empty range.
- *
- * The two versions of \p max_element differ in how they define whether one element is
- * greater than another. This version compares objects using a function object \p comp.
- * Specifically, this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
- * such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*i, *j)</tt> is
- * \c false.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
- * if it is not an empty range; \p last, otherwise.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p max_element to find the largest element
- * of a collection of key-value pairs.
- *
- * \code
- * #include <thrust/extrema.h>
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * key_value *largest = thrust::max_element(data, data + 4, compare_key_value());
- *
- * // largest == data + 3
- * // *largest == {6,1}
- * \endcode
- *
- * \see http://www.sgi.com/tech/stl/max_element.html
- */
- template <typename ForwardIterator, typename BinaryPredicate>
- ForwardIterator max_element(ForwardIterator first, ForwardIterator last,
-                             BinaryPredicate comp);
-
-
- /*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
- * It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
- * returned by \p min_element and \c imax is the same iterator returned by \p max_element.
- * This function is potentially more efficient than separate calls to \p min_element and \p max_element.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \return A pair of iterators pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
- * if it is not an empty range; <tt>(last, last)</tt>, otherwise.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \c ForwardIterator's \c value_type is a model of
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/execution_policy.h>
- * ...
- * int data[6] = {1, 0, 2, 2, 1, 3};
- * thrust::pair<int *, int *> result = thrust::minmax_element(thrust::host, data, data + 6);
- *
- * // result.first is data + 1
- * // result.second is data + 5
- * // *result.first is 0
- * // *result.second is 3
- * \endcode
- *
- * \see min_element
- * \see max_element
- * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
- */
- template<typename DerivedPolicy, typename ForwardIterator>
- __host__ __device__
- thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
-
-
- /*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
- * It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
- * returned by \p min_element and \c imax is the same iterator returned by \p max_element.
- * This function is potentially more efficient than separate calls to \p min_element and \p max_element.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \return A pair of iterators pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
- * if it is not an empty range; <tt>(last, last)</tt>, otherwise.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \c ForwardIterator's \c value_type is a model of
- * <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
- *
- * \code
- * #include <thrust/extrema.h>
- * ...
- * int data[6] = {1, 0, 2, 2, 1, 3};
- * thrust::pair<int *, int *> result = thrust::minmax_element(data, data + 6);
- *
- * // result.first is data + 1
- * // result.second is data + 5
- * // *result.first is 0
- * // *result.second is 3
- * \endcode
- *
- * \see min_element
- * \see max_element
- * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
- */
- template <typename ForwardIterator>
- thrust::pair<ForwardIterator,ForwardIterator> minmax_element(ForwardIterator first,
-                                                              ForwardIterator last);
-
-
- /*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
- * It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
- * returned by \p min_element and \c imax is the same iterator returned by \p max_element.
- * This function is potentially more efficient than separate calls to \p min_element and \p max_element.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return A pair of iterators pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
- * if it is not an empty range; <tt>(last, last)</tt>, otherwise.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements
- * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization:
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/pair.h>
- * #include <thrust/execution_policy.h>
- * ...
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * thrust::pair<key_value*,key_value*> extrema = thrust::minmax_element(thrust::host, data, data + 4, compare_key_value());
- *
- * // extrema.first == data + 1
- * // *extrema.first == {0,7}
- * // extrema.second == data + 3
- * // *extrema.second == {6,1}
- * \endcode
- *
- * \see min_element
- * \see max_element
- * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
- */
- template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
- __host__ __device__
- thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
-
-
- /*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
- * It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
- * returned by \p min_element and \c imax is the same iterator returned by \p max_element.
- * This function is potentially more efficient than separate calls to \p min_element and \p max_element.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param comp A binary predicate used for comparison.
- * \return A pair of iterators pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
- * if it is not an empty range; <tt>(last, last)</tt>, otherwise.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator's \c value_type is convertible to both \p comp's
- * \c first_argument_type and \c second_argument_type.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements
- * of a collection of key-value pairs.
- *
- * \code
- * #include <thrust/extrema.h>
- * #include <thrust/pair.h>
- *
- * struct key_value
- * {
- *   int key;
- *   int value;
- * };
- *
- * struct compare_key_value
- * {
- *   __host__ __device__
- *   bool operator()(key_value lhs, key_value rhs)
- *   {
- *     return lhs.key < rhs.key;
- *   }
- * };
- *
- * ...
- * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
- *
- * thrust::pair<key_value*,key_value*> extrema = thrust::minmax_element(data, data + 4, compare_key_value());
- *
- * // extrema.first == data + 1
- * // *extrema.first == {0,7}
- * // extrema.second == data + 3
- * // *extrema.second == {6,1}
- * \endcode
- *
- * \see min_element
- * \see max_element
- * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
- */
- template <typename ForwardIterator, typename BinaryPredicate>
- thrust::pair<ForwardIterator,ForwardIterator> minmax_element(ForwardIterator first,
-                                                              ForwardIterator last,
-                                                              BinaryPredicate comp);
-
- /*! \} // end extrema
- * \} // end reductions
- */
-
- } // end thrust
-
- #include <thrust/detail/extrema.inl>
- #include <thrust/detail/minmax.h>
-
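The tie-breaking rule documented above (return the first iterator `i` for which `comp(*j, *i)` is false for every `j`) amounts to a scan that keeps the earliest minimum. Below is a minimal Python sketch of just that semantics, purely illustrative and unrelated to Thrust's parallel implementation:

```python
# Illustrative sketch of the documented min_element semantics (not Thrust code):
# return the index of the first element x such that comp(y, x) is False for all y.
def min_element(seq, comp=lambda a, b: a < b):
    if not seq:                      # empty range: the docs return `last`
        return len(seq)
    best = 0
    for i in range(1, len(seq)):
        if comp(seq[i], seq[best]):  # strictly smaller under comp: move best
            best = i                 # ties keep the earlier index
    return best

assert min_element([1, 0, 2, 2, 1, 3]) == 1   # matches "result is data + 1"
```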
 
spaces/CVPR/regionclip-demo/detectron2/structures/instances.py DELETED
@@ -1,191 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import itertools
- from typing import Any, Dict, List, Tuple, Union
- import torch
-
-
- class Instances:
-     """
-     This class represents a list of instances in an image.
-     It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
-     All fields must have the same ``__len__``, which is the number of instances.
-
-     All other (non-field) attributes of this class are considered private:
-     they must start with '_' and are not modifiable by a user.
-
-     Some basic usage:
-
-     1. Set/get/check a field:
-
-        .. code-block:: python
-
-           instances.gt_boxes = Boxes(...)
-           print(instances.pred_masks)  # a tensor of shape (N, H, W)
-           print('gt_masks' in instances)
-
-     2. ``len(instances)`` returns the number of instances
-     3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
-        and returns a new :class:`Instances`.
-        Typically, ``indices`` is an integer vector of indices,
-        or a binary mask of length ``num_instances``
-
-        .. code-block:: python
-
-           category_3_detections = instances[instances.pred_classes == 3]
-           confident_detections = instances[instances.scores > 0.9]
-     """
-
-     def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
-         """
-         Args:
-             image_size (height, width): the spatial size of the image.
-             kwargs: fields to add to this `Instances`.
-         """
-         self._image_size = image_size
-         self._fields: Dict[str, Any] = {}
-         for k, v in kwargs.items():
-             self.set(k, v)
-
-     @property
-     def image_size(self) -> Tuple[int, int]:
-         """
-         Returns:
-             tuple: height, width
-         """
-         return self._image_size
-
-     def __setattr__(self, name: str, val: Any) -> None:
-         if name.startswith("_"):
-             super().__setattr__(name, val)
-         else:
-             self.set(name, val)
-
-     def __getattr__(self, name: str) -> Any:
-         if name == "_fields" or name not in self._fields:
-             raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
-         return self._fields[name]
-
-     def set(self, name: str, value: Any) -> None:
-         """
-         Set the field named `name` to `value`.
-         The length of `value` must be the number of instances,
-         and must agree with other existing fields in this object.
-         """
-         data_len = len(value)
-         if len(self._fields):
-             assert (
-                 len(self) == data_len
-             ), "Adding a field of length {} to an Instances of length {}".format(data_len, len(self))
-         self._fields[name] = value
-
-     def has(self, name: str) -> bool:
-         """
-         Returns:
-             bool: whether the field called `name` exists.
-         """
-         return name in self._fields
-
-     def remove(self, name: str) -> None:
-         """
-         Remove the field called `name`.
-         """
-         del self._fields[name]
-
-     def get(self, name: str) -> Any:
-         """
-         Returns the field called `name`.
-         """
-         return self._fields[name]
-
-     def get_fields(self) -> Dict[str, Any]:
-         """
-         Returns:
-             dict: a dict which maps names (str) to data of the fields
-
-         Modifying the returned dict will modify this instance.
-         """
-         return self._fields
-
-     # Tensor-like methods
-     def to(self, *args: Any, **kwargs: Any) -> "Instances":
-         """
-         Returns:
-             Instances: all fields are called with a `to(device)`, if the field has this method.
-         """
-         ret = Instances(self._image_size)
-         for k, v in self._fields.items():
-             if hasattr(v, "to"):
-                 v = v.to(*args, **kwargs)
-             ret.set(k, v)
-         return ret
-
-     def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
-         """
-         Args:
-             item: an index-like object that will be used to index all the fields.
-
-         Returns:
-             Instances: a new `Instances` where all fields are indexed by `item`.
-         """
-         if type(item) == int:
-             if item >= len(self) or item < -len(self):
-                 raise IndexError("Instances index out of range!")
-             else:
-                 item = slice(item, None, len(self))
-
-         ret = Instances(self._image_size)
-         for k, v in self._fields.items():
-             ret.set(k, v[item])
-         return ret
-
-     def __len__(self) -> int:
-         for v in self._fields.values():
-             # use __len__ because len() has to be int and is not friendly to tracing
-             return v.__len__()
-         raise NotImplementedError("Empty Instances does not support __len__!")
-
-     def __iter__(self):
-         raise NotImplementedError("`Instances` object is not iterable!")
-
-     @staticmethod
-     def cat(instance_lists: List["Instances"]) -> "Instances":
-         """
-         Args:
-             instance_lists (list[Instances])
-
-         Returns:
-             Instances
-         """
-         assert all(isinstance(i, Instances) for i in instance_lists)
-         assert len(instance_lists) > 0
-         if len(instance_lists) == 1:
-             return instance_lists[0]
-
-         image_size = instance_lists[0].image_size
-         for i in instance_lists[1:]:
-             assert i.image_size == image_size
-         ret = Instances(image_size)
-         for k in instance_lists[0]._fields.keys():
-             values = [i.get(k) for i in instance_lists]
-             v0 = values[0]
-             if isinstance(v0, torch.Tensor):
-                 values = torch.cat(values, dim=0)
-             elif isinstance(v0, list):
-                 values = list(itertools.chain(*values))
-             elif hasattr(type(v0), "cat"):
-                 values = type(v0).cat(values)
-             else:
-                 raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
-             ret.set(k, values)
-         return ret
-
-     def __str__(self) -> str:
-         s = self.__class__.__name__ + "("
-         s += "num_instances={}, ".format(len(self))
-         s += "image_height={}, ".format(self._image_size[0])
-         s += "image_width={}, ".format(self._image_size[1])
-         s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
-         return s
-
-     __repr__ = __str__
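For reference, a minimal usage sketch of the `Instances` class above; the field names (`scores`, `pred_classes`) are illustrative, not required by the class:

```python
import torch

# Hypothetical usage; any equal-length sized values can serve as fields.
inst = Instances((480, 640))
inst.scores = torch.tensor([0.9, 0.3, 0.7])
inst.pred_classes = torch.tensor([3, 1, 3])

confident = inst[inst.scores > 0.5]        # boolean-mask indexing over all fields
cat3 = inst[inst.pred_classes == 3]        # same idea, filtering by class
merged = Instances.cat([confident, cat3])  # tensor fields are torch.cat'ed per key
print(len(merged), merged.image_size)      # 4 (480, 640)
```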
 
spaces/ChandraMohanNayal/AutoGPT/tests.py DELETED
@@ -1,21 +0,0 @@
- import unittest
-
- import coverage
-
- if __name__ == "__main__":
-     # Start coverage collection
-     cov = coverage.Coverage()
-     cov.start()
-
-     # Load all tests from the "./tests" directory
-     suite = unittest.defaultTestLoader.discover("./tests")
-
-     # Run the tests
-     unittest.TextTestRunner().run(suite)
-
-     # Stop coverage collection
-     cov.stop()
-     cov.save()
-
-     # Report the coverage
-     cov.report(show_missing=True)
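For context, `unittest.defaultTestLoader.discover("./tests")` above picks up any module matching the loader's default `test*.py` filename pattern. A hypothetical minimal module it would find:

```python
# tests/test_smoke.py -- an illustrative test module; the name and assertion
# are made up, but any "test*.py" file under ./tests would be discovered.
import unittest


class TestSmoke(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```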
 
spaces/ChenyangSi/FreeU/stable-diffusion-2-1/README.md DELETED
@@ -1,185 +0,0 @@
- ---
- license: openrail++
- tags:
- - stable-diffusion
- - text-to-image
- pinned: true
- ---
-
- # Stable Diffusion v2-1 Model Card
- This model card focuses on the model associated with the Stable Diffusion v2-1 model, codebase available [here](https://github.com/Stability-AI/stablediffusion).
-
- This `stable-diffusion-2-1` model is fine-tuned from [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) (`768-v-ema.ckpt`) with an additional 55k steps on the same dataset (with `punsafe=0.1`), and then fine-tuned for another 155k extra steps with `punsafe=0.98`.
-
- - Use it with the [`stablediffusion`](https://github.com/Stability-AI/stablediffusion) repository: download the `v2-1_768-ema-pruned.ckpt` [here](https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.ckpt).
- - Use it with 🧨 [`diffusers`](#examples)
-
- ## Model Details
- - **Developed by:** Robin Rombach, Patrick Esser
- - **Model type:** Diffusion-based text-to-image generation model
- - **Language(s):** English
- - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL)
- - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([OpenCLIP-ViT/H](https://github.com/mlfoundations/open_clip)).
- - **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/).
- - **Cite as:**
-
-       @InProceedings{Rombach_2022_CVPR,
-           author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
-           title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
-           booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-           month     = {June},
-           year      = {2022},
-           pages     = {10684-10695}
-       }
-
-
- ## Examples
-
- Using the [🤗's Diffusers library](https://github.com/huggingface/diffusers) to run Stable Diffusion 2 in a simple and efficient manner.
-
- ```bash
- pip install diffusers transformers accelerate scipy safetensors
- ```
- Running the pipeline (if you don't swap the scheduler, it will run with the default DDIM; in this example we swap it to DPMSolverMultistepScheduler):
-
- ```python
- import torch
- from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
-
- model_id = "stabilityai/stable-diffusion-2-1"
-
- # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
- pipe = pipe.to("cuda")
-
- prompt = "a photo of an astronaut riding a horse on mars"
- image = pipe(prompt).images[0]
-
- image.save("astronaut_rides_horse.png")
- ```
-
- **Notes**:
- - Despite not being a dependency, we highly recommend you to install [xformers](https://github.com/facebookresearch/xformers) for memory-efficient attention (better performance)
- - If you have low GPU RAM available, make sure to add a `pipe.enable_attention_slicing()` after sending it to `cuda` for less VRAM usage (at the cost of speed); a short sketch follows this file
-
-
- # Uses
-
- ## Direct Use
- The model is intended for research purposes only. Possible research areas and tasks include
-
- - Safe deployment of models which have the potential to generate harmful content.
- - Probing and understanding the limitations and biases of generative models.
- - Generation of artworks and use in design and other artistic processes.
- - Applications in educational or creative tools.
- - Research on generative models.
-
- Excluded uses are described below.
-
- ### Misuse, Malicious Use, and Out-of-Scope Use
- _Note: This section is originally taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), was used for Stable Diffusion v1, but applies in the same way to Stable Diffusion v2_.
-
- The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
-
- #### Out-of-Scope Use
- The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
-
- #### Misuse and Malicious Use
- Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
-
- - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- - Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- - Impersonating individuals without their consent.
- - Sexual content without consent of the people who might see it.
- - Mis- and disinformation
- - Representations of egregious violence and gore
- - Sharing of copyrighted or licensed material in violation of its terms of use.
- - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
-
- ## Limitations and Bias
-
- ### Limitations
-
- - The model does not achieve perfect photorealism
- - The model cannot render legible text
- - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
- - Faces and people in general may not be generated properly.
- - The model was trained mainly with English captions and will not work as well in other languages.
- - The autoencoding part of the model is lossy
- - The model was trained on a subset of the large-scale dataset
-   [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, we have filtered the dataset using LAION's NSFW detector (see Training section).
-
- ### Bias
- While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
- Stable Diffusion was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
- which consists of images that are limited to English descriptions.
- Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
- This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
- ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
- Stable Diffusion v2 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.
-
-
- ## Training
-
- **Training Data**
- The model developers used the following dataset for training the model:
-
- - LAION-5B and subsets (details below). The training data is further filtered using LAION's NSFW detector, with a "p_unsafe" score of 0.1 (conservative). For more details, please refer to LAION-5B's [NeurIPS 2022](https://openreview.net/forum?id=M3Y74vmsMcY) paper and reviewer discussions on the topic.
-
- **Training Procedure**
- Stable Diffusion v2 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,
-
- - Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
- - Text prompts are encoded through the OpenCLIP-ViT/H text-encoder.
- - The output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
- - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We also use the so-called _v-objective_, see https://arxiv.org/abs/2202.00512.
-
- We currently provide the following checkpoints:
-
- - `512-base-ema.ckpt`: 550k steps at resolution `256x256` on a subset of [LAION-5B](https://laion.ai/blog/laion-5b/) filtered for explicit pornographic material, using the [LAION-NSFW classifier](https://github.com/LAION-AI/CLIP-based-NSFW-Detector) with `punsafe=0.1` and an [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor) >= `4.5`.
-   850k steps at resolution `512x512` on the same dataset with resolution `>= 512x512`.
- - `768-v-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for 150k steps using a [v-objective](https://arxiv.org/abs/2202.00512) on the same dataset. Resumed for another 140k steps on a `768x768` subset of our dataset.
- - `512-depth-ema.ckpt`: Resumed from `512-base-ema.ckpt` and finetuned for 200k steps. Added an extra input channel to process the (relative) depth prediction produced by [MiDaS](https://github.com/isl-org/MiDaS) (`dpt_hybrid`) which is used as an additional conditioning.
-   The additional input channels of the U-Net which process this extra information were zero-initialized.
- - `512-inpainting-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for another 200k steps. Follows the mask-generation strategy presented in [LAMA](https://github.com/saic-mdal/lama) which, in combination with the latent VAE representations of the masked image, are used as an additional conditioning.
-   The additional input channels of the U-Net which process this extra information were zero-initialized. The same strategy was used to train the [1.5-inpainting checkpoint](https://huggingface.co/runwayml/stable-diffusion-inpainting).
- - `x4-upscaling-ema.ckpt`: Trained for 1.25M steps on a 10M subset of LAION containing images `>2048x2048`. The model was trained on crops of size `512x512` and is a text-guided [latent upscaling diffusion model](https://arxiv.org/abs/2112.10752).
-   In addition to the textual input, it receives a `noise_level` as an input parameter, which can be used to add noise to the low-resolution input according to a [predefined diffusion schedule](configs/stable-diffusion/x4-upscaling.yaml).
-
- - **Hardware:** 32 x 8 x A100 GPUs
- - **Optimizer:** AdamW
- - **Gradient Accumulations**: 1
- - **Batch:** 32 x 8 x 2 x 4 = 2048
- - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant
-
- ## Evaluation Results
- Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
- 5.0, 6.0, 7.0, 8.0) and 50 DDIM sampling steps show the relative improvements of the checkpoints:
-
- ![pareto](model-variants.jpg)
-
- Evaluated using 50 DDIM steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.
-
- ## Environmental Impact
-
- **Stable Diffusion v1** **Estimated Emissions**
- Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.
-
- - **Hardware Type:** A100 PCIe 40GB
- - **Hours used:** 200000
- - **Cloud Provider:** AWS
- - **Compute Region:** US-east
- - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 15000 kg CO2 eq.
-
- ## Citation
-
-     @InProceedings{Rombach_2022_CVPR,
-         author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
-         title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
-         booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-         month     = {June},
-         year      = {2022},
-         pages     = {10684-10695}
-     }
-
- *This model card was written by: Robin Rombach, Patrick Esser and David Ha and is based on the [Stable Diffusion v1](https://github.com/CompVis/stable-diffusion/blob/main/Stable_Diffusion_v1_Model_Card.md) and [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
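The Notes in the card above mention `pipe.enable_attention_slicing()` for low-VRAM GPUs without showing it in context. A minimal sketch, reusing the card's own model id and prompt (illustrative, not part of the original card):

```python
import torch
from diffusers import StableDiffusionPipeline

# Same pipeline as the card's example, plus the low-VRAM note applied.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # lowers peak VRAM at some cost in speed

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
image.save("astronaut_rides_horse.png")
```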
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/__init__.py DELETED
@@ -1,107 +0,0 @@
- import pkgutil
-
- import gradio.components as components
- import gradio.inputs as inputs
- import gradio.outputs as outputs
- import gradio.processing_utils
- import gradio.templates
- import gradio.themes as themes
- from gradio.blocks import Blocks
- from gradio.chat_interface import ChatInterface
- from gradio.components import (
-     HTML,
-     JSON,
-     AnnotatedImage,
-     Annotatedimage,
-     Audio,
-     BarPlot,
-     Button,
-     Carousel,
-     Chatbot,
-     Checkbox,
-     CheckboxGroup,
-     Checkboxgroup,
-     ClearButton,
-     Code,
-     ColorPicker,
-     DataFrame,
-     Dataframe,
-     Dataset,
-     Dropdown,
-     DuplicateButton,
-     File,
-     Gallery,
-     Highlight,
-     HighlightedText,
-     Highlightedtext,
-     Image,
-     Interpretation,
-     Json,
-     Label,
-     LinePlot,
-     Markdown,
-     Model3D,
-     Number,
-     Plot,
-     Radio,
-     ScatterPlot,
-     Slider,
-     State,
-     StatusTracker,
-     Text,
-     Textbox,
-     TimeSeries,
-     Timeseries,
-     UploadButton,
-     Variable,
-     Video,
-     component,
- )
- from gradio.deploy_space import deploy
- from gradio.events import SelectData
- from gradio.exceptions import Error
- from gradio.external import load
- from gradio.flagging import (
-     CSVLogger,
-     FlaggingCallback,
-     HuggingFaceDatasetJSONSaver,
-     HuggingFaceDatasetSaver,
-     SimpleCSVLogger,
- )
- from gradio.helpers import (
-     EventData,
-     Info,
-     Progress,
-     Warning,
-     make_waveform,
-     skip,
-     update,
- )
- from gradio.helpers import create_examples as Examples  # noqa: N812
- from gradio.interface import Interface, TabbedInterface, close_all
- from gradio.ipython_ext import load_ipython_extension
- from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs
- from gradio.mix import Parallel, Series
- from gradio.routes import Request, mount_gradio_app
- from gradio.templates import (
-     Files,
-     ImageMask,
-     ImagePaint,
-     List,
-     Matrix,
-     Mic,
-     Microphone,
-     Numpy,
-     Paint,
-     Pil,
-     PlayableVideo,
-     Sketchpad,
-     TextArea,
-     Webcam,
- )
- from gradio.themes import Base as Theme
-
- current_pkg_version = (
-     (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
- )
- __version__ = current_pkg_version
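The module above only re-exports names. A minimal sketch of how two of those exports (`Interface` and `Textbox`) are typically combined; the `greet` function and component choices are illustrative:

```python
import gradio as gr

def greet(name: str) -> str:
    # Toy handler: echoes a greeting back to the user.
    return f"Hello, {name}!"

# Interface and Textbox come from the export list shown above.
demo = gr.Interface(fn=greet, inputs=gr.Textbox(), outputs=gr.Textbox())
demo.launch()  # serves the app locally
```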