parquet-converter commited on
Commit
93ae845
·
1 Parent(s): d01499e

Update parquet files (step 46 of 121)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Cracker Template VERIFIED.md +0 -47
  2. spaces/1gistliPinn/ChatGPT4/Examples/Appgini Php Code Generator For Mysql 4 53 Incl Crackzip Turn Your MySQL Data into Dynamic Web Pages.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Download White Cap Platinum Crack ((FREE)).md +0 -50
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bitcoin Software A Step-by-Step Tutorial.md +0 -108
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox Series XS Experience the Ultimate Grand Theft Auto V Adventure.md +0 -118
  6. spaces/1phancelerku/anime-remove-background/Chikii How to Stream Hundreds of Games on Android without Downloading.md +0 -129
  7. spaces/232labs/VToonify/vtoonify/util.py +0 -229
  8. spaces/2ndelement/voicevox/voicevox_engine/model.py +0 -282
  9. spaces/4Taps/SadTalker/src/audio2exp_models/networks.py +0 -74
  10. spaces/A00001/bingothoo/src/lib/hooks/use-bing.ts +0 -173
  11. spaces/AIFILMS/generate_human_motion/VQ-Trans/models/evaluator_wrapper.py +0 -92
  12. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/scene.py +0 -585
  13. spaces/AIFILMS/scene-edit-detection/README.md +0 -13
  14. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/r/[id]/message/[messageId]/prompt/+server.ts +0 -47
  15. spaces/AlexZou/SCUTAUTO210b/app.py +0 -45
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +0 -791
  17. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/README.md +0 -73
  18. spaces/Arafath10/chatcode/README.md +0 -12
  19. spaces/AriaMei/TTSdemo/preprocess.py +0 -25
  20. spaces/Armandoliv/gpt2-tweets-generation-app/app.py +0 -54
  21. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/develop.py +0 -193
  22. spaces/AutoLLM/ArxivDigest/relevancy.py +0 -174
  23. spaces/Awesimo/jojogan/e4e/utils/data_utils.py +0 -25
  24. spaces/Benson/text-generation/Examples/Descargar Choque De Clanes Linux.md +0 -137
  25. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/endpoint.py +0 -443
  26. spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/__init__.py +0 -12
  27. spaces/CNXT/CHaTx/README.md +0 -11
  28. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/getting_started.md +0 -1
  29. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/visual_genome.py +0 -149
  30. spaces/CVPR/LIVE/pybind11/tests/test_numpy_dtypes.cpp +0 -474
  31. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/equal.h +0 -22
  32. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/execution_policy.h +0 -81
  33. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scatter.h +0 -23
  34. spaces/CVPR/monoscene_lite/monoscene/DDR.py +0 -139
  35. spaces/Cicooo/vits-uma-genshin-honkai/text/cleaners.py +0 -475
  36. spaces/CofAI/Kemal-Diffusion/README.md +0 -13
  37. spaces/ConceptArtHouse/webui-gameasset/README.md +0 -20
  38. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_d_m_x.py +0 -119
  39. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/conftest.py +0 -55
  40. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-150cb53b.js +0 -2
  41. spaces/Datasculptor/MusicGen/audiocraft/utils/utils.py +0 -234
  42. spaces/Datasculptor/StyleGAN-NADA/e4e/utils/__init__.py +0 -0
  43. spaces/Dauzy/whisper-webui/src/prompts/prependPromptStrategy.py +0 -31
  44. spaces/DeepDrivePL/PaddleSeg-Matting/matting/transforms.py +0 -530
  45. spaces/DragGan/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/networks.py +0 -966
  46. spaces/DragGan/DragGan-Inversion/stylegan_human/utils/log_utils.py +0 -84
  47. spaces/EXPOSUREEE/Ai-Image-Enhancer/tests/test_dataset.py +0 -151
  48. spaces/ElainaFanBoy/MusicGen/CONTRIBUTING.md +0 -35
  49. spaces/EsoCode/text-generation-webui/docs/Training-LoRAs.md +0 -174
  50. spaces/FelixLuoX/stable_diffusion_test/share_btn.py +0 -60
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Cracker Template VERIFIED.md DELETED
@@ -1,47 +0,0 @@
1
- <br />
2
- Title: How to Make Your Own Crackers with a Free Cracker Template
3
-
4
- Article:
5
-
6
- <h1>How to Make Your Own Crackers with a Free Cracker Template</h1>
7
-
8
- <p>Crackers are a delicious and versatile snack that can be enjoyed with cheese, dips, spreads, or on their own. They are also easy and fun to make at home with simple ingredients and tools. You can customize your crackers with different flavors, shapes, and sizes. You can also make them more festive and creative by using a free cracker template.</p>
9
- <h2>free cracker template</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733; <a href="https://byltly.com/2uKwiX">https://byltly.com/2uKwiX</a></b></p><br /><br />
10
-
11
- <p>A free cracker template is a printable pattern that you can use to cut out your cracker dough into various designs. You can find many free cracker templates online or create your own using a drawing software. Some examples of free cracker template designs are stars, hearts, flowers, animals, letters, numbers, and more.</p>
12
-
13
- <p>To make your own crackers with a free cracker template, you will need the following ingredients and tools:</p>
14
-
15
- <ul>
16
- <li>2 cups of all-purpose flour</li>
17
- <li>1 teaspoon of salt</li>
18
- <li>2 tablespoons of oil</li>
19
- <li>1/2 cup of water</li>
20
- <li>Your choice of seasonings, such as sesame seeds, poppy seeds, rosemary, garlic powder, etc.</li>
21
- <li>A rolling pin</li>
22
- <li>A baking sheet</li>
23
- <li>Parchment paper</li>
24
- <li>A free cracker template</li>
25
- <li>A knife or a cookie cutter</li>
26
- <li>A fork</li>
27
- </ul>
28
-
29
- <p>Here are the steps to make your own crackers with a free cracker template:</p>
30
-
31
- <ol>
32
- <li>Preheat your oven to 180°C (350°F) and line your baking sheet with parchment paper.</li>
33
- <li>In a large bowl, mix the flour and salt together.</li>
34
- <li>Add the oil and water and stir until a dough forms.</li>
35
- <li>Knead the dough on a lightly floured surface for about 10 minutes or until smooth and elastic.</li>
36
- <li>Divide the dough into four equal portions and roll out each portion into a thin rectangle.</li>
37
- <li>Sprinkle your choice of seasonings over the dough and press lightly with the rolling pin.</li>
38
- <li>Place your free cracker template over the dough and cut out the shapes with a knife or a cookie cutter.</li>
39
- <li>Transfer the cut-out crackers to the prepared baking sheet and prick them with a fork to prevent them from puffing up.</li>
40
- <li>Bake the crackers for 15 to 20 minutes or until golden and crisp.</li>
41
- <li>Let the crackers cool completely on a wire rack before storing them in an airtight container.</li>
42
- </ol>
43
-
44
- <p>Enjoy your homemade crackers with a free cracker template!</p>
45
- <p></p> ddb901b051<br />
46
- <br />
47
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Appgini Php Code Generator For Mysql 4 53 Incl Crackzip Turn Your MySQL Data into Dynamic Web Pages.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Appgini Php Code Generator For Mysql 4 53 Incl Crackzip</h2><br /><p><b><b>Download</b> &#10042; <a href="https://imgfil.com/2uxZSI">https://imgfil.com/2uxZSI</a></b></p><br /><br />
2
-
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Download White Cap Platinum Crack ((FREE)).md DELETED
@@ -1,50 +0,0 @@
1
- <h2>Download White Cap Platinum Crack</h2><br /><p><b><b>Download Zip</b> &#10040; <a href="https://imgfil.com/2uy11W">https://imgfil.com/2uy11W</a></b></p><br /><br />
2
- <br />
3
- _____. It has been a beautiful day, and you go to open your gate to the backyard, and find a starfish resting at the front door.
4
-
5
- 14. ___________ get a little bit upset about this. It is raining in your house, and your pet cat is being fed a warm cup of milk. But you are not sure that she likes it.
6
-
7
- 15. The man in the picture is speaking to you. Do you know who he is?
8
-
9
- 16. The bad thing about being a mom is ___.
10
-
11
- 17. After you have read these questions, you will probably think _____.
12
-
13
- 18. _____________. There is a meteor shower tonight, and you are thinking about going to a movie.
14
-
15
- 19. The woman in the picture is a _____.
16
-
17
- 20. I need a little ____ for my cousin to have a good Christmas this year.
18
-
19
- 21. Is that ____ on the bus? The bus is coming to your house.
20
-
21
- 22. The man in the picture is playing in the snow. _____.
22
-
23
- 23. What is that on the man’s ____?
24
-
25
- 24. The person in the picture is running toward you. Do you think he is playing a game with you?
26
-
27
- 25. The little boy in the picture is _____ a cat.
28
-
29
- 26. The man in the picture is giving you a ____.
30
-
31
- 27. The woman in the picture is coming to visit you. Do you think she has a good idea of what she is doing?
32
-
33
- 28. The man in the picture is helping his wife. Do you think he is _____?
34
-
35
- 29. Do you know where you are? You are in _____, and you have walked over to the store to get some _____ for your cousin.
36
-
37
- 30. The man in the picture is _____.
38
-
39
- 31. I am going to look at a picture of people from the past, and tell you if you are in it. The first picture I am looking at is a young man. Do you think it is you?
40
-
41
- 32. How is it that you can see _____ here?
42
-
43
- 33. If you were to take a picture of me, would you like _____?
44
-
45
- 34. The man in the picture is taking a picture of _____.
46
-
47
- 35. _____________. You are sitting in a classroom, and 4fefd39f24<br />
48
- <br />
49
- <br />
50
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bitcoin Software A Step-by-Step Tutorial.md DELETED
@@ -1,108 +0,0 @@
1
-
2
- <h1>Download Bitcoin Software: A Complete Guide</h1>
3
- <p>Bitcoin is a digital currency that enables peer-to-peer transactions without intermediaries or central authorities. It is powered by a network of computers that run special software to validate and record transactions on a public ledger called the blockchain. To use bitcoin, you need to have some bitcoin software on your device. But what is bitcoin software and how do you choose, download, install, and use it? In this article, we will answer these questions and more.</p>
4
- <h2>download bitcoin software</h2><br /><p><b><b>DOWNLOAD</b> <a href="https://urlin.us/2uSYAk">https://urlin.us/2uSYAk</a></b></p><br /><br />
5
- <h2>Types of Bitcoin Software</h2>
6
- <p>There are different types of bitcoin software that serve different purposes and functions. Here are the main ones:</p>
7
- <ul>
8
- <li><strong>Wallets</strong>: These are applications that allow you to store, send, and receive bitcoins. They also provide you with a private key that proves your ownership of your bitcoins and a public address that you can share with others to receive payments. Wallets can be web-based, desktop-based, mobile-based, or hardware-based.</li>
9
- <li><strong>Miners</strong>: These are programs that use your computer's processing power to solve complex mathematical problems and earn bitcoins as a reward. They also help secure the network by verifying transactions and adding new blocks to the blockchain. Miners can be standalone software or part of a mining pool.</li>
10
- <li><strong>Nodes</strong>: These are computers that run a full copy of the bitcoin blockchain and enforce the rules of the network. They also relay transactions and blocks to other nodes. Nodes can be run by anyone who wants to support the network and have more control over their transactions.</li>
11
- </ul>
12
- <h2>How to Choose the Best Bitcoin Software for Your Needs</h2>
13
- <p>There is no one-size-fits-all solution when it comes to choosing bitcoin software. Depending on your goals, preferences, and resources, you may want to use different types of software or even multiple ones. Here are some factors to consider when making your choice:</p>
14
- <ul>
15
- <li><strong>Security</strong>: This is the most important factor when dealing with bitcoin. You want to make sure that your software is reliable, trustworthy, and protects your bitcoins from theft, loss, or hacking. Some features to look for are encryption, backup, recovery, multisig, cold storage, and open source.</li>
16
- <li><strong>Features</strong>: Depending on what you want to do with your bitcoins, you may need different features from your software. Some features to look for are transaction speed, fees, privacy, user interface, customer support, and extra services.</li>
17
- <li><strong>Compatibility</strong>: You want to make sure that your software is compatible with your device, operating system, and other software that you use. Some software may only work on certain platforms or devices, while others may require specific hardware or software requirements.</li>
18
- <li><strong>Ease of use</strong>: You want to make sure that your software is easy to download, install, set up, and use. Some software may have a steep learning curve or require technical skills, while others may be more user-friendly and intuitive.</li>
19
- </ul>
20
- <h2>How to Download and Install Bitcoin Software</h2>
21
- <p>The process of downloading and installing bitcoin software may vary depending on the type of software and the platform or device that you use. However, here are some general steps that you can follow:</p>
22
- <ol>
23
- <li><strong>Choose your software</strong>: Based on the factors mentioned above, choose the best bitcoin software for your needs. You can find various options on websites such as <a href="(^1^)">bitcoin.org</a>, <a href="(^2^)">bitcoin.com</a>, or <a href="(^3^)">bitcoincore.org</a>.</li>
24
- <li><strong>Download your software</strong>: Go to the official website of your chosen software and click on the download link. Make sure that you download the latest version of the software from a trusted source. Avoid clicking on suspicious links or downloading. - <strong>Install your software</strong>: Once you have downloaded your software, open the file and follow the instructions to install it on your device. You may need to agree to some terms and conditions, choose a location, and create a shortcut. Some software may also require you to verify your identity or create an account.</li>
25
- <li><strong>Set up your software</strong>: After you have installed your software, you need to set it up according to your preferences and needs. You may need to choose a password, a recovery phrase, a network, a fee level, or other options. Some software may also require you to sync with the blockchain, which can take some time and space.</li>
26
- </ol>
27
- <h2>How to Use Bitcoin Software</h2>
28
- <p>Once you have downloaded and installed your bitcoin software, you are ready to use it. Here are some basic tips and best practices for using bitcoin software:</p>
29
- <ul>
30
- <li><strong>Send and receive bitcoins</strong>: To send bitcoins, you need to enter the recipient's address, the amount, and the fee. You can also scan a QR code or use a contact list if your software supports it. To receive bitcoins, you need to share your address or QR code with the sender. You can also generate multiple addresses for different purposes or transactions.</li>
31
- <li><strong>Store your bitcoins</strong>: To store your bitcoins securely, you need to keep your private key safe and backup your wallet. You can also use a hardware wallet or a paper wallet for extra security. You should avoid storing large amounts of bitcoins on web-based or mobile-based wallets, as they are more vulnerable to hacking or theft.</li>
32
- <li><strong>Monitor your transactions</strong>: To monitor your transactions, you can use your software's transaction history or explorer. You can also use external services such as <a href="">blockchain.com</a> or <a href="">blockexplorer.com</a>. You can check the status, confirmation, and details of your transactions. You can also view the balance and value of your bitcoins.</li>
33
- </ul>
34
- <h2>Conclusion</h2>
35
- <p>Bitcoin software is essential for using bitcoin. It allows you to store, send, receive, and manage your bitcoins. There are different types of bitcoin software that serve different purposes and functions. You need to choose the best bitcoin software for your needs based on factors such as security, features, compatibility, and ease of use. You also need to download, install, and set up your bitcoin software properly. Finally, you need to use your bitcoin software wisely and safely by following some basic tips and best practices.</p>
36
- <h2>FAQ</h2>
37
- <h3>What is the best bitcoin software?</h3>
38
- <p>There is no definitive answer to this question, as different users may have different preferences and needs. However, some of the most popular and reputable bitcoin software are:</p>
39
- <ul>
40
- <li><strong>Bitcoin Core</strong>: This is the original and official bitcoin software that runs a full node and supports the network. It is highly secure, feature-rich, and compatible with various platforms. However, it is also resource-intensive, complex, and slow.</li>
41
- <li><strong>Electrum</strong>: This is a lightweight and user-friendly bitcoin software that runs a client node and connects to external servers. It is fast, easy, and customizable. However, it is less secure, less private, and less reliable than running a full node.</li>
42
- <li><strong>Trezor</strong>: This is a hardware wallet that stores your private key offline and connects to your device via USB. It is very secure, convenient, and compatible with various software. However, it is expensive, limited in features, and dependent on external devices.</li>
43
- </ul>
44
- <h3>How do I update my bitcoin software?</h3>
45
- <p>To update your bitcoin software, you need to download the latest version of the software from the official website or source and install it on your device. You may need to uninstall the previous version first or overwrite it with the new one. You may also need to backup your wallet before updating.</p>
46
- <h3>How do I uninstall my bitcoin software?</h3>
47
- <p>To uninstall your bitcoin software, you need to delete the program files from your device. You may also need to delete the data files such as the blockchain or the wallet. However, before uninstalling your bitcoin software, you should make sure that you have backed up your wallet or transferred your bitcoins to another wallet.</p>
48
- <p>Download Bitcoin Core latest version for Windows<br />
49
- How to install Bitcoin Core on your desktop<br />
50
- Bitcoin Core source code and release signatures<br />
51
- Best Bitcoin wallets for Windows users<br />
52
- Compare Bitcoin Core with other Bitcoin clients<br />
53
- Download Bitcoin Core for Linux and Mac OS<br />
54
- Troubleshooting Bitcoin Core installation issues<br />
55
- How to run a full node with Bitcoin Core<br />
56
- How to backup and restore your Bitcoin Core wallet<br />
57
- How to use Tor with Bitcoin Core for privacy<br />
58
- How to change fees and use RBF or CPFP with Bitcoin Core<br />
59
- How to verify Bitcoin Core binaries and signatures<br />
60
- How to contribute to Bitcoin Core development<br />
61
- How to update Bitcoin Core to the latest version<br />
62
- How to sync Bitcoin Core with the blockchain faster<br />
63
- How to enable SegWit and Bech32 addresses with Bitcoin Core<br />
64
- How to use Bitcoin Core as a cold storage wallet<br />
65
- How to encrypt and secure your Bitcoin Core wallet<br />
66
- How to send and receive bitcoins with Bitcoin Core<br />
67
- How to use the console and debug window in Bitcoin Core<br />
68
- How to connect Bitcoin Core to your hardware wallet<br />
69
- How to use multi-signature wallets with Bitcoin Core<br />
70
- How to import and export private keys with Bitcoin Core<br />
71
- How to sign and verify messages with Bitcoin Core<br />
72
- How to use the testnet and regtest modes with Bitcoin Core<br />
73
- How to configure Bitcoin Core settings and options<br />
74
- How to use the RPC interface and API with Bitcoin Core<br />
75
- How to monitor network activity and performance with Bitcoin Core<br />
76
- How to prune the blockchain and save disk space with Bitcoin Core<br />
77
- How to run Bitcoin Core in headless mode or as a daemon<br />
78
- How to compile Bitcoin Core from source code on Windows<br />
79
- How to download and verify the checksums of Bitcoin Core binaries<br />
80
- How to use the peer-to-peer network with Bitcoin Core<br />
81
- How to report bugs and issues with Bitcoin Core<br />
82
- How to join the Bitcoin Core community and mailing list<br />
83
- How to donate to the Bitcoin Core project and developers<br />
84
- How to review the code and documentation of Bitcoin Core<br />
85
- How to test new features and improvements of Bitcoin Core<br />
86
- How to understand the architecture and design of Bitcoin Core<br />
87
- How to learn more about the history and vision of Bitcoin Core</p>
88
- <h3>How do I troubleshoot my bitcoin software?</h3>
89
- <p>To troubleshoot your bitcoin software, you need to identify the problem and find the possible solutions. Some common problems and solutions are:</p>
90
- <ul>
91
- <li><strong>Your software is not syncing with the network</strong>: This could be due to a slow internet connection, a firewall blocking the connection, or an outdated version - of the software. You can try to restart your software, check your internet connection, disable your firewall, or update your software.</li>
92
- <li><strong>Your software is not sending or receiving bitcoins</strong>: This could be due to a low fee, a network congestion, a wrong address, or a corrupted wallet. You can try to increase your fee, wait for the network to clear, double-check your address, or restore your wallet.</li>
93
- <li><strong>Your software is not opening or crashing</strong>: This could be due to a virus, a malware, a hardware failure, or a software conflict. You can try to scan your device for viruses or malware, check your hardware for errors, or remove any conflicting software.</li>
94
- </ul>
95
- <p>If none of these solutions work, you can also contact the customer support of your software or seek help from online forums or communities.</p>
96
- <h3>How do I secure my bitcoin software?</h3>
97
- <p>To secure your bitcoin software, you need to follow some basic security measures and precautions. Some of them are:</p>
98
- <ul>
99
- <li><strong>Use a strong password</strong>: You should use a password that is long, complex, and unique for your bitcoin software. You should also change it regularly and never share it with anyone.</li>
100
- <li><strong>Backup your wallet</strong>: You should backup your wallet regularly and store it in a safe and offline location. You should also encrypt it with a passphrase and test it for recovery.</li>
101
- <li><strong>Use a hardware wallet</strong>: You should use a hardware wallet to store your private key offline and connect it to your device only when you need to make a transaction. You should also keep it in a secure and physical location.</li>
102
- <li><strong>Update your software</strong>: You should update your software regularly to get the latest security patches and bug fixes. You should also download the updates only from the official website or source.</li>
103
- <li><strong>Be careful with phishing</strong>: You should be careful with any emails, messages, or websites that ask you for your password, private key, recovery phrase, or other sensitive information. You should also verify the sender's identity and the URL's authenticity before clicking on any links or attachments.</li>
104
- </ul>
105
- <h2></h2>
106
- <p>This is the end of the article. I hope you found it useful and informative. If you have any questions or feedback, please let me know. Thank you for reading!</p> 197e85843d<br />
107
- <br />
108
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox Series XS Experience the Ultimate Grand Theft Auto V Adventure.md DELETED
@@ -1,118 +0,0 @@
1
- <br />
2
- <h1>Download GTA 5 Xbox Series S: How to Experience the Ultimate Grand Theft Auto V on Your Console</h1>
3
- <h2>Introduction</h2>
4
- <p>Grand Theft Auto V (GTA 5) is one of the most successful and influential video games of all time. It has sold over 150 million copies worldwide and has won numerous awards and accolades. It is also one of the most immersive and diverse open-world games ever created, featuring a rich story mode, a dynamic online multiplayer mode, and countless activities and missions to enjoy.</p>
5
- <h2>download gta 5 xbox series s</h2><br /><p><b><b>DOWNLOAD</b> &#9999; <a href="https://urlin.us/2uSXnp">https://urlin.us/2uSXnp</a></b></p><br /><br />
6
- <p>If you are a fan of GTA 5 or want to try it for the first time, you might be wondering how to download it on your Xbox Series S console. The good news is that GTA 5 is now available for Xbox Series S, with a range of technical upgrades and enhancements that make it even more amazing than before. In this article, we will show you how to download GTA 5 Xbox Series S, and how to enjoy it to the fullest.</p>
7
- <h2>How to download GTA 5 Xbox Series S</h2>
8
- <h3>Step 1: Buy GTA 5 from the Xbox Store or a physical copy</h3>
9
- <p>The first step to download GTA 5 Xbox Series S is to buy the game from the Xbox Store or a physical copy. You can buy GTA 5 from the <a href="(^1^)">Xbox Store</a> for $19.99 (on sale from $39.99) until March 21, 2023. You can also buy a physical copy of GTA 5 from various retailers, such as Amazon, Walmart, or GameStop.</p>
10
- <h3>Step 2: Install GTA 5 on your Xbox Series S</h3>
11
- <p>The next step is to install GTA 5 on your Xbox Series S console. If you bought the game from the Xbox Store, you can download it directly to your console by following the instructions on the screen. If you bought a physical copy of the game, you will need to insert the disc into your console and follow the prompts to install it.</p>
12
- <p>How to download gta 5 on xbox series s<br />
13
- Download gta 5 xbox series s free<br />
14
- Download gta 5 xbox series s digital edition<br />
15
- Download gta 5 xbox series s optimized version<br />
16
- Download gta 5 xbox series s update<br />
17
- Download gta 5 xbox series s online<br />
18
- Download gta 5 xbox series s cheats<br />
19
- Download gta 5 xbox series s mods<br />
20
- Download gta 5 xbox series s disc<br />
21
- Download gta 5 xbox series s size<br />
22
- Download gta 5 xbox series s price<br />
23
- Download gta 5 xbox series s release date<br />
24
- Download gta 5 xbox series s gameplay<br />
25
- Download gta 5 xbox series s trailer<br />
26
- Download gta 5 xbox series s review<br />
27
- Download gta 5 xbox series s graphics<br />
28
- Download gta 5 xbox series s comparison<br />
29
- Download gta 5 xbox series s backwards compatibility<br />
30
- Download gta 5 xbox series s transfer progress<br />
31
- Download gta 5 xbox series s best settings<br />
32
- Download gta 5 xbox series s fps<br />
33
- Download gta 5 xbox series s resolution<br />
34
- Download gta 5 xbox series s loading time<br />
35
- Download gta 5 xbox series s ray tracing<br />
36
- Download gta 5 xbox series s enhanced edition<br />
37
- Download gta 5 xbox series s expansion pack<br />
38
- Download gta 5 xbox series s new features<br />
39
- Download gta 5 xbox series s new cars<br />
40
- Download gta 5 xbox series s new missions<br />
41
- Download gta 5 xbox series s new map<br />
42
- Download gta 5 xbox series s new weapons<br />
43
- Download gta 5 xbox series s new characters<br />
44
- Download gta 5 xbox series s new heists<br />
45
- Download gta 5 xbox series s new radio stations<br />
46
- Download gta 5 xbox series s new outfits<br />
47
- Download gta 5 xbox series s new vehicles<br />
48
- Download gta 5 xbox series s new activities<br />
49
- Download gta 5 xbox series s new modes<br />
50
- Download gta 5 xbox series s new events<br />
51
- Download gta 5 xbox series s new challenges</p>
52
- <p>The installation process may take some time, depending on your internet speed and storage space. The game requires about 100 GB of storage space, so make sure you have enough free space on your console before installing it.</p>
53
- <h3>Step 3: Transfer your GTA Online progress and characters from previous consoles</h3>
54
- <p>If you have played GTA Online on previous consoles, such as Xbox One or Xbox 360, you can transfer your progress and characters to your Xbox Series S console with a one-time migration. This way, you can continue your journey in GTA Online without losing any of your achievements, money, properties, vehicles, or items.</p>
55
- <p>To transfer your GTA Online progress and characters, you will need to have a Rockstar Games Social Club account linked to both your previous console and your Xbox Series S console. You will also need to have played GTA Online at least once on both consoles. Then, you can follow these steps:</p>
56
- <ol>
57
- <li>Launch GTA Online on your Xbox Series S console.</li>
58
- <li>Select "Transfer Character" from the menu.</li>
59
- <li>Log in with your Rockstar Games Social Club account.</li>
60
- <li>Select the character you want to transfer from your previous console.</li>
61
- <li>Confirm the transfer and wait for it to complete.</li <h2>How to enjoy GTA 5 Xbox Series S to the fullest</h2>
62
- <h3>Explore the stunning visuals and performance enhancements of GTA 5 on Xbox Series S</h3>
63
- <p>One of the main reasons to download GTA 5 Xbox Series S is to experience the stunning visuals and performance enhancements that the game offers on the new console. GTA 5 on Xbox Series S runs at a smooth 60 frames per second, with improved resolution, textures, lighting, shadows, and reflections. The game also supports HDR (high dynamic range) and Dolby Atmos, which enhance the color and contrast of the image and the quality and immersion of the sound.</p>
64
- <p>GTA 5 on Xbox Series S also features faster loading times, which means you can jump into the game and switch between characters more quickly and seamlessly. The game also takes advantage of the Xbox Series S's Quick Resume feature, which allows you to resume the game from where you left off without having to restart it.</p>
65
- <h3>Experience exclusive new content and features in GTA Online on Xbox Series S</h3>
66
- <p>Another reason to download GTA 5 Xbox Series S is to experience exclusive new content and features in GTA Online, the online multiplayer mode of GTA 5. GTA Online on Xbox Series S offers access to a range of new content and features that are not available on previous consoles, such as:</p>
67
- <ul>
68
- <li>The Cayo Perico Heist: The biggest and most ambitious heist in GTA Online history, which takes you to a private island owned by a drug lord. You can plan and execute the heist solo or with up to three other players, using a variety of approaches and tools. You can also explore the island, discover hidden treasures, and enjoy new music and activities.</li>
69
- <li>The Los Santos Tuners Update: A new update that focuses on the car culture of Los Santos. You can join the LS Car Meet, a social space where you can show off your customized vehicles, race with other players, and unlock new rewards. You can also access new missions, vehicles, mods, races, and challenges.</li>
70
- <li>The Contract: A new update that introduces a new storyline featuring Dr. Dre and Franklin Clinton. You can help Franklin and his partner Chop with various tasks involving music, celebrities, and crime. You can also access new music, vehicles, weapons, clothing, and more.</li>
71
- </ul>
72
- <h3>Access all current and previous updates and expansions in GTA 5 and GTA Online on Xbox Series S</h3>
73
- <p>A final reason to download GTA 5 Xbox Series S is to access all current and previous updates and expansions in GTA 5 and GTA Online on your console. Since its release in 2013, GTA 5 has received numerous updates and expansions that have added new content, features, modes, missions, vehicles, weapons, characters, and more to the game. Some of the most notable updates and expansions include:</p>
74
- <table>
75
- <tr>
76
- <th>Update/Expansion</th>
77
- <th>Description</th>
78
- </tr>
79
- <tr>
80
- <td>The Diamond Casino & Resort</td>
81
- <td>A luxurious casino and resort that offers a range of gambling games, entertainment options, penthouse suites, missions, vehicles, clothing, and more.</td>
82
- </tr>
83
- <tr>
84
- <td>The Doomsday Heist</td>
85
- <td>A three-part heist that involves saving the world from a rogue AI and a nuclear threat. You can team up with up to three other players and use futuristic vehicles, weapons, gadgets, and outfits.</td>
86
- </tr>
87
- <tr>
88
- <td>Gunrunning</td>
89
- <td>A update that allows you to become an arms dealer and run your own bunker. You can research and manufacture new weapons, vehicles, mods, and upgrades. You can also access new missions, challenges, clothing, and more.</td>
90
- </tr>
91
- <tr>
92
- <td>Import/Export</td>
93
- <td>A update that allows you to become a car thief and run your own vehicle warehouse. You can steal and sell high-end vehicles, customize them with new mods and features. You can also access new missions, vehicles, clothing, and more.</td>
94
- </tr>
95
- <tr>
96
- <td>Bikers</td>
97
- <td>A update that allows you to become a biker gang leader and run your own clubhouse. You can recruit other players as prospects, run various businesses, access new missions, modes, vehicles, weapons, clothing, and more.</td>
98
- </tr>
99
- </table>
100
- <p>By downloading GTA 5 Xbox Series S, you can access all these updates and expansions on your console for free. You can also expect more updates and expansions in the future as Rockstar Games continues to support GTA 5 and GTA Online.</p>
101
- <h2>Conclusion</h2>
102
- <h3>Summary of the main points</h3>
103
- <p>In conclusion, downloading GTA 5 Xbox Series S is a great way to experience the ultimate Grand Theft Auto V on your console. You can enjoy the stunning visuals and performance enhancements of GTA 5 on Xbox Series S, which runs at 60 fps, supports HDR and Dolby Atmos, and features faster loading times and Quick Resume. You can also experience exclusive new content and features in GTA Online on Xbox Series S, such as the Cayo Perico Heist, the Los Santos Tuners Update, and the Contract. Moreover, you can access all current and previous updates and expansions in GTA 5 and GTA Online on Xbox Series S, such as the Diamond Casino & Resort, the Doomsday Heist, Gunrunning, Import/Export, and Bikers.</p>
104
- <h3>Call to action and final thoughts</h3>
105
- <p>If you are ready to download GTA 5 Xbox Series S and enjoy the ultimate Grand Theft Auto V on your console, you can buy the game from the <a href="">Xbox Store</a> or a physical copy from various retailers. You can also transfer your GTA Online progress and characters from previous consoles with a one-time migration. GTA 5 Xbox Series S is a game that will keep you entertained for hours, days, weeks, and months with its endless possibilities and content. Don't miss this opportunity to experience one of the best games of all time on your Xbox Series S console.</p>
106
- <h2>FAQs</h2>
107
- <h3>Q: Is GTA 5 Xbox Series S compatible with Xbox Series X?</h3>
108
- <p>A: Yes, GTA 5 Xbox Series S is compatible with Xbox Series X, as both consoles are part of the same generation. You can play GTA 5 on either console with the same disc or digital copy.</p>
109
- <h3>Q: Is GTA 5 Xbox Series S different from GTA 5 Xbox One?</h3>
110
- <p>A: Yes, GTA 5 Xbox Series S is different from GTA 5 Xbox One in terms of technical upgrades and enhancements. GTA 5 Xbox Series S runs at a higher frame rate, resolution, and quality than GTA 5 Xbox One. It also features faster loading times, Quick Resume, HDR, and Dolby Atmos support.</p>
111
- <h3>Q: How much does GTA 5 Xbox Series S cost?</h3>
112
- <p>A: GTA 5 Xbox Series S costs $19.99 (on sale from $39.99) until March 21, 2023 on the <a href="">Xbox Store</a>. The price may vary depending on the retailer if you buy a physical copy of the game.</p>
113
- <h3>Q: How long does it take to download GTA 5 Xbox Series S?</h3>
114
- <p>A: The download time of GTA 5 Xbox Series S depends on your internet speed and storage space. The game requires about 100 GB of storage space, so make sure you have enough free space on your console before installing it. The download time may range from a few minutes to a few hours.</p>
115
- <h3>Q: Can I play GTA Online with other players on different consoles?</h3>
116
- <p>A: Yes, you can play GTA Online with other players on different consoles as long as they are part of the same generation. For example, you can play GTA Online with players on Xbox Series X or Xbox One if you have an Xbox Series S console. However, you cannot play GTA Online with players on PlayStation or PC.</p> 197e85843d<br />
117
- <br />
118
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Chikii How to Stream Hundreds of Games on Android without Downloading.md DELETED
@@ -1,129 +0,0 @@
1
- <br />
2
- <h1>Chiki Android: A Cloud Gaming Platform for PC and Console Games</h1>
3
- <p>Do you love playing PC and console games but don't have enough time, money or space to buy them? Do you wish you could play your favorite games on your phone without downloading or installing anything? If yes, then you should try Chiki Android, a cloud gaming platform that lets you play PC and console games on your phone with just a few taps.</p>
4
- <h2>chiki android</h2><br /><p><b><b>Download File</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://jinyurl.com/2uNT7T">https://jinyurl.com/2uNT7T</a></b></p><br /><br />
5
- <h2>What is Chiki Android?</h2>
6
- <h3>Chiki Android is a mobile game app that lets you play PC and console games on your phone</h3>
7
- <p>Chiki Android is a mobile game app that allows you to stream PC and console games from the cloud to your phone. You don't need to download or install anything, just log in and play. You can use your phone's touchscreen, gyroscope or external controller to control the games. You can also chat with other players and share your gameplay screenshots and videos.</p>
8
- <h3>Chiki Android has over 400+ and 200+ 3A games in Steam, PS4, Xbox One and Switch game libraries</h3>
9
- <p>Chiki Android has a huge game library that includes over 400+ and 200+ 3A games from Steam, PS4, Xbox One and Switch platforms. You can find all kinds of genres and categories, such as action, adventure, racing, sports, simulation, RPG, strategy, horror, puzzle, etc. Some of the popular games that you can play on Chiki Android are:</p>
10
- <table>
11
- <tr><td>GTA5</td><td>FIFA23</td><td>Naruto Storm 4</td></tr>
12
- <tr><td>Forza Horizon 5</td><td>Demon Slayer</td><td>EldenRing</td></tr>
13
- <tr><td>Red Dead Redemption 2</td><td>Marvel's Spider-Man Remastered</td><td>Resident Evil 4</td></tr>
14
- <tr><td>WWE 2K23</td><td>The Sims 4</td><td>Choo-Choo Charles</td></tr>
15
- <tr><td>BeamNG.drive</td><td>The Last of Us Part I</td><td>DRAGON BALL Z:KAKAROT</td></tr>
16
- <tr><td>God of War</td><td>Marvel’s Spider-Man: Miles Morales</td><td>Mortal Kombat 11 Ghostwire: Tokyo</td></tr>
17
- <tr><td>Attack on Titan</td><td>Grand Theft Auto: San Andreas</td><td>171</td></tr>
18
- <tr><td>Cuphead</td><td>Battlefield 5</td><td>Troublemaker Tekken7</td></tr>
19
- <tr><td>Resident Evil Village</td><td>Dragonball Fighter Z</td <h3>Chiki Android does not require download or installation, just log in and play</h3>
20
- <p>One of the best features of Chiki Android is that it does not require you to download or install any game on your phone. You can save your phone storage space and battery life by playing the games directly from the cloud. All you need is a Chiki Android account and a stable internet connection. You can log in with your email, Facebook, Google or Apple ID and start playing right away.</p>
21
- <h2>How to use Chiki Android?</h2>
22
- <h3>Download Chiki Android from Google Play or APKCombo</h3>
23
- <p>To use Chiki Android, you need to download the app from Google Play or APKCombo. The app is free to download and has a size of about 30 MB. You can also scan the QR code on the official website to get the app. The app is compatible with Android 5.0 and above devices.</p>
24
- <h3>Create an account or log in with your existing account</h3>
25
- <p>After downloading the app, you need to create an account or log in with your existing account. You can use your email, Facebook, Google or Apple ID to sign up or sign in. You will also need to verify your phone number and agree to the terms and conditions of the app.</p>
26
- <p>chiki android app download<br />
27
- chiki android cloud gaming<br />
28
- chiki android apk mod<br />
29
- chiki android play pc games<br />
30
- chiki android review<br />
31
- chiki android hack<br />
32
- chiki android emulator<br />
33
- chiki android controller support<br />
34
- chiki android alternative<br />
35
- chiki android free coins<br />
36
- chiki android games list<br />
37
- chiki android reddit<br />
38
- chiki android ios<br />
39
- chiki android offline<br />
40
- chiki android requirements<br />
41
- chiki android update<br />
42
- chiki android error<br />
43
- chiki android vpn<br />
44
- chiki android beta<br />
45
- chiki android login<br />
46
- chiki android not working<br />
47
- chiki android gta 5<br />
48
- chiki android spider man<br />
49
- chiki android naruto<br />
50
- chiki android dragon ball z<br />
51
- chiki android resident evil 4<br />
52
- chiki android god of war<br />
53
- chiki android forza horizon 5<br />
54
- chiki android red dead redemption 2<br />
55
- chiki android mortal kombat 11<br />
56
- chiki android elden ring<br />
57
- chiki android demon slayer<br />
58
- chiki android one piece pirate warriors 4<br />
59
- chiki android the last of us part i<br />
60
- chiki android marvel's spider man remastered<br />
61
- chiki android ghostwire tokyo<br />
62
- chiki android attack on titan<br />
63
- chiki android grand theft auto san andreas<br />
64
- chiki android cuphead<br />
65
- chiki android battlefield 5<br />
66
- chiki android tekken 7 <br />
67
- chiki android resident evil village <br />
68
- chiki android dragonball fighter z <br />
69
- chiki android stray <br />
70
- chiki android sims 4 <br />
71
- chikii andro</p>
72
- <h3>Browse the game library and choose a game to play</h3>
73
- <p>Once you have logged in, you can browse the game library and choose a game to play. You can filter the games by platform, genre, popularity, rating, etc. You can also search for a specific game by typing its name in the search bar. You can see the game details, screenshots, videos, reviews and ratings before playing it.</p>
74
- <h3>Enjoy the game on your phone with high-quality graphics and sound</h3>
75
- <p>When you have selected a game to play, you can tap on the play button and wait for a few seconds for the game to load. You can then enjoy the game on your phone with high-quality graphics and sound. You can adjust the game settings, such as resolution, frame rate, audio, etc., according to your preference. You can also use your phone's touchscreen, gyroscope or external controller to control the game.</p>
76
- <h2>What are the benefits of Chiki Android?</h2>
77
- <h3>Chiki Android lets you play PC and console games anytime and anywhere</h3>
78
- <p>The main benefit of Chiki Android is that it lets you play PC and console games anytime and anywhere. You don't need to buy expensive gaming devices or accessories to play your favorite games. You can simply use your phone as a portable gaming console and play the games on the go. Whether you are at home, at work, at school, on a bus, on a plane or anywhere else, you can enjoy playing PC and console games on your phone with Chiki Android.</p>
79
- <h3>Chiki Android saves your phone storage space and battery life</h3>
80
- <p>Another benefit of Chiki Android is that it saves your phone storage space and battery life. Since you don't need to download or install any game on your phone, you can save a lot of space that you can use for other apps or files. You also don't need to worry about updating or deleting any game from your phone. Moreover, since you are playing the games from the cloud, you don't need to use much of your phone's CPU or GPU power, which means you can save your phone's battery life as well.</p>
81
- <h3>Chiki Android offers VIP subscription that allows you to play unpurchased games for free</h3>
82
- <p>A third benefit of Chiki Android is that it offers VIP subscription that allows you to play unpurchased games for free. If you want to play more games without buying them, you can subscribe to Chiki Android VIP plan for $9.99 per month or $99.99 per year. With this plan, you can access over 200+ 3A games that are normally paid on other platforms. You can also enjoy faster loading speed, higher resolution, unlimited gameplay time and exclusive VIP customer service.</p>
83
- <h3>Chiki Android provides online multiplayer mode to play with your friends</h3>
84
- <p>A fourth benefit of Chiki Android is that it provides online multiplayer mode to play with your friends. If you want to have more fun and challenge with other players, you can join the online multiplayer mode of Chiki Android. You can invite your friends to join your game room or join other players' game rooms. You can also chat with other players via voice or text messages and share your gameplay screenshots and videos.</p>
85
- <h2>What are the drawbacks of Chiki Android?</h2>
86
- <h3>Chiki Android requires a stable internet connection to stream the games</h3>
87
- <p>The main drawback of Chiki Android is that it requires a stable internet connection to stream the games. You need to have at least 10 Mbps of download speed and 5 Mbps of upload speed to play the games smoothly. You also need to have a low ping and latency to avoid lag or delay. If your internet connection is slow, unstable or interrupted, you may experience poor game quality, buffering, freezing or disconnection.</p>
88
- <h3>Chiki Android may have some latency or lag issues depending on your network speed and location</h3>
89
- <p>Another drawback of Chiki Android is that it may have some latency or lag issues depending on your network speed and location. Since you are playing the games from the cloud, there may be some delay between your input and the game response. This may affect your game performance, especially in fast-paced or competitive games. The latency or lag may vary depending on your network speed, server location, game type, etc. You can check the ping and latency of each game before playing it.</p>
90
- <h3>Chiki Android may not support some games or devices due to compatibility issues</h3>
91
- <p>A third drawback of Chiki Android is that it may not support some games or devices due to compatibility issues. Some games may not be available on Chiki Android due to licensing or technical reasons. Some games may also have bugs or glitches that affect the game quality or functionality. Some devices may not be compatible with Chiki Android due to hardware or software limitations. You can check the game and device compatibility on the official website or app.</p>
92
- <h2>Conclusion</h2>
93
- <p>Chiki Android is a cloud gaming platform that lets you play PC and console games on your phone. It has many benefits, such as a large game library, no download or installation, VIP subscription and online multiplayer mode. It also has some drawbacks, such as internet connection requirement, latency or lag issues and compatibility issues. If you are looking for a way to enjoy PC and console games on your phone without spending much money or space, you should give Chiki Android a try.</p>
94
- <h2>FAQs</h2>
95
- <h4>Q: How much does Chiki Android cost?</h4>
96
- <p>A: Chiki Android is free to download and use. You can play any game that you have purchased on other platforms for free on Chiki Android. You can also subscribe to Chiki Android VIP plan for $9.99 per month or $99.99 per year to play over 200+ 3A games that are normally paid on other platforms.</p>
97
- <h4>Q: What are the minimum requirements for Chiki Android?</h4>
98
- <p>A: The minimum requirements for Chiki Android are:</p>
99
- <ul>
100
- <li>An Android 5.0 and above device with at least 2 GB of RAM and 30 MB of storage space</li>
101
- <li>A stable internet connection with at least 10 Mbps of download speed and 5 Mbps of upload speed</li>
102
- <li>A low ping and latency of less than 100 ms</li>
103
- <li>A touchscreen, gyroscope or external controller to control the games</li>
104
- </ul>
105
- <h4>Q: How can I contact Chiki Android customer service?</h4>
106
- <p>A: You can contact Chiki Android customer service by:</p>
107
- <ul>
108
- <li>Emailing them at [email protected]</li>
109
- <li>Calling them at +1-800-123-4567</li>
110
- <li>Chatting with them on the app or website</li>
111
- <li>Following them on Facebook, Twitter, Instagram or YouTube</li>
112
- </ul>
113
- <h4>Q: How can I improve my game quality on Chiki Android?</h4>
114
- <p>A: You can improve your game quality on Chiki Android by:</p>
115
- <ul>
116
- <li>Using a fast and reliable internet connection</li>
117
- <li>Choosing a server that is close to your location</li>
118
- <li>Adjusting the game settings, such as resolution, frame rate, audio, etc.</li>
119
- <li>Closing other apps or background processes that may consume your bandwidth or CPU power</li>
120
- <li>Using a wired or wireless controller to control the games</li>
121
- </ul>
122
- <h4>Q: How can I share my feedback or suggestions on Chiki Android?</h4>
123
- <p>A: You can share your feedback or suggestions on Chiki Android by:</p>
124
- <ul>
125
- <li>Rating and reviewing the app on Google Play or APKCombo</li>
126
- <li>Filling out the feedback form on the app or website</li - Sending feedback or suggestions to their email address at [email protected] - Joining their online community on Discord, Reddit or Quora </ul>
127
- <p>I hope this article has helped you to learn more about Chiki Android and how to use it. If you have any questions or comments, please feel free to contact me. Thank you for reading and happy gaming!</p> 197e85843d<br />
128
- <br />
129
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/232labs/VToonify/vtoonify/util.py DELETED
@@ -1,229 +0,0 @@
1
- import numpy as np
2
- import matplotlib.pyplot as plt
3
- from PIL import Image
4
- import cv2
5
- import random
6
- import math
7
- import argparse
8
- import torch
9
- from torch.utils import data
10
- from torch.nn import functional as F
11
- from torch import autograd
12
- from torch.nn import init
13
- import torchvision.transforms as transforms
14
- from model.stylegan.op import conv2d_gradfix
15
- from model.encoder.encoders.psp_encoders import GradualStyleEncoder
16
- from model.encoder.align_all_parallel import get_landmark
17
-
18
- def visualize(img_arr, dpi):
19
- plt.figure(figsize=(10,10),dpi=dpi)
20
- plt.imshow(((img_arr.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
21
- plt.axis('off')
22
- plt.show()
23
-
24
- def save_image(img, filename):
25
- tmp = ((img.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
26
- cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR))
27
-
28
- def load_image(filename):
29
- transform = transforms.Compose([
30
- transforms.ToTensor(),
31
- transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]),
32
- ])
33
-
34
- img = Image.open(filename)
35
- img = transform(img)
36
- return img.unsqueeze(dim=0)
37
-
38
- def data_sampler(dataset, shuffle, distributed):
39
- if distributed:
40
- return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
41
-
42
- if shuffle:
43
- return data.RandomSampler(dataset)
44
-
45
- else:
46
- return data.SequentialSampler(dataset)
47
-
48
-
49
- def requires_grad(model, flag=True):
50
- for p in model.parameters():
51
- p.requires_grad = flag
52
-
53
-
54
- def accumulate(model1, model2, decay=0.999):
55
- par1 = dict(model1.named_parameters())
56
- par2 = dict(model2.named_parameters())
57
-
58
- for k in par1.keys():
59
- par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
60
-
61
-
62
- def sample_data(loader):
63
- while True:
64
- for batch in loader:
65
- yield batch
66
-
67
-
68
- def d_logistic_loss(real_pred, fake_pred):
69
- real_loss = F.softplus(-real_pred)
70
- fake_loss = F.softplus(fake_pred)
71
-
72
- return real_loss.mean() + fake_loss.mean()
73
-
74
-
75
- def d_r1_loss(real_pred, real_img):
76
- with conv2d_gradfix.no_weight_gradients():
77
- grad_real, = autograd.grad(
78
- outputs=real_pred.sum(), inputs=real_img, create_graph=True
79
- )
80
- grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()
81
-
82
- return grad_penalty
83
-
84
-
85
- def g_nonsaturating_loss(fake_pred):
86
- loss = F.softplus(-fake_pred).mean()
87
-
88
- return loss
89
-
90
-
91
- def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
92
- noise = torch.randn_like(fake_img) / math.sqrt(
93
- fake_img.shape[2] * fake_img.shape[3]
94
- )
95
- grad, = autograd.grad(
96
- outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
97
- )
98
- path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
99
-
100
- path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
101
-
102
- path_penalty = (path_lengths - path_mean).pow(2).mean()
103
-
104
- return path_penalty, path_mean.detach(), path_lengths
105
-
106
-
107
- def make_noise(batch, latent_dim, n_noise, device):
108
- if n_noise == 1:
109
- return torch.randn(batch, latent_dim, device=device)
110
-
111
- noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
112
-
113
- return noises
114
-
115
-
116
- def mixing_noise(batch, latent_dim, prob, device):
117
- if prob > 0 and random.random() < prob:
118
- return make_noise(batch, latent_dim, 2, device)
119
-
120
- else:
121
- return [make_noise(batch, latent_dim, 1, device)]
122
-
123
-
124
- def set_grad_none(model, targets):
125
- for n, p in model.named_parameters():
126
- if n in targets:
127
- p.grad = None
128
-
129
-
130
- def weights_init(m):
131
- classname = m.__class__.__name__
132
- if classname.find('BatchNorm2d') != -1:
133
- if hasattr(m, 'weight') and m.weight is not None:
134
- init.normal_(m.weight.data, 1.0, 0.02)
135
- if hasattr(m, 'bias') and m.bias is not None:
136
- init.constant_(m.bias.data, 0.0)
137
- elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
138
- init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
139
- if hasattr(m, 'bias') and m.bias is not None:
140
- init.constant_(m.bias.data, 0.0)
141
-
142
-
143
- def load_psp_standalone(checkpoint_path, device='cuda'):
144
- ckpt = torch.load(checkpoint_path, map_location='cpu')
145
- opts = ckpt['opts']
146
- if 'output_size' not in opts:
147
- opts['output_size'] = 1024
148
- opts['n_styles'] = int(math.log(opts['output_size'], 2)) * 2 - 2
149
- opts = argparse.Namespace(**opts)
150
- psp = GradualStyleEncoder(50, 'ir_se', opts)
151
- psp_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')}
152
- psp.load_state_dict(psp_dict)
153
- psp.eval()
154
- psp = psp.to(device)
155
- latent_avg = ckpt['latent_avg'].to(device)
156
-
157
- def add_latent_avg(model, inputs, outputs):
158
- return outputs + latent_avg.repeat(outputs.shape[0], 1, 1)
159
-
160
- psp.register_forward_hook(add_latent_avg)
161
- return psp
162
-
163
- def get_video_crop_parameter(filepath, predictor, padding=[200,200,200,200]):
164
- if type(filepath) == str:
165
- img = dlib.load_rgb_image(filepath)
166
- else:
167
- img = filepath
168
- lm = get_landmark(img, predictor)
169
- if lm is None:
170
- return None
171
- lm_chin = lm[0 : 17] # left-right
172
- lm_eyebrow_left = lm[17 : 22] # left-right
173
- lm_eyebrow_right = lm[22 : 27] # left-right
174
- lm_nose = lm[27 : 31] # top-down
175
- lm_nostrils = lm[31 : 36] # top-down
176
- lm_eye_left = lm[36 : 42] # left-clockwise
177
- lm_eye_right = lm[42 : 48] # left-clockwise
178
- lm_mouth_outer = lm[48 : 60] # left-clockwise
179
- lm_mouth_inner = lm[60 : 68] # left-clockwise
180
-
181
- scale = 64. / (np.mean(lm_eye_right[:,0])-np.mean(lm_eye_left[:,0]))
182
- center = ((np.mean(lm_eye_right, axis=0)+np.mean(lm_eye_left, axis=0)) / 2) * scale
183
- h, w = round(img.shape[0] * scale), round(img.shape[1] * scale)
184
- left = max(round(center[0] - padding[0]), 0) // 8 * 8
185
- right = min(round(center[0] + padding[1]), w) // 8 * 8
186
- top = max(round(center[1] - padding[2]), 0) // 8 * 8
187
- bottom = min(round(center[1] + padding[3]), h) // 8 * 8
188
- return h,w,top,bottom,left,right,scale
189
-
190
- def tensor2cv2(img):
191
- tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
192
- return cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)
193
-
194
- # get parameters from the stylegan and mark them with their layers
195
- def gather_params(G):
196
- params = dict(
197
- [(res, {}) for res in range(18)] + [("others", {})]
198
- )
199
- for n, p in sorted(list(G.named_buffers()) + list(G.named_parameters())):
200
- if n.startswith("convs"):
201
- layer = int(n.split(".")[1]) + 1
202
- params[layer][n] = p
203
- elif n.startswith("to_rgbs"):
204
- layer = int(n.split(".")[1]) * 2 + 3
205
- params[layer][n] = p
206
- elif n.startswith("conv1"):
207
- params[0][n] = p
208
- elif n.startswith("to_rgb1"):
209
- params[1][n] = p
210
- else:
211
- params["others"][n] = p
212
- return params
213
-
214
- # blend the ffhq stylegan model and the finetuned model for toonify
215
- # see ``Resolution Dependent GAN Interpolation for Controllable Image Synthesis Between Domains''
216
- def blend_models(G_low, G_high, weight=[1]*7+[0]*11):
217
- params_low = gather_params(G_low)
218
- params_high = gather_params(G_high)
219
-
220
- for res in range(18):
221
- for n, p in params_high[res].items():
222
- params_high[res][n] = params_high[res][n] * (1-weight[res]) + params_low[res][n] * weight[res]
223
-
224
- state_dict = {}
225
- for _, p in params_high.items():
226
- state_dict.update(p)
227
-
228
- return state_dict
229
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/2ndelement/voicevox/voicevox_engine/model.py DELETED
@@ -1,282 +0,0 @@
1
- from enum import Enum
2
- from re import findall, fullmatch
3
- from typing import Dict, List, Optional
4
-
5
- from pydantic import BaseModel, Field, conint, validator
6
-
7
- from .metas.Metas import Speaker, SpeakerInfo
8
-
9
-
10
- class Mora(BaseModel):
11
- """
12
- モーラ(子音+母音)ごとの情報
13
- """
14
-
15
- text: str = Field(title="文字")
16
- consonant: Optional[str] = Field(title="子音の音素")
17
- consonant_length: Optional[float] = Field(title="子音の音長")
18
- vowel: str = Field(title="母音の音素")
19
- vowel_length: float = Field(title="母音の音長")
20
- pitch: float = Field(title="音高") # デフォルト値をつけるとts側のOpenAPIで生成されたコードの型がOptionalになる
21
-
22
- def __hash__(self):
23
- items = [
24
- (k, tuple(v)) if isinstance(v, List) else (k, v)
25
- for k, v in self.__dict__.items()
26
- ]
27
- return hash(tuple(sorted(items)))
28
-
29
-
30
- class AccentPhrase(BaseModel):
31
- """
32
- アクセント句ごとの情報
33
- """
34
-
35
- moras: List[Mora] = Field(title="モーラのリスト")
36
- accent: int = Field(title="アクセント箇所")
37
- pause_mora: Optional[Mora] = Field(title="後ろに無音を付けるかどうか")
38
- is_interrogative: bool = Field(default=False, title="疑問系かどうか")
39
-
40
- def __hash__(self):
41
- items = [
42
- (k, tuple(v)) if isinstance(v, List) else (k, v)
43
- for k, v in self.__dict__.items()
44
- ]
45
- return hash(tuple(sorted(items)))
46
-
47
-
48
- class AudioQuery(BaseModel):
49
- """
50
- 音声合成用のクエリ
51
- """
52
-
53
- accent_phrases: List[AccentPhrase] = Field(title="アクセント句のリスト")
54
- speedScale: float = Field(title="全体の話速")
55
- pitchScale: float = Field(title="全体の音高")
56
- intonationScale: float = Field(title="全体の抑揚")
57
- volumeScale: float = Field(title="全体の音量")
58
- prePhonemeLength: float = Field(title="音声の前の無音時間")
59
- postPhonemeLength: float = Field(title="音声の後の無音時間")
60
- outputSamplingRate: int = Field(title="音声データの出力サンプリングレート")
61
- outputStereo: bool = Field(title="音声データをステレオ出力するか否か")
62
- kana: Optional[str] = Field(title="[読み取り専用]AquesTalkライクな読み仮名。音声合成クエリとしては無視される")
63
-
64
- def __hash__(self):
65
- items = [
66
- (k, tuple(v)) if isinstance(v, List) else (k, v)
67
- for k, v in self.__dict__.items()
68
- ]
69
- return hash(tuple(sorted(items)))
70
-
71
-
72
- class ParseKanaErrorCode(Enum):
73
- UNKNOWN_TEXT = "判別できない読み仮名があります: {text}"
74
- ACCENT_TOP = "句頭にアクセントは置けません: {text}"
75
- ACCENT_TWICE = "1つのアクセント句に二つ以上のアクセントは置けません: {text}"
76
- ACCENT_NOTFOUND = "アクセントを指定していないアクセント句があります: {text}"
77
- EMPTY_PHRASE = "{position}番目のアクセント句が空白です"
78
- INTERROGATION_MARK_NOT_AT_END = "アクセント句末以外に「?」は置けません: {text}"
79
- INFINITE_LOOP = "処理時に無限ループになってしまいました...バグ報告をお願いします。"
80
-
81
-
82
- class ParseKanaError(Exception):
83
- def __init__(self, errcode: ParseKanaErrorCode, **kwargs):
84
- self.errcode = errcode
85
- self.errname = errcode.name
86
- self.kwargs: Dict[str, str] = kwargs
87
- err_fmt: str = errcode.value
88
- self.text = err_fmt.format(**kwargs)
89
-
90
-
91
- class ParseKanaBadRequest(BaseModel):
92
- text: str = Field(title="エラーメッセージ")
93
- error_name: str = Field(
94
- title="エラー名",
95
- description="|name|description|\n|---|---|\n"
96
- + "\n".join(
97
- [
98
- "| {} | {} |".format(err.name, err.value)
99
- for err in list(ParseKanaErrorCode)
100
- ]
101
- ),
102
- )
103
- error_args: Dict[str, str] = Field(title="エラーを起こした箇所")
104
-
105
- def __init__(self, err: ParseKanaError):
106
- super().__init__(text=err.text, error_name=err.errname, error_args=err.kwargs)
107
-
108
-
109
- class MorphableTargetInfo(BaseModel):
110
-
111
- is_morphable: bool = Field(title="指定した話者に対してモーフィングの可否")
112
- # FIXME: add reason property
113
- # reason: Optional[str] = Field(title="is_morphableがfalseである場合、その理由")
114
-
115
-
116
- class SpeakerNotFoundError(LookupError):
117
- def __init__(self, speaker: int, *args: object, **kywrds: object) -> None:
118
- self.speaker = speaker
119
- super().__init__(f"speaker {speaker} is not found.", *args, **kywrds)
120
-
121
-
122
- class LibrarySpeaker(BaseModel):
123
- """
124
- 音声ライブラリに含まれる話者の情報
125
- """
126
-
127
- speaker: Speaker = Field(title="話者情報")
128
- speaker_info: SpeakerInfo = Field(title="話者の追加情報")
129
-
130
-
131
- class DownloadableLibrary(BaseModel):
132
- """
133
- ダウンロード可能な音声ライブラリの情報
134
- """
135
-
136
- name: str = Field(title="音声ライブラリの名前")
137
- uuid: str = Field(title="音声ライブラリのUUID")
138
- version: str = Field(title="音声ライブラリのバージョン")
139
- download_url: str = Field(title="音声ライブラリのダウンロードURL")
140
- bytes: int = Field(title="音声ライブラリのバイト数")
141
- speakers: List[LibrarySpeaker] = Field(title="音声ライブラリに含まれる話者のリスト")
142
-
143
-
144
- USER_DICT_MIN_PRIORITY = 0
145
- USER_DICT_MAX_PRIORITY = 10
146
-
147
-
148
- class UserDictWord(BaseModel):
149
- """
150
- 辞書のコンパイルに使われる情報
151
- """
152
-
153
- surface: str = Field(title="表層形")
154
- priority: conint(ge=USER_DICT_MIN_PRIORITY, le=USER_DICT_MAX_PRIORITY) = Field(
155
- title="優先度"
156
- )
157
- context_id: int = Field(title="文脈ID", default=1348)
158
- part_of_speech: str = Field(title="品詞")
159
- part_of_speech_detail_1: str = Field(title="品詞細分類1")
160
- part_of_speech_detail_2: str = Field(title="品詞細分類2")
161
- part_of_speech_detail_3: str = Field(title="品詞細分類3")
162
- inflectional_type: str = Field(title="活用型")
163
- inflectional_form: str = Field(title="活用形")
164
- stem: str = Field(title="原形")
165
- yomi: str = Field(title="読み")
166
- pronunciation: str = Field(title="発音")
167
- accent_type: int = Field(title="アクセント型")
168
- mora_count: Optional[int] = Field(title="モーラ数")
169
- accent_associative_rule: str = Field(title="アクセント結合規則")
170
-
171
- class Config:
172
- validate_assignment = True
173
-
174
- @validator("surface")
175
- def convert_to_zenkaku(cls, surface):
176
- return surface.translate(
177
- str.maketrans(
178
- "".join(chr(0x21 + i) for i in range(94)),
179
- "".join(chr(0xFF01 + i) for i in range(94)),
180
- )
181
- )
182
-
183
- @validator("pronunciation", pre=True)
184
- def check_is_katakana(cls, pronunciation):
185
- if not fullmatch(r"[ァ-ヴー]+", pronunciation):
186
- raise ValueError("発音は有効なカタカナでなくてはいけません。")
187
- sutegana = ["ァ", "ィ", "ゥ", "ェ", "ォ", "ャ", "ュ", "ョ", "ヮ", "ッ"]
188
- for i in range(len(pronunciation)):
189
- if pronunciation[i] in sutegana:
190
- # 「キャット」のように、捨て仮名が連続する可能性が考えられるので、
191
- # 「ッ」に関しては「ッ」そのものが連続している場合と、「ッ」の後にほかの捨て仮名が連続する場合のみ無効とする
192
- if i < len(pronunciation) - 1 and (
193
- pronunciation[i + 1] in sutegana[:-1]
194
- or (
195
- pronunciation[i] == sutegana[-1]
196
- and pronunciation[i + 1] == sutegana[-1]
197
- )
198
- ):
199
- raise ValueError("無効な発音です。(捨て仮名の連続)")
200
- if pronunciation[i] == "ヮ":
201
- if i != 0 and pronunciation[i - 1] not in ["ク", "グ"]:
202
- raise ValueError("無効な発音です。(「くゎ」「ぐゎ」以外の「ゎ」の使用)")
203
- return pronunciation
204
-
205
- @validator("mora_count", pre=True, always=True)
206
- def check_mora_count_and_accent_type(cls, mora_count, values):
207
- if "pronunciation" not in values or "accent_type" not in values:
208
- # 適切な場所でエラーを出すようにする
209
- return mora_count
210
-
211
- if mora_count is None:
212
- rule_others = "[イ][ェ]|[ヴ][ャュョ]|[トド][ゥ]|[テデ][ィャュョ]|[デ][ェ]|[クグ][ヮ]"
213
- rule_line_i = "[キシチニヒミリギジビピ][ェャュョ]"
214
- rule_line_u = "[ツフヴ][ァ]|[ウスツフヴズ][ィ]|[ウツフヴ][ェォ]"
215
- rule_one_mora = "[ァ-ヴー]"
216
- mora_count = len(
217
- findall(
218
- f"(?:{rule_others}|{rule_line_i}|{rule_line_u}|{rule_one_mora})",
219
- values["pronunciation"],
220
- )
221
- )
222
-
223
- if not 0 <= values["accent_type"] <= mora_count:
224
- raise ValueError(
225
- "誤ったアクセント型です({})。 expect: 0 <= accent_type <= {}".format(
226
- values["accent_type"], mora_count
227
- )
228
- )
229
- return mora_count
230
-
231
-
232
- class PartOfSpeechDetail(BaseModel):
233
- """
234
- 品詞ごとの情報
235
- """
236
-
237
- part_of_speech: str = Field(title="品詞")
238
- part_of_speech_detail_1: str = Field(title="品詞細分類1")
239
- part_of_speech_detail_2: str = Field(title="品詞細分類2")
240
- part_of_speech_detail_3: str = Field(title="品詞細分類3")
241
- # context_idは辞書の左・右文脈IDのこと
242
- # https://github.com/VOICEVOX/open_jtalk/blob/427cfd761b78efb6094bea3c5bb8c968f0d711ab/src/mecab-naist-jdic/_left-id.def # noqa
243
- context_id: int = Field(title="文脈ID")
244
- cost_candidates: List[int] = Field(title="コ���トのパーセンタイル")
245
- accent_associative_rules: List[str] = Field(title="アクセント結合規則の一覧")
246
-
247
-
248
- class WordTypes(str, Enum):
249
- """
250
- fastapiでword_type引数を検証する時に使用するクラス
251
- """
252
-
253
- PROPER_NOUN = "PROPER_NOUN"
254
- COMMON_NOUN = "COMMON_NOUN"
255
- VERB = "VERB"
256
- ADJECTIVE = "ADJECTIVE"
257
- SUFFIX = "SUFFIX"
258
-
259
-
260
- class SupportedDevicesInfo(BaseModel):
261
- """
262
- 対応しているデバイスの情報
263
- """
264
-
265
- cpu: bool = Field(title="CPUに対応しているか")
266
- cuda: bool = Field(title="CUDA(Nvidia GPU)に対応しているか")
267
- dml: bool = Field(title="DirectML(Nvidia GPU/Radeon GPU等)に対応しているか")
268
-
269
-
270
- class SupportedFeaturesInfo(BaseModel):
271
- """
272
- エンジンの機能の情報
273
- """
274
-
275
- support_adjusting_mora: bool = Field(title="モーラが調整可能かどうか")
276
- support_adjusting_speed_scale: bool = Field(title="話速が調整可能かどうか")
277
- support_adjusting_pitch_scale: bool = Field(title="音高が調整可能かどうか")
278
- support_adjusting_intonation_scale: bool = Field(title="抑揚が調整可能かどうか")
279
- support_adjusting_volume_scale: bool = Field(title="音量が調整可能かどうか")
280
- support_adjusting_silence_scale: bool = Field(title="前後の無音時間が調節可能かどうか")
281
- support_interrogative_upspeak: bool = Field(title="疑似疑問文に対応しているかどうか")
282
- support_switching_device: bool = Field(title="CPU/GPUの切り替えが可能かどうか")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/audio2exp_models/networks.py DELETED
@@ -1,74 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from torch import nn
4
-
5
- class Conv2d(nn.Module):
6
- def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, use_act = True, *args, **kwargs):
7
- super().__init__(*args, **kwargs)
8
- self.conv_block = nn.Sequential(
9
- nn.Conv2d(cin, cout, kernel_size, stride, padding),
10
- nn.BatchNorm2d(cout)
11
- )
12
- self.act = nn.ReLU()
13
- self.residual = residual
14
- self.use_act = use_act
15
-
16
- def forward(self, x):
17
- out = self.conv_block(x)
18
- if self.residual:
19
- out += x
20
-
21
- if self.use_act:
22
- return self.act(out)
23
- else:
24
- return out
25
-
26
- class SimpleWrapperV2(nn.Module):
27
- def __init__(self) -> None:
28
- super().__init__()
29
- self.audio_encoder = nn.Sequential(
30
- Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
31
- Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
32
- Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),
33
-
34
- Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),
35
- Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
36
- Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),
37
-
38
- Conv2d(64, 128, kernel_size=3, stride=3, padding=1),
39
- Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
40
- Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),
41
-
42
- Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),
43
- Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),
44
-
45
- Conv2d(256, 512, kernel_size=3, stride=1, padding=0),
46
- Conv2d(512, 512, kernel_size=1, stride=1, padding=0),
47
- )
48
-
49
- #### load the pre-trained audio_encoder
50
- #self.audio_encoder = self.audio_encoder.to(device)
51
- '''
52
- wav2lip_state_dict = torch.load('/apdcephfs_cq2/share_1290939/wenxuazhang/checkpoints/wav2lip.pth')['state_dict']
53
- state_dict = self.audio_encoder.state_dict()
54
-
55
- for k,v in wav2lip_state_dict.items():
56
- if 'audio_encoder' in k:
57
- print('init:', k)
58
- state_dict[k.replace('module.audio_encoder.', '')] = v
59
- self.audio_encoder.load_state_dict(state_dict)
60
- '''
61
-
62
- self.mapping1 = nn.Linear(512+64+1, 64)
63
- #self.mapping2 = nn.Linear(30, 64)
64
- #nn.init.constant_(self.mapping1.weight, 0.)
65
- nn.init.constant_(self.mapping1.bias, 0.)
66
-
67
- def forward(self, x, ref, ratio):
68
- x = self.audio_encoder(x).view(x.size(0), -1)
69
- ref_reshape = ref.reshape(x.size(0), -1)
70
- ratio = ratio.reshape(x.size(0), -1)
71
-
72
- y = self.mapping1(torch.cat([x, ref_reshape, ratio], dim=1))
73
- out = y.reshape(ref.shape[0], ref.shape[1], -1) #+ ref # resudial
74
- return out
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/lib/hooks/use-bing.ts DELETED
@@ -1,173 +0,0 @@
1
- 'use client'
2
-
3
- import { useState, useCallback, useEffect, useMemo } from 'react'
4
- import { useAtom, useAtomValue } from 'jotai'
5
- import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state'
6
- import { setConversationMessages } from './chat-history'
7
- import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types'
8
- import { nanoid } from '../utils'
9
- import { TTS } from '../bots/bing/tts'
10
-
11
- export function useBing(botId: BotId = 'bing') {
12
- const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId])
13
- const [enableTTS] = useAtom(voiceAtom)
14
- const speaker = useMemo(() => new TTS(), [])
15
- const [hash, setHash] = useAtom(hashAtom)
16
- const bingConversationStyle = useAtomValue(bingConversationStyleAtom)
17
- const [chatState, setChatState] = useAtom(chatAtom)
18
- const [input, setInput] = useState('')
19
- const [attachmentList, setAttachmentList] = useState<FileItem[]>([])
20
-
21
- const updateMessage = useCallback(
22
- (messageId: string, updater: (message: ChatMessageModel) => void) => {
23
- setChatState((draft) => {
24
- const message = draft.messages.find((m) => m.id === messageId)
25
- if (message) {
26
- updater(message)
27
- }
28
- })
29
- },
30
- [setChatState],
31
- )
32
-
33
- const sendMessage = useCallback(
34
- async (input: string, options = {}) => {
35
- const botMessageId = nanoid()
36
- const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined
37
- setChatState((draft) => {
38
- const text = imageUrl ? `${input}\n\n![image](${imageUrl})` : input
39
- draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' })
40
- setAttachmentList([])
41
- })
42
- const abortController = new AbortController()
43
- setChatState((draft) => {
44
- draft.generatingMessageId = botMessageId
45
- draft.abortController = abortController
46
- })
47
- speaker.reset()
48
- await chatState.bot.sendMessage({
49
- prompt: input,
50
- imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl,
51
- options: {
52
- ...options,
53
- bingConversationStyle,
54
- },
55
- signal: abortController.signal,
56
- onEvent(event) {
57
- if (event.type === 'UPDATE_ANSWER') {
58
- updateMessage(botMessageId, (message) => {
59
- if (event.data.text.length > message.text.length) {
60
- message.text = event.data.text
61
- }
62
-
63
- if (event.data.spokenText && enableTTS) {
64
- speaker.speak(event.data.spokenText)
65
- }
66
-
67
- message.throttling = event.data.throttling || message.throttling
68
- message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions
69
- message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses
70
- })
71
- } else if (event.type === 'ERROR') {
72
- updateMessage(botMessageId, (message) => {
73
- message.error = event.error
74
- })
75
- setChatState((draft) => {
76
- draft.abortController = undefined
77
- draft.generatingMessageId = ''
78
- })
79
- } else if (event.type === 'DONE') {
80
- setChatState((draft) => {
81
- draft.abortController = undefined
82
- draft.generatingMessageId = ''
83
- })
84
- }
85
- },
86
- })
87
- },
88
- [botId, attachmentList, chatState.bot, setChatState, updateMessage],
89
- )
90
-
91
- const uploadImage = useCallback(async (imgUrl: string) => {
92
- setAttachmentList([{ url: imgUrl, status: 'loading' }])
93
- const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle)
94
- if (response?.blobId) {
95
- setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }])
96
- } else {
97
- setAttachmentList([{ url: imgUrl, status: 'error' }])
98
- }
99
- }, [chatState.bot])
100
-
101
- const resetConversation = useCallback(() => {
102
- chatState.bot.resetConversation()
103
- speaker.abort()
104
- setChatState((draft) => {
105
- draft.abortController = undefined
106
- draft.generatingMessageId = ''
107
- draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }]
108
- draft.conversationId = nanoid()
109
- })
110
- }, [chatState.bot, setChatState])
111
-
112
- const stopGenerating = useCallback(() => {
113
- chatState.abortController?.abort()
114
- if (chatState.generatingMessageId) {
115
- updateMessage(chatState.generatingMessageId, (message) => {
116
- if (!message.text && !message.error) {
117
- message.text = 'Cancelled'
118
- }
119
- })
120
- }
121
- setChatState((draft) => {
122
- draft.generatingMessageId = ''
123
- })
124
- }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage])
125
-
126
- useEffect(() => {
127
- if (chatState.messages.length) {
128
- setConversationMessages(botId, chatState.conversationId, chatState.messages)
129
- }
130
- }, [botId, chatState.conversationId, chatState.messages])
131
-
132
- useEffect(() => {
133
- if (hash === 'reset') {
134
- resetConversation()
135
- setHash('')
136
- }
137
- }, [hash, setHash])
138
-
139
- const chat = useMemo(
140
- () => ({
141
- botId,
142
- bot: chatState.bot,
143
- isSpeaking: speaker.isSpeaking,
144
- messages: chatState.messages,
145
- sendMessage,
146
- setInput,
147
- input,
148
- resetConversation,
149
- generating: !!chatState.generatingMessageId,
150
- stopGenerating,
151
- uploadImage,
152
- setAttachmentList,
153
- attachmentList,
154
- }),
155
- [
156
- botId,
157
- bingConversationStyle,
158
- chatState.bot,
159
- chatState.generatingMessageId,
160
- chatState.messages,
161
- speaker.isSpeaking,
162
- setInput,
163
- input,
164
- setAttachmentList,
165
- attachmentList,
166
- resetConversation,
167
- sendMessage,
168
- stopGenerating,
169
- ],
170
- )
171
-
172
- return chat
173
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/models/evaluator_wrapper.py DELETED
@@ -1,92 +0,0 @@
1
-
2
- import torch
3
- from os.path import join as pjoin
4
- import numpy as np
5
- from models.modules import MovementConvEncoder, TextEncoderBiGRUCo, MotionEncoderBiGRUCo
6
- from utils.word_vectorizer import POS_enumerator
7
-
8
- def build_models(opt):
9
- movement_enc = MovementConvEncoder(opt.dim_pose-4, opt.dim_movement_enc_hidden, opt.dim_movement_latent)
10
- text_enc = TextEncoderBiGRUCo(word_size=opt.dim_word,
11
- pos_size=opt.dim_pos_ohot,
12
- hidden_size=opt.dim_text_hidden,
13
- output_size=opt.dim_coemb_hidden,
14
- device=opt.device)
15
-
16
- motion_enc = MotionEncoderBiGRUCo(input_size=opt.dim_movement_latent,
17
- hidden_size=opt.dim_motion_hidden,
18
- output_size=opt.dim_coemb_hidden,
19
- device=opt.device)
20
-
21
- checkpoint = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'text_mot_match', 'model', 'finest.tar'),
22
- map_location=opt.device)
23
- movement_enc.load_state_dict(checkpoint['movement_encoder'])
24
- text_enc.load_state_dict(checkpoint['text_encoder'])
25
- motion_enc.load_state_dict(checkpoint['motion_encoder'])
26
- print('Loading Evaluation Model Wrapper (Epoch %d) Completed!!' % (checkpoint['epoch']))
27
- return text_enc, motion_enc, movement_enc
28
-
29
-
30
- class EvaluatorModelWrapper(object):
31
-
32
- def __init__(self, opt):
33
-
34
- if opt.dataset_name == 't2m':
35
- opt.dim_pose = 263
36
- elif opt.dataset_name == 'kit':
37
- opt.dim_pose = 251
38
- else:
39
- raise KeyError('Dataset not Recognized!!!')
40
-
41
- opt.dim_word = 300
42
- opt.max_motion_length = 196
43
- opt.dim_pos_ohot = len(POS_enumerator)
44
- opt.dim_motion_hidden = 1024
45
- opt.max_text_len = 20
46
- opt.dim_text_hidden = 512
47
- opt.dim_coemb_hidden = 512
48
-
49
- # print(opt)
50
-
51
- self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)
52
- self.opt = opt
53
- self.device = opt.device
54
-
55
- self.text_encoder.to(opt.device)
56
- self.motion_encoder.to(opt.device)
57
- self.movement_encoder.to(opt.device)
58
-
59
- self.text_encoder.eval()
60
- self.motion_encoder.eval()
61
- self.movement_encoder.eval()
62
-
63
- # Please note that the results does not following the order of inputs
64
- def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):
65
- with torch.no_grad():
66
- word_embs = word_embs.detach().to(self.device).float()
67
- pos_ohot = pos_ohot.detach().to(self.device).float()
68
- motions = motions.detach().to(self.device).float()
69
-
70
- '''Movement Encoding'''
71
- movements = self.movement_encoder(motions[..., :-4]).detach()
72
- m_lens = m_lens // self.opt.unit_length
73
- motion_embedding = self.motion_encoder(movements, m_lens)
74
-
75
- '''Text Encoding'''
76
- text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)
77
- return text_embedding, motion_embedding
78
-
79
- # Please note that the results does not following the order of inputs
80
- def get_motion_embeddings(self, motions, m_lens):
81
- with torch.no_grad():
82
- motions = motions.detach().to(self.device).float()
83
-
84
- align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
85
- motions = motions[align_idx]
86
- m_lens = m_lens[align_idx]
87
-
88
- '''Movement Encoding'''
89
- movements = self.movement_encoder(motions[..., :-4]).detach()
90
- m_lens = m_lens // self.opt.unit_length
91
- motion_embedding = self.motion_encoder(movements, m_lens)
92
- return motion_embedding
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/scene.py DELETED
@@ -1,585 +0,0 @@
1
- """Scenes, conforming to the glTF 2.0 standards as specified in
2
- https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-scene
3
-
4
- Author: Matthew Matl
5
- """
6
- import numpy as np
7
- import networkx as nx
8
- import trimesh
9
-
10
- from .mesh import Mesh
11
- from .camera import Camera
12
- from .light import Light, PointLight, DirectionalLight, SpotLight
13
- from .node import Node
14
- from .utils import format_color_vector
15
-
16
-
17
- class Scene(object):
18
- """A hierarchical scene graph.
19
-
20
- Parameters
21
- ----------
22
- nodes : list of :class:`Node`
23
- The set of all nodes in the scene.
24
- bg_color : (4,) float, optional
25
- Background color of scene.
26
- ambient_light : (3,) float, optional
27
- Color of ambient light. Defaults to no ambient light.
28
- name : str, optional
29
- The user-defined name of this object.
30
- """
31
-
32
- def __init__(self,
33
- nodes=None,
34
- bg_color=None,
35
- ambient_light=None,
36
- name=None):
37
-
38
- if bg_color is None:
39
- bg_color = np.ones(4)
40
- else:
41
- bg_color = format_color_vector(bg_color, 4)
42
-
43
- if ambient_light is None:
44
- ambient_light = np.zeros(3)
45
-
46
- if nodes is None:
47
- nodes = set()
48
- self._nodes = set() # Will be added at the end of this function
49
-
50
- self.bg_color = bg_color
51
- self.ambient_light = ambient_light
52
- self.name = name
53
-
54
- self._name_to_nodes = {}
55
- self._obj_to_nodes = {}
56
- self._obj_name_to_nodes = {}
57
- self._mesh_nodes = set()
58
- self._point_light_nodes = set()
59
- self._spot_light_nodes = set()
60
- self._directional_light_nodes = set()
61
- self._camera_nodes = set()
62
- self._main_camera_node = None
63
- self._bounds = None
64
-
65
- # Transform tree
66
- self._digraph = nx.DiGraph()
67
- self._digraph.add_node('world')
68
- self._path_cache = {}
69
-
70
- # Find root nodes and add them
71
- if len(nodes) > 0:
72
- node_parent_map = {n: None for n in nodes}
73
- for node in nodes:
74
- for child in node.children:
75
- if node_parent_map[child] is not None:
76
- raise ValueError('Nodes may not have more than '
77
- 'one parent')
78
- node_parent_map[child] = node
79
- for node in node_parent_map:
80
- if node_parent_map[node] is None:
81
- self.add_node(node)
82
-
83
- @property
84
- def name(self):
85
- """str : The user-defined name of this object.
86
- """
87
- return self._name
88
-
89
- @name.setter
90
- def name(self, value):
91
- if value is not None:
92
- value = str(value)
93
- self._name = value
94
-
95
- @property
96
- def nodes(self):
97
- """set of :class:`Node` : Set of nodes in the scene.
98
- """
99
- return self._nodes
100
-
101
- @property
102
- def bg_color(self):
103
- """(3,) float : The scene background color.
104
- """
105
- return self._bg_color
106
-
107
- @bg_color.setter
108
- def bg_color(self, value):
109
- if value is None:
110
- value = np.ones(4)
111
- else:
112
- value = format_color_vector(value, 4)
113
- self._bg_color = value
114
-
115
- @property
116
- def ambient_light(self):
117
- """(3,) float : The ambient light in the scene.
118
- """
119
- return self._ambient_light
120
-
121
- @ambient_light.setter
122
- def ambient_light(self, value):
123
- if value is None:
124
- value = np.zeros(3)
125
- else:
126
- value = format_color_vector(value, 3)
127
- self._ambient_light = value
128
-
129
- @property
130
- def meshes(self):
131
- """set of :class:`Mesh` : The meshes in the scene.
132
- """
133
- return set([n.mesh for n in self.mesh_nodes])
134
-
135
- @property
136
- def mesh_nodes(self):
137
- """set of :class:`Node` : The nodes containing meshes.
138
- """
139
- return self._mesh_nodes
140
-
141
- @property
142
- def lights(self):
143
- """set of :class:`Light` : The lights in the scene.
144
- """
145
- return self.point_lights | self.spot_lights | self.directional_lights
146
-
147
- @property
148
- def light_nodes(self):
149
- """set of :class:`Node` : The nodes containing lights.
150
- """
151
- return (self.point_light_nodes | self.spot_light_nodes |
152
- self.directional_light_nodes)
153
-
154
- @property
155
- def point_lights(self):
156
- """set of :class:`PointLight` : The point lights in the scene.
157
- """
158
- return set([n.light for n in self.point_light_nodes])
159
-
160
- @property
161
- def point_light_nodes(self):
162
- """set of :class:`Node` : The nodes containing point lights.
163
- """
164
- return self._point_light_nodes
165
-
166
- @property
167
- def spot_lights(self):
168
- """set of :class:`SpotLight` : The spot lights in the scene.
169
- """
170
- return set([n.light for n in self.spot_light_nodes])
171
-
172
- @property
173
- def spot_light_nodes(self):
174
- """set of :class:`Node` : The nodes containing spot lights.
175
- """
176
- return self._spot_light_nodes
177
-
178
- @property
179
- def directional_lights(self):
180
- """set of :class:`DirectionalLight` : The directional lights in
181
- the scene.
182
- """
183
- return set([n.light for n in self.directional_light_nodes])
184
-
185
- @property
186
- def directional_light_nodes(self):
187
- """set of :class:`Node` : The nodes containing directional lights.
188
- """
189
- return self._directional_light_nodes
190
-
191
- @property
192
- def cameras(self):
193
- """set of :class:`Camera` : The cameras in the scene.
194
- """
195
- return set([n.camera for n in self.camera_nodes])
196
-
197
- @property
198
- def camera_nodes(self):
199
- """set of :class:`Node` : The nodes containing cameras in the scene.
200
- """
201
- return self._camera_nodes
202
-
203
- @property
204
- def main_camera_node(self):
205
- """set of :class:`Node` : The node containing the main camera in the
206
- scene.
207
- """
208
- return self._main_camera_node
209
-
210
- @main_camera_node.setter
211
- def main_camera_node(self, value):
212
- if value not in self.nodes:
213
- raise ValueError('New main camera node must already be in scene')
214
- self._main_camera_node = value
215
-
216
- @property
217
- def bounds(self):
218
- """(2,3) float : The axis-aligned bounds of the scene.
219
- """
220
- if self._bounds is None:
221
- # Compute corners
222
- corners = []
223
- for mesh_node in self.mesh_nodes:
224
- mesh = mesh_node.mesh
225
- pose = self.get_pose(mesh_node)
226
- corners_local = trimesh.bounds.corners(mesh.bounds)
227
- corners_world = pose[:3,:3].dot(corners_local.T).T + pose[:3,3]
228
- corners.append(corners_world)
229
- if len(corners) == 0:
230
- self._bounds = np.zeros((2,3))
231
- else:
232
- corners = np.vstack(corners)
233
- self._bounds = np.array([np.min(corners, axis=0),
234
- np.max(corners, axis=0)])
235
- return self._bounds
236
-
237
- @property
238
- def centroid(self):
239
- """(3,) float : The centroid of the scene's axis-aligned bounding box
240
- (AABB).
241
- """
242
- return np.mean(self.bounds, axis=0)
243
-
244
- @property
245
- def extents(self):
246
- """(3,) float : The lengths of the axes of the scene's AABB.
247
- """
248
- return np.diff(self.bounds, axis=0).reshape(-1)
249
-
250
- @property
251
- def scale(self):
252
- """(3,) float : The length of the diagonal of the scene's AABB.
253
- """
254
- return np.linalg.norm(self.extents)
255
-
256
- def add(self, obj, name=None, pose=None,
257
- parent_node=None, parent_name=None):
258
- """Add an object (mesh, light, or camera) to the scene.
259
-
260
- Parameters
261
- ----------
262
- obj : :class:`Mesh`, :class:`Light`, or :class:`Camera`
263
- The object to add to the scene.
264
- name : str
265
- A name for the new node to be created.
266
- pose : (4,4) float
267
- The local pose of this node relative to its parent node.
268
- parent_node : :class:`Node`
269
- The parent of this Node. If None, the new node is a root node.
270
- parent_name : str
271
- The name of the parent node, can be specified instead of
272
- `parent_node`.
273
-
274
- Returns
275
- -------
276
- node : :class:`Node`
277
- The newly-created and inserted node.
278
- """
279
- if isinstance(obj, Mesh):
280
- node = Node(name=name, matrix=pose, mesh=obj)
281
- elif isinstance(obj, Light):
282
- node = Node(name=name, matrix=pose, light=obj)
283
- elif isinstance(obj, Camera):
284
- node = Node(name=name, matrix=pose, camera=obj)
285
- else:
286
- raise TypeError('Unrecognized object type')
287
-
288
- if parent_node is None and parent_name is not None:
289
- parent_nodes = self.get_nodes(name=parent_name)
290
- if len(parent_nodes) == 0:
291
- raise ValueError('No parent node with name {} found'
292
- .format(parent_name))
293
- elif len(parent_nodes) > 1:
294
- raise ValueError('More than one parent node with name {} found'
295
- .format(parent_name))
296
- parent_node = list(parent_nodes)[0]
297
-
298
- self.add_node(node, parent_node=parent_node)
299
-
300
- return node
301
-
302
- def get_nodes(self, node=None, name=None, obj=None, obj_name=None):
303
- """Search for existing nodes. Only nodes matching all specified
304
- parameters is returned, or None if no such node exists.
305
-
306
- Parameters
307
- ----------
308
- node : :class:`Node`, optional
309
- If present, returns this node if it is in the scene.
310
- name : str
311
- A name for the Node.
312
- obj : :class:`Mesh`, :class:`Light`, or :class:`Camera`
313
- An object that is attached to the node.
314
- obj_name : str
315
- The name of an object that is attached to the node.
316
-
317
- Returns
318
- -------
319
- nodes : set of :class:`.Node`
320
- The nodes that match all query terms.
321
- """
322
- if node is not None:
323
- if node in self.nodes:
324
- return set([node])
325
- else:
326
- return set()
327
- nodes = set(self.nodes)
328
- if name is not None:
329
- matches = set()
330
- if name in self._name_to_nodes:
331
- matches = self._name_to_nodes[name]
332
- nodes = nodes & matches
333
- if obj is not None:
334
- matches = set()
335
- if obj in self._obj_to_nodes:
336
- matches = self._obj_to_nodes[obj]
337
- nodes = nodes & matches
338
- if obj_name is not None:
339
- matches = set()
340
- if obj_name in self._obj_name_to_nodes:
341
- matches = self._obj_name_to_nodes[obj_name]
342
- nodes = nodes & matches
343
-
344
- return nodes
345
-
346
- def add_node(self, node, parent_node=None):
347
- """Add a Node to the scene.
348
-
349
- Parameters
350
- ----------
351
- node : :class:`Node`
352
- The node to be added.
353
- parent_node : :class:`Node`
354
- The parent of this Node. If None, the new node is a root node.
355
- """
356
- if node in self.nodes:
357
- raise ValueError('Node already in scene')
358
- self.nodes.add(node)
359
-
360
- # Add node to sets
361
- if node.name is not None:
362
- if node.name not in self._name_to_nodes:
363
- self._name_to_nodes[node.name] = set()
364
- self._name_to_nodes[node.name].add(node)
365
- for obj in [node.mesh, node.camera, node.light]:
366
- if obj is not None:
367
- if obj not in self._obj_to_nodes:
368
- self._obj_to_nodes[obj] = set()
369
- self._obj_to_nodes[obj].add(node)
370
- if obj.name is not None:
371
- if obj.name not in self._obj_name_to_nodes:
372
- self._obj_name_to_nodes[obj.name] = set()
373
- self._obj_name_to_nodes[obj.name].add(node)
374
- if node.mesh is not None:
375
- self._mesh_nodes.add(node)
376
- if node.light is not None:
377
- if isinstance(node.light, PointLight):
378
- self._point_light_nodes.add(node)
379
- if isinstance(node.light, SpotLight):
380
- self._spot_light_nodes.add(node)
381
- if isinstance(node.light, DirectionalLight):
382
- self._directional_light_nodes.add(node)
383
- if node.camera is not None:
384
- self._camera_nodes.add(node)
385
- if self._main_camera_node is None:
386
- self._main_camera_node = node
387
-
388
- if parent_node is None:
389
- parent_node = 'world'
390
- elif parent_node not in self.nodes:
391
- raise ValueError('Parent node must already be in scene')
392
- elif node not in parent_node.children:
393
- parent_node.children.append(node)
394
-
395
- # Create node in graph
396
- self._digraph.add_node(node)
397
- self._digraph.add_edge(node, parent_node)
398
-
399
- # Iterate over children
400
- for child in node.children:
401
- self.add_node(child, node)
402
-
403
- self._path_cache = {}
404
- self._bounds = None
405
-
406
- def has_node(self, node):
407
- """Check if a node is already in the scene.
408
-
409
- Parameters
410
- ----------
411
- node : :class:`Node`
412
- The node to be checked.
413
-
414
- Returns
415
- -------
416
- has_node : bool
417
- True if the node is already in the scene and false otherwise.
418
- """
419
- return node in self.nodes
420
-
421
- def remove_node(self, node):
422
- """Remove a node and all its children from the scene.
423
-
424
- Parameters
425
- ----------
426
- node : :class:`Node`
427
- The node to be removed.
428
- """
429
- # Disconnect self from parent who is staying in the graph
430
- parent = list(self._digraph.neighbors(node))[0]
431
- self._remove_node(node)
432
- if isinstance(parent, Node):
433
- parent.children.remove(node)
434
- self._path_cache = {}
435
- self._bounds = None
436
-
437
- def get_pose(self, node):
438
- """Get the world-frame pose of a node in the scene.
439
-
440
- Parameters
441
- ----------
442
- node : :class:`Node`
443
- The node to find the pose of.
444
-
445
- Returns
446
- -------
447
- pose : (4,4) float
448
- The transform matrix for this node.
449
- """
450
- if node not in self.nodes:
451
- raise ValueError('Node must already be in scene')
452
- if node in self._path_cache:
453
- path = self._path_cache[node]
454
- else:
455
- # Get path from from_frame to to_frame
456
- path = nx.shortest_path(self._digraph, node, 'world')
457
- self._path_cache[node] = path
458
-
459
- # Traverse from from_node to to_node
460
- pose = np.eye(4)
461
- for n in path[:-1]:
462
- pose = np.dot(n.matrix, pose)
463
-
464
- return pose
465
-
466
- def set_pose(self, node, pose):
467
- """Set the local-frame pose of a node in the scene.
468
-
469
- Parameters
470
- ----------
471
- node : :class:`Node`
472
- The node to set the pose of.
473
- pose : (4,4) float
474
- The pose to set the node to.
475
- """
476
- if node not in self.nodes:
477
- raise ValueError('Node must already be in scene')
478
- node._matrix = pose
479
- if node.mesh is not None:
480
- self._bounds = None
481
-
482
- def clear(self):
483
- """Clear out all nodes to form an empty scene.
484
- """
485
- self._nodes = set()
486
-
487
- self._name_to_nodes = {}
488
- self._obj_to_nodes = {}
489
- self._obj_name_to_nodes = {}
490
- self._mesh_nodes = set()
491
- self._point_light_nodes = set()
492
- self._spot_light_nodes = set()
493
- self._directional_light_nodes = set()
494
- self._camera_nodes = set()
495
- self._main_camera_node = None
496
- self._bounds = None
497
-
498
- # Transform tree
499
- self._digraph = nx.DiGraph()
500
- self._digraph.add_node('world')
501
- self._path_cache = {}
502
-
503
- def _remove_node(self, node):
504
- """Remove a node and all its children from the scene.
505
-
506
- Parameters
507
- ----------
508
- node : :class:`Node`
509
- The node to be removed.
510
- """
511
-
512
- # Remove self from nodes
513
- self.nodes.remove(node)
514
-
515
- # Remove children
516
- for child in node.children:
517
- self._remove_node(child)
518
-
519
- # Remove self from the graph
520
- self._digraph.remove_node(node)
521
-
522
- # Remove from maps
523
- if node.name in self._name_to_nodes:
524
- self._name_to_nodes[node.name].remove(node)
525
- if len(self._name_to_nodes[node.name]) == 0:
526
- self._name_to_nodes.pop(node.name)
527
- for obj in [node.mesh, node.camera, node.light]:
528
- if obj is None:
529
- continue
530
- self._obj_to_nodes[obj].remove(node)
531
- if len(self._obj_to_nodes[obj]) == 0:
532
- self._obj_to_nodes.pop(obj)
533
- if obj.name is not None:
534
- self._obj_name_to_nodes[obj.name].remove(node)
535
- if len(self._obj_name_to_nodes[obj.name]) == 0:
536
- self._obj_name_to_nodes.pop(obj.name)
537
- if node.mesh is not None:
538
- self._mesh_nodes.remove(node)
539
- if node.light is not None:
540
- if isinstance(node.light, PointLight):
541
- self._point_light_nodes.remove(node)
542
- if isinstance(node.light, SpotLight):
543
- self._spot_light_nodes.remove(node)
544
- if isinstance(node.light, DirectionalLight):
545
- self._directional_light_nodes.remove(node)
546
- if node.camera is not None:
547
- self._camera_nodes.remove(node)
548
- if self._main_camera_node == node:
549
- if len(self._camera_nodes) > 0:
550
- self._main_camera_node = next(iter(self._camera_nodes))
551
- else:
552
- self._main_camera_node = None
553
-
554
- @staticmethod
555
- def from_trimesh_scene(trimesh_scene,
556
- bg_color=None, ambient_light=None):
557
- """Create a :class:`.Scene` from a :class:`trimesh.scene.scene.Scene`.
558
-
559
- Parameters
560
- ----------
561
- trimesh_scene : :class:`trimesh.scene.scene.Scene`
562
- Scene with :class:~`trimesh.base.Trimesh` objects.
563
- bg_color : (4,) float
564
- Background color for the created scene.
565
- ambient_light : (3,) float or None
566
- Ambient light in the scene.
567
-
568
- Returns
569
- -------
570
- scene_pr : :class:`Scene`
571
- A scene containing the same geometry as the trimesh scene.
572
- """
573
- # convert trimesh geometries to pyrender geometries
574
- geometries = {name: Mesh.from_trimesh(geom)
575
- for name, geom in trimesh_scene.geometry.items()}
576
-
577
- # create the pyrender scene object
578
- scene_pr = Scene(bg_color=bg_color, ambient_light=ambient_light)
579
-
580
- # add every node with geometry to the pyrender scene
581
- for node in trimesh_scene.graph.nodes_geometry:
582
- pose, geom_name = trimesh_scene.graph[node]
583
- scene_pr.add(geometries[geom_name], pose=pose)
584
-
585
- return scene_pr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/scene-edit-detection/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Scene Edit Detection
3
- emoji: ✂️ 🎞
4
- colorFrom: pink
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.4
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: fffiloni/scene-edit-detection
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/r/[id]/message/[messageId]/prompt/+server.ts DELETED
@@ -1,47 +0,0 @@
1
- import { buildPrompt } from "$lib/buildPrompt";
2
- import { collections } from "$lib/server/database";
3
- import { models } from "$lib/server/models";
4
- import { error } from "@sveltejs/kit";
5
-
6
- export async function GET({ params }) {
7
- const conv = await collections.sharedConversations.findOne({
8
- _id: params.id,
9
- });
10
-
11
- if (!conv) {
12
- throw error(404, "Conversation not found");
13
- }
14
-
15
- const messageId = params.messageId;
16
-
17
- const messageIndex = conv.messages.findIndex((msg) => msg.id === messageId);
18
-
19
- if (messageIndex === -1) {
20
- throw error(404, "Message not found");
21
- }
22
-
23
- const model = models.find((m) => m.id === conv.model);
24
-
25
- if (!model) {
26
- throw error(404, "Conversation model not found");
27
- }
28
-
29
- const prompt = await buildPrompt({ messages: conv.messages.slice(0, messageIndex + 1), model });
30
-
31
- return new Response(
32
- JSON.stringify(
33
- {
34
- note: "This is a preview of the prompt that will be sent to the model when retrying the message. It may differ from what was sent in the past if the parameters have been updated since",
35
- prompt,
36
- model: model.name,
37
- parameters: {
38
- ...model.parameters,
39
- return_full_text: false,
40
- },
41
- },
42
- null,
43
- 2
44
- ),
45
- { headers: { "Content-Type": "application/json" } }
46
- );
47
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexZou/SCUTAUTO210b/app.py DELETED
@@ -1,45 +0,0 @@
1
- import gradio as gr
2
- import os
3
-
4
-
5
- os.system("git clone https://github.com/megvii-research/NAFNet")
6
- os.system("mv NAFNet/* ./")
7
- os.system("mv *.pth experiments/pretrained_models/")
8
- os.system("python3 setup.py develop --no_cuda_ext --user")
9
-
10
-
11
- def inference(image, task):
12
- if not os.path.exists('tmp'):
13
- os.system('mkdir tmp')
14
- image.save("tmp/lq_image.png", "PNG")
15
-
16
- if task == 'Denoising':
17
- os.system("python basicsr/demo.py -opt options/test/SIDD/NAFNet-width64.yml --input_path ./tmp/lq_image.png --output_path ./tmp/image.png")
18
-
19
- if task == 'Deblurring':
20
- os.system("python basicsr/demo.py -opt options/test/REDS/NAFNet-width64.yml --input_path ./tmp/lq_image.png --output_path ./tmp/image.png")
21
-
22
- return 'tmp/image.png'
23
-
24
- title = "Restoration APP Visual"
25
- description = ""
26
- article = ""
27
- #description = "Gradio demo for <b>NAFNet: Nonlinear Activation Free Network for Image Restoration</b>. NAFNet achieves state-of-the-art performance on three tasks: image denoising, image debluring and stereo image super-resolution (SR). See the paper and project page for detailed results below. Here, we provide a demo for image denoise and deblur. To use it, simply upload your image, or click one of the examples to load them. Inference needs some time since this demo uses CPU."
28
- #article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.04676' target='_blank'>Simple Baselines for Image Restoration</a> | <a href='https://arxiv.org/abs/2204.08714' target='_blank'>NAFSSR: Stereo Image Super-Resolution Using NAFNet</a> | <a href='https://github.com/megvii-research/NAFNet' target='_blank'> Github Repo</a></p>"
29
-
30
-
31
- examples = [['demo/noisy.png', 'Denoising'],
32
- ['demo/blurry.jpg', 'Deblurring']]
33
-
34
- iface = gr.Interface(
35
- inference,
36
- [gr.inputs.Image(type="pil", label="Input"),
37
- gr.inputs.Radio(["Denoising", "Deblurring"], default="Denoising", label='task'),],
38
- gr.outputs.Image(type="file", label="Output"),
39
- title=title,
40
- description=description,
41
- article=article,
42
- enable_queue=True,
43
- examples=examples
44
- )
45
- iface.launch(debug=True,enable_queue=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py DELETED
@@ -1,791 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from typing import Callable, List, Optional, Union
15
-
16
- import PIL
17
- import torch
18
- from transformers import (
19
- CLIPImageProcessor,
20
- CLIPTextModelWithProjection,
21
- CLIPTokenizer,
22
- CLIPVisionModelWithProjection,
23
- XLMRobertaTokenizer,
24
- )
25
-
26
- from ...models import PriorTransformer, UNet2DConditionModel, VQModel
27
- from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler
28
- from ...utils import (
29
- replace_example_docstring,
30
- )
31
- from ..pipeline_utils import DiffusionPipeline
32
- from .pipeline_kandinsky import KandinskyPipeline
33
- from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
34
- from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
35
- from .pipeline_kandinsky_prior import KandinskyPriorPipeline
36
- from .text_encoder import MultilingualCLIP
37
-
38
-
39
- TEXT2IMAGE_EXAMPLE_DOC_STRING = """
40
- Examples:
41
- ```py
42
- from diffusers import AutoPipelineForText2Image
43
- import torch
44
-
45
- pipe = AutoPipelineForText2Image.from_pretrained(
46
- "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
47
- )
48
- pipe.enable_model_cpu_offload()
49
-
50
- prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
51
-
52
- image = pipe(prompt=prompt, num_inference_steps=25).images[0]
53
- ```
54
- """
55
-
56
- IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
57
- Examples:
58
- ```py
59
- from diffusers import AutoPipelineForImage2Image
60
- import torch
61
- import requests
62
- from io import BytesIO
63
- from PIL import Image
64
- import os
65
-
66
- pipe = AutoPipelineForImage2Image.from_pretrained(
67
- "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
68
- )
69
- pipe.enable_model_cpu_offload()
70
-
71
- prompt = "A fantasy landscape, Cinematic lighting"
72
- negative_prompt = "low quality, bad quality"
73
-
74
- url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
75
-
76
- response = requests.get(url)
77
- image = Image.open(BytesIO(response.content)).convert("RGB")
78
- image.thumbnail((768, 768))
79
-
80
- image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
81
- ```
82
- """
83
-
84
- INPAINT_EXAMPLE_DOC_STRING = """
85
- Examples:
86
- ```py
87
- from diffusers import AutoPipelineForInpainting
88
- from diffusers.utils import load_image
89
- import torch
90
- import numpy as np
91
-
92
- pipe = AutoPipelineForInpainting.from_pretrained(
93
- "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
94
- )
95
- pipe.enable_model_cpu_offload()
96
-
97
- prompt = "A fantasy landscape, Cinematic lighting"
98
- negative_prompt = "low quality, bad quality"
99
-
100
- original_image = load_image(
101
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
102
- )
103
-
104
- mask = np.zeros((768, 768), dtype=np.float32)
105
- # Let's mask out an area above the cat's head
106
- mask[:250, 250:-250] = 1
107
-
108
- image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
109
- ```
110
- """
111
-
112
-
113
- class KandinskyCombinedPipeline(DiffusionPipeline):
114
- """
115
- Combined Pipeline for text-to-image generation using Kandinsky
116
-
117
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
118
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
119
-
120
- Args:
121
- text_encoder ([`MultilingualCLIP`]):
122
- Frozen text-encoder.
123
- tokenizer ([`XLMRobertaTokenizer`]):
124
- Tokenizer of class
125
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
126
- A scheduler to be used in combination with `unet` to generate image latents.
127
- unet ([`UNet2DConditionModel`]):
128
- Conditional U-Net architecture to denoise the image embedding.
129
- movq ([`VQModel`]):
130
- MoVQ Decoder to generate the image from the latents.
131
- prior_prior ([`PriorTransformer`]):
132
- The canonincal unCLIP prior to approximate the image embedding from the text embedding.
133
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
134
- Frozen image-encoder.
135
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
136
- Frozen text-encoder.
137
- prior_tokenizer (`CLIPTokenizer`):
138
- Tokenizer of class
139
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
140
- prior_scheduler ([`UnCLIPScheduler`]):
141
- A scheduler to be used in combination with `prior` to generate image embedding.
142
- """
143
-
144
- _load_connected_pipes = True
145
-
146
- def __init__(
147
- self,
148
- text_encoder: MultilingualCLIP,
149
- tokenizer: XLMRobertaTokenizer,
150
- unet: UNet2DConditionModel,
151
- scheduler: Union[DDIMScheduler, DDPMScheduler],
152
- movq: VQModel,
153
- prior_prior: PriorTransformer,
154
- prior_image_encoder: CLIPVisionModelWithProjection,
155
- prior_text_encoder: CLIPTextModelWithProjection,
156
- prior_tokenizer: CLIPTokenizer,
157
- prior_scheduler: UnCLIPScheduler,
158
- prior_image_processor: CLIPImageProcessor,
159
- ):
160
- super().__init__()
161
-
162
- self.register_modules(
163
- text_encoder=text_encoder,
164
- tokenizer=tokenizer,
165
- unet=unet,
166
- scheduler=scheduler,
167
- movq=movq,
168
- prior_prior=prior_prior,
169
- prior_image_encoder=prior_image_encoder,
170
- prior_text_encoder=prior_text_encoder,
171
- prior_tokenizer=prior_tokenizer,
172
- prior_scheduler=prior_scheduler,
173
- prior_image_processor=prior_image_processor,
174
- )
175
- self.prior_pipe = KandinskyPriorPipeline(
176
- prior=prior_prior,
177
- image_encoder=prior_image_encoder,
178
- text_encoder=prior_text_encoder,
179
- tokenizer=prior_tokenizer,
180
- scheduler=prior_scheduler,
181
- image_processor=prior_image_processor,
182
- )
183
- self.decoder_pipe = KandinskyPipeline(
184
- text_encoder=text_encoder,
185
- tokenizer=tokenizer,
186
- unet=unet,
187
- scheduler=scheduler,
188
- movq=movq,
189
- )
190
-
191
- def enable_model_cpu_offload(self, gpu_id=0):
192
- r"""
193
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
194
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
195
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
196
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
197
- """
198
- self.prior_pipe.enable_model_cpu_offload()
199
- self.decoder_pipe.enable_model_cpu_offload()
200
-
201
- def progress_bar(self, iterable=None, total=None):
202
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
203
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
204
- self.decoder_pipe.enable_model_cpu_offload()
205
-
206
- def set_progress_bar_config(self, **kwargs):
207
- self.prior_pipe.set_progress_bar_config(**kwargs)
208
- self.decoder_pipe.set_progress_bar_config(**kwargs)
209
-
210
- @torch.no_grad()
211
- @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
212
- def __call__(
213
- self,
214
- prompt: Union[str, List[str]],
215
- negative_prompt: Optional[Union[str, List[str]]] = None,
216
- num_inference_steps: int = 100,
217
- guidance_scale: float = 4.0,
218
- num_images_per_prompt: int = 1,
219
- height: int = 512,
220
- width: int = 512,
221
- prior_guidance_scale: float = 4.0,
222
- prior_num_inference_steps: int = 25,
223
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
224
- latents: Optional[torch.FloatTensor] = None,
225
- output_type: Optional[str] = "pil",
226
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
227
- callback_steps: int = 1,
228
- return_dict: bool = True,
229
- ):
230
- """
231
- Function invoked when calling the pipeline for generation.
232
-
233
- Args:
234
- prompt (`str` or `List[str]`):
235
- The prompt or prompts to guide the image generation.
236
- negative_prompt (`str` or `List[str]`, *optional*):
237
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
238
- if `guidance_scale` is less than `1`).
239
- num_images_per_prompt (`int`, *optional*, defaults to 1):
240
- The number of images to generate per prompt.
241
- num_inference_steps (`int`, *optional*, defaults to 100):
242
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
243
- expense of slower inference.
244
- height (`int`, *optional*, defaults to 512):
245
- The height in pixels of the generated image.
246
- width (`int`, *optional*, defaults to 512):
247
- The width in pixels of the generated image.
248
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
249
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
250
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
251
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
252
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
253
- usually at the expense of lower image quality.
254
- prior_num_inference_steps (`int`, *optional*, defaults to 100):
255
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
256
- expense of slower inference.
257
- guidance_scale (`float`, *optional*, defaults to 4.0):
258
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
259
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
260
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
261
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
262
- usually at the expense of lower image quality.
263
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
264
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
265
- to make generation deterministic.
266
- latents (`torch.FloatTensor`, *optional*):
267
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
268
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
269
- tensor will ge generated by sampling using the supplied random `generator`.
270
- output_type (`str`, *optional*, defaults to `"pil"`):
271
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
272
- (`np.array`) or `"pt"` (`torch.Tensor`).
273
- callback (`Callable`, *optional*):
274
- A function that calls every `callback_steps` steps during inference. The function is called with the
275
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
276
- callback_steps (`int`, *optional*, defaults to 1):
277
- The frequency at which the `callback` function is called. If not specified, the callback is called at
278
- every step.
279
- return_dict (`bool`, *optional*, defaults to `True`):
280
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
281
-
282
- Examples:
283
-
284
- Returns:
285
- [`~pipelines.ImagePipelineOutput`] or `tuple`
286
- """
287
- prior_outputs = self.prior_pipe(
288
- prompt=prompt,
289
- negative_prompt=negative_prompt,
290
- num_images_per_prompt=num_images_per_prompt,
291
- num_inference_steps=prior_num_inference_steps,
292
- generator=generator,
293
- latents=latents,
294
- guidance_scale=prior_guidance_scale,
295
- output_type="pt",
296
- return_dict=False,
297
- )
298
- image_embeds = prior_outputs[0]
299
- negative_image_embeds = prior_outputs[1]
300
-
301
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
302
-
303
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
304
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
305
-
306
- outputs = self.decoder_pipe(
307
- prompt=prompt,
308
- image_embeds=image_embeds,
309
- negative_image_embeds=negative_image_embeds,
310
- width=width,
311
- height=height,
312
- num_inference_steps=num_inference_steps,
313
- generator=generator,
314
- guidance_scale=guidance_scale,
315
- output_type=output_type,
316
- callback=callback,
317
- callback_steps=callback_steps,
318
- return_dict=return_dict,
319
- )
320
- return outputs
321
-
322
-
323
- class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
324
- """
325
- Combined Pipeline for image-to-image generation using Kandinsky
326
-
327
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
328
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
329
-
330
- Args:
331
- text_encoder ([`MultilingualCLIP`]):
332
- Frozen text-encoder.
333
- tokenizer ([`XLMRobertaTokenizer`]):
334
- Tokenizer of class
335
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
336
- A scheduler to be used in combination with `unet` to generate image latents.
337
- unet ([`UNet2DConditionModel`]):
338
- Conditional U-Net architecture to denoise the image embedding.
339
- movq ([`VQModel`]):
340
- MoVQ Decoder to generate the image from the latents.
341
- prior_prior ([`PriorTransformer`]):
342
- The canonincal unCLIP prior to approximate the image embedding from the text embedding.
343
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
344
- Frozen image-encoder.
345
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
346
- Frozen text-encoder.
347
- prior_tokenizer (`CLIPTokenizer`):
348
- Tokenizer of class
349
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
350
- prior_scheduler ([`UnCLIPScheduler`]):
351
- A scheduler to be used in combination with `prior` to generate image embedding.
352
- """
353
-
354
- _load_connected_pipes = True
355
-
356
- def __init__(
357
- self,
358
- text_encoder: MultilingualCLIP,
359
- tokenizer: XLMRobertaTokenizer,
360
- unet: UNet2DConditionModel,
361
- scheduler: Union[DDIMScheduler, DDPMScheduler],
362
- movq: VQModel,
363
- prior_prior: PriorTransformer,
364
- prior_image_encoder: CLIPVisionModelWithProjection,
365
- prior_text_encoder: CLIPTextModelWithProjection,
366
- prior_tokenizer: CLIPTokenizer,
367
- prior_scheduler: UnCLIPScheduler,
368
- prior_image_processor: CLIPImageProcessor,
369
- ):
370
- super().__init__()
371
-
372
- self.register_modules(
373
- text_encoder=text_encoder,
374
- tokenizer=tokenizer,
375
- unet=unet,
376
- scheduler=scheduler,
377
- movq=movq,
378
- prior_prior=prior_prior,
379
- prior_image_encoder=prior_image_encoder,
380
- prior_text_encoder=prior_text_encoder,
381
- prior_tokenizer=prior_tokenizer,
382
- prior_scheduler=prior_scheduler,
383
- prior_image_processor=prior_image_processor,
384
- )
385
- self.prior_pipe = KandinskyPriorPipeline(
386
- prior=prior_prior,
387
- image_encoder=prior_image_encoder,
388
- text_encoder=prior_text_encoder,
389
- tokenizer=prior_tokenizer,
390
- scheduler=prior_scheduler,
391
- image_processor=prior_image_processor,
392
- )
393
- self.decoder_pipe = KandinskyImg2ImgPipeline(
394
- text_encoder=text_encoder,
395
- tokenizer=tokenizer,
396
- unet=unet,
397
- scheduler=scheduler,
398
- movq=movq,
399
- )
400
-
401
- def enable_model_cpu_offload(self, gpu_id=0):
402
- r"""
403
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
404
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
405
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
406
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
407
- """
408
- self.prior_pipe.enable_model_cpu_offload()
409
- self.decoder_pipe.enable_model_cpu_offload()
410
-
411
- def progress_bar(self, iterable=None, total=None):
412
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
413
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
414
- self.decoder_pipe.enable_model_cpu_offload()
415
-
416
- def set_progress_bar_config(self, **kwargs):
417
- self.prior_pipe.set_progress_bar_config(**kwargs)
418
- self.decoder_pipe.set_progress_bar_config(**kwargs)
419
-
420
- @torch.no_grad()
421
- @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
422
- def __call__(
423
- self,
424
- prompt: Union[str, List[str]],
425
- image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
426
- negative_prompt: Optional[Union[str, List[str]]] = None,
427
- num_inference_steps: int = 100,
428
- guidance_scale: float = 4.0,
429
- num_images_per_prompt: int = 1,
430
- strength: float = 0.3,
431
- height: int = 512,
432
- width: int = 512,
433
- prior_guidance_scale: float = 4.0,
434
- prior_num_inference_steps: int = 25,
435
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
436
- latents: Optional[torch.FloatTensor] = None,
437
- output_type: Optional[str] = "pil",
438
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
439
- callback_steps: int = 1,
440
- return_dict: bool = True,
441
- ):
442
- """
443
- Function invoked when calling the pipeline for generation.
444
-
445
- Args:
446
- prompt (`str` or `List[str]`):
447
- The prompt or prompts to guide the image generation.
448
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
449
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
450
- process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded
451
- again.
452
- negative_prompt (`str` or `List[str]`, *optional*):
453
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
454
- if `guidance_scale` is less than `1`).
455
- num_images_per_prompt (`int`, *optional*, defaults to 1):
456
- The number of images to generate per prompt.
457
- num_inference_steps (`int`, *optional*, defaults to 100):
458
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
459
- expense of slower inference.
460
- height (`int`, *optional*, defaults to 512):
461
- The height in pixels of the generated image.
462
- width (`int`, *optional*, defaults to 512):
463
- The width in pixels of the generated image.
464
- strength (`float`, *optional*, defaults to 0.3):
465
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
466
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
467
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
468
- be maximum and the denoising process will run for the full number of iterations specified in
469
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
470
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
471
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
472
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
473
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
474
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
475
- usually at the expense of lower image quality.
476
- prior_num_inference_steps (`int`, *optional*, defaults to 100):
477
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
478
- expense of slower inference.
479
- guidance_scale (`float`, *optional*, defaults to 4.0):
480
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
481
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
482
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
483
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
484
- usually at the expense of lower image quality.
485
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
486
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
487
- to make generation deterministic.
488
- latents (`torch.FloatTensor`, *optional*):
489
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
490
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
491
- tensor will ge generated by sampling using the supplied random `generator`.
492
- output_type (`str`, *optional*, defaults to `"pil"`):
493
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
494
- (`np.array`) or `"pt"` (`torch.Tensor`).
495
- callback (`Callable`, *optional*):
496
- A function that calls every `callback_steps` steps during inference. The function is called with the
497
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
498
- callback_steps (`int`, *optional*, defaults to 1):
499
- The frequency at which the `callback` function is called. If not specified, the callback is called at
500
- every step.
501
- return_dict (`bool`, *optional*, defaults to `True`):
502
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
503
-
504
- Examples:
505
-
506
- Returns:
507
- [`~pipelines.ImagePipelineOutput`] or `tuple`
508
- """
509
- prior_outputs = self.prior_pipe(
510
- prompt=prompt,
511
- negative_prompt=negative_prompt,
512
- num_images_per_prompt=num_images_per_prompt,
513
- num_inference_steps=prior_num_inference_steps,
514
- generator=generator,
515
- latents=latents,
516
- guidance_scale=prior_guidance_scale,
517
- output_type="pt",
518
- return_dict=False,
519
- )
520
- image_embeds = prior_outputs[0]
521
- negative_image_embeds = prior_outputs[1]
522
-
523
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
524
- image = [image] if isinstance(prompt, PIL.Image.Image) else image
525
-
526
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
527
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
528
-
529
- if (
530
- isinstance(image, (list, tuple))
531
- and len(image) < image_embeds.shape[0]
532
- and image_embeds.shape[0] % len(image) == 0
533
- ):
534
- image = (image_embeds.shape[0] // len(image)) * image
535
-
536
- outputs = self.decoder_pipe(
537
- prompt=prompt,
538
- image=image,
539
- image_embeds=image_embeds,
540
- negative_image_embeds=negative_image_embeds,
541
- strength=strength,
542
- width=width,
543
- height=height,
544
- num_inference_steps=num_inference_steps,
545
- generator=generator,
546
- guidance_scale=guidance_scale,
547
- output_type=output_type,
548
- callback=callback,
549
- callback_steps=callback_steps,
550
- return_dict=return_dict,
551
- )
552
- return outputs
553
-
554
-
555
- class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
556
- """
557
- Combined Pipeline for generation using Kandinsky
558
-
559
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
560
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
561
-
562
- Args:
563
- text_encoder ([`MultilingualCLIP`]):
564
- Frozen text-encoder.
565
- tokenizer ([`XLMRobertaTokenizer`]):
566
- Tokenizer of class
567
- scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
568
- A scheduler to be used in combination with `unet` to generate image latents.
569
- unet ([`UNet2DConditionModel`]):
570
- Conditional U-Net architecture to denoise the image embedding.
571
- movq ([`VQModel`]):
572
- MoVQ Decoder to generate the image from the latents.
573
- prior_prior ([`PriorTransformer`]):
574
- The canonincal unCLIP prior to approximate the image embedding from the text embedding.
575
- prior_image_encoder ([`CLIPVisionModelWithProjection`]):
576
- Frozen image-encoder.
577
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
578
- Frozen text-encoder.
579
- prior_tokenizer (`CLIPTokenizer`):
580
- Tokenizer of class
581
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
582
- prior_scheduler ([`UnCLIPScheduler`]):
583
- A scheduler to be used in combination with `prior` to generate image embedding.
584
- """
585
-
586
- _load_connected_pipes = True
587
-
588
- def __init__(
589
- self,
590
- text_encoder: MultilingualCLIP,
591
- tokenizer: XLMRobertaTokenizer,
592
- unet: UNet2DConditionModel,
593
- scheduler: Union[DDIMScheduler, DDPMScheduler],
594
- movq: VQModel,
595
- prior_prior: PriorTransformer,
596
- prior_image_encoder: CLIPVisionModelWithProjection,
597
- prior_text_encoder: CLIPTextModelWithProjection,
598
- prior_tokenizer: CLIPTokenizer,
599
- prior_scheduler: UnCLIPScheduler,
600
- prior_image_processor: CLIPImageProcessor,
601
- ):
602
- super().__init__()
603
-
604
- self.register_modules(
605
- text_encoder=text_encoder,
606
- tokenizer=tokenizer,
607
- unet=unet,
608
- scheduler=scheduler,
609
- movq=movq,
610
- prior_prior=prior_prior,
611
- prior_image_encoder=prior_image_encoder,
612
- prior_text_encoder=prior_text_encoder,
613
- prior_tokenizer=prior_tokenizer,
614
- prior_scheduler=prior_scheduler,
615
- prior_image_processor=prior_image_processor,
616
- )
617
- self.prior_pipe = KandinskyPriorPipeline(
618
- prior=prior_prior,
619
- image_encoder=prior_image_encoder,
620
- text_encoder=prior_text_encoder,
621
- tokenizer=prior_tokenizer,
622
- scheduler=prior_scheduler,
623
- image_processor=prior_image_processor,
624
- )
625
- self.decoder_pipe = KandinskyInpaintPipeline(
626
- text_encoder=text_encoder,
627
- tokenizer=tokenizer,
628
- unet=unet,
629
- scheduler=scheduler,
630
- movq=movq,
631
- )
632
-
633
- def enable_model_cpu_offload(self, gpu_id=0):
634
- r"""
635
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
636
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
637
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
638
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
639
- """
640
- self.prior_pipe.enable_model_cpu_offload()
641
- self.decoder_pipe.enable_model_cpu_offload()
642
-
643
- def progress_bar(self, iterable=None, total=None):
644
- self.prior_pipe.progress_bar(iterable=iterable, total=total)
645
- self.decoder_pipe.progress_bar(iterable=iterable, total=total)
646
- self.decoder_pipe.enable_model_cpu_offload()
647
-
648
- def set_progress_bar_config(self, **kwargs):
649
- self.prior_pipe.set_progress_bar_config(**kwargs)
650
- self.decoder_pipe.set_progress_bar_config(**kwargs)
651
-
652
- @torch.no_grad()
653
- @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING)
654
- def __call__(
655
- self,
656
- prompt: Union[str, List[str]],
657
- image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
658
- mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
659
- negative_prompt: Optional[Union[str, List[str]]] = None,
660
- num_inference_steps: int = 100,
661
- guidance_scale: float = 4.0,
662
- num_images_per_prompt: int = 1,
663
- height: int = 512,
664
- width: int = 512,
665
- prior_guidance_scale: float = 4.0,
666
- prior_num_inference_steps: int = 25,
667
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
668
- latents: Optional[torch.FloatTensor] = None,
669
- output_type: Optional[str] = "pil",
670
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
671
- callback_steps: int = 1,
672
- return_dict: bool = True,
673
- ):
674
- """
675
- Function invoked when calling the pipeline for generation.
676
-
677
- Args:
678
- prompt (`str` or `List[str]`):
679
- The prompt or prompts to guide the image generation.
680
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
681
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
682
- process. Can also accpet image latents as `image`, if passing latents directly, it will not be encoded
683
- again.
684
- mask_image (`np.array`):
685
- Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
686
- black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
687
- channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
688
- so the expected shape would be `(B, H, W, 1)`.
689
- negative_prompt (`str` or `List[str]`, *optional*):
690
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
691
- if `guidance_scale` is less than `1`).
692
- num_images_per_prompt (`int`, *optional*, defaults to 1):
693
- The number of images to generate per prompt.
694
- num_inference_steps (`int`, *optional*, defaults to 100):
695
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
696
- expense of slower inference.
697
- height (`int`, *optional*, defaults to 512):
698
- The height in pixels of the generated image.
699
- width (`int`, *optional*, defaults to 512):
700
- The width in pixels of the generated image.
701
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
702
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
703
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
704
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
705
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
706
- usually at the expense of lower image quality.
707
- prior_num_inference_steps (`int`, *optional*, defaults to 100):
708
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
709
- expense of slower inference.
710
- guidance_scale (`float`, *optional*, defaults to 4.0):
711
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
712
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
713
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
714
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
715
- usually at the expense of lower image quality.
716
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
717
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
718
- to make generation deterministic.
719
- latents (`torch.FloatTensor`, *optional*):
720
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
721
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
722
- tensor will ge generated by sampling using the supplied random `generator`.
723
- output_type (`str`, *optional*, defaults to `"pil"`):
724
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
725
- (`np.array`) or `"pt"` (`torch.Tensor`).
726
- callback (`Callable`, *optional*):
727
- A function that calls every `callback_steps` steps during inference. The function is called with the
728
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
729
- callback_steps (`int`, *optional*, defaults to 1):
730
- The frequency at which the `callback` function is called. If not specified, the callback is called at
731
- every step.
732
- return_dict (`bool`, *optional*, defaults to `True`):
733
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
734
-
735
- Examples:
736
-
737
- Returns:
738
- [`~pipelines.ImagePipelineOutput`] or `tuple`
739
- """
740
- prior_outputs = self.prior_pipe(
741
- prompt=prompt,
742
- negative_prompt=negative_prompt,
743
- num_images_per_prompt=num_images_per_prompt,
744
- num_inference_steps=prior_num_inference_steps,
745
- generator=generator,
746
- latents=latents,
747
- guidance_scale=prior_guidance_scale,
748
- output_type="pt",
749
- return_dict=False,
750
- )
751
- image_embeds = prior_outputs[0]
752
- negative_image_embeds = prior_outputs[1]
753
-
754
- prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
755
- image = [image] if isinstance(prompt, PIL.Image.Image) else image
756
- mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
757
-
758
- if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
759
- prompt = (image_embeds.shape[0] // len(prompt)) * prompt
760
-
761
- if (
762
- isinstance(image, (list, tuple))
763
- and len(image) < image_embeds.shape[0]
764
- and image_embeds.shape[0] % len(image) == 0
765
- ):
766
- image = (image_embeds.shape[0] // len(image)) * image
767
-
768
- if (
769
- isinstance(mask_image, (list, tuple))
770
- and len(mask_image) < image_embeds.shape[0]
771
- and image_embeds.shape[0] % len(mask_image) == 0
772
- ):
773
- mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
774
-
775
- outputs = self.decoder_pipe(
776
- prompt=prompt,
777
- image=image,
778
- mask_image=mask_image,
779
- image_embeds=image_embeds,
780
- negative_image_embeds=negative_image_embeds,
781
- width=width,
782
- height=height,
783
- num_inference_steps=num_inference_steps,
784
- generator=generator,
785
- guidance_scale=guidance_scale,
786
- output_type=output_type,
787
- callback=callback,
788
- callback_steps=callback_steps,
789
- return_dict=return_dict,
790
- )
791
- return outputs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/README.md DELETED
@@ -1,73 +0,0 @@
1
- # Rethinking atrous convolution for semantic image segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{chen2017rethinking,
9
- title={Rethinking atrous convolution for semantic image segmentation},
10
- author={Chen, Liang-Chieh and Papandreou, George and Schroff, Florian and Adam, Hartwig},
11
- journal={arXiv preprint arXiv:1706.05587},
12
- year={2017}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- Note: `D-8` here corresponding to the output stride 8 setting for DeepLab series.
19
-
20
- ### Cityscapes
21
-
22
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
23
- | --------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
24
- | DeepLabV3 | R-50-D8 | 512x1024 | 40000 | 6.1 | 2.57 | 79.09 | 80.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449.log.json) |
25
- | DeepLabV3 | R-101-D8 | 512x1024 | 40000 | 9.6 | 1.92 | 77.12 | 79.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241.log.json) |
26
- | DeepLabV3 | R-50-D8 | 769x769 | 40000 | 6.9 | 1.11 | 78.58 | 79.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723.log.json) |
27
- | DeepLabV3 | R-101-D8 | 769x769 | 40000 | 10.9 | 0.83 | 79.27 | 80.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809.log.json) |
28
- | DeepLabV3 | R-18-D8 | 512x1024 | 80000 | 1.7 | 13.78 | 76.70 | 78.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes-20201225_021506.log.json) |
29
- | DeepLabV3 | R-50-D8 | 512x1024 | 80000 | - | - | 79.32 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404.log.json) |
30
- | DeepLabV3 | R-101-D8 | 512x1024 | 80000 | - | - | 80.20 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503.log.json) |
31
- | DeepLabV3 | R-18-D8 | 769x769 | 80000 | 1.9 | 5.55 | 76.60 | 78.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes-20201225_021506.log.json) |
32
- | DeepLabV3 | R-50-D8 | 769x769 | 80000 | - | - | 79.89 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338.log.json) |
33
- | DeepLabV3 | R-101-D8 | 769x769 | 80000 | - | - | 79.67 | 80.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353.log.json) |
34
- | DeepLabV3 | R-101-D16-MG124 | 512x1024 | 40000 | 4.7 | - 6.96 | 76.71 | 78.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-67b0c992.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) |
35
- | DeepLabV3 | R-101-D16-MG124 | 512x1024 | 80000 | - | - | 78.36 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) |
36
- | DeepLabV3 | R-18b-D8 | 512x1024 | 80000 | 1.6 | 13.93 | 76.26 | 77.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes-20201225_094144.log.json) |
37
- | DeepLabV3 | R-50b-D8 | 512x1024 | 80000 | 6.0 | 2.74 | 79.63 | 80.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes-20201225_155148.log.json) |
38
- | DeepLabV3 | R-101b-D8 | 512x1024 | 80000 | 9.5 | 1.81 | 80.01 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes-20201226_171821.log.json) |
39
- | DeepLabV3 | R-18b-D8 | 769x769 | 80000 | 1.8 | 5.79 | 76.63 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes-20201225_094144.log.json) |
40
- | DeepLabV3 | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.16 | 78.80 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes-20201225_155404.log.json) |
41
- | DeepLabV3 | R-101b-D8 | 769x769 | 80000 | 10.7 | 0.82 | 79.41 | 80.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes-20201226_190843.log.json) |
42
-
43
- ### ADE20K
44
-
45
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
46
- | --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
47
- | DeepLabV3 | R-50-D8 | 512x512 | 80000 | 8.9 | 14.76 | 42.42 | 43.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) |
48
- | DeepLabV3 | R-101-D8 | 512x512 | 80000 | 12.4 | 10.14 | 44.08 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256.log.json) |
49
- | DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 42.66 | 44.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227.log.json) |
50
- | DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 45.00 | 46.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) |
51
-
52
- ### Pascal VOC 2012 + Aug
53
-
54
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
55
- | --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
56
- | DeepLabV3 | R-50-D8 | 512x512 | 20000 | 6.1 | 13.88 | 76.17 | 77.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906.log.json) |
57
- | DeepLabV3 | R-101-D8 | 512x512 | 20000 | 9.6 | 9.81 | 78.70 | 79.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932.log.json) |
58
- | DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 77.68 | 78.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546.log.json) |
59
- | DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 77.92 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432.log.json) |
60
-
61
- ### Pascal Context
62
-
63
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
64
- | --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
65
- | DeepLabV3 | R-101-D8 | 480x480 | 40000 | 9.2 | 7.09 | 46.55 | 47.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context-20200911_204118.log.json) |
66
- | DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 46.42 | 47.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context-20200911_170155.log.json) |
67
-
68
- ### Pascal Context 59
69
-
70
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
71
- | --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
72
- | DeepLabV3 | R-101-D8 | 480x480 | 40000 | - | - | 52.61 | 54.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59-20210416_110332.log.json) |
73
- | DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 52.46 | 54.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59-20210416_113002.log.json) |
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arafath10/chatcode/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Chatcode
3
- emoji: 🐠
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.16.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AriaMei/TTSdemo/preprocess.py DELETED
@@ -1,25 +0,0 @@
1
import argparse
import text
from utils import load_filepaths_and_text

if __name__ == '__main__':
    # CLI options: which filelists to clean, which pipe-separated column
    # holds the transcript, and which cleaner chain to run over it.
    parser = argparse.ArgumentParser()
    parser.add_argument("--out_extension", default="cleaned")
    parser.add_argument("--text_index", default=1, type=int)
    parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
    parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])

    args = parser.parse_args()

    for filelist in args.filelists:
        print("START:", filelist)
        # Each entry is a list of fields from one pipe-separated line.
        entries = load_filepaths_and_text(filelist)
        for entry in entries:
            # Normalize the transcript column in place with the cleaners.
            entry[args.text_index] = text._clean_text(entry[args.text_index], args.text_cleaners)

        # Write the cleaned filelist next to the original, e.g. "foo.txt.cleaned".
        out_path = f"{filelist}.{args.out_extension}"
        with open(out_path, "w", encoding="utf-8") as f:
            f.writelines("|".join(entry) + "\n" for entry in entries)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Armandoliv/gpt2-tweets-generation-app/app.py DELETED
@@ -1,54 +0,0 @@
1
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned GPT-2 tweet generator once at startup and move it
# to GPU when one is available, otherwise run on CPU.
tokenizer = AutoTokenizer.from_pretrained("Armandoliv/gpt2-tweetml-generator")
model = AutoModelForCausalLM.from_pretrained("Armandoliv/gpt2-tweetml-generator")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)


def main_generator(text):
    """Generate ten sampled tweet continuations for the given seed text."""
    # Collapse newlines so the seed is a single line for the model.
    cleaned = text.strip().replace("\n", " ").strip()
    prompt = f"<|startoftext|> {cleaned}"
    prompt_ids = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
    prompt_ids = prompt_ids.to(device)

    # Nucleus + top-k sampling; ten independent candidate sequences.
    samples = model.generate(
        prompt_ids,
        do_sample=True,
        top_k=20,
        max_length = 70,
        top_p=0.98,
        num_return_sequences=10,
        temperature=0.95

    )

    # Number each decoded candidate and join them into one text blob.
    pieces = []
    for idx, sample in enumerate(samples):
        pieces.append("{}: {}\n\n".format(idx + 1, tokenizer.decode(sample, skip_special_tokens=True)))
    return "".join(pieces)


inputs = [gr.Textbox(lines=1, placeholder="Text Here...", label="Input")]
outputs = gr.Text( label="10 Tweets Generated")
title="Tweets generation app"
description = "This demo uses AI Models to create tweets.\nIt focus on Data Science and Machine Learning tweets creation."
examples = ['I wonder']

io = gr.Interface(fn=main_generator, inputs=inputs, outputs=outputs, title=title, description = description, examples = examples,

 css= """.gr-button-primary { background: -webkit-linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764;
background: linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important;
background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important;
background: -webkit-linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important;
color:white !important}"""
)

io.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/develop.py DELETED
@@ -1,193 +0,0 @@
1
- from distutils.util import convert_path
2
- from distutils import log
3
- from distutils.errors import DistutilsError, DistutilsOptionError
4
- import os
5
- import glob
6
- import io
7
-
8
- import pkg_resources
9
- from setuptools.command.easy_install import easy_install
10
- from setuptools import namespaces
11
- import setuptools
12
-
13
-
14
class develop(namespaces.DevelopInstaller, easy_install):
    """Set up package for development"""

    description = "install package in 'development mode'"

    # Extend easy_install's options with develop-specific ones.
    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]

    boolean_options = easy_install.boolean_options + ['uninstall']

    command_consumes_arguments = False  # override base

    def run(self):
        """Install in development mode, or uninstall with --uninstall."""
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
            self.uninstall_namespaces()
        else:
            self.install_for_development()
        self.warn_deprecated_options()

    def initialize_options(self):
        # Set develop-specific option defaults before delegating to base.
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.'  # always copy eggs installed in curdir

    def finalize_options(self):
        """Validate options and build the Distribution for this source tree."""
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            template = "Please rename %r to %r before using 'develop'"
            args = ei.egg_info, ei.broken_egg_info
            raise DistutilsError(template % args)
        self.args = [ei.egg_name]

        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))

        # The .egg-link file lives in the install dir and points back here.
        egg_link_fn = ei.egg_name + '.egg-link'
        self.egg_link = os.path.join(self.install_dir, egg_link_fn)
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)

        # --egg-path, resolved against the install dir, must land exactly
        # on the egg base directory of this source checkout.
        target = pkg_resources.normalize_path(self.egg_base)
        egg_path = pkg_resources.normalize_path(
            os.path.join(self.install_dir, self.egg_path)
        )
        if egg_path != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to " + target
            )

        # Make a distribution for the package's source
        self.dist = pkg_resources.Distribution(
            target,
            pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name=ei.egg_name,
        )

        self.setup_path = self._resolve_setup_path(
            self.egg_base,
            self.install_dir,
            self.egg_path,
        )

    @staticmethod
    def _resolve_setup_path(egg_base, install_dir, egg_path):
        """
        Generate a path from egg_base back to '.' where the
        setup script resides and ensure that path points to the
        setup path from $install_dir/$egg_path.
        """
        # One '../' per path component of egg_base (plus one) walks back
        # to the directory holding the setup script.
        path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
        if path_to_setup != os.curdir:
            path_to_setup = '../' * (path_to_setup.count('/') + 1)
        resolved = pkg_resources.normalize_path(
            os.path.join(install_dir, egg_path, path_to_setup)
        )
        # The round trip must end up at the current directory, otherwise
        # the generated .egg-link would point somewhere inconsistent.
        if resolved != pkg_resources.normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory",
                resolved,
                pkg_resources.normalize_path(os.curdir),
            )
        return path_to_setup

    def install_for_development(self):
        """Install the source checkout as an editable (.egg-link) install."""
        self.run_command('egg_info')

        # Build extensions in-place
        self.reinitialize_command('build_ext', inplace=1)
        self.run_command('build_ext')

        # Bootstrap setuptools itself first if requested (legacy path).
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None

        self.install_namespaces()

        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            with open(self.egg_link, "w") as f:
                f.write(self.egg_path + "\n" + self.setup_path)
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)

    def uninstall_link(self):
        """Remove the .egg-link file and the matching .pth entry, if ours."""
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            # Only remove the link if it actually points at this checkout;
            # a foreign link means someone else owns the editable install.
            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist)  # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")

    def install_egg_scripts(self, dist):
        """Install scripts so they point at the source tree, not an egg."""
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self, dist)

        # create wrapper scripts in the script dir, pointing to dist.scripts

        # new-style...
        self.install_wrapper_scripts(dist)

        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            with io.open(script_path) as strm:
                script_text = strm.read()
            self.install_script(dist, script_name, script_text, script_path)

    def install_wrapper_scripts(self, dist):
        # Wrap the dist so generated scripts require only the project name
        # (no version pin), surviving version changes of the checkout.
        dist = VersionlessRequirement(dist)
        return easy_install.install_wrapper_scripts(self, dist)
169
-
170
-
171
class VersionlessRequirement:
    """Proxy around a pkg_resources.Distribution whose ``as_requirement``
    yields only the project name, with no version pin.

    Scripts generated against the proxy therefore keep working across
    reinstalls of different versions of the same project. All other
    attribute access is delegated untouched to the wrapped distribution.

    >>> from pkg_resources import Distribution
    >>> dist = Distribution(project_name='foo', version='1.0')
    >>> str(dist.as_requirement())
    'foo==1.0'
    >>> adapted_dist = VersionlessRequirement(dist)
    >>> str(adapted_dist.as_requirement())
    'foo'
    """

    def __init__(self, dist):
        # Keep the wrapped distribution private; all lookups go through
        # __getattr__ delegation below.
        self.__dist = dist

    def __getattr__(self, name):
        # Fall through to the wrapped distribution for anything this
        # proxy does not define itself.
        wrapped = self.__dist
        return getattr(wrapped, name)

    def as_requirement(self):
        # Just the project name — deliberately no "==version" suffix.
        return self.project_name
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AutoLLM/ArxivDigest/relevancy.py DELETED
@@ -1,174 +0,0 @@
1
- """
2
- run:
3
- python -m relevancy run_all_day_paper \
4
- --output_dir ./data \
5
- --model_name="gpt-3.5-turbo" \
6
- """
7
- import time
8
- import json
9
- import os
10
- import random
11
- import re
12
- import string
13
- from datetime import datetime
14
-
15
- import numpy as np
16
- import tqdm
17
- import utils
18
-
19
-
20
- def encode_prompt(query, prompt_papers):
21
- """Encode multiple prompt instructions into a single string."""
22
- prompt = open("relevancy_prompt.txt").read() + "\n"
23
- prompt += query['interest']
24
-
25
- for idx, task_dict in enumerate(prompt_papers):
26
- (title, authors, abstract) = task_dict["title"], task_dict["authors"], task_dict["abstract"]
27
- if not title:
28
- raise
29
- prompt += f"###\n"
30
- prompt += f"{idx + 1}. Title: {title}\n"
31
- prompt += f"{idx + 1}. Authors: {authors}\n"
32
- prompt += f"{idx + 1}. Abstract: {abstract}\n"
33
- prompt += f"\n Generate response:\n1."
34
- print(prompt)
35
- return prompt
36
-
37
-
38
- def post_process_chat_gpt_response(paper_data, response, threshold_score=8):
39
- selected_data = []
40
- if response is None:
41
- return []
42
- json_items = response['message']['content'].replace("\n\n", "\n").split("\n")
43
- pattern = r"^\d+\. |\\"
44
- import pprint
45
- try:
46
- score_items = [
47
- json.loads(re.sub(pattern, "", line))
48
- for line in json_items if "relevancy score" in line.lower()]
49
- except Exception:
50
- pprint.pprint([re.sub(pattern, "", line) for line in json_items if "relevancy score" in line.lower()])
51
- raise RuntimeError("failed")
52
- pprint.pprint(score_items)
53
- scores = []
54
- for item in score_items:
55
- temp = item["Relevancy score"]
56
- if "/" in temp:
57
- scores.append(int(temp.split("/")[0]))
58
- else:
59
- scores.append(int(temp))
60
- if len(score_items) != len(paper_data):
61
- score_items = score_items[:len(paper_data)]
62
- hallucination = True
63
- else:
64
- hallucination = False
65
-
66
- for idx, inst in enumerate(score_items):
67
- # if the decoding stops due to length, the last example is likely truncated so we discard it
68
- if scores[idx] < threshold_score:
69
- continue
70
- output_str = "Title: " + paper_data[idx]["title"] + "\n"
71
- output_str += "Authors: " + paper_data[idx]["authors"] + "\n"
72
- output_str += "Link: " + paper_data[idx]["main_page"] + "\n"
73
- for key, value in inst.items():
74
- paper_data[idx][key] = value
75
- output_str += key + ": " + value + "\n"
76
- paper_data[idx]['summarized_text'] = output_str
77
- selected_data.append(paper_data[idx])
78
- return selected_data, hallucination
79
-
80
-
81
- def find_word_in_string(w, s):
82
- return re.compile(r"\b({0})\b".format(w), flags=re.IGNORECASE).search(s)
83
-
84
-
85
- def process_subject_fields(subjects):
86
- all_subjects = subjects.split(";")
87
- all_subjects = [s.split(" (")[0] for s in all_subjects]
88
- return all_subjects
89
-
90
- def generate_relevance_score(
91
- all_papers,
92
- query,
93
- model_name="gpt-3.5-turbo",
94
- threshold_score=8,
95
- num_paper_in_prompt=4,
96
- temperature=0.4,
97
- top_p=1.0,
98
- sorting=True
99
- ):
100
- ans_data = []
101
- request_idx = 1
102
- hallucination = False
103
- for id in tqdm.tqdm(range(0, len(all_papers), num_paper_in_prompt)):
104
- prompt_papers = all_papers[id:id+num_paper_in_prompt]
105
- # only sampling from the seed tasks
106
- prompt = encode_prompt(query, prompt_papers)
107
-
108
- decoding_args = utils.OpenAIDecodingArguments(
109
- temperature=temperature,
110
- n=1,
111
- max_tokens=1072, # hard-code to maximize the length. the requests will be automatically adjusted
112
- top_p=top_p,
113
- )
114
- request_start = time.time()
115
- response = utils.openai_completion(
116
- prompts=prompt,
117
- model_name=model_name,
118
- batch_size=1,
119
- decoding_args=decoding_args,
120
- logit_bias={"100257": -100}, # prevent the <|endoftext|> from being generated
121
- # "100265":-100, "100276":-100 for <|im_end|> and <endofprompt> token
122
- )
123
- print ("response", response['message']['content'])
124
- request_duration = time.time() - request_start
125
-
126
- process_start = time.time()
127
- batch_data, hallu = post_process_chat_gpt_response(prompt_papers, response, threshold_score=threshold_score)
128
- hallucination = hallucination or hallu
129
- ans_data.extend(batch_data)
130
-
131
- print(f"Request {request_idx+1} took {request_duration:.2f}s")
132
- print(f"Post-processing took {time.time() - process_start:.2f}s")
133
-
134
- if sorting:
135
- ans_data = sorted(ans_data, key=lambda x: int(x["Relevancy score"]), reverse=True)
136
-
137
- return ans_data, hallucination
138
-
139
- def run_all_day_paper(
140
- query={"interest":"", "subjects":["Computation and Language", "Artificial Intelligence"]},
141
- date=None,
142
- data_dir="../data",
143
- model_name="gpt-3.5-turbo",
144
- threshold_score=8,
145
- num_paper_in_prompt=8,
146
- temperature=0.4,
147
- top_p=1.0
148
- ):
149
- if date is None:
150
- date = datetime.today().strftime('%a, %d %b %y')
151
- # string format such as Wed, 10 May 23
152
- print ("the date for the arxiv data is: ", date)
153
-
154
- all_papers = [json.loads(l) for l in open(f"{data_dir}/{date}.jsonl", "r")]
155
- print (f"We found {len(all_papers)}.")
156
-
157
- all_papers_in_subjects = [
158
- t for t in all_papers
159
- if bool(set(process_subject_fields(t['subjects'])) & set(query['subjects']))
160
- ]
161
- print(f"After filtering subjects, we have {len(all_papers_in_subjects)} papers left.")
162
- ans_data = generate_relevance_score(all_papers_in_subjects, query, model_name, threshold_score, num_paper_in_prompt, temperature, top_p)
163
- utils.write_ans_to_file(ans_data, date, output_dir="../outputs")
164
- return ans_data
165
-
166
-
167
- if __name__ == "__main__":
168
- query = {"interest":"""
169
- 1. Large language model pretraining and finetunings
170
- 2. Multimodal machine learning
171
- 3. Do not care about specific application, for example, information extraction, summarization, etc.
172
- 4. Not interested in paper focus on specific languages, e.g., Arabic, Chinese, etc.\n""",
173
- "subjects":["Computation and Language"]}
174
- ans_data = run_all_day_paper(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awesimo/jojogan/e4e/utils/data_utils.py DELETED
@@ -1,25 +0,0 @@
1
- """
2
- Code adopted from pix2pixHD:
3
- https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py
4
- """
5
- import os
6
-
7
- IMG_EXTENSIONS = [
8
- '.jpg', '.JPG', '.jpeg', '.JPEG',
9
- '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
10
- ]
11
-
12
-
13
- def is_image_file(filename):
14
- return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
15
-
16
-
17
- def make_dataset(dir):
18
- images = []
19
- assert os.path.isdir(dir), '%s is not a valid directory' % dir
20
- for root, _, fnames in sorted(os.walk(dir)):
21
- for fname in fnames:
22
- if is_image_file(fname):
23
- path = os.path.join(root, fname)
24
- images.append(path)
25
- return images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Choque De Clanes Linux.md DELETED
@@ -1,137 +0,0 @@
1
-
2
- <h1>Cómo descargar Clash of Clans en Linux</h1>
3
- <p>Clash of Clans es uno de los juegos móviles más populares del mundo, con millones de jugadores construyendo sus aldeas, entrenando a sus tropas y luchando contra otros clanes en línea. Pero, ¿y si quieres jugar a este adictivo juego de estrategia en tu ordenador Linux? ¿Es posible descargar Clash of Clans en Linux y disfrutar del mismo modo de juego que en tu smartphone? </p>
4
- <h2>descargar choque de clanes linux</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://bltlly.com/2v6L10">https://bltlly.com/2v6L10</a></b></p><br /><br />
5
- <p>La respuesta es sí, es posible, pero no muy sencillo. Verás, Clash of Clans es una aplicación para Android, y Linux no es un sistema operativo Android. Así que no puedes simplemente descargar el juego desde Google Play Store y ejecutarlo en tu máquina Linux. Necesitas algunas herramientas y trucos para que funcione. </p>
6
- <p>En este artículo, le mostraremos cómo descargar Clash of Clans en Linux usando un software llamado Anbox, que le permite ejecutar aplicaciones de Android en cualquier distribución de Linux. También te daremos algunos consejos y trucos sobre cómo optimizar tu experiencia de juego y solucionar problemas comunes. </p>
7
- <h2>Lo que necesitas para descargar Clash of Clans en Linux</h2>
8
- <p>Antes de entrar en detalles, repasemos lo que necesitas para descargar Clash of Clans en Linux. Estos son los principales requisitos y herramientas que necesitará:</p>
9
- <ul>
10
- <li>Una computadora Linux con un procesador decente, RAM y tarjeta gráfica. </li>
11
- <li>Una conexión a Internet estable. </li>
12
- <li>Un administrador de paquetes instantáneos instalado en su distribución de Linux. </li>
13
- <li>Anbox, un Android en un software de caja que le permite ejecutar aplicaciones Android en Linux.</li>
14
- <li>Un archivo APK para Clash of Clans, que es el archivo de instalación para aplicaciones Android. </li>
15
- </ul>
16
- <p>No te preocupes si no tienes algunas de estas herramientas o no sabes cuáles son. Explicaremos todo en las siguientes secciones. </p>
17
- <h3>Paquetes rápidos</h3>
18
-
19
- <p>Para instalar paquetes snap en su distribución de Linux, necesita un administrador de paquetes snap. La mayoría de las distribuciones modernas de Linux vienen con snap preinstalado, pero si el tuyo no lo hace, puedes instalarlo fácilmente siguiendo las instrucciones <a href="( 5 )">here</a>. </p>
20
- <h3>Anbox</h3>
21
- <p>Anbox es un Android en un software de caja que le permite ejecutar aplicaciones Android en cualquier distribución de Linux. No es un emulador, sino un entorno en contenedores que utiliza el sistema nativo de Android del Android Open Source Project.</p>
22
- <p></p>
23
- <p>Anbox está disponible como un paquete de complemento , por lo que puede instalarlo fácilmente usando el comando snap. Estos son los pasos para instalar Anbox en su computadora Linux:</p>
24
- <ol>
25
- <li>Abra una ventana de terminal y escriba el siguiente comando: <code>sudo snap install ---devmode --beta anbox</code></li>
26
- <li>Espere a que la instalación se complete. Puede tardar unos minutos dependiendo de su velocidad de Internet. </li>
27
- <li>Una vez realizada la instalación, puede comprobar si Anbox está instalado correctamente escribiendo: <code>anbox system-info</code></li>
28
- <li>Debería ver alguna información sobre Anbox y su sistema. Si ve algún error o advertencia, es posible que necesite solucionarlos antes de continuar. </li>
29
- </ol>
30
- <h3>Archivos APK</h3>
31
- <p>Un archivo APK es el archivo de instalación para aplicaciones Android. Contiene todo el código, recursos y metadatos necesarios para que una aplicación se ejecute en un dispositivo Android. Puedes pensar en él como un archivo zip que contiene todo lo que una aplicación necesita. </p>
32
- <p>Para descargar Clash of Clans en Linux, necesitas encontrar un archivo APK para el juego. No puedes usar Google Play Store, porque requiere una cuenta de Google y un marco de servicios de Google Play, que no están disponibles en Anbox.</p>
33
- <p>En su lugar, puede utilizar un sitio web de terceros que aloja archivos APK para varias aplicaciones de Android. Hay muchos sitios web de este tipo, pero no todos son seguros y fiables. Algunos pueden contener malware o versiones obsoletas de las aplicaciones. </p>
34
-
35
- <ol>
36
- <li>Abra un navegador web en su computadora Linux y vaya a <a href="">https://apkpure.com/</a></li>
37
- <li>En el cuadro de búsqueda, escribe "Clash of Clans" y pulsa enter. </li>
38
- <li>Deberías ver una lista de resultados relacionados con Clash of Clans. Haz clic en el que dice "Clash of Clans (Supercell)". </li>
39
- <li>Deberías ver una página con información sobre el juego, como su descripción, capturas de pantalla y calificaciones. Haga clic en el botón verde que dice "Descargar APK". </li>
40
- <li>Aparecerá una ventana emergente pidiéndole que elija una ubicación de descarga. Elija una carpeta donde desea guardar el archivo APK y haga clic en "Guardar". </li>
41
- <li>La descarga debe iniciarse automáticamente. Espere a que termine y luego busque el archivo APK en la carpeta elegida. </li>
42
- </ol>
43
- <h2>Cómo instalar y ejecutar Clash of Clans en Linux</h2>
44
- <p>Ahora que tienes Anbox y el archivo APK para Clash of Clans, estás listo para instalar y ejecutar el juego en tu computadora Linux. Estos son los pasos para hacerlo:</p>
45
- <h3>Inicie Anbox desde su escritorio Linux</h3>
46
- <p>Anbox debería haber creado un icono de acceso directo en su escritorio Linux después de la instalación. Puede usar este icono para iniciar Anbox desde su entorno de escritorio. </p>
47
- <p>Si no ve el icono, también puede iniciar Anbox desde el terminal escribiendo: <code>anbox launch --package=org.anbox.appmgr --component=org.anbox.appmgr.AppViewActivity</code></p>
48
- <p>Este comando abrirá Anbox en una ventana que parece una pantalla de inicio de Android. Deberías ver algunas aplicaciones preinstaladas, como Calculadora, Contactos, Galería, etc.</p>
49
- <h3>Descargar archivo APK para Clash of Clans y ejecutarlo</h3>
50
- <p>Para instalar Clash of Clans en Anbox, necesita transferir el archivo APK de su carpeta Linux al almacenamiento interno de Anbox. Puedes hacer esto usando una herramienta llamada adb, que significa Android Debug Bridge.</p>
51
- <p>Adb es una herramienta de línea de comandos que te permite comunicarte con un dispositivo Android o emulador. Puede usar adb para instalar, desinstalar o ejecutar aplicaciones en Anbox.</p>
52
-
53
- <ol>
54
- <li>En la pantalla de inicio de Anbox, haga clic en la aplicación Configuración. </li>
55
- <li>Desplácese hacia abajo y toque en Acerca del dispositivo. </li>
56
- <li>Desplácese hacia abajo de nuevo y toque en el número de compilación siete veces. Debería ver un mensaje que diga "¡Ahora eres un desarrollador!" </li>
57
- <li>Volver a Configuración y toque en Opciones de desarrollador. </li>
58
- <li>Activar la depuración USB deslizando el interruptor al lado. </li>
59
- </ol> <p>Ahora que ha habilitado la depuración USB en Anbox, puede usar adb para instalar el archivo APK para Clash of Clans. Para hacer esto, siga estos pasos:</p>
60
- <ol>
61
- <li>Abra una ventana de terminal en su computadora Linux y vaya a la carpeta donde guardó el archivo APK. </li>
62
- <li>Escriba el siguiente comando: <code>adb install Clash-of-Clans.apk</code> (Reemplazar Clash-of-Clans.apk con el nombre de su archivo APK). </li>
63
- <li>Espere a que se complete la instalación. Debería ver un mensaje que diga "Éxito" en el terminal. </li>
64
- <li>Vuelve a la pantalla de inicio de Anbox y busca el icono Clash of Clans. Toca en él para ejecutar el juego. </li>
65
- </ol>
66
- <h3>Espere a medida que el archivo APK se instala y haga clic para ejecutar Clash of Clans</h3>
67
- <p>Después de tocar el icono Clash of Clans, es posible que tenga que esperar unos segundos a medida que el juego se carga y se conecta al servidor. También puede ver algunas ventanas emergentes pidiendo permisos o actualizaciones. Puede concederlas o denegarlas como desee. </p>
68
- <p>Una vez que el juego esté listo, deberías ver el familiar logo de Clash of Clans y la pantalla de introducción. A continuación, puede optar por iniciar sesión con su cuenta de Google o jugar como invitado. Si inicias sesión con tu cuenta de Google, puedes sincronizar tu progreso y acceder a tu aldea existente. Si juegas como invitado, comenzarás desde cero. </p>
69
- <p>Felicidades, has descargado exitosamente Clash of Clans en Linux y estás listo para jugar! </p>
70
- <h2>Consejos y trucos para jugar al choque de clanes en Linux</h2>
71
-
72
- <h3>Cómo ajustar el tamaño de la pantalla y la resolución de Anbox</h3>
73
- <p>Por defecto, es posible que Anbox no se ajuste muy bien a su tamaño de pantalla o resolución. Puede ver barras negras alrededor de los bordes o gráficos borrosos. Para solucionar esto, puede ajustar el tamaño de la pantalla y la resolución de Anbox utilizando una herramienta llamada xrandr. </p>
74
- <p>Xrandr es una herramienta de línea de comandos que le permite cambiar el tamaño, la orientación y la resolución de su pantalla. Puede usar xrandr para establecer una resolución personalizada para Anbox que coincida con el tamaño de la pantalla y la relación de aspecto. </p>
75
- <p>Para usar xrandr, necesita saber el nombre de su pantalla y la resolución que desea establecer. Puede encontrar esta información escribiendo: <code>xrandr --query</code> en una ventana de terminal. Debería ver algo como esto:</p>
76
- <pre><code>Pantalla 0: mínimo 320 x 200, corriente 1366 x 768, máximo 8192 x 8192 eDP-1 conectado primario 1366x768+0+0 (normal izquierda invertida derecha x eje y) 344mm x 194mm 1366x768 60.00*+ 1280x720 60.00 1024x768 60.00 1024x576 60.00 ... </code></pre>
77
- <p>En este ejemplo, el nombre de la pantalla es eDP-1 y la resolución actual es 1366x768. Puede elegir cualquier resolución compatible con su pantalla, pero le recomendamos que elija una que tenga la misma relación de aspecto que su pantalla. </p>
78
- <p>Una vez que haya decidido una resolución, puede configurarla para Anbox escribiendo: <code>xrandr --output eDP-1 -mode RESOLUTION -scale -scale -panning RESOLUTION</code> (Reemplace eDP-1 con su nombre para mostrar, RESOLUTION con la resolución elegida y SCALE con un factor de escala). </p>
79
- <p>El factor de escala es un número que determina la cantidad de Anbox se ampliará dentro o fuera. Por ejemplo, si desea establecer una resolución de 1280x720 para Anbox, pero su resolución de pantalla es 1366x768, puede usar un factor de escala de 1.0667 (1366/1280) para hacer que Anbox se ajuste a su pantalla sin barras negras. </p>
80
- <p>Aquí hay un comando de ejemplo: <code>xrandr --output eDP-1 --mode 1280x720 -scale 1.0667x1.0667 --panning 1280x720</code></p>
81
-
82
- <p>Puede experimentar con diferentes resoluciones y factores de escala hasta que encuentre la mejor configuración para su visualización y preferencia. </p>
83
- <h3>Cómo usar los controles de teclado y ratón para Clash of Clans</h3>
84
- <p>Uno de los inconvenientes de jugar a Clash of Clans en Linux es que no puedes usar los controles de pantalla táctil que están diseñados para el juego. En su lugar, tienes que usar el teclado y el ratón para interactuar con el juego. </p>
85
- <p>Esto puede ser un poco complicado al principio, especialmente si estás acostumbrado a jugar el juego en tu smartphone. Es posible que tenga que ajustar la sensibilidad del ratón y los atajos de teclado para que el juego sea más cómodo y sensible. </p>
86
- <p>Aquí hay algunos controles básicos de teclado y ratón para Clash of Clans en Linux:</p>
87
- <ul>
88
- <li> Para mover la cámara, utilice las teclas de flecha o arrastre el cursor del ratón en la pantalla. </li>
89
- <li>Para acercar o alejar, utilice la rueda del ratón o pulse las teclas + o - . </li>
90
- <li> Para seleccionar un edificio, tropa o hechizo, haga clic en él con el botón izquierdo del ratón. </li>
91
- <li> Para colocar un edificio, tropa o hechizo, haga clic en la ubicación deseada con el botón izquierdo del ratón. </li>
92
- <li> Para cancelar una colocación, pulse la tecla Esc o haga clic con el botón derecho en cualquier lugar de la pantalla. </li>
93
- <li>Para abrir la ventana de chat, pulse la tecla Enter. </li>
94
- <li> Para cambiar entre la aldea y la base del constructor, pulse la tecla Tab. </li>
95
- <li> Para acceder al menú, configuración o tienda, haga clic en los iconos correspondientes en la esquina inferior derecha de la pantalla. </li>
96
- </ul>
97
- <p>También puede personalizar la configuración del teclado y el ratón yendo a Configuración > Controles en Anbox. Puede cambiar la asignación de teclas, la sensibilidad del ratón y otras opciones para adaptarse a sus preferencias. </p>
98
- <h3>Cómo corregir errores comunes y bloqueos con Anbox y Clash of Clans</h3>
99
- <p>Jugar a Clash of Clans en Linux no está exento de desafíos. Usted puede encontrar algunos errores o accidentes que le impiden disfrutar del juego sin problemas. Aquí hay algunos problemas y soluciones comunes que pueden ayudarle a solucionarlos:</p>
100
- <tabla>
101
-
102
- <tr><td>Anbox falla al iniciar o muestra una pantalla negra</td><td>Asegúrese de que ha instalado Anbox correctamente y habilitado la depuración USB. Intente reiniciar Anbox o su computadora. Si eso no funciona, intente reinstalar Anbox usando snap. </td></tr>
103
- <tr><td>Clash of Clans falla al instalar o ejecutar</td><td>Asegúrate de haber descargado un archivo APK válido para Clash of Clans de una fuente confiable. Intenta borrar y reinstalar el archivo APK usando adb. Si eso no funciona, intenta descargar una versión diferente del archivo APK. </td></tr>
104
- <tr><td>Clash of Clans se bloquea o se congela durante el juego</td><td>Asegúrate de tener una conexión a Internet estable y suficientes recursos de RAM y CPU para Anbox y Clash of Clans. Intente bajar la resolución y el factor de escala de Anbox usando xrandr. Si eso no funciona, intenta borrar la caché y los datos de Clash of Clans en Configuración de Anbox > Aplicaciones.</td></tr>
105
- <tr><td>Clash of Clans muestra un mensaje de error sobre Google Play Services</td><td>Este es un error común que se produce porque Anbox no es compatible con Google Play Services, que son necesarios para algunas características de Clash of Clans, como iniciar sesión con la cuenta de Google, acceder a Google Play Games, o hacer compras en la aplicación. Desafortunadamente, no hay una manera fácil de solucionar este error. Puede intentar instalar Google Play Services en Anbox utilizando algunos métodos no oficiales, pero no se garantiza que funcionen y pueden causar más problemas. Alternativamente, puedes jugar Clash of Clans sin Google Play Services jugando como invitado o usando un ID de Supercell.</td></tr>
106
- </tabla>
107
- <h2>Conclusión</h2>
108
- <p>Clash of Clans es un juego divertido y adictivo que puedes jugar en tu ordenador Linux usando Anbox, un software Android en una caja que te permite ejecutar aplicaciones Android en cualquier distribución de Linux. Solo necesitas instalar Anbox usando snap, descargar un archivo APK para Clash of Clans desde APKPure, y usar adb para instalar y ejecutar el juego en Anbox.</p>
109
-
110
- <p>Esperamos que este artículo te haya ayudado a aprender cómo descargar Clash of Clans en Linux y divertirte jugando. Si tiene alguna pregunta o comentario, por favor háganoslo saber en los comentarios a continuación. Happy clashing! </p>
111
- <h2>Preguntas frecuentes</h2>
112
- <h4 <h4>¿Es Anbox la única forma de jugar a Clash of Clans en Linux? </h4>
113
- <p>No, Anbox no es la única forma de jugar a Clash of Clans en Linux. Hay otros métodos, como usar un emulador de Android, una máquina virtual o un sistema de arranque dual. Sin embargo, Anbox es una de las formas más fáciles y rápidas de jugar a Clash of Clans en Linux, ya que no requiere instalar un sistema operativo separado o crear un dispositivo virtual. </p>
114
- <h4>¿Puedo jugar Clash of Clans en Linux con otros jugadores en línea? </h4>
115
- <p>Sí, puedes jugar a Clash of Clans en Linux con otros jugadores en línea, siempre y cuando tengas una conexión a Internet estable y una cuenta válida. Puedes unirte o crear clanes, chatear con otros jugadores y participar en guerras de clanes y eventos. Sin embargo, es posible que no pueda acceder a algunas funciones que requieren Servicios de Google Play, como Google Play Games o compras en la aplicación. </p>
116
- <h4>¿Puedo jugar Clash of Clans en Linux sin conexión? </h4>
117
- <p>No, no puedes jugar Clash of Clans sin conexión a Linux. Clash of Clans es un juego en línea que requiere una conexión a Internet constante para funcionar. Si pierde su conexión a Internet o intenta jugar sin conexión, verá un mensaje de error que dice "No se puede conectar al servidor" y el juego se cerrará. </p>
118
- <h4>¿Puedo transferir mi progreso desde mi smartphone a mi ordenador Linux? </h4>
119
- <p>Sí, puede transferir su progreso desde su teléfono inteligente a su computadora Linux, siempre y cuando haya vinculado su cuenta de juego a una cuenta de Google o un ID de Supercell. Para hacer esto, siga estos pasos:</p>
120
- <ol>
121
- <li>En tu smartphone, abre Clash of Clans y ve a Configuración > Cuenta > Dispositivo de enlace.</li>
122
- <li>Seleccione "Este es el dispositivo antiguo" y elija si desea vincular su cuenta a una cuenta de Google o a un ID de Supercell.</li>
123
-
124
- <li>En su computadora Linux, abra Clash of Clans y vaya a Configuración > Cuenta > Dispositivo de enlace.</li>
125
- <li>Seleccione "Este es el nuevo dispositivo" y elija si desea vincular su cuenta a una cuenta de Google o a un ID de Supercell.</li>
126
- <li>Siga las instrucciones en la pantalla para vincular su cuenta y cargar su aldea existente. </li>
127
- </ol>
128
- <h4>¿Puedo actualizar Clash of Clans en Linux? </h4>
129
- <p>Sí, puedes actualizar Clash of Clans en Linux, pero no automáticamente. Tendrás que descargar e instalar manualmente la última versión del archivo APK para Clash of Clans desde APKPure u otra fuente de confianza. Para hacer esto, siga estos pasos:</p>
130
- <ol>
131
- <li>Elimina la versión antigua del archivo APK para Clash of Clans de tu carpeta de Linux. </li>
132
- <li>Descargar la última versión del archivo APK para Clash of Clans de APKPure u otra fuente de confianza. </li>
133
- <li>Instalar la nueva versión del archivo APK usando adb escribiendo: <code>adb install -r Clash-of-Clans.apk</code> (Reemplazar Clash-of-Clans.apk con el nombre de su archivo APK). </li>
134
- <li>Espere a que la instalación se complete y ejecute Clash of Clans en Anbox.</li>
135
- </ol></p> 64aa2da5cf<br />
136
- <br />
137
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/endpoint.py DELETED
@@ -1,443 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
-
15
- import datetime
16
- import logging
17
- import os
18
- import threading
19
- import time
20
- import uuid
21
-
22
- from botocore import parsers
23
- from botocore.awsrequest import create_request_object
24
- from botocore.exceptions import HTTPClientError
25
- from botocore.history import get_global_history_recorder
26
- from botocore.hooks import first_non_none_response
27
- from botocore.httpchecksum import handle_checksum_body
28
- from botocore.httpsession import URLLib3Session
29
- from botocore.response import StreamingBody
30
- from botocore.utils import (
31
- get_environ_proxies,
32
- is_valid_endpoint_url,
33
- is_valid_ipv6_endpoint_url,
34
- )
35
-
36
- logger = logging.getLogger(__name__)
37
- history_recorder = get_global_history_recorder()
38
- DEFAULT_TIMEOUT = 60
39
- MAX_POOL_CONNECTIONS = 10
40
-
41
-
42
- def convert_to_response_dict(http_response, operation_model):
43
- """Convert an HTTP response object to a request dict.
44
-
45
- This converts the requests library's HTTP response object to
46
- a dictionary.
47
-
48
- :type http_response: botocore.vendored.requests.model.Response
49
- :param http_response: The HTTP response from an AWS service request.
50
-
51
- :rtype: dict
52
- :return: A response dictionary which will contain the following keys:
53
- * headers (dict)
54
- * status_code (int)
55
- * body (string or file-like object)
56
-
57
- """
58
- response_dict = {
59
- 'headers': http_response.headers,
60
- 'status_code': http_response.status_code,
61
- 'context': {
62
- 'operation_name': operation_model.name,
63
- },
64
- }
65
- if response_dict['status_code'] >= 300:
66
- response_dict['body'] = http_response.content
67
- elif operation_model.has_event_stream_output:
68
- response_dict['body'] = http_response.raw
69
- elif operation_model.has_streaming_output:
70
- length = response_dict['headers'].get('content-length')
71
- response_dict['body'] = StreamingBody(http_response.raw, length)
72
- else:
73
- response_dict['body'] = http_response.content
74
- return response_dict
75
-
76
-
77
- class Endpoint:
78
- """
79
- Represents an endpoint for a particular service in a specific
80
- region. Only an endpoint can make requests.
81
-
82
- :ivar service: The Service object that describes this endpoints
83
- service.
84
- :ivar host: The fully qualified endpoint hostname.
85
- :ivar session: The session object.
86
- """
87
-
88
- def __init__(
89
- self,
90
- host,
91
- endpoint_prefix,
92
- event_emitter,
93
- response_parser_factory=None,
94
- http_session=None,
95
- ):
96
- self._endpoint_prefix = endpoint_prefix
97
- self._event_emitter = event_emitter
98
- self.host = host
99
- self._lock = threading.Lock()
100
- if response_parser_factory is None:
101
- response_parser_factory = parsers.ResponseParserFactory()
102
- self._response_parser_factory = response_parser_factory
103
- self.http_session = http_session
104
- if self.http_session is None:
105
- self.http_session = URLLib3Session()
106
-
107
- def __repr__(self):
108
- return f'{self._endpoint_prefix}({self.host})'
109
-
110
- def close(self):
111
- self.http_session.close()
112
-
113
- def make_request(self, operation_model, request_dict):
114
- logger.debug(
115
- "Making request for %s with params: %s",
116
- operation_model,
117
- request_dict,
118
- )
119
- return self._send_request(request_dict, operation_model)
120
-
121
- def create_request(self, params, operation_model=None):
122
- request = create_request_object(params)
123
- if operation_model:
124
- request.stream_output = any(
125
- [
126
- operation_model.has_streaming_output,
127
- operation_model.has_event_stream_output,
128
- ]
129
- )
130
- service_id = operation_model.service_model.service_id.hyphenize()
131
- event_name = 'request-created.{service_id}.{op_name}'.format(
132
- service_id=service_id, op_name=operation_model.name
133
- )
134
- self._event_emitter.emit(
135
- event_name,
136
- request=request,
137
- operation_name=operation_model.name,
138
- )
139
- prepared_request = self.prepare_request(request)
140
- return prepared_request
141
-
142
- def _encode_headers(self, headers):
143
- # In place encoding of headers to utf-8 if they are unicode.
144
- for key, value in headers.items():
145
- if isinstance(value, str):
146
- headers[key] = value.encode('utf-8')
147
-
148
- def prepare_request(self, request):
149
- self._encode_headers(request.headers)
150
- return request.prepare()
151
-
152
- def _calculate_ttl(
153
- self, response_received_timestamp, date_header, read_timeout
154
- ):
155
- local_timestamp = datetime.datetime.utcnow()
156
- date_conversion = datetime.datetime.strptime(
157
- date_header, "%a, %d %b %Y %H:%M:%S %Z"
158
- )
159
- estimated_skew = date_conversion - response_received_timestamp
160
- ttl = (
161
- local_timestamp
162
- + datetime.timedelta(seconds=read_timeout)
163
- + estimated_skew
164
- )
165
- return ttl.strftime('%Y%m%dT%H%M%SZ')
166
-
167
- def _set_ttl(self, retries_context, read_timeout, success_response):
168
- response_date_header = success_response[0].headers.get('Date')
169
- has_streaming_input = retries_context.get('has_streaming_input')
170
- if response_date_header and not has_streaming_input:
171
- try:
172
- response_received_timestamp = datetime.datetime.utcnow()
173
- retries_context['ttl'] = self._calculate_ttl(
174
- response_received_timestamp,
175
- response_date_header,
176
- read_timeout,
177
- )
178
- except Exception:
179
- logger.debug(
180
- "Exception received when updating retries context with TTL",
181
- exc_info=True,
182
- )
183
-
184
- def _update_retries_context(self, context, attempt, success_response=None):
185
- retries_context = context.setdefault('retries', {})
186
- retries_context['attempt'] = attempt
187
- if 'invocation-id' not in retries_context:
188
- retries_context['invocation-id'] = str(uuid.uuid4())
189
-
190
- if success_response:
191
- read_timeout = context['client_config'].read_timeout
192
- self._set_ttl(retries_context, read_timeout, success_response)
193
-
194
- def _send_request(self, request_dict, operation_model):
195
- attempts = 1
196
- context = request_dict['context']
197
- self._update_retries_context(context, attempts)
198
- request = self.create_request(request_dict, operation_model)
199
- success_response, exception = self._get_response(
200
- request, operation_model, context
201
- )
202
- while self._needs_retry(
203
- attempts,
204
- operation_model,
205
- request_dict,
206
- success_response,
207
- exception,
208
- ):
209
- attempts += 1
210
- self._update_retries_context(context, attempts, success_response)
211
- # If there is a stream associated with the request, we need
212
- # to reset it before attempting to send the request again.
213
- # This will ensure that we resend the entire contents of the
214
- # body.
215
- request.reset_stream()
216
- # Create a new request when retried (including a new signature).
217
- request = self.create_request(request_dict, operation_model)
218
- success_response, exception = self._get_response(
219
- request, operation_model, context
220
- )
221
- if (
222
- success_response is not None
223
- and 'ResponseMetadata' in success_response[1]
224
- ):
225
- # We want to share num retries, not num attempts.
226
- total_retries = attempts - 1
227
- success_response[1]['ResponseMetadata'][
228
- 'RetryAttempts'
229
- ] = total_retries
230
- if exception is not None:
231
- raise exception
232
- else:
233
- return success_response
234
-
235
- def _get_response(self, request, operation_model, context):
236
- # This will return a tuple of (success_response, exception)
237
- # and success_response is itself a tuple of
238
- # (http_response, parsed_dict).
239
- # If an exception occurs then the success_response is None.
240
- # If no exception occurs then exception is None.
241
- success_response, exception = self._do_get_response(
242
- request, operation_model, context
243
- )
244
- kwargs_to_emit = {
245
- 'response_dict': None,
246
- 'parsed_response': None,
247
- 'context': context,
248
- 'exception': exception,
249
- }
250
- if success_response is not None:
251
- http_response, parsed_response = success_response
252
- kwargs_to_emit['parsed_response'] = parsed_response
253
- kwargs_to_emit['response_dict'] = convert_to_response_dict(
254
- http_response, operation_model
255
- )
256
- service_id = operation_model.service_model.service_id.hyphenize()
257
- self._event_emitter.emit(
258
- f"response-received.{service_id}.{operation_model.name}",
259
- **kwargs_to_emit,
260
- )
261
- return success_response, exception
262
-
263
- def _do_get_response(self, request, operation_model, context):
264
- try:
265
- logger.debug("Sending http request: %s", request)
266
- history_recorder.record(
267
- 'HTTP_REQUEST',
268
- {
269
- 'method': request.method,
270
- 'headers': request.headers,
271
- 'streaming': operation_model.has_streaming_input,
272
- 'url': request.url,
273
- 'body': request.body,
274
- },
275
- )
276
- service_id = operation_model.service_model.service_id.hyphenize()
277
- event_name = f"before-send.{service_id}.{operation_model.name}"
278
- responses = self._event_emitter.emit(event_name, request=request)
279
- http_response = first_non_none_response(responses)
280
- if http_response is None:
281
- http_response = self._send(request)
282
- except HTTPClientError as e:
283
- return (None, e)
284
- except Exception as e:
285
- logger.debug(
286
- "Exception received when sending HTTP request.", exc_info=True
287
- )
288
- return (None, e)
289
- # This returns the http_response and the parsed_data.
290
- response_dict = convert_to_response_dict(
291
- http_response, operation_model
292
- )
293
- handle_checksum_body(
294
- http_response,
295
- response_dict,
296
- context,
297
- operation_model,
298
- )
299
-
300
- http_response_record_dict = response_dict.copy()
301
- http_response_record_dict[
302
- 'streaming'
303
- ] = operation_model.has_streaming_output
304
- history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
305
-
306
- protocol = operation_model.metadata['protocol']
307
- parser = self._response_parser_factory.create_parser(protocol)
308
- parsed_response = parser.parse(
309
- response_dict, operation_model.output_shape
310
- )
311
- # Do a second parsing pass to pick up on any modeled error fields
312
- # NOTE: Ideally, we would push this down into the parser classes but
313
- # they currently have no reference to the operation or service model
314
- # The parsers should probably take the operation model instead of
315
- # output shape but we can't change that now
316
- if http_response.status_code >= 300:
317
- self._add_modeled_error_fields(
318
- response_dict,
319
- parsed_response,
320
- operation_model,
321
- parser,
322
- )
323
- history_recorder.record('PARSED_RESPONSE', parsed_response)
324
- return (http_response, parsed_response), None
325
-
326
- def _add_modeled_error_fields(
327
- self,
328
- response_dict,
329
- parsed_response,
330
- operation_model,
331
- parser,
332
- ):
333
- error_code = parsed_response.get("Error", {}).get("Code")
334
- if error_code is None:
335
- return
336
- service_model = operation_model.service_model
337
- error_shape = service_model.shape_for_error_code(error_code)
338
- if error_shape is None:
339
- return
340
- modeled_parse = parser.parse(response_dict, error_shape)
341
- # TODO: avoid naming conflicts with ResponseMetadata and Error
342
- parsed_response.update(modeled_parse)
343
-
344
- def _needs_retry(
345
- self,
346
- attempts,
347
- operation_model,
348
- request_dict,
349
- response=None,
350
- caught_exception=None,
351
- ):
352
- service_id = operation_model.service_model.service_id.hyphenize()
353
- event_name = f"needs-retry.{service_id}.{operation_model.name}"
354
- responses = self._event_emitter.emit(
355
- event_name,
356
- response=response,
357
- endpoint=self,
358
- operation=operation_model,
359
- attempts=attempts,
360
- caught_exception=caught_exception,
361
- request_dict=request_dict,
362
- )
363
- handler_response = first_non_none_response(responses)
364
- if handler_response is None:
365
- return False
366
- else:
367
- # Request needs to be retried, and we need to sleep
368
- # for the specified number of times.
369
- logger.debug(
370
- "Response received to retry, sleeping for %s seconds",
371
- handler_response,
372
- )
373
- time.sleep(handler_response)
374
- return True
375
-
376
- def _send(self, request):
377
- return self.http_session.send(request)
378
-
379
-
380
- class EndpointCreator:
381
- def __init__(self, event_emitter):
382
- self._event_emitter = event_emitter
383
-
384
- def create_endpoint(
385
- self,
386
- service_model,
387
- region_name,
388
- endpoint_url,
389
- verify=None,
390
- response_parser_factory=None,
391
- timeout=DEFAULT_TIMEOUT,
392
- max_pool_connections=MAX_POOL_CONNECTIONS,
393
- http_session_cls=URLLib3Session,
394
- proxies=None,
395
- socket_options=None,
396
- client_cert=None,
397
- proxies_config=None,
398
- ):
399
- if not is_valid_endpoint_url(
400
- endpoint_url
401
- ) and not is_valid_ipv6_endpoint_url(endpoint_url):
402
- raise ValueError("Invalid endpoint: %s" % endpoint_url)
403
-
404
- if proxies is None:
405
- proxies = self._get_proxies(endpoint_url)
406
- endpoint_prefix = service_model.endpoint_prefix
407
-
408
- logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
409
- http_session = http_session_cls(
410
- timeout=timeout,
411
- proxies=proxies,
412
- verify=self._get_verify_value(verify),
413
- max_pool_connections=max_pool_connections,
414
- socket_options=socket_options,
415
- client_cert=client_cert,
416
- proxies_config=proxies_config,
417
- )
418
-
419
- return Endpoint(
420
- endpoint_url,
421
- endpoint_prefix=endpoint_prefix,
422
- event_emitter=self._event_emitter,
423
- response_parser_factory=response_parser_factory,
424
- http_session=http_session,
425
- )
426
-
427
- def _get_proxies(self, url):
428
- # We could also support getting proxies from a config file,
429
- # but for now proxy support is taken from the environment.
430
- return get_environ_proxies(url)
431
-
432
- def _get_verify_value(self, verify):
433
- # This is to account for:
434
- # https://github.com/kennethreitz/requests/issues/1436
435
- # where we need to honor REQUESTS_CA_BUNDLE because we're creating our
436
- # own request objects.
437
- # First, if verify is not None, then the user explicitly specified
438
- # a value so this automatically wins.
439
- if verify is not None:
440
- return verify
441
- # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
442
- # True if the env var does not exist.
443
- return os.environ.get('REQUESTS_CA_BUNDLE', True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/__init__.py DELETED
@@ -1,12 +0,0 @@
1
- from jmespath import parser
2
- from jmespath.visitor import Options
3
-
4
- __version__ = '1.0.1'
5
-
6
-
7
- def compile(expression):
8
- return parser.Parser().parse(expression)
9
-
10
-
11
- def search(expression, data, options=None):
12
- return parser.Parser().parse(expression).search(data, options=options)
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CNXT/CHaTx/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: CHaTx
3
- emoji: 🦀
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: docker
7
- pinned: false
8
- license: creativeml-openrail-m
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/getting_started.md DELETED
@@ -1 +0,0 @@
1
- ../../GETTING_STARTED.md
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/visual_genome.py DELETED
@@ -1,149 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
- import contextlib
4
- import io
5
- import logging
6
- import os
7
- from fvcore.common.file_io import PathManager
8
- from fvcore.common.timer import Timer
9
-
10
- from detectron2.data import DatasetCatalog, MetadataCatalog
11
- from detectron2.structures import BoxMode
12
-
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
- def load_coco_with_attributes_json(json_file,
17
- image_root,
18
- dataset_name=None,
19
- extra_annotation_keys=None):
20
- """
21
- Extend load_coco_json() with additional support for attributes
22
- """
23
- from pycocotools.coco import COCO
24
-
25
- timer = Timer()
26
- json_file = PathManager.get_local_path(json_file)
27
- with contextlib.redirect_stdout(io.StringIO()):
28
- coco_api = COCO(json_file)
29
- if timer.seconds() > 1:
30
- logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
31
-
32
- id_map = None
33
- if dataset_name is not None:
34
- meta = MetadataCatalog.get(dataset_name)
35
- cat_ids = sorted(coco_api.getCatIds())
36
- cats = coco_api.loadCats(cat_ids)
37
- thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
38
- meta.thing_classes = thing_classes
39
- if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
40
- if "coco" not in dataset_name:
41
- logger.warning(
42
- """
43
- Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
44
- """
45
- )
46
- id_map = {v: i for i, v in enumerate(cat_ids)}
47
- meta.thing_dataset_id_to_contiguous_id = id_map
48
-
49
- img_ids = sorted(coco_api.imgs.keys())
50
- imgs = coco_api.loadImgs(img_ids)
51
- anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
52
-
53
- if "minival" not in json_file:
54
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
55
- assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
56
- json_file
57
- )
58
-
59
- imgs_anns = list(zip(imgs, anns))
60
-
61
- logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
62
-
63
- dataset_dicts = []
64
-
65
- ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
66
-
67
- num_instances_without_valid_segmentation = 0
68
-
69
- for (img_dict, anno_dict_list) in imgs_anns:
70
- record = {}
71
- record["file_name"] = os.path.join(image_root, img_dict["file_name"])
72
- record["height"] = img_dict["height"]
73
- record["width"] = img_dict["width"]
74
- image_id = record["image_id"] = img_dict["id"]
75
-
76
- objs = []
77
- for anno in anno_dict_list:
78
- assert anno["image_id"] == image_id
79
-
80
- assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
81
-
82
- obj = {key: anno[key] for key in ann_keys if key in anno}
83
-
84
- segm = anno.get("segmentation", None)
85
- if segm:
86
- if not isinstance(segm, dict):
87
- segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
88
- if len(segm) == 0:
89
- num_instances_without_valid_segmentation += 1
90
- continue
91
- obj["segmentation"] = segm
92
-
93
- keypts = anno.get("keypoints", None)
94
- if keypts:
95
- for idx, v in enumerate(keypts):
96
- if idx % 3 != 2:
97
- keypts[idx] = v + 0.5
98
- obj["keypoints"] = keypts
99
-
100
- attrs = anno.get("attribute_ids", None)
101
- if attrs: # list[int]
102
- obj["attribute_ids"] = attrs
103
-
104
- obj["bbox_mode"] = BoxMode.XYWH_ABS
105
- if id_map:
106
- obj["category_id"] = id_map[obj["category_id"]]
107
- objs.append(obj)
108
- record["annotations"] = objs
109
- dataset_dicts.append(record)
110
-
111
- if num_instances_without_valid_segmentation > 0:
112
- logger.warning(
113
- "Filtered out {} instances without valid segmentation. "
114
- "There might be issues in your dataset generation process.".format(
115
- num_instances_without_valid_segmentation
116
- )
117
- )
118
- return dataset_dicts
119
-
120
- def register_coco_instances_with_attributes(name, metadata, json_file, image_root):
121
- DatasetCatalog.register(name, lambda: load_coco_with_attributes_json(json_file,
122
- image_root,
123
- name))
124
- MetadataCatalog.get(name).set(
125
- json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
126
- )
127
-
128
- # ==== Predefined splits for visual genome images ===========
129
- _PREDEFINED_SPLITS_VG = {
130
- "visual_genome_train": ("visual_genome/images",
131
- "visual_genome/annotations/visual_genome_train.json"),
132
- "visual_genome_val": ("visual_genome/images",
133
- "visual_genome/annotations/visual_genome_val.json"),
134
- "visual_genome_test": ("visual_genome/images",
135
- "visual_genome/annotations/visual_genome_test.json"),
136
- }
137
-
138
- def register_all_vg(root):
139
- for key, (image_root, json_file) in _PREDEFINED_SPLITS_VG.items():
140
- register_coco_instances_with_attributes(
141
- key,
142
- {}, # no meta data
143
- os.path.join(root, json_file),
144
- os.path.join(root, image_root),
145
- )
146
-
147
- # Register them all under "./datasets"
148
- _root = os.getenv("DETECTRON2_DATASETS", "datasets")
149
- register_all_vg(_root)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tests/test_numpy_dtypes.cpp DELETED
@@ -1,474 +0,0 @@
1
- /*
2
- tests/test_numpy_dtypes.cpp -- Structured and compound NumPy dtypes
3
-
4
- Copyright (c) 2016 Ivan Smirnov
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include <pybind11/numpy.h>
12
-
13
- #ifdef __GNUC__
14
- #define PYBIND11_PACKED(cls) cls __attribute__((__packed__))
15
- #else
16
- #define PYBIND11_PACKED(cls) __pragma(pack(push, 1)) cls __pragma(pack(pop))
17
- #endif
18
-
19
- namespace py = pybind11;
20
-
21
- struct SimpleStruct {
22
- bool bool_;
23
- uint32_t uint_;
24
- float float_;
25
- long double ldbl_;
26
- };
27
-
28
- std::ostream& operator<<(std::ostream& os, const SimpleStruct& v) {
29
- return os << "s:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_;
30
- }
31
-
32
- struct SimpleStructReordered {
33
- bool bool_;
34
- float float_;
35
- uint32_t uint_;
36
- long double ldbl_;
37
- };
38
-
39
- PYBIND11_PACKED(struct PackedStruct {
40
- bool bool_;
41
- uint32_t uint_;
42
- float float_;
43
- long double ldbl_;
44
- });
45
-
46
- std::ostream& operator<<(std::ostream& os, const PackedStruct& v) {
47
- return os << "p:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_;
48
- }
49
-
50
- PYBIND11_PACKED(struct NestedStruct {
51
- SimpleStruct a;
52
- PackedStruct b;
53
- });
54
-
55
- std::ostream& operator<<(std::ostream& os, const NestedStruct& v) {
56
- return os << "n:a=" << v.a << ";b=" << v.b;
57
- }
58
-
59
- struct PartialStruct {
60
- bool bool_;
61
- uint32_t uint_;
62
- float float_;
63
- uint64_t dummy2;
64
- long double ldbl_;
65
- };
66
-
67
- struct PartialNestedStruct {
68
- uint64_t dummy1;
69
- PartialStruct a;
70
- uint64_t dummy2;
71
- };
72
-
73
- struct UnboundStruct { };
74
-
75
- struct StringStruct {
76
- char a[3];
77
- std::array<char, 3> b;
78
- };
79
-
80
- struct ComplexStruct {
81
- std::complex<float> cflt;
82
- std::complex<double> cdbl;
83
- };
84
-
85
- std::ostream& operator<<(std::ostream& os, const ComplexStruct& v) {
86
- return os << "c:" << v.cflt << "," << v.cdbl;
87
- }
88
-
89
- struct ArrayStruct {
90
- char a[3][4];
91
- int32_t b[2];
92
- std::array<uint8_t, 3> c;
93
- std::array<float, 2> d[4];
94
- };
95
-
96
- PYBIND11_PACKED(struct StructWithUglyNames {
97
- int8_t __x__;
98
- uint64_t __y__;
99
- });
100
-
101
- enum class E1 : int64_t { A = -1, B = 1 };
102
- enum E2 : uint8_t { X = 1, Y = 2 };
103
-
104
- PYBIND11_PACKED(struct EnumStruct {
105
- E1 e1;
106
- E2 e2;
107
- });
108
-
109
- std::ostream& operator<<(std::ostream& os, const StringStruct& v) {
110
- os << "a='";
111
- for (size_t i = 0; i < 3 && v.a[i]; i++) os << v.a[i];
112
- os << "',b='";
113
- for (size_t i = 0; i < 3 && v.b[i]; i++) os << v.b[i];
114
- return os << "'";
115
- }
116
-
117
- std::ostream& operator<<(std::ostream& os, const ArrayStruct& v) {
118
- os << "a={";
119
- for (int i = 0; i < 3; i++) {
120
- if (i > 0)
121
- os << ',';
122
- os << '{';
123
- for (int j = 0; j < 3; j++)
124
- os << v.a[i][j] << ',';
125
- os << v.a[i][3] << '}';
126
- }
127
- os << "},b={" << v.b[0] << ',' << v.b[1];
128
- os << "},c={" << int(v.c[0]) << ',' << int(v.c[1]) << ',' << int(v.c[2]);
129
- os << "},d={";
130
- for (int i = 0; i < 4; i++) {
131
- if (i > 0)
132
- os << ',';
133
- os << '{' << v.d[i][0] << ',' << v.d[i][1] << '}';
134
- }
135
- return os << '}';
136
- }
137
-
138
- std::ostream& operator<<(std::ostream& os, const EnumStruct& v) {
139
- return os << "e1=" << (v.e1 == E1::A ? "A" : "B") << ",e2=" << (v.e2 == E2::X ? "X" : "Y");
140
- }
141
-
142
- template <typename T>
143
- py::array mkarray_via_buffer(size_t n) {
144
- return py::array(py::buffer_info(nullptr, sizeof(T),
145
- py::format_descriptor<T>::format(),
146
- 1, { n }, { sizeof(T) }));
147
- }
148
-
149
- #define SET_TEST_VALS(s, i) do { \
150
- s.bool_ = (i) % 2 != 0; \
151
- s.uint_ = (uint32_t) (i); \
152
- s.float_ = (float) (i) * 1.5f; \
153
- s.ldbl_ = (long double) (i) * -2.5L; } while (0)
154
-
155
- template <typename S>
156
- py::array_t<S, 0> create_recarray(size_t n) {
157
- auto arr = mkarray_via_buffer<S>(n);
158
- auto req = arr.request();
159
- auto ptr = static_cast<S*>(req.ptr);
160
- for (size_t i = 0; i < n; i++) {
161
- SET_TEST_VALS(ptr[i], i);
162
- }
163
- return arr;
164
- }
165
-
166
- template <typename S>
167
- py::list print_recarray(py::array_t<S, 0> arr) {
168
- const auto req = arr.request();
169
- const auto ptr = static_cast<S*>(req.ptr);
170
- auto l = py::list();
171
- for (ssize_t i = 0; i < req.size; i++) {
172
- std::stringstream ss;
173
- ss << ptr[i];
174
- l.append(py::str(ss.str()));
175
- }
176
- return l;
177
- }
178
-
179
- py::array_t<int32_t, 0> test_array_ctors(int i) {
180
- using arr_t = py::array_t<int32_t, 0>;
181
-
182
- std::vector<int32_t> data { 1, 2, 3, 4, 5, 6 };
183
- std::vector<ssize_t> shape { 3, 2 };
184
- std::vector<ssize_t> strides { 8, 4 };
185
-
186
- auto ptr = data.data();
187
- auto vptr = (void *) ptr;
188
- auto dtype = py::dtype("int32");
189
-
190
- py::buffer_info buf_ndim1(vptr, 4, "i", 6);
191
- py::buffer_info buf_ndim1_null(nullptr, 4, "i", 6);
192
- py::buffer_info buf_ndim2(vptr, 4, "i", 2, shape, strides);
193
- py::buffer_info buf_ndim2_null(nullptr, 4, "i", 2, shape, strides);
194
-
195
- auto fill = [](py::array arr) {
196
- auto req = arr.request();
197
- for (int i = 0; i < 6; i++) ((int32_t *) req.ptr)[i] = i + 1;
198
- return arr;
199
- };
200
-
201
- switch (i) {
202
- // shape: (3, 2)
203
- case 10: return arr_t(shape, strides, ptr);
204
- case 11: return py::array(shape, strides, ptr);
205
- case 12: return py::array(dtype, shape, strides, vptr);
206
- case 13: return arr_t(shape, ptr);
207
- case 14: return py::array(shape, ptr);
208
- case 15: return py::array(dtype, shape, vptr);
209
- case 16: return arr_t(buf_ndim2);
210
- case 17: return py::array(buf_ndim2);
211
- // shape: (3, 2) - post-fill
212
- case 20: return fill(arr_t(shape, strides));
213
- case 21: return py::array(shape, strides, ptr); // can't have nullptr due to templated ctor
214
- case 22: return fill(py::array(dtype, shape, strides));
215
- case 23: return fill(arr_t(shape));
216
- case 24: return py::array(shape, ptr); // can't have nullptr due to templated ctor
217
- case 25: return fill(py::array(dtype, shape));
218
- case 26: return fill(arr_t(buf_ndim2_null));
219
- case 27: return fill(py::array(buf_ndim2_null));
220
- // shape: (6, )
221
- case 30: return arr_t(6, ptr);
222
- case 31: return py::array(6, ptr);
223
- case 32: return py::array(dtype, 6, vptr);
224
- case 33: return arr_t(buf_ndim1);
225
- case 34: return py::array(buf_ndim1);
226
- // shape: (6, )
227
- case 40: return fill(arr_t(6));
228
- case 41: return py::array(6, ptr); // can't have nullptr due to templated ctor
229
- case 42: return fill(py::array(dtype, 6));
230
- case 43: return fill(arr_t(buf_ndim1_null));
231
- case 44: return fill(py::array(buf_ndim1_null));
232
- }
233
- return arr_t();
234
- }
235
-
236
- py::list test_dtype_ctors() {
237
- py::list list;
238
- list.append(py::dtype("int32"));
239
- list.append(py::dtype(std::string("float64")));
240
- list.append(py::dtype::from_args(py::str("bool")));
241
- py::list names, offsets, formats;
242
- py::dict dict;
243
- names.append(py::str("a")); names.append(py::str("b")); dict["names"] = names;
244
- offsets.append(py::int_(1)); offsets.append(py::int_(10)); dict["offsets"] = offsets;
245
- formats.append(py::dtype("int32")); formats.append(py::dtype("float64")); dict["formats"] = formats;
246
- dict["itemsize"] = py::int_(20);
247
- list.append(py::dtype::from_args(dict));
248
- list.append(py::dtype(names, formats, offsets, 20));
249
- list.append(py::dtype(py::buffer_info((void *) 0, sizeof(unsigned int), "I", 1)));
250
- list.append(py::dtype(py::buffer_info((void *) 0, 0, "T{i:a:f:b:}", 1)));
251
- return list;
252
- }
253
-
254
- struct A {};
255
- struct B {};
256
-
257
- TEST_SUBMODULE(numpy_dtypes, m) {
258
- try { py::module::import("numpy"); }
259
- catch (...) { return; }
260
-
261
- // typeinfo may be registered before the dtype descriptor for scalar casts to work...
262
- py::class_<SimpleStruct>(m, "SimpleStruct");
263
-
264
- PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_);
265
- PYBIND11_NUMPY_DTYPE(SimpleStructReordered, bool_, uint_, float_, ldbl_);
266
- PYBIND11_NUMPY_DTYPE(PackedStruct, bool_, uint_, float_, ldbl_);
267
- PYBIND11_NUMPY_DTYPE(NestedStruct, a, b);
268
- PYBIND11_NUMPY_DTYPE(PartialStruct, bool_, uint_, float_, ldbl_);
269
- PYBIND11_NUMPY_DTYPE(PartialNestedStruct, a);
270
- PYBIND11_NUMPY_DTYPE(StringStruct, a, b);
271
- PYBIND11_NUMPY_DTYPE(ArrayStruct, a, b, c, d);
272
- PYBIND11_NUMPY_DTYPE(EnumStruct, e1, e2);
273
- PYBIND11_NUMPY_DTYPE(ComplexStruct, cflt, cdbl);
274
-
275
- // ... or after
276
- py::class_<PackedStruct>(m, "PackedStruct");
277
-
278
- PYBIND11_NUMPY_DTYPE_EX(StructWithUglyNames, __x__, "x", __y__, "y");
279
-
280
- // If uncommented, this should produce a static_assert failure telling the user that the struct
281
- // is not a POD type
282
- // struct NotPOD { std::string v; NotPOD() : v("hi") {}; };
283
- // PYBIND11_NUMPY_DTYPE(NotPOD, v);
284
-
285
- // Check that dtypes can be registered programmatically, both from
286
- // initializer lists of field descriptors and from other containers.
287
- py::detail::npy_format_descriptor<A>::register_dtype(
288
- {}
289
- );
290
- py::detail::npy_format_descriptor<B>::register_dtype(
291
- std::vector<py::detail::field_descriptor>{}
292
- );
293
-
294
- // test_recarray, test_scalar_conversion
295
- m.def("create_rec_simple", &create_recarray<SimpleStruct>);
296
- m.def("create_rec_packed", &create_recarray<PackedStruct>);
297
- m.def("create_rec_nested", [](size_t n) { // test_signature
298
- py::array_t<NestedStruct, 0> arr = mkarray_via_buffer<NestedStruct>(n);
299
- auto req = arr.request();
300
- auto ptr = static_cast<NestedStruct*>(req.ptr);
301
- for (size_t i = 0; i < n; i++) {
302
- SET_TEST_VALS(ptr[i].a, i);
303
- SET_TEST_VALS(ptr[i].b, i + 1);
304
- }
305
- return arr;
306
- });
307
- m.def("create_rec_partial", &create_recarray<PartialStruct>);
308
- m.def("create_rec_partial_nested", [](size_t n) {
309
- py::array_t<PartialNestedStruct, 0> arr = mkarray_via_buffer<PartialNestedStruct>(n);
310
- auto req = arr.request();
311
- auto ptr = static_cast<PartialNestedStruct*>(req.ptr);
312
- for (size_t i = 0; i < n; i++) {
313
- SET_TEST_VALS(ptr[i].a, i);
314
- }
315
- return arr;
316
- });
317
- m.def("print_rec_simple", &print_recarray<SimpleStruct>);
318
- m.def("print_rec_packed", &print_recarray<PackedStruct>);
319
- m.def("print_rec_nested", &print_recarray<NestedStruct>);
320
-
321
- // test_format_descriptors
322
- m.def("get_format_unbound", []() { return py::format_descriptor<UnboundStruct>::format(); });
323
- m.def("print_format_descriptors", []() {
324
- py::list l;
325
- for (const auto &fmt : {
326
- py::format_descriptor<SimpleStruct>::format(),
327
- py::format_descriptor<PackedStruct>::format(),
328
- py::format_descriptor<NestedStruct>::format(),
329
- py::format_descriptor<PartialStruct>::format(),
330
- py::format_descriptor<PartialNestedStruct>::format(),
331
- py::format_descriptor<StringStruct>::format(),
332
- py::format_descriptor<ArrayStruct>::format(),
333
- py::format_descriptor<EnumStruct>::format(),
334
- py::format_descriptor<ComplexStruct>::format()
335
- }) {
336
- l.append(py::cast(fmt));
337
- }
338
- return l;
339
- });
340
-
341
- // test_dtype
342
- m.def("print_dtypes", []() {
343
- py::list l;
344
- for (const py::handle &d : {
345
- py::dtype::of<SimpleStruct>(),
346
- py::dtype::of<PackedStruct>(),
347
- py::dtype::of<NestedStruct>(),
348
- py::dtype::of<PartialStruct>(),
349
- py::dtype::of<PartialNestedStruct>(),
350
- py::dtype::of<StringStruct>(),
351
- py::dtype::of<ArrayStruct>(),
352
- py::dtype::of<EnumStruct>(),
353
- py::dtype::of<StructWithUglyNames>(),
354
- py::dtype::of<ComplexStruct>()
355
- })
356
- l.append(py::str(d));
357
- return l;
358
- });
359
- m.def("test_dtype_ctors", &test_dtype_ctors);
360
- m.def("test_dtype_methods", []() {
361
- py::list list;
362
- auto dt1 = py::dtype::of<int32_t>();
363
- auto dt2 = py::dtype::of<SimpleStruct>();
364
- list.append(dt1); list.append(dt2);
365
- list.append(py::bool_(dt1.has_fields())); list.append(py::bool_(dt2.has_fields()));
366
- list.append(py::int_(dt1.itemsize())); list.append(py::int_(dt2.itemsize()));
367
- return list;
368
- });
369
- struct TrailingPaddingStruct {
370
- int32_t a;
371
- char b;
372
- };
373
- PYBIND11_NUMPY_DTYPE(TrailingPaddingStruct, a, b);
374
- m.def("trailing_padding_dtype", []() { return py::dtype::of<TrailingPaddingStruct>(); });
375
-
376
- // test_string_array
377
- m.def("create_string_array", [](bool non_empty) {
378
- py::array_t<StringStruct, 0> arr = mkarray_via_buffer<StringStruct>(non_empty ? 4 : 0);
379
- if (non_empty) {
380
- auto req = arr.request();
381
- auto ptr = static_cast<StringStruct*>(req.ptr);
382
- for (ssize_t i = 0; i < req.size * req.itemsize; i++)
383
- static_cast<char*>(req.ptr)[i] = 0;
384
- ptr[1].a[0] = 'a'; ptr[1].b[0] = 'a';
385
- ptr[2].a[0] = 'a'; ptr[2].b[0] = 'a';
386
- ptr[3].a[0] = 'a'; ptr[3].b[0] = 'a';
387
-
388
- ptr[2].a[1] = 'b'; ptr[2].b[1] = 'b';
389
- ptr[3].a[1] = 'b'; ptr[3].b[1] = 'b';
390
-
391
- ptr[3].a[2] = 'c'; ptr[3].b[2] = 'c';
392
- }
393
- return arr;
394
- });
395
- m.def("print_string_array", &print_recarray<StringStruct>);
396
-
397
- // test_array_array
398
- m.def("create_array_array", [](size_t n) {
399
- py::array_t<ArrayStruct, 0> arr = mkarray_via_buffer<ArrayStruct>(n);
400
- auto ptr = (ArrayStruct *) arr.mutable_data();
401
- for (size_t i = 0; i < n; i++) {
402
- for (size_t j = 0; j < 3; j++)
403
- for (size_t k = 0; k < 4; k++)
404
- ptr[i].a[j][k] = char('A' + (i * 100 + j * 10 + k) % 26);
405
- for (size_t j = 0; j < 2; j++)
406
- ptr[i].b[j] = int32_t(i * 1000 + j);
407
- for (size_t j = 0; j < 3; j++)
408
- ptr[i].c[j] = uint8_t(i * 10 + j);
409
- for (size_t j = 0; j < 4; j++)
410
- for (size_t k = 0; k < 2; k++)
411
- ptr[i].d[j][k] = float(i) * 100.0f + float(j) * 10.0f + float(k);
412
- }
413
- return arr;
414
- });
415
- m.def("print_array_array", &print_recarray<ArrayStruct>);
416
-
417
- // test_enum_array
418
- m.def("create_enum_array", [](size_t n) {
419
- py::array_t<EnumStruct, 0> arr = mkarray_via_buffer<EnumStruct>(n);
420
- auto ptr = (EnumStruct *) arr.mutable_data();
421
- for (size_t i = 0; i < n; i++) {
422
- ptr[i].e1 = static_cast<E1>(-1 + ((int) i % 2) * 2);
423
- ptr[i].e2 = static_cast<E2>(1 + (i % 2));
424
- }
425
- return arr;
426
- });
427
- m.def("print_enum_array", &print_recarray<EnumStruct>);
428
-
429
- // test_complex_array
430
- m.def("create_complex_array", [](size_t n) {
431
- py::array_t<ComplexStruct, 0> arr = mkarray_via_buffer<ComplexStruct>(n);
432
- auto ptr = (ComplexStruct *) arr.mutable_data();
433
- for (size_t i = 0; i < n; i++) {
434
- ptr[i].cflt.real(float(i));
435
- ptr[i].cflt.imag(float(i) + 0.25f);
436
- ptr[i].cdbl.real(double(i) + 0.5);
437
- ptr[i].cdbl.imag(double(i) + 0.75);
438
- }
439
- return arr;
440
- });
441
- m.def("print_complex_array", &print_recarray<ComplexStruct>);
442
-
443
- // test_array_constructors
444
- m.def("test_array_ctors", &test_array_ctors);
445
-
446
- // test_compare_buffer_info
447
- struct CompareStruct {
448
- bool x;
449
- uint32_t y;
450
- float z;
451
- };
452
- PYBIND11_NUMPY_DTYPE(CompareStruct, x, y, z);
453
- m.def("compare_buffer_info", []() {
454
- py::list list;
455
- list.append(py::bool_(py::detail::compare_buffer_info<float>::compare(py::buffer_info(nullptr, sizeof(float), "f", 1))));
456
- list.append(py::bool_(py::detail::compare_buffer_info<unsigned>::compare(py::buffer_info(nullptr, sizeof(int), "I", 1))));
457
- list.append(py::bool_(py::detail::compare_buffer_info<long>::compare(py::buffer_info(nullptr, sizeof(long), "l", 1))));
458
- list.append(py::bool_(py::detail::compare_buffer_info<long>::compare(py::buffer_info(nullptr, sizeof(long), sizeof(long) == sizeof(int) ? "i" : "q", 1))));
459
- list.append(py::bool_(py::detail::compare_buffer_info<CompareStruct>::compare(py::buffer_info(nullptr, sizeof(CompareStruct), "T{?:x:3xI:y:f:z:}", 1))));
460
- return list;
461
- });
462
- m.def("buffer_to_dtype", [](py::buffer& buf) { return py::dtype(buf.request()); });
463
-
464
- // test_scalar_conversion
465
- m.def("f_simple", [](SimpleStruct s) { return s.uint_ * 10; });
466
- m.def("f_packed", [](PackedStruct s) { return s.uint_ * 10; });
467
- m.def("f_nested", [](NestedStruct s) { return s.a.uint_ * 10; });
468
-
469
- // test_register_dtype
470
- m.def("register_dtype", []() { PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); });
471
-
472
- // test_str_leak
473
- m.def("dtype_wrapper", [](py::object d) { return py::dtype::from_args(std::move(d)); });
474
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/equal.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special version of this algorithm
22
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/execution_policy.h DELETED
@@ -1,81 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/detail/sequential/execution_policy.h>
21
-
22
- namespace thrust
23
- {
24
- namespace system
25
- {
26
- // put the canonical tag in the same ns as the backend's entry points
27
- namespace cpp
28
- {
29
- namespace detail
30
- {
31
-
32
- // this awkward sequence of definitions arise
33
- // from the desire both for tag to derive
34
- // from execution_policy and for execution_policy
35
- // to convert to tag (when execution_policy is not
36
- // an ancestor of tag)
37
-
38
- // forward declaration of tag
39
- struct tag;
40
-
41
- // forward declaration of execution_policy
42
- template<typename> struct execution_policy;
43
-
44
- // specialize execution_policy for tag
45
- template<>
46
- struct execution_policy<tag>
47
- : thrust::system::detail::sequential::execution_policy<tag>
48
- {};
49
-
50
- // tag's definition comes before the
51
- // generic definition of execution_policy
52
- struct tag : execution_policy<tag> {};
53
-
54
- // allow conversion to tag when it is not a successor
55
- template<typename Derived>
56
- struct execution_policy
57
- : thrust::system::detail::sequential::execution_policy<Derived>
58
- {
59
- typedef tag tag_type;
60
- operator tag() const { return tag(); }
61
- };
62
-
63
- } // end detail
64
-
65
- // alias execution_policy and tag here
66
- using thrust::system::cpp::detail::execution_policy;
67
- using thrust::system::cpp::detail::tag;
68
-
69
- } // end cpp
70
- } // end system
71
-
72
- // alias items at top-level
73
- namespace cpp
74
- {
75
-
76
- using thrust::system::cpp::execution_policy;
77
- using thrust::system::cpp::tag;
78
-
79
- } // end cpp
80
- } // end thrust
81
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scatter.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits this algorithm
22
- #include <thrust/system/cpp/detail/scatter.h>
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/monoscene_lite/monoscene/DDR.py DELETED
@@ -1,139 +0,0 @@
1
- """
2
- Most of the code in this file is taken from https://github.com/waterljwant/SSC/blob/master/models/DDR.py
3
- """
4
-
5
- import torch
6
- import torch.nn as nn
7
- import torch.nn.functional as F
8
-
9
-
10
- class SimpleRB(nn.Module):
11
- def __init__(self, in_channel, norm_layer, bn_momentum):
12
- super(SimpleRB, self).__init__()
13
- self.path = nn.Sequential(
14
- nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
15
- norm_layer(in_channel, momentum=bn_momentum),
16
- nn.ReLU(),
17
- nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
18
- norm_layer(in_channel, momentum=bn_momentum),
19
- )
20
- self.relu = nn.ReLU()
21
-
22
- def forward(self, x):
23
- residual = x
24
- conv_path = self.path(x)
25
- out = residual + conv_path
26
- out = self.relu(out)
27
- return out
28
-
29
-
30
- """
31
- 3D Residual Block,3x3x3 conv ==> 3 smaller 3D conv, refered from DDRNet
32
- """
33
-
34
-
35
- class Bottleneck3D(nn.Module):
36
- def __init__(
37
- self,
38
- inplanes,
39
- planes,
40
- norm_layer,
41
- stride=1,
42
- dilation=[1, 1, 1],
43
- expansion=4,
44
- downsample=None,
45
- fist_dilation=1,
46
- multi_grid=1,
47
- bn_momentum=0.0003,
48
- ):
49
- super(Bottleneck3D, self).__init__()
50
- # often,planes = inplanes // 4
51
- self.expansion = expansion
52
- self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
53
- self.bn1 = norm_layer(planes, momentum=bn_momentum)
54
- self.conv2 = nn.Conv3d(
55
- planes,
56
- planes,
57
- kernel_size=(1, 1, 3),
58
- stride=(1, 1, stride),
59
- dilation=(1, 1, dilation[0]),
60
- padding=(0, 0, dilation[0]),
61
- bias=False,
62
- )
63
- self.bn2 = norm_layer(planes, momentum=bn_momentum)
64
- self.conv3 = nn.Conv3d(
65
- planes,
66
- planes,
67
- kernel_size=(1, 3, 1),
68
- stride=(1, stride, 1),
69
- dilation=(1, dilation[1], 1),
70
- padding=(0, dilation[1], 0),
71
- bias=False,
72
- )
73
- self.bn3 = norm_layer(planes, momentum=bn_momentum)
74
- self.conv4 = nn.Conv3d(
75
- planes,
76
- planes,
77
- kernel_size=(3, 1, 1),
78
- stride=(stride, 1, 1),
79
- dilation=(dilation[2], 1, 1),
80
- padding=(dilation[2], 0, 0),
81
- bias=False,
82
- )
83
- self.bn4 = norm_layer(planes, momentum=bn_momentum)
84
- self.conv5 = nn.Conv3d(
85
- planes, planes * self.expansion, kernel_size=(1, 1, 1), bias=False
86
- )
87
- self.bn5 = norm_layer(planes * self.expansion, momentum=bn_momentum)
88
-
89
- self.relu = nn.ReLU(inplace=False)
90
- self.relu_inplace = nn.ReLU(inplace=True)
91
- self.downsample = downsample
92
- self.dilation = dilation
93
- self.stride = stride
94
-
95
- self.downsample2 = nn.Sequential(
96
- nn.AvgPool3d(kernel_size=(1, stride, 1), stride=(1, stride, 1)),
97
- nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
98
- norm_layer(planes, momentum=bn_momentum),
99
- )
100
- self.downsample3 = nn.Sequential(
101
- nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
102
- nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
103
- norm_layer(planes, momentum=bn_momentum),
104
- )
105
- self.downsample4 = nn.Sequential(
106
- nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
107
- nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
108
- norm_layer(planes, momentum=bn_momentum),
109
- )
110
-
111
- def forward(self, x):
112
- residual = x
113
-
114
- out1 = self.relu(self.bn1(self.conv1(x)))
115
- out2 = self.bn2(self.conv2(out1))
116
- out2_relu = self.relu(out2)
117
-
118
- out3 = self.bn3(self.conv3(out2_relu))
119
- if self.stride != 1:
120
- out2 = self.downsample2(out2)
121
- out3 = out3 + out2
122
- out3_relu = self.relu(out3)
123
-
124
- out4 = self.bn4(self.conv4(out3_relu))
125
- if self.stride != 1:
126
- out2 = self.downsample3(out2)
127
- out3 = self.downsample4(out3)
128
- out4 = out4 + out2 + out3
129
-
130
- out4_relu = self.relu(out4)
131
- out5 = self.bn5(self.conv5(out4_relu))
132
-
133
- if self.downsample is not None:
134
- residual = self.downsample(x)
135
-
136
- out = out5 + residual
137
- out_relu = self.relu(out)
138
-
139
- return out_relu
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cicooo/vits-uma-genshin-honkai/text/cleaners.py DELETED
@@ -1,475 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
- import re
16
- from unidecode import unidecode
17
- import pyopenjtalk
18
- from jamo import h2j, j2hcj
19
- from pypinyin import lazy_pinyin, BOPOMOFO
20
- import jieba, cn2an
21
-
22
-
23
- # This is a list of Korean classifiers preceded by pure Korean numerals.
24
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
25
-
26
- # Regular expression matching whitespace:
27
- _whitespace_re = re.compile(r'\s+')
28
-
29
- # Regular expression matching Japanese without punctuation marks:
30
- _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
31
-
32
- # Regular expression matching non-Japanese characters or punctuation marks:
33
- _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
34
-
35
- # List of (regular expression, replacement) pairs for abbreviations:
36
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
37
- ('mrs', 'misess'),
38
- ('mr', 'mister'),
39
- ('dr', 'doctor'),
40
- ('st', 'saint'),
41
- ('co', 'company'),
42
- ('jr', 'junior'),
43
- ('maj', 'major'),
44
- ('gen', 'general'),
45
- ('drs', 'doctors'),
46
- ('rev', 'reverend'),
47
- ('lt', 'lieutenant'),
48
- ('hon', 'honorable'),
49
- ('sgt', 'sergeant'),
50
- ('capt', 'captain'),
51
- ('esq', 'esquire'),
52
- ('ltd', 'limited'),
53
- ('col', 'colonel'),
54
- ('ft', 'fort'),
55
- ]]
56
-
57
- # List of (hangul, hangul divided) pairs:
58
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
59
- ('ㄳ', 'ㄱㅅ'),
60
- ('ㄵ', 'ㄴㅈ'),
61
- ('ㄶ', 'ㄴㅎ'),
62
- ('ㄺ', 'ㄹㄱ'),
63
- ('ㄻ', 'ㄹㅁ'),
64
- ('ㄼ', 'ㄹㅂ'),
65
- ('ㄽ', 'ㄹㅅ'),
66
- ('ㄾ', 'ㄹㅌ'),
67
- ('ㄿ', 'ㄹㅍ'),
68
- ('ㅀ', 'ㄹㅎ'),
69
- ('ㅄ', 'ㅂㅅ'),
70
- ('ㅘ', 'ㅗㅏ'),
71
- ('ㅙ', 'ㅗㅐ'),
72
- ('ㅚ', 'ㅗㅣ'),
73
- ('ㅝ', 'ㅜㅓ'),
74
- ('ㅞ', 'ㅜㅔ'),
75
- ('ㅟ', 'ㅜㅣ'),
76
- ('ㅢ', 'ㅡㅣ'),
77
- ('ㅑ', 'ㅣㅏ'),
78
- ('ㅒ', 'ㅣㅐ'),
79
- ('ㅕ', 'ㅣㅓ'),
80
- ('ㅖ', 'ㅣㅔ'),
81
- ('ㅛ', 'ㅣㅗ'),
82
- ('ㅠ', 'ㅣㅜ')
83
- ]]
84
-
85
- # List of (Latin alphabet, hangul) pairs:
86
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
87
- ('a', '에이'),
88
- ('b', '비'),
89
- ('c', '시'),
90
- ('d', '디'),
91
- ('e', '이'),
92
- ('f', '에프'),
93
- ('g', '지'),
94
- ('h', '에이치'),
95
- ('i', '아이'),
96
- ('j', '제이'),
97
- ('k', '케이'),
98
- ('l', '엘'),
99
- ('m', '엠'),
100
- ('n', '엔'),
101
- ('o', '오'),
102
- ('p', '피'),
103
- ('q', '큐'),
104
- ('r', '아르'),
105
- ('s', '에스'),
106
- ('t', '티'),
107
- ('u', '유'),
108
- ('v', '브이'),
109
- ('w', '더블유'),
110
- ('x', '엑스'),
111
- ('y', '와이'),
112
- ('z', '제트')
113
- ]]
114
-
115
- # List of (Latin alphabet, bopomofo) pairs:
116
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
117
- ('a', 'ㄟˉ'),
118
- ('b', 'ㄅㄧˋ'),
119
- ('c', 'ㄙㄧˉ'),
120
- ('d', 'ㄉㄧˋ'),
121
- ('e', 'ㄧˋ'),
122
- ('f', 'ㄝˊㄈㄨˋ'),
123
- ('g', 'ㄐㄧˋ'),
124
- ('h', 'ㄝˇㄑㄩˋ'),
125
- ('i', 'ㄞˋ'),
126
- ('j', 'ㄐㄟˋ'),
127
- ('k', 'ㄎㄟˋ'),
128
- ('l', 'ㄝˊㄛˋ'),
129
- ('m', 'ㄝˊㄇㄨˋ'),
130
- ('n', 'ㄣˉ'),
131
- ('o', 'ㄡˉ'),
132
- ('p', 'ㄆㄧˉ'),
133
- ('q', 'ㄎㄧㄡˉ'),
134
- ('r', 'ㄚˋ'),
135
- ('s', 'ㄝˊㄙˋ'),
136
- ('t', 'ㄊㄧˋ'),
137
- ('u', 'ㄧㄡˉ'),
138
- ('v', 'ㄨㄧˉ'),
139
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
140
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
141
- ('y', 'ㄨㄞˋ'),
142
- ('z', 'ㄗㄟˋ')
143
- ]]
144
-
145
-
146
- # List of (bopomofo, romaji) pairs:
147
- _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
148
- ('ㄅㄛ', 'p⁼wo'),
149
- ('ㄆㄛ', 'pʰwo'),
150
- ('ㄇㄛ', 'mwo'),
151
- ('ㄈㄛ', 'fwo'),
152
- ('ㄅ', 'p⁼'),
153
- ('ㄆ', 'pʰ'),
154
- ('ㄇ', 'm'),
155
- ('ㄈ', 'f'),
156
- ('ㄉ', 't⁼'),
157
- ('ㄊ', 'tʰ'),
158
- ('ㄋ', 'n'),
159
- ('ㄌ', 'l'),
160
- ('ㄍ', 'k⁼'),
161
- ('ㄎ', 'kʰ'),
162
- ('ㄏ', 'h'),
163
- ('ㄐ', 'ʧ⁼'),
164
- ('ㄑ', 'ʧʰ'),
165
- ('ㄒ', 'ʃ'),
166
- ('ㄓ', 'ʦ`⁼'),
167
- ('ㄔ', 'ʦ`ʰ'),
168
- ('ㄕ', 's`'),
169
- ('ㄖ', 'ɹ`'),
170
- ('ㄗ', 'ʦ⁼'),
171
- ('ㄘ', 'ʦʰ'),
172
- ('ㄙ', 's'),
173
- ('ㄚ', 'a'),
174
- ('ㄛ', 'o'),
175
- ('ㄜ', 'ə'),
176
- ('ㄝ', 'e'),
177
- ('ㄞ', 'ai'),
178
- ('ㄟ', 'ei'),
179
- ('ㄠ', 'au'),
180
- ('ㄡ', 'ou'),
181
- ('ㄧㄢ', 'yeNN'),
182
- ('ㄢ', 'aNN'),
183
- ('ㄧㄣ', 'iNN'),
184
- ('ㄣ', 'əNN'),
185
- ('ㄤ', 'aNg'),
186
- ('ㄧㄥ', 'iNg'),
187
- ('ㄨㄥ', 'uNg'),
188
- ('ㄩㄥ', 'yuNg'),
189
- ('ㄥ', 'əNg'),
190
- ('ㄦ', 'əɻ'),
191
- ('ㄧ', 'i'),
192
- ('ㄨ', 'u'),
193
- ('ㄩ', 'ɥ'),
194
- ('ˉ', '→'),
195
- ('ˊ', '↑'),
196
- ('ˇ', '↓↑'),
197
- ('ˋ', '↓'),
198
- ('˙', ''),
199
- (',', ','),
200
- ('。', '.'),
201
- ('!', '!'),
202
- ('?', '?'),
203
- ('—', '-')
204
- ]]
205
-
206
-
207
- def expand_abbreviations(text):
208
- for regex, replacement in _abbreviations:
209
- text = re.sub(regex, replacement, text)
210
- return text
211
-
212
-
213
- def lowercase(text):
214
- return text.lower()
215
-
216
-
217
- def collapse_whitespace(text):
218
- return re.sub(_whitespace_re, ' ', text)
219
-
220
-
221
- def convert_to_ascii(text):
222
- return unidecode(text)
223
-
224
-
225
- def japanese_to_romaji_with_accent(text):
226
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
227
- sentences = re.split(_japanese_marks, text)
228
- marks = re.findall(_japanese_marks, text)
229
- text = ''
230
- for i, sentence in enumerate(sentences):
231
- if re.match(_japanese_characters, sentence):
232
- if text!='':
233
- text+=' '
234
- labels = pyopenjtalk.extract_fullcontext(sentence)
235
- for n, label in enumerate(labels):
236
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
237
- if phoneme not in ['sil','pau']:
238
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
239
- else:
240
- continue
241
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
242
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
243
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
244
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
245
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
246
- a2_next=-1
247
- else:
248
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
249
- # Accent phrase boundary
250
- if a3 == 1 and a2_next == 1:
251
- text += ' '
252
- # Falling
253
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
254
- text += '↓'
255
- # Rising
256
- elif a2 == 1 and a2_next == 2:
257
- text += '↑'
258
- if i<len(marks):
259
- text += unidecode(marks[i]).replace(' ','')
260
- return text
261
-
262
-
263
- def latin_to_hangul(text):
264
- for regex, replacement in _latin_to_hangul:
265
- text = re.sub(regex, replacement, text)
266
- return text
267
-
268
-
269
- def divide_hangul(text):
270
- for regex, replacement in _hangul_divided:
271
- text = re.sub(regex, replacement, text)
272
- return text
273
-
274
-
275
- def hangul_number(num, sino=True):
276
- '''Reference https://github.com/Kyubyong/g2pK'''
277
- num = re.sub(',', '', num)
278
-
279
- if num == '0':
280
- return '영'
281
- if not sino and num == '20':
282
- return '스무'
283
-
284
- digits = '123456789'
285
- names = '일이삼사오육칠팔구'
286
- digit2name = {d: n for d, n in zip(digits, names)}
287
-
288
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
289
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
290
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
291
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
292
-
293
- spelledout = []
294
- for i, digit in enumerate(num):
295
- i = len(num) - i - 1
296
- if sino:
297
- if i == 0:
298
- name = digit2name.get(digit, '')
299
- elif i == 1:
300
- name = digit2name.get(digit, '') + '십'
301
- name = name.replace('일십', '십')
302
- else:
303
- if i == 0:
304
- name = digit2mod.get(digit, '')
305
- elif i == 1:
306
- name = digit2dec.get(digit, '')
307
- if digit == '0':
308
- if i % 4 == 0:
309
- last_three = spelledout[-min(3, len(spelledout)):]
310
- if ''.join(last_three) == '':
311
- spelledout.append('')
312
- continue
313
- else:
314
- spelledout.append('')
315
- continue
316
- if i == 2:
317
- name = digit2name.get(digit, '') + '백'
318
- name = name.replace('일백', '백')
319
- elif i == 3:
320
- name = digit2name.get(digit, '') + '천'
321
- name = name.replace('일천', '천')
322
- elif i == 4:
323
- name = digit2name.get(digit, '') + '만'
324
- name = name.replace('일만', '만')
325
- elif i == 5:
326
- name = digit2name.get(digit, '') + '십'
327
- name = name.replace('일십', '십')
328
- elif i == 6:
329
- name = digit2name.get(digit, '') + '백'
330
- name = name.replace('일백', '백')
331
- elif i == 7:
332
- name = digit2name.get(digit, '') + '천'
333
- name = name.replace('일천', '천')
334
- elif i == 8:
335
- name = digit2name.get(digit, '') + '억'
336
- elif i == 9:
337
- name = digit2name.get(digit, '') + '십'
338
- elif i == 10:
339
- name = digit2name.get(digit, '') + '백'
340
- elif i == 11:
341
- name = digit2name.get(digit, '') + '천'
342
- elif i == 12:
343
- name = digit2name.get(digit, '') + '조'
344
- elif i == 13:
345
- name = digit2name.get(digit, '') + '십'
346
- elif i == 14:
347
- name = digit2name.get(digit, '') + '백'
348
- elif i == 15:
349
- name = digit2name.get(digit, '') + '천'
350
- spelledout.append(name)
351
- return ''.join(elem for elem in spelledout)
352
-
353
-
354
- def number_to_hangul(text):
355
- '''Reference https://github.com/Kyubyong/g2pK'''
356
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
357
- for token in tokens:
358
- num, classifier = token
359
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
360
- spelledout = hangul_number(num, sino=False)
361
- else:
362
- spelledout = hangul_number(num, sino=True)
363
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
364
- # digit by digit for remaining digits
365
- digits = '0123456789'
366
- names = '영일이삼사오육칠팔구'
367
- for d, n in zip(digits, names):
368
- text = text.replace(d, n)
369
- return text
370
-
371
-
372
- def number_to_chinese(text):
373
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
374
- for number in numbers:
375
- text = text.replace(number, cn2an.an2cn(number),1)
376
- return text
377
-
378
-
379
- def chinese_to_bopomofo(text):
380
- text=text.replace('、',',').replace(';',',').replace(':',',')
381
- words=jieba.lcut(text,cut_all=False)
382
- text=''
383
- for word in words:
384
- bopomofos=lazy_pinyin(word,BOPOMOFO)
385
- if not re.search('[\u4e00-\u9fff]',word):
386
- text+=word
387
- continue
388
- for i in range(len(bopomofos)):
389
- if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
390
- bopomofos[i]+='ˉ'
391
- if text!='':
392
- text+=' '
393
- text+=''.join(bopomofos)
394
- return text
395
-
396
-
397
- def latin_to_bopomofo(text):
398
- for regex, replacement in _latin_to_bopomofo:
399
- text = re.sub(regex, replacement, text)
400
- return text
401
-
402
-
403
- def bopomofo_to_romaji(text):
404
- for regex, replacement in _bopomofo_to_romaji:
405
- text = re.sub(regex, replacement, text)
406
- return text
407
-
408
-
409
- def basic_cleaners(text):
410
- '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
411
- text = lowercase(text)
412
- text = collapse_whitespace(text)
413
- return text
414
-
415
-
416
- def transliteration_cleaners(text):
417
- '''Pipeline for non-English text that transliterates to ASCII.'''
418
- text = convert_to_ascii(text)
419
- text = lowercase(text)
420
- text = collapse_whitespace(text)
421
- return text
422
-
423
-
424
- def japanese_cleaners(text):
425
- text=japanese_to_romaji_with_accent(text)
426
- if re.match('[A-Za-z]',text[-1]):
427
- text += '.'
428
- return text
429
-
430
-
431
- def japanese_cleaners2(text):
432
- return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
433
-
434
-
435
- def korean_cleaners(text):
436
- '''Pipeline for Korean text'''
437
- text = latin_to_hangul(text)
438
- text = number_to_hangul(text)
439
- text = j2hcj(h2j(text))
440
- text = divide_hangul(text)
441
- if re.match('[\u3131-\u3163]',text[-1]):
442
- text += '.'
443
- return text
444
-
445
-
446
- def chinese_cleaners(text):
447
- '''Pipeline for Chinese text'''
448
- text=number_to_chinese(text)
449
- text=chinese_to_bopomofo(text)
450
- text=latin_to_bopomofo(text)
451
- if re.match('[ˉˊˇˋ˙]',text[-1]):
452
- text += '。'
453
- return text
454
-
455
-
456
- def zh_ja_mixture_cleaners(text):
457
- chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
458
- japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
459
- for chinese_text in chinese_texts:
460
- cleaned_text=number_to_chinese(chinese_text[4:-4])
461
- cleaned_text=chinese_to_bopomofo(cleaned_text)
462
- cleaned_text=latin_to_bopomofo(cleaned_text)
463
- cleaned_text=bopomofo_to_romaji(cleaned_text)
464
- cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
465
- cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
466
- cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
467
- cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
468
- text = text.replace(chinese_text,cleaned_text+' ',1)
469
- for japanese_text in japanese_texts:
470
- cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
471
- text = text.replace(japanese_text,cleaned_text+' ',1)
472
- text=text[:-1]
473
- if re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
474
- text += '.'
475
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/Kemal-Diffusion/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Kemal Diffusion
3
- emoji: 🐢
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: kemal.py
9
- pinned: true
10
- license: creativeml-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ConceptArtHouse/webui-gameasset/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- title: Stable Diffusion Web UI
3
- emoji: 🚧
4
- colorFrom: yellow
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.9
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: camenduru/webui
11
- ---
12
-
13
- ## Stable Diffusion Web UI
14
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
15
-
16
- ## Documentation
17
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
18
-
19
- ## Models License
20
- https://huggingface.co/spaces/CompVis/stable-diffusion-license
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_d_m_x.py DELETED
@@ -1,119 +0,0 @@
1
- from fontTools.misc import sstruct
2
- from fontTools.misc.textTools import bytechr, byteord, strjoin
3
- from . import DefaultTable
4
- import array
5
- from collections.abc import Mapping
6
-
7
- hdmxHeaderFormat = """
8
- > # big endian!
9
- version: H
10
- numRecords: H
11
- recordSize: l
12
- """
13
-
14
-
15
- class _GlyphnamedList(Mapping):
16
- def __init__(self, reverseGlyphOrder, data):
17
- self._array = data
18
- self._map = dict(reverseGlyphOrder)
19
-
20
- def __getitem__(self, k):
21
- return self._array[self._map[k]]
22
-
23
- def __len__(self):
24
- return len(self._map)
25
-
26
- def __iter__(self):
27
- return iter(self._map)
28
-
29
- def keys(self):
30
- return self._map.keys()
31
-
32
-
33
- class table__h_d_m_x(DefaultTable.DefaultTable):
34
- def decompile(self, data, ttFont):
35
- numGlyphs = ttFont["maxp"].numGlyphs
36
- glyphOrder = ttFont.getGlyphOrder()
37
- dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
38
- self.hdmx = {}
39
- for i in range(self.numRecords):
40
- ppem = byteord(data[0])
41
- maxSize = byteord(data[1])
42
- widths = _GlyphnamedList(
43
- ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
44
- )
45
- self.hdmx[ppem] = widths
46
- data = data[self.recordSize :]
47
- assert len(data) == 0, "too much hdmx data"
48
-
49
- def compile(self, ttFont):
50
- self.version = 0
51
- numGlyphs = ttFont["maxp"].numGlyphs
52
- glyphOrder = ttFont.getGlyphOrder()
53
- self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
54
- pad = (self.recordSize - 2 - numGlyphs) * b"\0"
55
- self.numRecords = len(self.hdmx)
56
- data = sstruct.pack(hdmxHeaderFormat, self)
57
- items = sorted(self.hdmx.items())
58
- for ppem, widths in items:
59
- data = data + bytechr(ppem) + bytechr(max(widths.values()))
60
- for glyphID in range(len(glyphOrder)):
61
- width = widths[glyphOrder[glyphID]]
62
- data = data + bytechr(width)
63
- data = data + pad
64
- return data
65
-
66
- def toXML(self, writer, ttFont):
67
- writer.begintag("hdmxData")
68
- writer.newline()
69
- ppems = sorted(self.hdmx.keys())
70
- records = []
71
- format = ""
72
- for ppem in ppems:
73
- widths = self.hdmx[ppem]
74
- records.append(widths)
75
- format = format + "%4d"
76
- glyphNames = ttFont.getGlyphOrder()[:]
77
- glyphNames.sort()
78
- maxNameLen = max(map(len, glyphNames))
79
- format = "%" + repr(maxNameLen) + "s:" + format + " ;"
80
- writer.write(format % (("ppem",) + tuple(ppems)))
81
- writer.newline()
82
- writer.newline()
83
- for glyphName in glyphNames:
84
- row = []
85
- for ppem in ppems:
86
- widths = self.hdmx[ppem]
87
- row.append(widths[glyphName])
88
- if ";" in glyphName:
89
- glyphName = "\\x3b".join(glyphName.split(";"))
90
- writer.write(format % ((glyphName,) + tuple(row)))
91
- writer.newline()
92
- writer.endtag("hdmxData")
93
- writer.newline()
94
-
95
- def fromXML(self, name, attrs, content, ttFont):
96
- if name != "hdmxData":
97
- return
98
- content = strjoin(content)
99
- lines = content.split(";")
100
- topRow = lines[0].split()
101
- assert topRow[0] == "ppem:", "illegal hdmx format"
102
- ppems = list(map(int, topRow[1:]))
103
- self.hdmx = hdmx = {}
104
- for ppem in ppems:
105
- hdmx[ppem] = {}
106
- lines = (line.split() for line in lines[1:])
107
- for line in lines:
108
- if not line:
109
- continue
110
- assert line[0][-1] == ":", "illegal hdmx format"
111
- glyphName = line[0][:-1]
112
- if "\\" in glyphName:
113
- from fontTools.misc.textTools import safeEval
114
-
115
- glyphName = safeEval('"""' + glyphName + '"""')
116
- line = list(map(int, line[1:]))
117
- assert len(line) == len(ppems), "illegal hdmx format"
118
- for i in range(len(ppems)):
119
- hdmx[ppems[i]][glyphName] = line[i]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/conftest.py DELETED
@@ -1,55 +0,0 @@
1
- import os
2
- import shutil
3
- import subprocess
4
- import sys
5
- import time
6
-
7
- import pytest
8
-
9
- import fsspec
10
- from fsspec.implementations.cached import CachingFileSystem
11
-
12
-
13
- @pytest.fixture()
14
- def m():
15
- """
16
- Fixture providing a memory filesystem.
17
- """
18
- m = fsspec.filesystem("memory")
19
- m.store.clear()
20
- m.pseudo_dirs.clear()
21
- m.pseudo_dirs.append("")
22
- try:
23
- yield m
24
- finally:
25
- m.store.clear()
26
- m.pseudo_dirs.clear()
27
- m.pseudo_dirs.append("")
28
-
29
-
30
- @pytest.fixture
31
- def ftp_writable(tmpdir):
32
- """
33
- Fixture providing a writable FTP filesystem.
34
- """
35
- pytest.importorskip("pyftpdlib")
36
- from fsspec.implementations.ftp import FTPFileSystem
37
-
38
- FTPFileSystem.clear_instance_cache() # remove lingering connections
39
- CachingFileSystem.clear_instance_cache()
40
- d = str(tmpdir)
41
- with open(os.path.join(d, "out"), "wb") as f:
42
- f.write(b"hello" * 10000)
43
- P = subprocess.Popen(
44
- [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
45
- )
46
- try:
47
- time.sleep(1)
48
- yield "localhost", 2121, "user", "pass"
49
- finally:
50
- P.terminate()
51
- P.wait()
52
- try:
53
- shutil.rmtree(tmpdir)
54
- except Exception:
55
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-150cb53b.js DELETED
@@ -1,2 +0,0 @@
1
- import{S as U,e as G,s as H,N as S,k as R,O as K,K as o,p as B,M as C,o as T,ap as E,Q as k,aw as N,z,v as A,A as M,x as D,a1 as W,B as X,am as Y,P as Z,R as y,E as p,ae as x,h as O,j as P,q as $,r as ee,t as Q,F}from"./index-3370be2a.js";/* empty css */import{B as le}from"./Button-89624748.js";import{B as ne}from"./BlockTitle-bcf8c05e.js";import"./Info-5611e10f.js";function ie(e){let n;return{c(){n=Z(e[5])},m(i,a){B(i,n,a)},p(i,a){a&32&&y(n,i[5])},d(i){i&&M(n)}}}function ae(e){let n,i,a,m,_,s,c,f,d,r,g;return m=new ne({props:{show_label:e[7],info:e[6],$$slots:{default:[ie]},$$scope:{ctx:e}}}),{c(){n=S("div"),i=S("div"),a=S("label"),R(m.$$.fragment),_=K(),s=S("input"),c=K(),f=S("input"),o(a,"for",e[8]),o(s,"data-testid","number-input"),o(s,"type","number"),o(s,"min",e[1]),o(s,"max",e[2]),o(s,"step",e[3]),s.disabled=e[4],o(s,"class","svelte-1cl284s"),o(i,"class","head svelte-1cl284s"),o(n,"class","wrap svelte-1cl284s"),o(f,"type","range"),o(f,"id",e[8]),o(f,"name","cowbell"),o(f,"min",e[1]),o(f,"max",e[2]),o(f,"step",e[3]),f.disabled=e[4],o(f,"class","svelte-1cl284s")},m(l,u){B(l,n,u),C(n,i),C(i,a),T(m,a,null),C(i,_),C(i,s),E(s,e[0]),B(l,c,u),B(l,f,u),E(f,e[0]),d=!0,r||(g=[k(s,"input",e[12]),k(s,"blur",e[10]),k(s,"pointerup",e[9]),k(f,"change",e[13]),k(f,"input",e[13]),k(f,"pointerup",e[9])],r=!0)},p(l,[u]){const v={};u&128&&(v.show_label=l[7]),u&64&&(v.info=l[6]),u&65568&&(v.$$scope={dirty:u,ctx:l}),m.$set(v),(!d||u&2)&&o(s,"min",l[1]),(!d||u&4)&&o(s,"max",l[2]),(!d||u&8)&&o(s,"step",l[3]),(!d||u&16)&&(s.disabled=l[4]),u&1&&N(s.value)!==l[0]&&E(s,l[0]),(!d||u&2)&&o(f,"min",l[1]),(!d||u&4)&&o(f,"max",l[2]),(!d||u&8)&&o(f,"step",l[3]),(!d||u&16)&&(f.disabled=l[4]),u&1&&E(f,l[0])},i(l){d||(z(m.$$.fragment,l),d=!0)},o(l){A(m.$$.fragment,l),d=!1},d(l){l&&(M(n),M(c),M(f)),D(m),r=!1,W(g)}}}let te=0;function ue(e,n,i){let{value:a=0}=n,{value_is_output:m=!1}=n,{minimum:_=0}=n,{maximum:s=100}=n,{step:c=1}=n,{disabled:f=!1}=n,{label:d}=n,{info:r=void 
0}=n,{show_label:g}=n;const l=`range_id_${te++}`,u=X();function v(){u("change",a),m||u("input")}Y(()=>{i(11,m=!1)});function h(b){u("release",a)}const j=()=>{u("release",a),i(0,a=Math.min(Math.max(a,_),s))};function q(){a=N(this.value),i(0,a)}function w(){a=N(this.value),i(0,a)}return e.$$set=b=>{"value"in b&&i(0,a=b.value),"value_is_output"in b&&i(11,m=b.value_is_output),"minimum"in b&&i(1,_=b.minimum),"maximum"in b&&i(2,s=b.maximum),"step"in b&&i(3,c=b.step),"disabled"in b&&i(4,f=b.disabled),"label"in b&&i(5,d=b.label),"info"in b&&i(6,r=b.info),"show_label"in b&&i(7,g=b.show_label)},e.$$.update=()=>{e.$$.dirty&1&&v()},[a,_,s,c,f,d,r,g,l,h,j,m,q,w]}class se extends U{constructor(n){super(),G(this,n,ue,ae,H,{value:0,value_is_output:11,minimum:1,maximum:2,step:3,disabled:4,label:5,info:6,show_label:7})}}function me(e){let n,i,a,m,_,s;const c=[e[15]];let f={};for(let l=0;l<c.length;l+=1)f=p(f,c[l]);n=new x({props:f});function d(l){e[16](l)}function r(l){e[17](l)}let g={label:e[5],info:e[6],show_label:e[14],minimum:e[10],maximum:e[11],step:e[12],disabled:e[13]==="static"};return e[0]!==void 0&&(g.value=e[0]),e[1]!==void 0&&(g.value_is_output=e[1]),a=new se({props:g}),O.push(()=>P(a,"value",d)),O.push(()=>P(a,"value_is_output",r)),a.$on("input",e[18]),a.$on("change",e[19]),a.$on("release",e[20]),{c(){R(n.$$.fragment),i=K(),R(a.$$.fragment)},m(l,u){T(n,l,u),B(l,i,u),T(a,l,u),s=!0},p(l,u){const v=u&32768?$(c,[ee(l[15])]):{};n.$set(v);const h={};u&32&&(h.label=l[5]),u&64&&(h.info=l[6]),u&16384&&(h.show_label=l[14]),u&1024&&(h.minimum=l[10]),u&2048&&(h.maximum=l[11]),u&4096&&(h.step=l[12]),u&8192&&(h.disabled=l[13]==="static"),!m&&u&1&&(m=!0,h.value=l[0],Q(()=>m=!1)),!_&&u&2&&(_=!0,h.value_is_output=l[1],Q(()=>_=!1)),a.$set(h)},i(l){s||(z(n.$$.fragment,l),z(a.$$.fragment,l),s=!0)},o(l){A(n.$$.fragment,l),A(a.$$.fragment,l),s=!1},d(l){l&&M(i),D(n,l),D(a,l)}}}function fe(e){let n,i;return n=new 
le({props:{visible:e[4],elem_id:e[2],elem_classes:e[3],container:e[7],scale:e[8],min_width:e[9],$$slots:{default:[me]},$$scope:{ctx:e}}}),{c(){R(n.$$.fragment)},m(a,m){T(n,a,m),i=!0},p(a,[m]){const _={};m&16&&(_.visible=a[4]),m&4&&(_.elem_id=a[2]),m&8&&(_.elem_classes=a[3]),m&128&&(_.container=a[7]),m&256&&(_.scale=a[8]),m&512&&(_.min_width=a[9]),m&2161763&&(_.$$scope={dirty:m,ctx:a}),n.$set(_)},i(a){i||(z(n.$$.fragment,a),i=!0)},o(a){A(n.$$.fragment,a),i=!1},d(a){D(n,a)}}}function _e(e,n,i){let{elem_id:a=""}=n,{elem_classes:m=[]}=n,{visible:_=!0}=n,{value:s=0}=n,{label:c="Slider"}=n,{info:f=void 0}=n,{container:d=!0}=n,{scale:r=null}=n,{min_width:g=void 0}=n,{minimum:l}=n,{maximum:u}=n,{step:v}=n,{mode:h}=n,{show_label:j}=n,{loading_status:q}=n,{value_is_output:w=!1}=n;function b(t){s=t,i(0,s)}function I(t){w=t,i(1,w)}function J(t){F.call(this,e,t)}function L(t){F.call(this,e,t)}function V(t){F.call(this,e,t)}return e.$$set=t=>{"elem_id"in t&&i(2,a=t.elem_id),"elem_classes"in t&&i(3,m=t.elem_classes),"visible"in t&&i(4,_=t.visible),"value"in t&&i(0,s=t.value),"label"in t&&i(5,c=t.label),"info"in t&&i(6,f=t.info),"container"in t&&i(7,d=t.container),"scale"in t&&i(8,r=t.scale),"min_width"in t&&i(9,g=t.min_width),"minimum"in t&&i(10,l=t.minimum),"maximum"in t&&i(11,u=t.maximum),"step"in t&&i(12,v=t.step),"mode"in t&&i(13,h=t.mode),"show_label"in t&&i(14,j=t.show_label),"loading_status"in t&&i(15,q=t.loading_status),"value_is_output"in t&&i(1,w=t.value_is_output)},[s,w,a,m,_,c,f,d,r,g,l,u,v,h,j,q,b,I,J,L,V]}class oe extends U{constructor(n){super(),G(this,n,_e,fe,H,{elem_id:2,elem_classes:3,visible:4,value:0,label:5,info:6,container:7,scale:8,min_width:9,minimum:10,maximum:11,step:12,mode:13,show_label:14,loading_status:15,value_is_output:1})}}const re=oe,ve=["static","dynamic"],we=e=>({type:{payload:"number"},description:{payload:"selected value"},example_data:e.value??e.minimum});export{re as Component,we as document,ve as modes};
2
- //# sourceMappingURL=index-150cb53b.js.map
 
 
 
spaces/Datasculptor/MusicGen/audiocraft/utils/utils.py DELETED
@@ -1,234 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from concurrent.futures import ProcessPoolExecutor
8
- from functools import wraps
9
- import hashlib
10
- import logging
11
- import typing as tp
12
-
13
- import flashy
14
- import flashy.distrib
15
- import omegaconf
16
- import torch
17
- from torch.nn.utils.rnn import pad_sequence
18
-
19
-
20
- logger = logging.getLogger(__name__)
21
-
22
-
23
- def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
24
- """Convenience function to map an omegaconf configuration to a dictionary.
25
-
26
- Args:
27
- cfg (omegaconf.DictConfig): Original configuration to map to dict.
28
- Returns:
29
- dict: Config as dictionary object.
30
- """
31
- dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
32
- assert isinstance(dct, dict)
33
- return dct
34
-
35
-
36
- def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
37
- if max_samples >= len(dataset):
38
- return dataset
39
-
40
- generator = torch.Generator().manual_seed(seed)
41
- perm = torch.randperm(len(dataset), generator=generator)
42
- return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
43
-
44
-
45
- def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
46
- num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
47
- """Convenience function to load dataset into a dataloader with optional subset sampling.
48
-
49
- Args:
50
- dataset: Dataset to load.
51
- num_samples (Optional[int]): Number of samples to limit subset size.
52
- batch_size (int): Batch size.
53
- num_workers (int): Number of workers for data loading.
54
- seed (int): Random seed.
55
- """
56
- if num_samples is not None:
57
- dataset = random_subset(dataset, num_samples, seed)
58
-
59
- dataloader = flashy.distrib.loader(
60
- dataset,
61
- batch_size=batch_size,
62
- num_workers=num_workers,
63
- **kwargs
64
- )
65
- return dataloader
66
-
67
-
68
- def get_dataset_from_loader(dataloader):
69
- dataset = dataloader.dataset
70
- if isinstance(dataset, torch.utils.data.Subset):
71
- return dataset.dataset
72
- else:
73
- return dataset
74
-
75
-
76
- def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
77
- """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
78
-
79
- Args:
80
- input (torch.Tensor): The input tensor containing probabilities.
81
- num_samples (int): Number of samples to draw.
82
- replacement (bool): Whether to draw with replacement or not.
83
- Keywords args:
84
- generator (torch.Generator): A pseudorandom number generator for sampling.
85
- Returns:
86
- torch.Tensor: Last dimension contains num_samples indices
87
- sampled from the multinomial probability distribution
88
- located in the last dimension of tensor input.
89
- """
90
- input_ = input.reshape(-1, input.shape[-1])
91
- output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
92
- output = output_.reshape(*list(input.shape[:-1]), -1)
93
- return output
94
-
95
-
96
- def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
97
- """Sample next token from top K values along the last dimension of the input probs tensor.
98
-
99
- Args:
100
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
101
- k (int): The k in “top-k”.
102
- Returns:
103
- torch.Tensor: Sampled tokens.
104
- """
105
- top_k_value, _ = torch.topk(probs, k, dim=-1)
106
- min_value_top_k = top_k_value[..., [-1]]
107
- probs *= (probs >= min_value_top_k).float()
108
- probs.div_(probs.sum(dim=-1, keepdim=True))
109
- next_token = multinomial(probs, num_samples=1)
110
- return next_token
111
-
112
-
113
- def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
114
- """Sample next token from top P probabilities along the last dimension of the input probs tensor.
115
-
116
- Args:
117
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
118
- p (int): The p in “top-p”.
119
- Returns:
120
- torch.Tensor: Sampled tokens.
121
- """
122
- probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
123
- probs_sum = torch.cumsum(probs_sort, dim=-1)
124
- mask = probs_sum - probs_sort > p
125
- probs_sort *= (~mask).float()
126
- probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
127
- next_token = multinomial(probs_sort, num_samples=1)
128
- next_token = torch.gather(probs_idx, -1, next_token)
129
- return next_token
130
-
131
-
132
- class DummyPoolExecutor:
133
- """Dummy pool executor to use when we actually have only 1 worker.
134
- (e.g. instead of ProcessPoolExecutor).
135
- """
136
- class DummyResult:
137
- def __init__(self, func, *args, **kwargs):
138
- self.func = func
139
- self.args = args
140
- self.kwargs = kwargs
141
-
142
- def result(self):
143
- return self.func(*self.args, **self.kwargs)
144
-
145
- def __init__(self, workers, mp_context=None):
146
- pass
147
-
148
- def submit(self, func, *args, **kwargs):
149
- return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
150
-
151
- def __enter__(self):
152
- return self
153
-
154
- def __exit__(self, exc_type, exc_value, exc_tb):
155
- return
156
-
157
-
158
- def get_pool_executor(num_workers: int, mp_context=None):
159
- return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
160
-
161
-
162
- def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
163
- """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
164
- For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
165
-
166
- Args:
167
- lengths (torch.Tensor): tensor with lengths
168
- max_len (int): can set the max length manually. Defaults to None.
169
- Returns:
170
- torch.Tensor: mask with 0s where there is pad tokens else 1s
171
- """
172
- assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
173
- final_length = lengths.max().item() if not max_len else max_len
174
- final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
175
- return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
176
-
177
-
178
- def hash_trick(word: str, vocab_size: int) -> int:
179
- """Hash trick to pair each word with an index
180
-
181
- Args:
182
- word (str): word we wish to convert to an index
183
- vocab_size (int): size of the vocabulary
184
- Returns:
185
- int: index of the word in the embedding LUT
186
- """
187
- hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
188
- return hash % vocab_size
189
-
190
-
191
- def with_rank_rng(base_seed: int = 1234):
192
- """Decorator for a function so that the function will use a Random Number Generator
193
- whose state depend on the GPU rank. The original RNG state is restored upon returning.
194
-
195
- Args:
196
- base_seed (int): Random seed.
197
- """
198
- def _decorator(fun: tp.Callable):
199
- @wraps(fun)
200
- def _decorated(*args, **kwargs):
201
- state = torch.get_rng_state()
202
- seed = base_seed ^ flashy.distrib.rank()
203
- torch.manual_seed(seed)
204
- logger.debug('Rank dependent seed set to %d', seed)
205
- try:
206
- return fun(*args, **kwargs)
207
- finally:
208
- torch.set_rng_state(state)
209
- logger.debug('RNG state restored.')
210
- return _decorated
211
- return _decorator
212
-
213
-
214
- def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
215
- """Get a list of tensors and collate them to a single tensor. according to the following logic:
216
- - `dim` specifies the time dimension which will be stacked and padded.
217
- - The output will contain 1 new dimension (dimension index 0) which will be the size of
218
- of the original list.
219
-
220
- Args:
221
- tensors (tp.List[torch.Tensor]): List of tensors to collate.
222
- dim (int): Dimension which will be stacked and padded.
223
- Returns:
224
- tp.Tuple[torch.Tensor, torch.Tensor]:
225
- torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
226
- (dimension index 0) which will be the size of the original list.
227
- torch.Tensor: Tensor containing length of original tensor sizes (without padding).
228
- """
229
- tensors = [x.transpose(0, dim) for x in tensors]
230
- lens = torch.LongTensor([len(x) for x in tensors])
231
- padded_tensors = pad_sequence(tensors)
232
- padded_tensors = padded_tensors.transpose(0, 1)
233
- padded_tensors = padded_tensors.transpose(1, dim + 1)
234
- return padded_tensors, lens
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Datasculptor/StyleGAN-NADA/e4e/utils/__init__.py DELETED
File without changes
spaces/Dauzy/whisper-webui/src/prompts/prependPromptStrategy.py DELETED
@@ -1,31 +0,0 @@
1
- from src.config import VadInitialPromptMode
2
- from src.prompts.abstractPromptStrategy import AbstractPromptStrategy
3
-
4
- class PrependPromptStrategy(AbstractPromptStrategy):
5
- """
6
- A simple prompt strategy that prepends a single prompt to all segments of audio, or prepends the prompt to the first segment of audio.
7
- """
8
- def __init__(self, initial_prompt: str, initial_prompt_mode: VadInitialPromptMode):
9
- """
10
- Parameters
11
- ----------
12
- initial_prompt: str
13
- The initial prompt to use for the transcription.
14
- initial_prompt_mode: VadInitialPromptMode
15
- The mode to use for the initial prompt. If set to PREPEND_FIRST_SEGMENT, the initial prompt will be prepended to the first segment of audio.
16
- If set to PREPEND_ALL_SEGMENTS, the initial prompt will be prepended to all segments of audio.
17
- """
18
- self.initial_prompt = initial_prompt
19
- self.initial_prompt_mode = initial_prompt_mode
20
-
21
- # This is a simple prompt strategy, so we only support these two modes
22
- if initial_prompt_mode not in [VadInitialPromptMode.PREPEND_ALL_SEGMENTS, VadInitialPromptMode.PREPREND_FIRST_SEGMENT]:
23
- raise ValueError(f"Unsupported initial prompt mode {initial_prompt_mode}")
24
-
25
- def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str:
26
- if (self.initial_prompt_mode == VadInitialPromptMode.PREPEND_ALL_SEGMENTS):
27
- return self._concat_prompt(self.initial_prompt, whisper_prompt)
28
- elif (self.initial_prompt_mode == VadInitialPromptMode.PREPREND_FIRST_SEGMENT):
29
- return self._concat_prompt(self.initial_prompt, whisper_prompt) if segment_index == 0 else whisper_prompt
30
- else:
31
- raise ValueError(f"Unknown initial prompt mode {self.initial_prompt_mode}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DeepDrivePL/PaddleSeg-Matting/matting/transforms.py DELETED
@@ -1,530 +0,0 @@
1
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import random
16
-
17
- import cv2
18
- import numpy as np
19
- from paddleseg.transforms import functional
20
- from paddleseg.cvlibs import manager
21
- from PIL import Image
22
-
23
-
24
- @manager.TRANSFORMS.add_component
25
- class Compose:
26
- """
27
- Do transformation on input data with corresponding pre-processing and augmentation operations.
28
- The shape of input data to all operations is [height, width, channels].
29
- """
30
-
31
- def __init__(self, transforms, to_rgb=True):
32
- if not isinstance(transforms, list):
33
- raise TypeError('The transforms must be a list!')
34
- self.transforms = transforms
35
- self.to_rgb = to_rgb
36
-
37
- def __call__(self, data):
38
- """
39
- Args:
40
- data (dict): The data to transform.
41
-
42
- Returns:
43
- dict: Data after transformation
44
- """
45
- if 'trans_info' not in data:
46
- data['trans_info'] = []
47
- for op in self.transforms:
48
- data = op(data)
49
- if data is None:
50
- return None
51
-
52
- data['img'] = np.transpose(data['img'], (2, 0, 1))
53
- for key in data.get('gt_fields', []):
54
- if len(data[key].shape) == 2:
55
- continue
56
- data[key] = np.transpose(data[key], (2, 0, 1))
57
-
58
- return data
59
-
60
-
61
- @manager.TRANSFORMS.add_component
62
- class LoadImages:
63
- def __init__(self, to_rgb=True):
64
- self.to_rgb = to_rgb
65
-
66
- def __call__(self, data):
67
- if isinstance(data['img'], str):
68
- data['img'] = cv2.imread(data['img'])
69
- for key in data.get('gt_fields', []):
70
- if isinstance(data[key], str):
71
- data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED)
72
- # if alpha and trimap has 3 channels, extract one.
73
- if key in ['alpha', 'trimap']:
74
- if len(data[key].shape) > 2:
75
- data[key] = data[key][:, :, 0]
76
-
77
- if self.to_rgb:
78
- data['img'] = cv2.cvtColor(data['img'], cv2.COLOR_BGR2RGB)
79
- for key in data.get('gt_fields', []):
80
- if len(data[key].shape) == 2:
81
- continue
82
- data[key] = cv2.cvtColor(data[key], cv2.COLOR_BGR2RGB)
83
-
84
- return data
85
-
86
-
87
- @manager.TRANSFORMS.add_component
88
- class Resize:
89
- def __init__(self, target_size=(512, 512)):
90
- if isinstance(target_size, list) or isinstance(target_size, tuple):
91
- if len(target_size) != 2:
92
- raise ValueError(
93
- '`target_size` should include 2 elements, but it is {}'.
94
- format(target_size))
95
- else:
96
- raise TypeError(
97
- "Type of `target_size` is invalid. It should be list or tuple, but it is {}"
98
- .format(type(target_size)))
99
-
100
- self.target_size = target_size
101
-
102
- def __call__(self, data):
103
- data['trans_info'].append(('resize', data['img'].shape[0:2]))
104
- data['img'] = functional.resize(data['img'], self.target_size)
105
- for key in data.get('gt_fields', []):
106
- data[key] = functional.resize(data[key], self.target_size)
107
- return data
108
-
109
-
110
- @manager.TRANSFORMS.add_component
111
- class ResizeByLong:
112
- """
113
- Resize the long side of an image to given size, and then scale the other side proportionally.
114
-
115
- Args:
116
- long_size (int): The target size of long side.
117
- """
118
-
119
- def __init__(self, long_size):
120
- self.long_size = long_size
121
-
122
- def __call__(self, data):
123
- data['trans_info'].append(('resize', data['img'].shape[0:2]))
124
- data['img'] = functional.resize_long(data['img'], self.long_size)
125
- for key in data.get('gt_fields', []):
126
- data[key] = functional.resize_long(data[key], self.long_size)
127
- return data
128
-
129
-
130
- @manager.TRANSFORMS.add_component
131
- class ResizeByShort:
132
- """
133
- Resize the short side of an image to given size, and then scale the other side proportionally.
134
-
135
- Args:
136
- short_size (int): The target size of short side.
137
- """
138
-
139
- def __init__(self, short_size):
140
- self.short_size = short_size
141
-
142
- def __call__(self, data):
143
- data['trans_info'].append(('resize', data['img'].shape[0:2]))
144
- data['img'] = functional.resize_short(data['img'], self.short_size)
145
- for key in data.get('gt_fields', []):
146
- data[key] = functional.resize_short(data[key], self.short_size)
147
- return data
148
-
149
-
150
- @manager.TRANSFORMS.add_component
151
- class ResizeToIntMult:
152
- """
153
- Resize to some int muitple, d.g. 32.
154
- """
155
-
156
- def __init__(self, mult_int=32):
157
- self.mult_int = mult_int
158
-
159
- def __call__(self, data):
160
- data['trans_info'].append(('resize', data['img'].shape[0:2]))
161
-
162
- h, w = data['img'].shape[0:2]
163
- rw = w - w % 32
164
- rh = h - h % 32
165
- data['img'] = functional.resize(data['img'], (rw, rh))
166
- for key in data.get('gt_fields', []):
167
- data[key] = functional.resize(data[key], (rw, rh))
168
-
169
- return data
170
-
171
-
172
- @manager.TRANSFORMS.add_component
173
- class Normalize:
174
- """
175
- Normalize an image.
176
-
177
- Args:
178
- mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5].
179
- std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5].
180
-
181
- Raises:
182
- ValueError: When mean/std is not list or any value in std is 0.
183
- """
184
-
185
- def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
186
- self.mean = mean
187
- self.std = std
188
- if not (isinstance(self.mean, (list, tuple))
189
- and isinstance(self.std, (list, tuple))):
190
- raise ValueError(
191
- "{}: input type is invalid. It should be list or tuple".format(
192
- self))
193
- from functools import reduce
194
- if reduce(lambda x, y: x * y, self.std) == 0:
195
- raise ValueError('{}: std is invalid!'.format(self))
196
-
197
- def __call__(self, data):
198
- mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
199
- std = np.array(self.std)[np.newaxis, np.newaxis, :]
200
- data['img'] = functional.normalize(data['img'], mean, std)
201
- if 'fg' in data.get('gt_fields', []):
202
- data['fg'] = functional.normalize(data['fg'], mean, std)
203
- if 'bg' in data.get('gt_fields', []):
204
- data['bg'] = functional.normalize(data['bg'], mean, std)
205
-
206
- return data
207
-
208
-
209
- @manager.TRANSFORMS.add_component
210
- class RandomCropByAlpha:
211
- """
212
- Randomly crop while centered on uncertain area by a certain probability.
213
-
214
- Args:
215
- crop_size (tuple|list): The size you want to crop from image.
216
- p (float): The probability centered on uncertain area.
217
-
218
- """
219
-
220
- def __init__(self, crop_size=((320, 320), (480, 480), (640, 640)),
221
- prob=0.5):
222
- self.crop_size = crop_size
223
- self.prob = prob
224
-
225
- def __call__(self, data):
226
- idex = np.random.randint(low=0, high=len(self.crop_size))
227
- crop_w, crop_h = self.crop_size[idex]
228
-
229
- img_h = data['img'].shape[0]
230
- img_w = data['img'].shape[1]
231
- if np.random.rand() < self.prob:
232
- crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255))
233
- center_h_array, center_w_array = crop_center
234
- if len(center_h_array) == 0:
235
- return data
236
- rand_ind = np.random.randint(len(center_h_array))
237
- center_h = center_h_array[rand_ind]
238
- center_w = center_w_array[rand_ind]
239
- delta_h = crop_h // 2
240
- delta_w = crop_w // 2
241
- start_h = max(0, center_h - delta_h)
242
- start_w = max(0, center_w - delta_w)
243
- else:
244
- start_h = 0
245
- start_w = 0
246
- if img_h > crop_h:
247
- start_h = np.random.randint(img_h - crop_h + 1)
248
- if img_w > crop_w:
249
- start_w = np.random.randint(img_w - crop_w + 1)
250
-
251
- end_h = min(img_h, start_h + crop_h)
252
- end_w = min(img_w, start_w + crop_w)
253
-
254
- data['img'] = data['img'][start_h:end_h, start_w:end_w]
255
- for key in data.get('gt_fields', []):
256
- data[key] = data[key][start_h:end_h, start_w:end_w]
257
-
258
- return data
259
-
260
-
261
- @manager.TRANSFORMS.add_component
262
- class RandomCrop:
263
- """
264
- Randomly crop
265
-
266
- Args:
267
- crop_size (tuple|list): The size you want to crop from image.
268
- """
269
-
270
- def __init__(self, crop_size=((320, 320), (480, 480), (640, 640))):
271
- if not isinstance(crop_size[0], (list, tuple)):
272
- crop_size = [crop_size]
273
- self.crop_size = crop_size
274
-
275
- def __call__(self, data):
276
- idex = np.random.randint(low=0, high=len(self.crop_size))
277
- crop_w, crop_h = self.crop_size[idex]
278
- img_h, img_w = data['img'].shape[0:2]
279
-
280
- start_h = 0
281
- start_w = 0
282
- if img_h > crop_h:
283
- start_h = np.random.randint(img_h - crop_h + 1)
284
- if img_w > crop_w:
285
- start_w = np.random.randint(img_w - crop_w + 1)
286
-
287
- end_h = min(img_h, start_h + crop_h)
288
- end_w = min(img_w, start_w + crop_w)
289
-
290
- data['img'] = data['img'][start_h:end_h, start_w:end_w]
291
- for key in data.get('gt_fields', []):
292
- data[key] = data[key][start_h:end_h, start_w:end_w]
293
-
294
- return data
295
-
296
-
297
- @manager.TRANSFORMS.add_component
298
- class LimitLong:
299
- """
300
- Limit the long edge of image.
301
-
302
- If the long edge is larger than max_long, resize the long edge
303
- to max_long, while scale the short edge proportionally.
304
-
305
- If the long edge is smaller than min_long, resize the long edge
306
- to min_long, while scale the short edge proportionally.
307
-
308
- Args:
309
- max_long (int, optional): If the long edge of image is larger than max_long,
310
- it will be resize to max_long. Default: None.
311
- min_long (int, optional): If the long edge of image is smaller than min_long,
312
- it will be resize to min_long. Default: None.
313
- """
314
-
315
- def __init__(self, max_long=None, min_long=None):
316
- if max_long is not None:
317
- if not isinstance(max_long, int):
318
- raise TypeError(
319
- "Type of `max_long` is invalid. It should be int, but it is {}"
320
- .format(type(max_long)))
321
- if min_long is not None:
322
- if not isinstance(min_long, int):
323
- raise TypeError(
324
- "Type of `min_long` is invalid. It should be int, but it is {}"
325
- .format(type(min_long)))
326
- if (max_long is not None) and (min_long is not None):
327
- if min_long > max_long:
328
- raise ValueError(
329
- '`max_long should not smaller than min_long, but they are {} and {}'
330
- .format(max_long, min_long))
331
- self.max_long = max_long
332
- self.min_long = min_long
333
-
334
- def __call__(self, data):
335
- h, w = data['img'].shape[:2]
336
- long_edge = max(h, w)
337
- target = long_edge
338
- if (self.max_long is not None) and (long_edge > self.max_long):
339
- target = self.max_long
340
- elif (self.min_long is not None) and (long_edge < self.min_long):
341
- target = self.min_long
342
-
343
- if target != long_edge:
344
- data['trans_info'].append(('resize', data['img'].shape[0:2]))
345
- data['img'] = functional.resize_long(data['img'], target)
346
- for key in data.get('gt_fields', []):
347
- data[key] = functional.resize_long(data[key], target)
348
-
349
- return data
350
-
351
-
352
- @manager.TRANSFORMS.add_component
353
- class RandomHorizontalFlip:
354
- """
355
- Flip an image horizontally with a certain probability.
356
-
357
- Args:
358
- prob (float, optional): A probability of horizontally flipping. Default: 0.5.
359
- """
360
-
361
- def __init__(self, prob=0.5):
362
- self.prob = prob
363
-
364
- def __call__(self, data):
365
- if random.random() < self.prob:
366
- data['img'] = functional.horizontal_flip(data['img'])
367
- for key in data.get('gt_fields', []):
368
- data[key] = functional.horizontal_flip(data[key])
369
-
370
- return data
371
-
372
-
373
- @manager.TRANSFORMS.add_component
374
- class RandomBlur:
375
- """
376
- Blurring an image by a Gaussian function with a certain probability.
377
-
378
- Args:
379
- prob (float, optional): A probability of blurring an image. Default: 0.1.
380
- """
381
-
382
- def __init__(self, prob=0.1):
383
- self.prob = prob
384
-
385
- def __call__(self, data):
386
- if self.prob <= 0:
387
- n = 0
388
- elif self.prob >= 1:
389
- n = 1
390
- else:
391
- n = int(1.0 / self.prob)
392
- if n > 0:
393
- if np.random.randint(0, n) == 0:
394
- radius = np.random.randint(3, 10)
395
- if radius % 2 != 1:
396
- radius = radius + 1
397
- if radius > 9:
398
- radius = 9
399
- data['img'] = cv2.GaussianBlur(data['img'], (radius, radius), 0,
400
- 0)
401
- for key in data.get('gt_fields', []):
402
- data[key] = cv2.GaussianBlur(data[key], (radius, radius), 0,
403
- 0)
404
- return data
405
-
406
-
407
- @manager.TRANSFORMS.add_component
408
- class RandomDistort:
409
- """
410
- Distort an image with random configurations.
411
-
412
- Args:
413
- brightness_range (float, optional): A range of brightness. Default: 0.5.
414
- brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5.
415
- contrast_range (float, optional): A range of contrast. Default: 0.5.
416
- contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5.
417
- saturation_range (float, optional): A range of saturation. Default: 0.5.
418
- saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5.
419
- hue_range (int, optional): A range of hue. Default: 18.
420
- hue_prob (float, optional): A probability of adjusting hue. Default: 0.5.
421
- """
422
-
423
- def __init__(self,
424
- brightness_range=0.5,
425
- brightness_prob=0.5,
426
- contrast_range=0.5,
427
- contrast_prob=0.5,
428
- saturation_range=0.5,
429
- saturation_prob=0.5,
430
- hue_range=18,
431
- hue_prob=0.5):
432
- self.brightness_range = brightness_range
433
- self.brightness_prob = brightness_prob
434
- self.contrast_range = contrast_range
435
- self.contrast_prob = contrast_prob
436
- self.saturation_range = saturation_range
437
- self.saturation_prob = saturation_prob
438
- self.hue_range = hue_range
439
- self.hue_prob = hue_prob
440
-
441
- def __call__(self, data):
442
- brightness_lower = 1 - self.brightness_range
443
- brightness_upper = 1 + self.brightness_range
444
- contrast_lower = 1 - self.contrast_range
445
- contrast_upper = 1 + self.contrast_range
446
- saturation_lower = 1 - self.saturation_range
447
- saturation_upper = 1 + self.saturation_range
448
- hue_lower = -self.hue_range
449
- hue_upper = self.hue_range
450
- ops = [
451
- functional.brightness, functional.contrast, functional.saturation,
452
- functional.hue
453
- ]
454
- random.shuffle(ops)
455
- params_dict = {
456
- 'brightness': {
457
- 'brightness_lower': brightness_lower,
458
- 'brightness_upper': brightness_upper
459
- },
460
- 'contrast': {
461
- 'contrast_lower': contrast_lower,
462
- 'contrast_upper': contrast_upper
463
- },
464
- 'saturation': {
465
- 'saturation_lower': saturation_lower,
466
- 'saturation_upper': saturation_upper
467
- },
468
- 'hue': {
469
- 'hue_lower': hue_lower,
470
- 'hue_upper': hue_upper
471
- }
472
- }
473
- prob_dict = {
474
- 'brightness': self.brightness_prob,
475
- 'contrast': self.contrast_prob,
476
- 'saturation': self.saturation_prob,
477
- 'hue': self.hue_prob
478
- }
479
-
480
- im = data['img'].astype('uint8')
481
- im = Image.fromarray(im)
482
- for id in range(len(ops)):
483
- params = params_dict[ops[id].__name__]
484
- params['im'] = im
485
- prob = prob_dict[ops[id].__name__]
486
- if np.random.uniform(0, 1) < prob:
487
- im = ops[id](**params)
488
- data['img'] = np.asarray(im)
489
-
490
- for key in data.get('gt_fields', []):
491
- if key in ['alpha', 'trimap']:
492
- continue
493
- else:
494
- im = data[key].astype('uint8')
495
- im = Image.fromarray(im)
496
- for id in range(len(ops)):
497
- params = params_dict[ops[id].__name__]
498
- params['im'] = im
499
- prob = prob_dict[ops[id].__name__]
500
- if np.random.uniform(0, 1) < prob:
501
- im = ops[id](**params)
502
- data[key] = np.asarray(im)
503
- return data
504
-
505
-
506
- if __name__ == "__main__":
507
- transforms = [RandomDistort()]
508
- transforms = Compose(transforms)
509
- fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png'
510
- alpha_path = fg_path.replace('fg', 'alpha')
511
- bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg'
512
- data = {}
513
- data['fg'] = cv2.imread(fg_path)
514
- data['bg'] = cv2.imread(bg_path)
515
- h, w, c = data['fg'].shape
516
- data['bg'] = cv2.resize(data['bg'], (w, h))
517
- alpha = cv2.imread(alpha_path)
518
- data['alpha'] = alpha[:, :, 0]
519
- alpha = alpha / 255.
520
- data['img'] = alpha * data['fg'] + (1 - alpha) * data['bg']
521
-
522
- data['gt_fields'] = ['fg', 'bg']
523
- print(data['img'].shape)
524
- for key in data['gt_fields']:
525
- print(data[key].shape)
526
- # import pdb
527
- # pdb.set_trace()
528
- data = transforms(data)
529
- print(data['img'].dtype, data['img'].shape)
530
- cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DragGan/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/networks.py DELETED
@@ -1,966 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- import numpy as np
12
- import torch
13
- from torch_utils import misc
14
- from torch_utils import persistence
15
- from torch_utils.ops import conv2d_resample
16
- from torch_utils.ops import upfirdn2d
17
- from torch_utils.ops import bias_act
18
- from torch_utils.ops import fma
19
-
20
- # ----------------------------------------------------------------------------
21
-
22
-
23
- @misc.profiled_function
24
- def normalize_2nd_moment(x, dim=1, eps=1e-8):
25
- return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
26
-
27
- # ----------------------------------------------------------------------------
28
-
29
-
30
- @misc.profiled_function
31
- def modulated_conv2d(
32
- # Input tensor of shape [batch_size, in_channels, in_height, in_width].
33
- x,
34
- # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
35
- weight,
36
- # Modulation coefficients of shape [batch_size, in_channels].
37
- styles,
38
- noise=None, # Optional noise tensor to add to the output activations.
39
- up=1, # Integer upsampling factor.
40
- down=1, # Integer downsampling factor.
41
- padding=0, # Padding with respect to the upsampled image.
42
- # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
43
- resample_filter=None,
44
- demodulate=True, # Apply weight demodulation?
45
- # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
46
- flip_weight=True,
47
- # Perform modulation, convolution, and demodulation as a single fused operation?
48
- fused_modconv=True,
49
- ):
50
- batch_size = x.shape[0]
51
- out_channels, in_channels, kh, kw = weight.shape
52
- misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
53
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
54
- misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
55
-
56
- # Pre-normalize inputs to avoid FP16 overflow.
57
- if x.dtype == torch.float16 and demodulate:
58
- weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
59
- weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
60
- styles = styles / \
61
- styles.norm(float('inf'), dim=1, keepdim=True) # max_I
62
-
63
- # Calculate per-sample weights and demodulation coefficients.
64
- w = None
65
- dcoefs = None
66
- if demodulate or fused_modconv:
67
- w = weight.unsqueeze(0) # [NOIkk]
68
- w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
69
- if demodulate:
70
- dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
71
- if demodulate and fused_modconv:
72
- w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
73
-
74
- # Execute by scaling the activations before and after the convolution.
75
- if not fused_modconv:
76
- x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
77
- x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
78
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
79
- if demodulate and noise is not None:
80
- x = fma.fma(x, dcoefs.to(x.dtype).reshape(
81
- batch_size, -1, 1, 1), noise.to(x.dtype))
82
- elif demodulate:
83
- x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
84
- elif noise is not None:
85
- x = x.add_(noise.to(x.dtype))
86
- return x
87
-
88
- # Execute as one fused op using grouped convolution.
89
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
90
- batch_size = int(batch_size)
91
- misc.assert_shape(x, [batch_size, in_channels, None, None])
92
- x = x.reshape(1, -1, *x.shape[2:])
93
- w = w.reshape(-1, in_channels, kh, kw)
94
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
95
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
96
- x = x.reshape(batch_size, -1, *x.shape[2:])
97
- if noise is not None:
98
- x = x.add_(noise)
99
- return x
100
-
101
- # ----------------------------------------------------------------------------
102
-
103
-
104
- @persistence.persistent_class
105
- class FullyConnectedLayer(torch.nn.Module):
106
- def __init__(self,
107
- in_features, # Number of input features.
108
- out_features, # Number of output features.
109
- bias=True, # Apply additive bias before the activation function?
110
- # Activation function: 'relu', 'lrelu', etc.
111
- activation='linear',
112
- lr_multiplier=1, # Learning rate multiplier.
113
- bias_init=0, # Initial value for the additive bias.
114
- ):
115
- super().__init__()
116
- self.activation = activation
117
- self.weight = torch.nn.Parameter(torch.randn(
118
- [out_features, in_features]) / lr_multiplier)
119
- self.bias = torch.nn.Parameter(torch.full(
120
- [out_features], np.float32(bias_init))) if bias else None
121
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
122
- self.bias_gain = lr_multiplier
123
-
124
- def forward(self, x):
125
- w = self.weight.to(x.dtype) * self.weight_gain
126
- b = self.bias
127
- if b is not None:
128
- b = b.to(x.dtype)
129
- if self.bias_gain != 1:
130
- b = b * self.bias_gain
131
-
132
- if self.activation == 'linear' and b is not None:
133
- x = torch.addmm(b.unsqueeze(0), x, w.t())
134
- else:
135
- x = x.matmul(w.t())
136
- x = bias_act.bias_act(x, b, act=self.activation)
137
- return x
138
-
139
- # ----------------------------------------------------------------------------
140
-
141
-
142
- @persistence.persistent_class
143
- class Conv2dLayer(torch.nn.Module):
144
- def __init__(self,
145
- in_channels, # Number of input channels.
146
- out_channels, # Number of output channels.
147
- # Width and height of the convolution kernel.
148
- kernel_size,
149
- bias=True, # Apply additive bias before the activation function?
150
- # Activation function: 'relu', 'lrelu', etc.
151
- activation='linear',
152
- up=1, # Integer upsampling factor.
153
- down=1, # Integer downsampling factor.
154
- # Low-pass filter to apply when resampling activations.
155
- resample_filter=[1, 3, 3, 1],
156
- # Clamp the output to +-X, None = disable clamping.
157
- conv_clamp=None,
158
- channels_last=False, # Expect the input to have memory_format=channels_last?
159
- trainable=True, # Update the weights of this layer during training?
160
- ):
161
- super().__init__()
162
- self.activation = activation
163
- self.up = up
164
- self.down = down
165
- self.conv_clamp = conv_clamp
166
- self.register_buffer(
167
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
168
- self.padding = kernel_size // 2
169
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
170
- self.act_gain = bias_act.activation_funcs[activation].def_gain
171
-
172
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
173
- weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
174
- memory_format=memory_format)
175
- bias = torch.zeros([out_channels]) if bias else None
176
- if trainable:
177
- self.weight = torch.nn.Parameter(weight)
178
- self.bias = torch.nn.Parameter(bias) if bias is not None else None
179
- else:
180
- self.register_buffer('weight', weight)
181
- if bias is not None:
182
- self.register_buffer('bias', bias)
183
- else:
184
- self.bias = None
185
-
186
- def forward(self, x, gain=1):
187
- w = self.weight * self.weight_gain
188
- b = self.bias.to(x.dtype) if self.bias is not None else None
189
- flip_weight = (self.up == 1) # slightly faster
190
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
191
- x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
192
-
193
- act_gain = self.act_gain * gain
194
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
195
- x = bias_act.bias_act(x, b, act=self.activation,
196
- gain=act_gain, clamp=act_clamp)
197
- return x
198
-
199
- # ----------------------------------------------------------------------------
200
-
201
-
202
- @persistence.persistent_class
203
- class MappingNetwork(torch.nn.Module):
204
- def __init__(self,
205
- # Input latent (Z) dimensionality, 0 = no latent.
206
- z_dim,
207
- # Conditioning label (C) dimensionality, 0 = no label.
208
- c_dim,
209
- # Intermediate latent (W) dimensionality.
210
- w_dim,
211
- # Number of intermediate latents to output, None = do not broadcast.
212
- num_ws,
213
- num_layers=8, # Number of mapping layers.
214
- # Label embedding dimensionality, None = same as w_dim.
215
- embed_features=None,
216
- # Number of intermediate features in the mapping layers, None = same as w_dim.
217
- layer_features=None,
218
- # Activation function: 'relu', 'lrelu', etc.
219
- activation='lrelu',
220
- # Learning rate multiplier for the mapping layers.
221
- lr_multiplier=0.01,
222
- # Decay for tracking the moving average of W during training, None = do not track.
223
- w_avg_beta=0.995,
224
- ):
225
- super().__init__()
226
- self.z_dim = z_dim
227
- self.c_dim = c_dim
228
- self.w_dim = w_dim
229
- self.num_ws = num_ws
230
- self.num_layers = num_layers
231
- self.w_avg_beta = w_avg_beta
232
-
233
- if embed_features is None:
234
- embed_features = w_dim
235
- if c_dim == 0:
236
- embed_features = 0
237
- if layer_features is None:
238
- layer_features = w_dim
239
- features_list = [z_dim + embed_features] + \
240
- [layer_features] * (num_layers - 1) + [w_dim]
241
-
242
- if c_dim > 0:
243
- self.embed = FullyConnectedLayer(c_dim, embed_features)
244
- for idx in range(num_layers):
245
- in_features = features_list[idx]
246
- out_features = features_list[idx + 1]
247
- layer = FullyConnectedLayer(
248
- in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
249
- setattr(self, f'fc{idx}', layer)
250
-
251
- if num_ws is not None and w_avg_beta is not None:
252
- self.register_buffer('w_avg', torch.zeros([w_dim]))
253
-
254
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
255
- # Embed, normalize, and concat inputs.
256
- x = None
257
- with torch.autograd.profiler.record_function('input'):
258
- if self.z_dim > 0:
259
- misc.assert_shape(z, [None, self.z_dim])
260
- x = normalize_2nd_moment(z.to(torch.float32))
261
- if self.c_dim > 0:
262
- misc.assert_shape(c, [None, self.c_dim])
263
- y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
264
- x = torch.cat([x, y], dim=1) if x is not None else y
265
-
266
- # Main layers.
267
- for idx in range(self.num_layers):
268
- layer = getattr(self, f'fc{idx}')
269
- x = layer(x)
270
-
271
- # Update moving average of W.
272
- if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
273
- with torch.autograd.profiler.record_function('update_w_avg'):
274
- self.w_avg.copy_(x.detach().mean(
275
- dim=0).lerp(self.w_avg, self.w_avg_beta))
276
-
277
- # Broadcast.
278
- if self.num_ws is not None:
279
- with torch.autograd.profiler.record_function('broadcast'):
280
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
281
-
282
- # Apply truncation.
283
- if truncation_psi != 1:
284
- with torch.autograd.profiler.record_function('truncate'):
285
- assert self.w_avg_beta is not None
286
- if self.num_ws is None or truncation_cutoff is None:
287
- x = self.w_avg.lerp(x, truncation_psi)
288
- else:
289
- x[:, :truncation_cutoff] = self.w_avg.lerp(
290
- x[:, :truncation_cutoff], truncation_psi)
291
- return x
292
-
293
- # ----------------------------------------------------------------------------
294
-
295
-
296
- @persistence.persistent_class
297
- class SynthesisLayer(torch.nn.Module):
298
- def __init__(self,
299
- in_channels, # Number of input channels.
300
- out_channels, # Number of output channels.
301
- # Intermediate latent (W) dimensionality.
302
- w_dim,
303
- resolution, # Resolution of this layer.
304
- kernel_size=3, # Convolution kernel size.
305
- up=1, # Integer upsampling factor.
306
- use_noise=True, # Enable noise input?
307
- # Activation function: 'relu', 'lrelu', etc.
308
- activation='lrelu',
309
- # Low-pass filter to apply when resampling activations.
310
- resample_filter=[1, 3, 3, 1],
311
- # Clamp the output of convolution layers to +-X, None = disable clamping.
312
- conv_clamp=None,
313
- channels_last=False, # Use channels_last format for the weights?
314
- square=False, # default if for rectangle images
315
- ):
316
- super().__init__()
317
- self.resolution = resolution
318
- self.up = up
319
- self.use_noise = use_noise
320
- self.activation = activation
321
- self.conv_clamp = conv_clamp
322
- self.register_buffer(
323
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
324
- self.padding = kernel_size // 2
325
- self.act_gain = bias_act.activation_funcs[activation].def_gain
326
- self.square = square
327
-
328
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
329
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
330
- self.weight = torch.nn.Parameter(torch.randn(
331
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
332
- if use_noise:
333
- if self.square:
334
- self.register_buffer(
335
- 'noise_const', torch.randn([resolution, resolution]))
336
- else:
337
- self.register_buffer('noise_const', torch.randn(
338
- [resolution, resolution // 2]))
339
- self.noise_strength = torch.nn.Parameter(torch.zeros([]))
340
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
341
-
342
- def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
343
- assert noise_mode in ['random', 'const', 'none']
344
- in_resolution = self.resolution // self.up
345
- if self.square:
346
- misc.assert_shape(
347
- x, [None, self.weight.shape[1], in_resolution, in_resolution])
348
- else:
349
- misc.assert_shape(
350
- x, [None, self.weight.shape[1], in_resolution, in_resolution // 2])
351
- styles = self.affine(w)
352
-
353
- noise = None
354
- if self.use_noise and noise_mode == 'random':
355
- if self.square:
356
- noise = torch.randn(
357
- [x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
358
- else:
359
- noise = torch.randn(
360
- [x.shape[0], 1, self.resolution, self.resolution // 2], device=x.device) * self.noise_strength
361
- if self.use_noise and noise_mode == 'const':
362
- noise = self.noise_const * self.noise_strength
363
-
364
- flip_weight = (self.up == 1) # slightly faster
365
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
366
- padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
367
-
368
- act_gain = self.act_gain * gain
369
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
370
- x = bias_act.bias_act(x, self.bias.to(
371
- x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
372
- return x
373
-
374
- # ----------------------------------------------------------------------------
375
-
376
-
377
- @persistence.persistent_class
378
- class ToRGBLayer(torch.nn.Module):
379
- def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
380
- super().__init__()
381
- self.conv_clamp = conv_clamp
382
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
383
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
384
- self.weight = torch.nn.Parameter(torch.randn(
385
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
386
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
387
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
388
-
389
- def forward(self, x, w, fused_modconv=True):
390
- styles = self.affine(w) * self.weight_gain
391
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
392
- demodulate=False, fused_modconv=fused_modconv)
393
- x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
394
- return x
395
-
396
- # ----------------------------------------------------------------------------
397
-
398
-
399
- @persistence.persistent_class
400
- class SynthesisBlock(torch.nn.Module):
401
- def __init__(self,
402
- # Number of input channels, 0 = first block.
403
- in_channels,
404
- # Number of output channels.
405
- out_channels,
406
- # Intermediate latent (W) dimensionality.
407
- w_dim,
408
- # Resolution of this block.
409
- resolution,
410
- # Number of output color channels.
411
- img_channels,
412
- is_last, # Is this the last block?
413
- # Architecture: 'orig', 'skip', 'resnet'.
414
- architecture='skip',
415
- # Low-pass filter to apply when resampling activations.
416
- resample_filter=[1, 3, 3, 1],
417
- # Clamp the output of convolution layers to +-X, None = disable clamping.
418
- conv_clamp=None,
419
- use_fp16=False, # Use FP16 for this block?
420
- fp16_channels_last=False, # Use channels-last memory format with FP16?
421
- square=False, # default is for rectangle images
422
- # Arguments for SynthesisLayer.
423
- **layer_kwargs,
424
- ):
425
- assert architecture in ['orig', 'skip', 'resnet']
426
- super().__init__()
427
- self.in_channels = in_channels
428
- self.w_dim = w_dim
429
- self.resolution = resolution
430
- self.img_channels = img_channels
431
- self.is_last = is_last
432
- self.architecture = architecture
433
- self.use_fp16 = use_fp16
434
- self.channels_last = (use_fp16 and fp16_channels_last)
435
- self.register_buffer(
436
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
437
- self.num_conv = 0
438
- self.num_torgb = 0
439
- self.square = square
440
-
441
- if in_channels == 0:
442
- if self.square:
443
- self.const = torch.nn.Parameter(torch.randn(
444
- [out_channels, resolution, resolution]))
445
- else: # rectangle
446
- self.const = torch.nn.Parameter(torch.randn(
447
- [out_channels, resolution, resolution // 2]))
448
-
449
- if in_channels != 0:
450
- self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
451
- resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
452
- self.num_conv += 1
453
-
454
- self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
455
- conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
456
- self.num_conv += 1
457
-
458
- if is_last or architecture == 'skip':
459
- self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
460
- conv_clamp=conv_clamp, channels_last=self.channels_last)
461
- self.num_torgb += 1
462
-
463
- if in_channels != 0 and architecture == 'resnet':
464
- self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
465
- resample_filter=resample_filter, channels_last=self.channels_last)
466
-
467
- def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs):
468
- misc.assert_shape(
469
- ws, [None, self.num_conv + self.num_torgb, self.w_dim])
470
- w_iter = iter(ws.unbind(dim=1))
471
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
472
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
473
- if fused_modconv is None:
474
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
475
- fused_modconv = (not self.training) and (
476
- dtype == torch.float32 or int(x.shape[0]) == 1)
477
-
478
- # Input.
479
- if self.in_channels == 0:
480
- x = self.const.to(dtype=dtype, memory_format=memory_format)
481
- x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
482
- else:
483
- if self.square:
484
- misc.assert_shape(
485
- x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
486
- else: # rectangle
487
- misc.assert_shape(
488
- x, [None, self.in_channels, self.resolution // 2, self.resolution // 4])
489
- x = x.to(dtype=dtype, memory_format=memory_format)
490
-
491
- # Main layers.
492
- if self.in_channels == 0:
493
- x = self.conv1(x, next(w_iter),
494
- fused_modconv=fused_modconv, **layer_kwargs)
495
- elif self.architecture == 'resnet':
496
- y = self.skip(x, gain=np.sqrt(0.5))
497
- x = self.conv0(x, next(w_iter),
498
- fused_modconv=fused_modconv, **layer_kwargs)
499
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
500
- gain=np.sqrt(0.5), **layer_kwargs)
501
- x = y.add_(x)
502
- else:
503
- x = self.conv0(x, next(w_iter),
504
- fused_modconv=fused_modconv, **layer_kwargs)
505
- x = self.conv1(x, next(w_iter),
506
- fused_modconv=fused_modconv, **layer_kwargs)
507
-
508
- # ToRGB.
509
- if img is not None:
510
- if self.square:
511
- misc.assert_shape(
512
- img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
513
- else:
514
- misc.assert_shape(
515
- img, [None, self.img_channels, self.resolution // 2, self.resolution // 4])
516
- img = upfirdn2d.upsample2d(img, self.resample_filter)
517
- if self.is_last or self.architecture == 'skip':
518
- y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
519
- y = y.to(dtype=torch.float32,
520
- memory_format=torch.contiguous_format)
521
- img = img.add_(y) if img is not None else y
522
-
523
- assert x.dtype == dtype
524
- assert img is None or img.dtype == torch.float32
525
- return x, img
526
-
527
- # ----------------------------------------------------------------------------
528
-
529
-
530
- @persistence.persistent_class
531
- class SynthesisNetwork(torch.nn.Module):
532
- def __init__(self,
533
- # Intermediate latent (W) dimensionality.
534
- w_dim,
535
- img_resolution, # Output image resolution.
536
- img_channels, # Number of color channels.
537
- square,
538
- # Overall multiplier for the number of channels.
539
- channel_base=32768,
540
- # Maximum number of channels in any layer.
541
- channel_max=512,
542
- # Use FP16 for the N highest resolutions.
543
- num_fp16_res=0,
544
- **block_kwargs, # Arguments for SynthesisBlock.
545
- ):
546
- assert img_resolution >= 4 and img_resolution & (
547
- img_resolution - 1) == 0
548
- super().__init__()
549
- self.w_dim = w_dim
550
- self.img_resolution = img_resolution
551
- self.img_resolution_log2 = int(np.log2(img_resolution))
552
- self.img_channels = img_channels
553
- self.square = square
554
- self.block_resolutions = [
555
- 2 ** i for i in range(2, self.img_resolution_log2 + 1)]
556
- channels_dict = {res: min(channel_base // res, channel_max)
557
- for res in self.block_resolutions}
558
- fp16_resolution = max(
559
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
560
-
561
- self.num_ws = 0
562
- for res in self.block_resolutions:
563
- in_channels = channels_dict[res // 2] if res > 4 else 0
564
- out_channels = channels_dict[res]
565
- use_fp16 = (res >= fp16_resolution)
566
- is_last = (res == self.img_resolution)
567
- block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
568
- img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, square=square, **block_kwargs)
569
- self.num_ws += block.num_conv
570
- if is_last:
571
- self.num_ws += block.num_torgb
572
- setattr(self, f'b{res}', block)
573
-
574
- def forward(self, ws, return_feature=False, **block_kwargs):
575
- block_ws = []
576
- features = []
577
- with torch.autograd.profiler.record_function('split_ws'):
578
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
579
- ws = ws.to(torch.float32)
580
- w_idx = 0
581
- for res in self.block_resolutions:
582
- block = getattr(self, f'b{res}')
583
- block_ws.append(
584
- ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
585
- w_idx += block.num_conv
586
-
587
- x = img = None
588
- for res, cur_ws in zip(self.block_resolutions, block_ws):
589
- block = getattr(self, f'b{res}')
590
- x, img = block(x, img, cur_ws, **block_kwargs)
591
- features.append(x)
592
- if return_feature:
593
- return img, features
594
- else:
595
- return img
596
-
597
- # ----------------------------------------------------------------------------
598
-
599
-
600
- @persistence.persistent_class
601
- class Generator(torch.nn.Module):
602
- def __init__(self,
603
- z_dim, # Input latent (Z) dimensionality.
604
- # Conditioning label (C) dimensionality.
605
- c_dim,
606
- # Intermediate latent (W) dimensionality.
607
- w_dim,
608
- img_resolution, # Output resolution.
609
- square,
610
- img_channels, # Number of output color channels.
611
- mapping_kwargs={}, # Arguments for MappingNetwork.
612
- synthesis_kwargs={}, # Arguments for SynthesisNetwork.
613
- padding=False
614
- ):
615
- super().__init__()
616
- self.z_dim = z_dim
617
- self.c_dim = c_dim
618
- self.w_dim = w_dim
619
- self.square = square
620
- self.img_resolution = img_resolution
621
- self.img_channels = img_channels
622
- self.padding = padding
623
- self.synthesis = SynthesisNetwork(
624
- w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, square=square, **synthesis_kwargs)
625
- self.num_ws = self.synthesis.num_ws
626
- self.mapping = MappingNetwork(
627
- z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
628
-
629
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, input_is_w=False, return_feature=False, **synthesis_kwargs):
630
- if input_is_w:
631
- ws = z
632
- if ws.dim() == 2:
633
- ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
634
- else:
635
- ws = self.mapping(z, c, truncation_psi=truncation_psi,
636
- truncation_cutoff=truncation_cutoff)
637
- img = self.synthesis(
638
- ws, return_feature=return_feature, **synthesis_kwargs)
639
- if return_feature:
640
- img, feature = img
641
- if self.padding:
642
- pad = (img.size(2) - img.size(3)) // 2
643
- img = torch.nn.functional.pad(img, (pad, pad), "constant", 1)
644
- if return_feature:
645
- for i, feat in enumerate(feature):
646
- pad = (feat.size(2) - feat.size(3)) // 2
647
- feature[i] = torch.nn.functional.pad(
648
- feat, (pad, pad), "constant", 0)
649
- if return_feature:
650
- return img, feature
651
- else:
652
- return img
653
-
654
- # ----------------------------------------------------------------------------
655
-
656
-
657
- @persistence.persistent_class
658
- class DiscriminatorBlock(torch.nn.Module):
659
- def __init__(self,
660
- # Number of input channels, 0 = first block.
661
- in_channels,
662
- # Number of intermediate channels.
663
- tmp_channels,
664
- # Number of output channels.
665
- out_channels,
666
- # Resolution of this block.
667
- resolution,
668
- # Number of input color channels.
669
- img_channels,
670
- # Index of the first layer.
671
- first_layer_idx,
672
- # Architecture: 'orig', 'skip', 'resnet'.
673
- architecture='resnet',
674
- # Activation function: 'relu', 'lrelu', etc.
675
- activation='lrelu',
676
- # Low-pass filter to apply when resampling activations.
677
- resample_filter=[1, 3, 3, 1],
678
- # Clamp the output of convolution layers to +-X, None = disable clamping.
679
- conv_clamp=None,
680
- use_fp16=False, # Use FP16 for this block?
681
- fp16_channels_last=False, # Use channels-last memory format with FP16?
682
- # Freeze-D: Number of layers to freeze.
683
- freeze_layers=0,
684
- square=False,
685
- ):
686
- assert in_channels in [0, tmp_channels]
687
- assert architecture in ['orig', 'skip', 'resnet']
688
- super().__init__()
689
- self.in_channels = in_channels
690
- self.resolution = resolution
691
- self.img_channels = img_channels
692
- self.first_layer_idx = first_layer_idx
693
- self.architecture = architecture
694
- self.use_fp16 = use_fp16
695
- self.channels_last = (use_fp16 and fp16_channels_last)
696
- self.register_buffer(
697
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
698
- self.square = square
699
-
700
- self.num_layers = 0
701
-
702
- def trainable_gen():
703
- while True:
704
- layer_idx = self.first_layer_idx + self.num_layers
705
- trainable = (layer_idx >= freeze_layers)
706
- self.num_layers += 1
707
- yield trainable
708
- trainable_iter = trainable_gen()
709
-
710
- if in_channels == 0 or architecture == 'skip':
711
- self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
712
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
713
-
714
- self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
715
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
716
-
717
- self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
718
- trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
719
-
720
- if architecture == 'resnet':
721
- self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
722
- trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
723
-
724
- def forward(self, x, img, force_fp32=False):
725
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
726
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
727
-
728
- # Input.
729
- if x is not None:
730
- if self.square:
731
- misc.assert_shape(
732
- x, [None, self.in_channels, self.resolution, self.resolution])
733
- else:
734
- misc.assert_shape(
735
- x, [None, self.in_channels, self.resolution, self.resolution // 2])
736
- x = x.to(dtype=dtype, memory_format=memory_format)
737
-
738
- # FromRGB.
739
- if self.in_channels == 0 or self.architecture == 'skip':
740
- if self.square:
741
- misc.assert_shape(
742
- img, [None, self.img_channels, self.resolution, self.resolution])
743
- else:
744
- misc.assert_shape(
745
- img, [None, self.img_channels, self.resolution, self.resolution // 2])
746
- img = img.to(dtype=dtype, memory_format=memory_format)
747
- y = self.fromrgb(img)
748
- x = x + y if x is not None else y
749
- img = upfirdn2d.downsample2d(
750
- img, self.resample_filter) if self.architecture == 'skip' else None
751
-
752
- # Main layers.
753
- if self.architecture == 'resnet':
754
- y = self.skip(x, gain=np.sqrt(0.5))
755
- x = self.conv0(x)
756
- x = self.conv1(x, gain=np.sqrt(0.5))
757
- x = y.add_(x)
758
- else:
759
- x = self.conv0(x)
760
- x = self.conv1(x)
761
-
762
- assert x.dtype == dtype
763
- return x, img
764
-
765
- # ----------------------------------------------------------------------------
766
-
767
-
768
- @persistence.persistent_class
769
- class MinibatchStdLayer(torch.nn.Module):
770
- def __init__(self, group_size, num_channels=1):
771
- super().__init__()
772
- self.group_size = group_size
773
- self.num_channels = num_channels
774
-
775
- def forward(self, x):
776
- N, C, H, W = x.shape
777
- with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
778
- G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
779
- N)) if self.group_size is not None else N
780
- F = self.num_channels
781
- c = C // F
782
-
783
- # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
784
- y = x.reshape(G, -1, F, c, H, W)
785
- # [GnFcHW] Subtract mean over group.
786
- y = y - y.mean(dim=0)
787
- # [nFcHW] Calc variance over group.
788
- y = y.square().mean(dim=0)
789
- y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
790
- # [nF] Take average over channels and pixels.
791
- y = y.mean(dim=[2, 3, 4])
792
- y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
793
- # [NFHW] Replicate over group and pixels.
794
- y = y.repeat(G, 1, H, W)
795
- # [NCHW] Append to input as new channels.
796
- x = torch.cat([x, y], dim=1)
797
- return x
798
-
799
- # ----------------------------------------------------------------------------
800
-
801
-
802
- @persistence.persistent_class
803
- class DiscriminatorEpilogue(torch.nn.Module):
804
- def __init__(self,
805
- in_channels, # Number of input channels.
806
- # Dimensionality of mapped conditioning label, 0 = no label.
807
- cmap_dim,
808
- resolution, # Resolution of this block.
809
- # Number of input color channels.
810
- img_channels,
811
- # Architecture: 'orig', 'skip', 'resnet'.
812
- architecture='resnet',
813
- # Group size for the minibatch standard deviation layer, None = entire minibatch.
814
- mbstd_group_size=4,
815
- # Number of features for the minibatch standard deviation layer, 0 = disable.
816
- mbstd_num_channels=1,
817
- # Activation function: 'relu', 'lrelu', etc.
818
- activation='lrelu',
819
- # Clamp the output of convolution layers to +-X, None = disable clamping.
820
- conv_clamp=None,
821
- square=False,
822
- ):
823
- assert architecture in ['orig', 'skip', 'resnet']
824
- super().__init__()
825
- self.in_channels = in_channels
826
- self.cmap_dim = cmap_dim
827
- self.resolution = resolution
828
- self.img_channels = img_channels
829
- self.architecture = architecture
830
- self.square = square
831
-
832
- if architecture == 'skip':
833
- self.fromrgb = Conv2dLayer(
834
- img_channels, in_channels, kernel_size=1, activation=activation)
835
- self.mbstd = MinibatchStdLayer(
836
- group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
837
- self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
838
- kernel_size=3, activation=activation, conv_clamp=conv_clamp)
839
-
840
- if self.square:
841
- self.fc = FullyConnectedLayer(
842
- in_channels * (resolution ** 2), in_channels, activation=activation)
843
- else:
844
- self.fc = FullyConnectedLayer(
845
- in_channels * (resolution ** 2 // 2), in_channels, activation=activation)
846
-
847
- self.out = FullyConnectedLayer(
848
- in_channels, 1 if cmap_dim == 0 else cmap_dim)
849
-
850
- def forward(self, x, img, cmap, force_fp32=False):
851
- if self.square:
852
- misc.assert_shape(x, [None, self.in_channels,
853
- self.resolution, self.resolution])
854
- else:
855
- misc.assert_shape(
856
- x, [None, self.in_channels, self.resolution, self.resolution // 2]) # [NCHW]
857
- _ = force_fp32 # unused
858
- dtype = torch.float32
859
- memory_format = torch.contiguous_format
860
-
861
- # FromRGB.
862
- x = x.to(dtype=dtype, memory_format=memory_format)
863
- if self.architecture == 'skip':
864
- if self.square:
865
- misc.assert_shape(
866
- img, [None, self.img_channels, self.resolution, self.resolution])
867
- else:
868
- misc.assert_shape(
869
- img, [None, self.img_channels, self.resolution, self.resolution // 2])
870
- img = img.to(dtype=dtype, memory_format=memory_format)
871
- x = x + self.fromrgb(img)
872
-
873
- # Main layers.
874
- if self.mbstd is not None:
875
- x = self.mbstd(x)
876
- x = self.conv(x)
877
- x = self.fc(x.flatten(1))
878
- x = self.out(x)
879
-
880
- # Conditioning.
881
- if self.cmap_dim > 0:
882
- misc.assert_shape(cmap, [None, self.cmap_dim])
883
- x = (x * cmap).sum(dim=1, keepdim=True) * \
884
- (1 / np.sqrt(self.cmap_dim))
885
-
886
- assert x.dtype == dtype
887
- return x
888
-
889
- # ----------------------------------------------------------------------------
890
-
891
-
892
- @persistence.persistent_class
893
- class Discriminator(torch.nn.Module):
894
- def __init__(self,
895
- # Conditioning label (C) dimensionality.
896
- c_dim,
897
- img_resolution, # Input resolution.
898
- # Number of input color channels.
899
- img_channels,
900
- # Architecture: 'orig', 'skip', 'resnet'.
901
- architecture='resnet',
902
- # Overall multiplier for the number of channels.
903
- channel_base=32768,
904
- # Maximum number of channels in any layer.
905
- channel_max=512,
906
- # Use FP16 for the N highest resolutions.
907
- num_fp16_res=0,
908
- # Clamp the output of convolution layers to +-X, None = disable clamping.
909
- conv_clamp=None,
910
- # Dimensionality of mapped conditioning label, None = default.
911
- cmap_dim=None,
912
- square=False, # default for rectangle images
913
- block_kwargs={}, # Arguments for DiscriminatorBlock.
914
- mapping_kwargs={}, # Arguments for MappingNetwork.
915
- # Arguments for DiscriminatorEpilogue.
916
- epilogue_kwargs={},
917
- ):
918
- super().__init__()
919
- self.c_dim = c_dim
920
- self.img_resolution = img_resolution
921
- self.img_resolution_log2 = int(np.log2(img_resolution))
922
- self.img_channels = img_channels
923
- self.square = square
924
- self.block_resolutions = [
925
- 2 ** i for i in range(self.img_resolution_log2, 2, -1)]
926
- channels_dict = {res: min(channel_base // res, channel_max)
927
- for res in self.block_resolutions + [4]}
928
- fp16_resolution = max(
929
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
930
-
931
- if cmap_dim is None:
932
- cmap_dim = channels_dict[4]
933
- if c_dim == 0:
934
- cmap_dim = 0
935
-
936
- common_kwargs = dict(img_channels=img_channels,
937
- architecture=architecture, conv_clamp=conv_clamp)
938
- cur_layer_idx = 0
939
- for res in self.block_resolutions:
940
- in_channels = channels_dict[res] if res < img_resolution else 0
941
- tmp_channels = channels_dict[res]
942
- out_channels = channels_dict[res // 2]
943
- use_fp16 = (res >= fp16_resolution)
944
- block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
945
- first_layer_idx=cur_layer_idx, use_fp16=use_fp16, square=square, **block_kwargs, **common_kwargs)
946
- setattr(self, f'b{res}', block)
947
- cur_layer_idx += block.num_layers
948
- if c_dim > 0:
949
- self.mapping = MappingNetwork(
950
- z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
951
- self.b4 = DiscriminatorEpilogue(
952
- channels_dict[4], cmap_dim=cmap_dim, resolution=4, square=square, **epilogue_kwargs, **common_kwargs)
953
-
954
- def forward(self, img, c, **block_kwargs):
955
- x = None
956
- for res in self.block_resolutions:
957
- block = getattr(self, f'b{res}')
958
- x, img = block(x, img, **block_kwargs)
959
-
960
- cmap = None
961
- if self.c_dim > 0:
962
- cmap = self.mapping(None, c)
963
- x = self.b4(x, img, cmap)
964
- return x
965
-
966
- # ----------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DragGan/DragGan-Inversion/stylegan_human/utils/log_utils.py DELETED
@@ -1,84 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
-
4
- import numpy as np
5
- from PIL import Image
6
- import wandb
7
- from pti.pti_configs import global_config
8
- import torch
9
- import matplotlib.pyplot as plt
10
-
11
-
12
- def log_image_from_w(w, G, name):
13
- img = get_image_from_w(w, G)
14
- pillow_image = Image.fromarray(img)
15
- wandb.log(
16
- {f"{name}": [
17
- wandb.Image(pillow_image, caption=f"current inversion {name}")]},
18
- step=global_config.training_step)
19
-
20
-
21
- def log_images_from_w(ws, G, names):
22
- for name, w in zip(names, ws):
23
- w = w.to(global_config.device)
24
- log_image_from_w(w, G, name)
25
-
26
-
27
- def plot_image_from_w(w, G):
28
- img = get_image_from_w(w, G)
29
- pillow_image = Image.fromarray(img)
30
- plt.imshow(pillow_image)
31
- plt.show()
32
-
33
-
34
- def plot_image(img):
35
- img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0,
36
- 255).to(torch.uint8).detach().cpu().numpy()
37
- pillow_image = Image.fromarray(img[0])
38
- plt.imshow(pillow_image)
39
- plt.show()
40
-
41
-
42
- def save_image(name, method_type, results_dir, image, run_id):
43
- image.save(f'{results_dir}/{method_type}_{name}_{run_id}.jpg')
44
-
45
-
46
- def save_w(w, G, name, method_type, results_dir):
47
- im = get_image_from_w(w, G)
48
- im = Image.fromarray(im, mode='RGB')
49
- save_image(name, method_type, results_dir, im)
50
-
51
-
52
- def save_concat_image(base_dir, image_latents, new_inv_image_latent, new_G,
53
- old_G,
54
- file_name,
55
- extra_image=None):
56
- images_to_save = []
57
- if extra_image is not None:
58
- images_to_save.append(extra_image)
59
- for latent in image_latents:
60
- images_to_save.append(get_image_from_w(latent, old_G))
61
- images_to_save.append(get_image_from_w(new_inv_image_latent, new_G))
62
- result_image = create_alongside_images(images_to_save)
63
- result_image.save(f'{base_dir}/{file_name}.jpg')
64
-
65
-
66
- def save_single_image(base_dir, image_latent, G, file_name):
67
- image_to_save = get_image_from_w(image_latent, G)
68
- image_to_save = Image.fromarray(image_to_save, mode='RGB')
69
- image_to_save.save(f'{base_dir}/{file_name}.jpg')
70
-
71
-
72
- def create_alongside_images(images):
73
- res = np.concatenate([np.array(image) for image in images], axis=1)
74
- return Image.fromarray(res, mode='RGB')
75
-
76
-
77
- def get_image_from_w(w, G):
78
- if len(w.size()) <= 2:
79
- w = w.unsqueeze(0)
80
- with torch.no_grad():
81
- img = G.synthesis(w, noise_mode='const')
82
- img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0,
83
- 255).to(torch.uint8).detach().cpu().numpy()
84
- return img[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/EXPOSUREEE/Ai-Image-Enhancer/tests/test_dataset.py DELETED
@@ -1,151 +0,0 @@
1
- import pytest
2
- import yaml
3
-
4
- from realesrgan.data.realesrgan_dataset import RealESRGANDataset
5
- from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset
6
-
7
-
8
- def test_realesrgan_dataset():
9
-
10
- with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f:
11
- opt = yaml.load(f, Loader=yaml.FullLoader)
12
-
13
- dataset = RealESRGANDataset(opt)
14
- assert dataset.io_backend_opt['type'] == 'disk' # io backend
15
- assert len(dataset) == 2 # whether to read correct meta info
16
- assert dataset.kernel_list == [
17
- 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
18
- ] # correct initialization the degradation configurations
19
- assert dataset.betag_range2 == [0.5, 4]
20
-
21
- # test __getitem__
22
- result = dataset.__getitem__(0)
23
- # check returned keys
24
- expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
25
- assert set(expected_keys).issubset(set(result.keys()))
26
- # check shape and contents
27
- assert result['gt'].shape == (3, 400, 400)
28
- assert result['kernel1'].shape == (21, 21)
29
- assert result['kernel2'].shape == (21, 21)
30
- assert result['sinc_kernel'].shape == (21, 21)
31
- assert result['gt_path'] == 'tests/data/gt/baboon.png'
32
-
33
- # ------------------ test lmdb backend -------------------- #
34
- opt['dataroot_gt'] = 'tests/data/gt.lmdb'
35
- opt['io_backend']['type'] = 'lmdb'
36
-
37
- dataset = RealESRGANDataset(opt)
38
- assert dataset.io_backend_opt['type'] == 'lmdb' # io backend
39
- assert len(dataset.paths) == 2 # whether to read correct meta info
40
- assert dataset.kernel_list == [
41
- 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
42
- ] # correct initialization the degradation configurations
43
- assert dataset.betag_range2 == [0.5, 4]
44
-
45
- # test __getitem__
46
- result = dataset.__getitem__(1)
47
- # check returned keys
48
- expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
49
- assert set(expected_keys).issubset(set(result.keys()))
50
- # check shape and contents
51
- assert result['gt'].shape == (3, 400, 400)
52
- assert result['kernel1'].shape == (21, 21)
53
- assert result['kernel2'].shape == (21, 21)
54
- assert result['sinc_kernel'].shape == (21, 21)
55
- assert result['gt_path'] == 'comic'
56
-
57
- # ------------------ test with sinc_prob = 0 -------------------- #
58
- opt['dataroot_gt'] = 'tests/data/gt.lmdb'
59
- opt['io_backend']['type'] = 'lmdb'
60
- opt['sinc_prob'] = 0
61
- opt['sinc_prob2'] = 0
62
- opt['final_sinc_prob'] = 0
63
- dataset = RealESRGANDataset(opt)
64
- result = dataset.__getitem__(0)
65
- # check returned keys
66
- expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
67
- assert set(expected_keys).issubset(set(result.keys()))
68
- # check shape and contents
69
- assert result['gt'].shape == (3, 400, 400)
70
- assert result['kernel1'].shape == (21, 21)
71
- assert result['kernel2'].shape == (21, 21)
72
- assert result['sinc_kernel'].shape == (21, 21)
73
- assert result['gt_path'] == 'baboon'
74
-
75
- # ------------------ lmdb backend should have paths ends with lmdb -------------------- #
76
- with pytest.raises(ValueError):
77
- opt['dataroot_gt'] = 'tests/data/gt'
78
- opt['io_backend']['type'] = 'lmdb'
79
- dataset = RealESRGANDataset(opt)
80
-
81
-
82
- def test_realesrgan_paired_dataset():
83
-
84
- with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f:
85
- opt = yaml.load(f, Loader=yaml.FullLoader)
86
-
87
- dataset = RealESRGANPairedDataset(opt)
88
- assert dataset.io_backend_opt['type'] == 'disk' # io backend
89
- assert len(dataset) == 2 # whether to read correct meta info
90
-
91
- # test __getitem__
92
- result = dataset.__getitem__(0)
93
- # check returned keys
94
- expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
95
- assert set(expected_keys).issubset(set(result.keys()))
96
- # check shape and contents
97
- assert result['gt'].shape == (3, 128, 128)
98
- assert result['lq'].shape == (3, 32, 32)
99
- assert result['gt_path'] == 'tests/data/gt/baboon.png'
100
- assert result['lq_path'] == 'tests/data/lq/baboon.png'
101
-
102
- # ------------------ test lmdb backend -------------------- #
103
- opt['dataroot_gt'] = 'tests/data/gt.lmdb'
104
- opt['dataroot_lq'] = 'tests/data/lq.lmdb'
105
- opt['io_backend']['type'] = 'lmdb'
106
-
107
- dataset = RealESRGANPairedDataset(opt)
108
- assert dataset.io_backend_opt['type'] == 'lmdb' # io backend
109
- assert len(dataset) == 2 # whether to read correct meta info
110
-
111
- # test __getitem__
112
- result = dataset.__getitem__(1)
113
- # check returned keys
114
- expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
115
- assert set(expected_keys).issubset(set(result.keys()))
116
- # check shape and contents
117
- assert result['gt'].shape == (3, 128, 128)
118
- assert result['lq'].shape == (3, 32, 32)
119
- assert result['gt_path'] == 'comic'
120
- assert result['lq_path'] == 'comic'
121
-
122
- # ------------------ test paired_paths_from_folder -------------------- #
123
- opt['dataroot_gt'] = 'tests/data/gt'
124
- opt['dataroot_lq'] = 'tests/data/lq'
125
- opt['io_backend'] = dict(type='disk')
126
- opt['meta_info'] = None
127
-
128
- dataset = RealESRGANPairedDataset(opt)
129
- assert dataset.io_backend_opt['type'] == 'disk' # io backend
130
- assert len(dataset) == 2 # whether to read correct meta info
131
-
132
- # test __getitem__
133
- result = dataset.__getitem__(0)
134
- # check returned keys
135
- expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
136
- assert set(expected_keys).issubset(set(result.keys()))
137
- # check shape and contents
138
- assert result['gt'].shape == (3, 128, 128)
139
- assert result['lq'].shape == (3, 32, 32)
140
-
141
- # ------------------ test normalization -------------------- #
142
- dataset.mean = [0.5, 0.5, 0.5]
143
- dataset.std = [0.5, 0.5, 0.5]
144
- # test __getitem__
145
- result = dataset.__getitem__(0)
146
- # check returned keys
147
- expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
148
- assert set(expected_keys).issubset(set(result.keys()))
149
- # check shape and contents
150
- assert result['gt'].shape == (3, 128, 128)
151
- assert result['lq'].shape == (3, 32, 32)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ElainaFanBoy/MusicGen/CONTRIBUTING.md DELETED
@@ -1,35 +0,0 @@
1
- # Contributing to Audiocraft
2
-
3
- We want to make contributing to this project as easy and transparent as
4
- possible.
5
-
6
- ## Pull Requests
7
-
8
- Audiocraft is the implementation of a research paper.
9
- Therefore, we do not plan on accepting many pull requests for new features.
10
- We certainly welcome them for bug fixes.
11
-
12
- 1. Fork the repo and create your branch from `main`.
13
- 2. If you've added code that should be tested, add tests.
14
- 3. If you've changed APIs, update the documentation.
15
- 4. Ensure the test suite passes.
16
- 5. Make sure your code lints.
17
- 6. If you haven't already, complete the Contributor License Agreement ("CLA").
18
-
19
- ## Contributor License Agreement ("CLA")
20
- In order to accept your pull request, we need you to submit a CLA. You only need
21
- to do this once to work on any of Meta's open source projects.
22
-
23
- Complete your CLA here: <https://code.facebook.com/cla>
24
-
25
- ## Issues
26
- We use GitHub issues to track public bugs. Please ensure your description is
27
- clear and has sufficient instructions to be able to reproduce the issue.
28
-
29
- Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
30
- disclosure of security bugs. In those cases, please go through the process
31
- outlined on that page and do not file a public issue.
32
-
33
- ## License
34
- By contributing to encodec, you agree that your contributions will be licensed
35
- under the LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/EsoCode/text-generation-webui/docs/Training-LoRAs.md DELETED
@@ -1,174 +0,0 @@
1
- ## Training Your Own LoRAs
2
-
3
- The WebUI seeks to make training your own LoRAs as easy as possible. It comes down to just a few simple steps:
4
-
5
- ### **Step 1**: Make a plan.
6
- - What base model do you want to use? The LoRA you make has to be matched up to a single architecture (eg LLaMA-13B) and cannot be transferred to others (eg LLaMA-7B, StableLM, etc. would all be different). Derivatives of the same model (eg Alpaca finetune of LLaMA-13B) might be transferrable, but even then it's best to train exactly on what you plan to use.
7
- - What model format do you want? At time of writing, 8-bit models are most stable, and 4-bit are supported but experimental. In the near future it is likely that 4-bit will be the best option for most users.
8
- - What are you training it on? Do you want it to learn real information, a simple format, ...?
9
-
10
- ### **Step 2**: Gather a dataset.
11
- - If you use a dataset similar to the [Alpaca](https://github.com/gururise/AlpacaDataCleaned/blob/main/alpaca_data_cleaned.json) format, that is natively supported by the `Formatted Dataset` input in the WebUI, with premade formatter options.
12
- - If you use a dataset that isn't matched to Alpaca's format, but uses the same basic JSON structure, you can make your own format file by copying `training/formats/alpaca-format.json` to a new file and [editing its content](#format-files).
13
- - If you can get the dataset into a simple text file, that works too! You can train using the `Raw text file` input option.
14
- - This means you can for example just copy/paste a chatlog/documentation page/whatever you want, shove it in a plain text file, and train on it.
15
- - If you use a structured dataset not in this format, you may have to find an external way to convert it - or open an issue to request native support.
16
-
17
- ### **Step 3**: Do the training.
18
- - **3.1**: Load the WebUI, and your model.
19
- - Make sure you don't have any LoRAs already loaded (unless you want to train for multi-LoRA usage).
20
- - **3.2**: Open the `Training` tab at the top, `Train LoRA` sub-tab.
21
- - **3.3**: Fill in the name of the LoRA, select your dataset in the dataset options.
22
- - **3.4**: Select other parameters to your preference. See [parameters below](#parameters).
23
- - **3.5**: click `Start LoRA Training`, and wait.
24
- - It can take a few hours for a large dataset, or just a few minute if doing a small run.
25
- - You may want to monitor your [loss value](#loss) while it goes.
26
-
27
- ### **Step 4**: Evaluate your results.
28
- - Load the LoRA under the Models Tab.
29
- - You can go test-drive it on the `Text generation` tab, or you can use the `Perplexity evaluation` sub-tab of the `Training` tab.
30
- - If you used the `Save every n steps` option, you can grab prior copies of the model from sub-folders within the LoRA model's folder and try them instead.
31
-
32
- ### **Step 5**: Re-run if you're unhappy.
33
- - Make sure to unload the LoRA before training it.
34
- - You can simply resume a prior run - use `Copy parameters from` to select your LoRA, and edit parameters. Note that you cannot change the `Rank` of an already created LoRA.
35
- - If you want to resume from a checkpoint saved along the way, simply copy the contents of the checkpoint folder into the LoRA's folder.
36
- - (Note: `adapter_model.bin` is the important file that holds the actual LoRA content).
37
- - This will start Learning Rate and Steps back to the start. If you want to resume as if you were midway through, you can adjust your Learning Rate to the last reported LR in logs and reduce your epochs.
38
- - Or, you can start over entirely if you prefer.
39
- - If your model is producing corrupted outputs, you probably need to start over and use a lower Learning Rate.
40
- - If your model isn't learning detailed information but you want it to, you might need to just run more epochs, or you might need a higher Rank.
41
- - If your model is enforcing a format you didn't want, you may need to tweak your dataset, or start over and not train as far.
42
-
43
- ## Format Files
44
-
45
- If using JSON formatted datasets, they are presumed to be in the following approximate format:
46
-
47
- ```json
48
- [
49
- {
50
- "somekey": "somevalue",
51
- "key2": "value2"
52
- },
53
- {
54
- // etc
55
- }
56
- ]
57
- ```
58
-
59
- Where the keys (eg `somekey`, `key2` above) are standardized, and relatively consistent across the dataset, and the values (eg `somevalue`, `value2`) contain the content actually intended to be trained.
60
-
61
- For Alpaca, the keys are `instruction`, `input`, and `output`, wherein `input` is sometimes blank.
62
-
63
- A simple format file for Alpaca to be used as a chat bot is:
64
-
65
- ```json
66
- {
67
- "instruction,output": "User: %instruction%\nAssistant: %output%",
68
- "instruction,input,output": "User: %instruction%: %input%\nAssistant: %output%"
69
- }
70
- ```
71
-
72
- Note that the keys (eg `instruction,output`) are a comma-separated list of dataset keys, and the values are a simple string that use those keys with `%%`.
73
-
74
- So for example if a dataset has `"instruction": "answer my question"`, then the format file's `User: %instruction%\n` will be automatically filled in as `User: answer my question\n`.
75
-
76
- If you have different sets of key inputs, you can make your own format file to match it. This format-file is designed to be as simple as possible to enable easy editing to match your needs.
77
-
78
- ## Raw Text File Settings
79
-
80
- When using raw text files as your dataset, the text is automatically split into chunks based on your `Cutoff Length` you get a few basic options to configure them.
81
- - `Overlap Length` is how much to overlap chunks by. Overlapping chunks helps prevent the model from learning strange mid-sentence cuts, and instead learn continual sentences that flow from earlier text.
82
- - `Prefer Newline Cut Length` sets a maximum distance in characters to shift the chunk cut towards newlines. Doing this helps prevent lines from starting or ending mid-sentence, preventing the model from learning to cut off sentences randomly.
83
- - `Hard Cut String` sets a string that indicates there must be a hard cut without overlap. This defaults to `\n\n\n`, meaning 3 newlines. No trained chunk will ever contain this string. This allows you to insert unrelated sections of text in the same text file, but still ensure the model won't be taught to randomly change the subject.
84
-
85
- ## Parameters
86
-
87
- The basic purpose and function of each parameter is documented on-page in the WebUI, so read through them in the UI to understand your options.
88
-
89
- That said, here's a guide to the most important parameter choices you should consider:
90
-
91
- ### VRAM
92
-
93
- - First, you must consider your VRAM availability.
94
- - Generally, under default settings, VRAM usage for training with default parameters is very close to when generating text (with 1000+ tokens of context) (ie, if you can generate text, you can train LoRAs).
95
- - Note: worse by default in the 4-bit monkeypatch currently. Reduce `Micro Batch Size` to `1` to restore this to expectations.
96
- - If you have VRAM to spare, setting higher batch sizes will use more VRAM and get you better quality training in exchange.
97
- - If you have large data, setting a higher cutoff length may be beneficial, but will cost significant VRAM. If you can spare some, set your batch size to `1` and see how high you can push your cutoff length.
98
- - If you're low on VRAM, reducing batch size or cutoff length will of course improve that.
99
- - Don't be afraid to just try it and see what happens. If it's too much, it will just error out, and you can lower settings and try again.
100
-
101
- ### Rank
102
-
103
- - Second, you want to consider the amount of learning you want.
104
- - For example, you may wish to just learn a dialogue format (as in the case of Alpaca) in which case setting a low `Rank` value (32 or lower) works great.
105
- - Or, you might be training on project documentation you want the bot to understand and be able to understand questions about, in which case the higher the rank, the better.
106
- - Generally, higher Rank = more precise learning = more total content learned = more VRAM usage while training.
107
-
108
- ### Learning Rate and Epochs
109
-
110
- - Third, how carefully you want it to be learned.
111
- - In other words, how okay or not you are with the model losing unrelated understandings.
112
- - You can control this with 3 key settings: the Learning Rate, its scheduler, and your total epochs.
113
- - The learning rate controls how much change is made to the model by each token it sees.
114
- - It's in scientific notation normally, so for example `3e-4` means `3 * 10^-4` which is `0.0003`. The number after `e-` controls how many `0`s are in the number.
115
- - Higher values let training run faster, but also are more likely to corrupt prior data in the model.
116
- - You essentially have two variables to balance: the LR, and Epochs.
117
- - If you make LR higher, you can set Epochs equally lower to match. High LR + low epochs = very fast, low quality training.
118
- - If you make LR low, set epochs high. Low LR + high epochs = slow but high-quality training.
119
- - The scheduler controls change-over-time as you train - it starts high, and then goes low. This helps balance getting data in, and having decent quality, at the same time.
120
- - You can see graphs of the different scheduler options [in the HuggingFace docs here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_1/en/main_classes/optimizer_schedules#transformers.SchedulerType)
121
-
122
- ## Loss
123
-
124
- When you're running training, the WebUI's console window will log reports that include, among other things, a numeric value named `Loss`. It will start as a high number, and gradually get lower and lower as it goes.
125
-
126
- "Loss" in the world of AI training theoretically means "how close is the model to perfect", with `0` meaning "absolutely perfect". This is calculated by measuring the difference between the model outputting exactly the text you're training it to output, and what it actually outputs.
127
-
128
- In practice, a good LLM should have a very complex variable range of ideas running in its artificial head, so a loss of `0` would indicate that the model has broken and forgotten to how think about anything other than what you trained it.
129
-
130
- So, in effect, Loss is a balancing game: you want to get it low enough that it understands your data, but high enough that it isn't forgetting everything else. Generally, if it goes below `1.0`, it's going to start forgetting its prior memories, and you should stop training. In some cases you may prefer to take it as low as `0.5` (if you want it to be very very predictable). Different goals have different needs, so don't be afraid to experiment and see what works best for you.
131
-
132
- Note: if you see Loss start at or suddenly jump to exactly `0`, it is likely something has gone wrong in your training process (eg model corruption).
133
-
134
- ## Note: 4-Bit Monkeypatch
135
-
136
- The [4-bit LoRA monkeypatch](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) works for training, but has side effects:
137
- - VRAM usage is higher currently. You can reduce the `Micro Batch Size` to `1` to compensate.
138
- - Models do funky things. LoRAs apply themselves, or refuse to apply, or spontaneously error out, or etc. It can be helpful to reload base model or restart the WebUI between training/usage to minimize chances of anything going haywire.
139
- - Loading or working with multiple LoRAs at the same time doesn't currently work.
140
- - Generally, recognize and treat the monkeypatch as the dirty temporary hack it is - it works, but isn't very stable. It will get better in time when everything is merged upstream for full official support.
141
-
142
- ## Legacy notes
143
-
144
- LoRA training was contributed by [mcmonkey4eva](https://github.com/mcmonkey4eva) in PR [#570](https://github.com/oobabooga/text-generation-webui/pull/570).
145
-
146
- ### Using the original alpaca-lora code
147
-
148
- Kept here for reference. The Training tab has much more features than this method.
149
-
150
- ```
151
- conda activate textgen
152
- git clone https://github.com/tloen/alpaca-lora
153
- ```
154
-
155
- Edit those two lines in `alpaca-lora/finetune.py` to use your existing model folder instead of downloading everything from decapoda:
156
-
157
- ```
158
- model = LlamaForCausalLM.from_pretrained(
159
- "models/llama-7b",
160
- load_in_8bit=True,
161
- device_map="auto",
162
- )
163
- tokenizer = LlamaTokenizer.from_pretrained(
164
- "models/llama-7b", add_eos_token=True
165
- )
166
- ```
167
-
168
- Run the script with:
169
-
170
- ```
171
- python finetune.py
172
- ```
173
-
174
- It just works. It runs at 22.32s/it, with 1170 iterations in total, so about 7 hours and a half for training a LoRA. RTX 3090, 18153MiB VRAM used, drawing maximum power (350W, room heater mode).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/FelixLuoX/stable_diffusion_test/share_btn.py DELETED
@@ -1,60 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 
11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
- const gradioEl = document.querySelector('body > gradio-app');
26
- const imgEls = gradioEl.querySelectorAll('#gallery img');
27
- const promptTxt = gradioEl.querySelector('#prompt-text-input input').value;
28
- const shareBtnEl = gradioEl.querySelector('#share-btn');
29
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
30
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
31
- if(!imgEls.length){
32
- return;
33
- };
34
- shareBtnEl.style.pointerEvents = 'none';
35
- shareIconEl.style.display = 'none';
36
- loadingIconEl.style.removeProperty('display');
37
- const files = await Promise.all(
38
- [...imgEls].map(async (imgEl) => {
39
- const res = await fetch(imgEl.src);
40
- const blob = await res.blob();
41
- const imgId = Date.now() % 200;
42
- const fileName = `diffuse-the-rest-${{imgId}}.png`;
43
- return new File([blob], fileName, { type: 'image/png' });
44
- })
45
- );
46
- const urls = await Promise.all(files.map((f) => uploadFile(f)));
47
- const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
48
- const descriptionMd = `<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
49
- ${htmlImgs.join(`\n`)}
50
- </div>`;
51
- const params = new URLSearchParams({
52
- title: promptTxt,
53
- description: descriptionMd,
54
- });
55
- const paramsStr = params.toString();
56
- window.open(`https://huggingface.co/spaces/stabilityai/stable-diffusion/discussions/new?${paramsStr}`, '_blank');
57
- shareBtnEl.style.removeProperty('pointer-events');
58
- shareIconEl.style.removeProperty('display');
59
- loadingIconEl.style.display = 'none';
60
- }"""