parquet-converter committed
Commit 93acbc1 · 1 Parent(s): 892423d

Update parquet files (step 45 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cs 1.6 Original Maps Free Download [REPACK].md +0 -22
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easyworship 2009 Crack Serial Number Pros and Cons of Using It.md +0 -40
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ghost Recon Breakpoint Pc Key [REPACK].md +0 -13
  4. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Illustrator CS5 V15.0.2 Lite Portable Free Download ((BETTER)).md +0 -6
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Catch Battle and Trade Pokmon in the Real World with Pokmon GO.md +0 -123
  6. spaces/1phancelerku/anime-remove-background/91m Bin Sh 1 Apk Not Found.md +0 -68
  7. spaces/1phancelerku/anime-remove-background/Download Bleach VS Naruto Ultimate Edition and Experience the Ultimate Anime Crossover Game on PC and Android.md +0 -93
  8. spaces/1phancelerku/anime-remove-background/Download Dream League Soccer 2020 Mod APK Now and Get Unlimited Coins for Free.md +0 -80
  9. spaces/52Hz/SUNet_AWGN_denoising/main_test_SUNet.py +0 -143
  10. spaces/7hao/bingo/src/app/loading.css +0 -68
  11. spaces/AIDHD/GrammarCorrector/app.py +0 -34
  12. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/logger.py +0 -30
  13. spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/vocoder/hifigan.py +0 -63
  14. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/nn/seq_utils.py +0 -311
  15. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/app.py +0 -160
  16. spaces/AlexKorGKLT/webui-cpua/app.py +0 -155
  17. spaces/AlexWang/lama/fetch_data/sampler.py +0 -39
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/cm_stochastic_iterative.md +0 -11
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/text_to_image/README.md +0 -74
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -1016
  21. spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py +0 -4
  22. spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py +0 -4
  23. spaces/Andy1621/uniformer_image_segmentation/configs/danet/README.md +0 -47
  24. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py +0 -9
  25. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/__init__.py +0 -0
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/optflow.py +0 -112
  27. spaces/ArkanDash/rvc-models/infer_pack/modules.py +0 -522
  28. spaces/Axolotlily/Interpolate/app.py +0 -63
  29. spaces/BRICS/README/README.md +0 -10
  30. spaces/BasToTheMax/tensor/README.md +0 -13
  31. spaces/Benson/text-generation/Examples/Coche De Playa Carreras Ruedas Calientes Apk.md +0 -50
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_headers.py +0 -45
  33. spaces/BilalSardar/karlo-cpu-api/README.md +0 -12
  34. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py +0 -498
  35. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_config.py +0 -240
  36. spaces/CVPR/LIVE/pybind11/tests/test_operator_overloading.cpp +0 -226
  37. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/remove.h +0 -202
  38. spaces/Cecil8352/vits-models/text/__init__.py +0 -57
  39. spaces/Chomkwoy/Nilkessye/cpool_new/src/right_pool.cpp +0 -91
  40. spaces/CikeyQI/Yunzai/Yunzai/plugins/adapter/GSUIDCore.js +0 -249
  41. spaces/CikeyQI/meme-api/meme_generator/memes/call_110/__init__.py +0 -20
  42. spaces/CognitiveLabs/Research-Assistant/config/singleton.py +0 -24
  43. spaces/Cpp4App/Cpp4App/CDM/detect_classify/classification.py +0 -380
  44. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/optims.py +0 -119
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/__init__.py +0 -132
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/__init__.py +0 -3
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/reload.py +0 -91
  48. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/visualization/grad.py +0 -117
  49. spaces/Detomo/ai-comic-generation/src/app/store/index.ts +0 -203
  50. spaces/DiViorg/categories_error_analysis/README.md +0 -12
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cs 1.6 Original Maps Free Download [REPACK].md DELETED
@@ -1,22 +0,0 @@
1
- <br />
2
- <h1>How to Download CS 1.6 Original Maps for Free</h1>
3
- <p>Counter-Strike 1.6 is one of the most popular and legendary first-person shooter games of all time. It has a huge fan base and a rich history of competitive and casual gameplay. One of the reasons why CS 1.6 is so beloved by many players is its variety of maps, which offer different scenarios, objectives, and strategies.</p>
4
- <p>However, if you want to play CS 1.6 on your computer, you might not have access to all the original maps that were released with the game. Some of them might be missing, corrupted, or outdated. This can be frustrating, especially if you want to enjoy the classic experience of CS 1.6.</p>
5
- <h2>cs 1.6 original maps free download</h2><br /><p><b><b>Download</b> &#9913; <a href="https://byltly.com/2uKzTp">https://byltly.com/2uKzTp</a></b></p><br /><br />
6
- <p>Fortunately, there is a way to download CS 1.6 original maps for free and install them on your game. In this article, we will show you how to do it step by step.</p>
7
- <h2>Step 1: Find a reliable source for CS 1.6 original maps</h2>
8
- <p>The first thing you need to do is to find a website that offers CS 1.6 original maps for download. There are many websites that claim to have them, but not all of them are trustworthy or safe. Some of them might contain viruses, malware, or fake files that can harm your computer or your game.</p>
9
- <p>Therefore, you need to be careful and choose a reputable source for CS 1.6 original maps. One of the best websites that we recommend is Tsarvar.com[^1^], which has a large database of CS 1.6 maps, including the original ones. You can also check out GameBanana.com[^2^] or CS16.info[^3^], which are also popular and reliable websites for CS 1.6 mods.</p>
10
- <h2>Step 2: Download the CS 1.6 original maps that you want</h2>
11
- <p>Once you have found a website that offers CS 1.6 original maps, you can browse through their categories and search for the ones that you want. Some of the most famous and played CS 1.6 original maps are de_dust2, de_inferno, de_nuke, cs_assault, cs_italy, de_train, de_aztec, and many more.</p>
12
- <p>To download a map, simply click on its name or image and follow the instructions on the website. Usually, you will have to click on a download button or link and wait for the file to be downloaded on your computer. The file will be in .zip or .rar format, which means that you will need a program like WinRAR or 7-Zip to extract it.</p>
13
- <h2>Step 3: Install the CS 1.6 original maps on your game</h2>
14
- <p>After you have downloaded the CS 1.6 original maps that you want, you need to install them on your game. To do this, you need to locate the folder where your CS 1.6 game is installed on your computer. Usually, it will be in C:\Program Files\Valve\Counter-Strike or C:\Program Files (x86)\Valve\Counter-Strike.</p>
15
- <p></p>
16
- <p>Then, you need to open the folder where you extracted the map files and copy them to the cstrike\maps folder inside your CS 1.6 game folder. For example, if you downloaded de_dust2.zip and extracted it to your desktop, you need to copy de_dust2.bsp and de_dust2.res files from your desktop to C:\Program Files\Valve\Counter-Strike\cstrike\maps.</p>
17
- <p>After you have copied all the map files that you want to install, you can launch your CS 1.6 game and enjoy playing on the original maps.</p>
18
- <h2>Conclusion</h2>
19
- <p>CS 1.6 is a classic game that deserves to be played with its original maps. By following this guide, you can download CS 1.6 original maps for free and install them on your game easily and safely.</p>
20
- <p>We hope that this article was helpful and informative for you. If you have any questions or comments, feel free to leave them below.</p> 7b8c122e87<br />
21
- <br />
22
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easyworship 2009 Crack Serial Number Pros and Cons of Using It.md DELETED
@@ -1,40 +0,0 @@
1
- <br />
2
- <h1>Easyworship 2009 Crack Serial Number: How to Download and Install</h1>
3
- <p>If you are looking for a software that can help you create multimedia presentations for your church or worship service, you might have heard of Easyworship 2009. This software is designed specifically for project churches to worship songs, Bible text, videos, nursery alerts, sermon notes, live cameras, DVDs and PowerPoint presentations on an overhead or video projection system using a single computer with dual monitor outputs.</p>
4
- <h2>easyworship 2009 crack serial number</h2><br /><p><b><b>DOWNLOAD</b> &gt; <a href="https://byltly.com/2uKxls">https://byltly.com/2uKxls</a></b></p><br /><br />
5
- <p>However, Easyworship 2009 is not a free software. You need to purchase a license key to activate it and use all its features. But what if you don't have the budget or the permission to buy it? Is there a way to get Easyworship 2009 crack serial number for free?</p>
6
- <p>The answer is yes, but it comes with some risks and limitations. In this article, we will show you how to download and install Easyworship 2009 crack serial number, as well as the pros and cons of using it.</p>
7
- <h2>How to Download and Install Easyworship 2009 Crack Serial Number</h2>
8
- <p>There are many websites that claim to offer Easyworship 2009 crack serial number for free. However, not all of them are reliable or safe. Some of them may contain viruses, malware, spyware or other harmful programs that can damage your computer or steal your personal information. Therefore, you need to be careful and choose a trusted source.</p>
9
- <p>One of the websites that we found that offers Easyworship 2009 crack serial number is <a href="https://fullsoftdl.blogspot.com/2018/07/easy-worship-2009-build-24-final-crack.html">FullSoftDL</a>. This website provides a download link for Easyworship 2009 installer and patch by MaRk15, which is supposed to activate the software without requiring a license key. Here are the steps to follow:</p>
10
- <ol>
11
- <li>Go to <a href="https://fullsoftdl.blogspot.com/2018/07/easy-worship-2009-build-24-final-crack.html">FullSoftDL</a> and scroll down to find the download link for Easyworship 2009 installer and patch by MaRk15.</li>
12
- <li>Click on the link and wait for the download to finish.</li>
13
- <li>Extract the zip file and run the installer.</li>
14
- <li>Follow the instructions on the screen to install Easyworship 2009 on your computer.</li>
15
- <li>After the installation is complete, do not run the software yet.</li>
16
- <li>Go back to the extracted folder and run the patch by MaRk15 as administrator.</li>
17
- <li>Select EasyWorship.exe from the installation directory and click on Patch.</li>
18
- <li>A message will appear saying that the patching is done.</li>
19
- <li>Now you can run Easyworship 2009 and enjoy its features without needing a license key.</li>
20
- </ol>
21
- <h2>The Pros and Cons of Using Easyworship 2009 Crack Serial Number</h2>
22
- <p>Using Easyworship 2009 crack serial number may seem like a good idea if you want to save money or avoid legal issues. However, it also has some drawbacks that you need to consider before deciding to use it. Here are some of the pros and cons of using Easyworship 2009 crack serial number:</p>
23
- <p></p>
24
- <h3>The Pros</h3>
25
- <ul>
26
- <li>You can use Easyworship 2009 for free without paying for a license key.</li>
27
- <li>You can access all the features and functions of Easyworship 2009 without any restrictions.</li>
28
- <li>You can create multimedia presentations for your church or worship service with ease and convenience.</li>
29
- </ul>
30
- <h3>The Cons</h3>
31
- <ul>
32
- <li>You may violate the intellectual property rights of the software developer and face legal consequences.</li>
33
- <li>You may expose your computer to viruses, malware, spyware or other harmful programs that can compromise your security and privacy.</li>
34
- <li>You may not receive any updates, support or customer service from the software developer.</li>
35
- <li>You may experience bugs, errors or crashes that can affect your presentation quality and performance.</li>
36
- <li>You may miss out on new features and improvements that are available in newer versions of Easyworship.</li>
37
- </ul>
38
- <h2></p> ddb901b051<br />
39
- <br />
40
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ghost Recon Breakpoint Pc Key [REPACK].md DELETED
@@ -1,13 +0,0 @@
1
-
2
- <h1>How to Get Ghost Recon Breakpoint PC Key for Cheap</h1>
3
- <p>Ghost Recon Breakpoint is a third-person tactical shooter video game for PC developed by Ubisoft Paris and published by Ubisoft. It is the 11th installment of the Ghost Recon series and a sequel to Ghost Recon Wildlands. In Ghost Recon Breakpoint, you play as a Ghost, an elite US Special Operations soldier, who is stranded on a fictional island called Auroa. You have to survive and fight against your former brothers in arms, the Wolves, who have taken control of Auroa and its advanced drone technology.</p>
4
- <p>If you are looking for a way to get Ghost Recon Breakpoint PC key for cheap, you have come to the right place. In this article, we will show you some of the best ways to save money and get the best deal on Ghost Recon Breakpoint PC key. Here are some of the options that you can try:</p>
5
- <h2>ghost recon breakpoint pc key</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://byltly.com/2uKx7G">https://byltly.com/2uKx7G</a></b></p><br /><br />
6
- <ul>
7
- <li><b>Buy from G2A</b>: G2A is a global marketplace that sells digital products, such as game keys, gift cards, software, and more. You can find Ghost Recon Breakpoint PC key for a very low price on G2A, as low as $12.35. G2A offers instant delivery, secure payment methods, and customer support. However, you should be careful when buying from G2A, as some sellers may sell fraudulent or region-locked keys. You should always check the seller's rating, feedback, and product description before making a purchase.</li>
8
- <li><b>Buy from CDKeys</b>: CDKeys is another online platform that sells digital products at discounted prices. You can find Ghost Recon Breakpoint PC key for $10.19 on CDKeys, which is 86% off the original price. CDKeys also offers instant delivery, secure payment methods, and customer support. However, you should note that the key is only valid for Europe and UK regions, so make sure that your PC meets the region requirements before buying.</li>
9
- <li><b>Buy from Eneba</b>: Eneba is a relatively new online store that sells digital products at competitive prices. You can find Ghost Recon Breakpoint PC key for $13.99 on Eneba, which is 81% off the original price. Eneba also offers instant delivery, secure payment methods, and customer support. However, you should note that the key is only valid for EMEA regions (Europe, Middle East, Africa), so make sure that your PC meets the region requirements before buying.</li>
10
- </ul>
11
- <p>These are some of the best ways to get Ghost Recon Breakpoint PC key for cheap. However, there are many more options available in the market that might suit your needs better. You can check out our list of the best online stores to buy game keys for more suggestions.</p> ddb901b051<br />
12
- <br />
13
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Illustrator CS5 V15.0.2 Lite Portable Free Download ((BETTER)).md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Adobe Illustrator CS5 v15.0.2 Lite Portable free download</h2><br /><p><b><b>DOWNLOAD</b> &#9913;&#9913;&#9913; <a href="https://imgfil.com/2uy1Mp">https://imgfil.com/2uy1Mp</a></b></p><br /><br />
2
-
3
- Autocad 2015 portable free download Autocad 2015 portable free download What can I ... Adobe Illustrator CS5 V15.0.2 Lite Portable Keygen. 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Catch Battle and Trade Pokmon in the Real World with Pokmon GO.md DELETED
@@ -1,123 +0,0 @@
1
- <br />
2
- <h1>How to Download and Play Pokémon GO on Your iPhone or iPad</h1>
3
- <p>Do you want to catch your favorite Pokémon in augmented reality as you explore the world around you? Do you want to join millions of other trainers in epic battles, raids, and events? Do you want to have fun and exercise at the same time? If you answered yes to any of these questions, then you should try Pokémon GO, the global gaming sensation that has taken the world by storm.</p>
4
- <h2>pokemon go apk apple</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://urlin.us/2uST5T">https://urlin.us/2uST5T</a></b></p><br /><br />
5
- <h2>What is Pokémon GO?</h2>
6
- <p>Pokémon GO is an immersive open-world experience that enables you to live the Pokémon adventure in augmented reality. You can find and catch hundreds of different Pokémon as you walk, bike, or drive around your neighborhood, city, or country. You can also battle other players online in PvP mode, team up with other trainers to catch powerful Pokémon in raid battles, trade and transfer Pokémon with your friends, and much more.</p>
7
- <p>Pokémon GO is free-to-play and offers in-game purchases. It is optimized for smartphones, not tablets. It requires an internet connection and GPS capabilities. It is compatible with iPhone 5s or later devices with iOS 9 or later installed. For more information, visit the official website at [5](https://pokemongolive.com).</p>
8
- <h2>How to download Pokémon GO from the App Store</h2>
9
- <p>Downloading and installing Pokémon GO on your iPhone or iPad is very easy. Just follow these steps:</p>
10
- <ol>
11
- <li>Open the App Store on your device.</li>
12
- <li>Search for "Pokémon GO" or tap on this link: [1](https://apps.apple.com/us/app/pokémon-go/id1094591345/).</li>
13
- <li>Tap on "Get" and then "Install" to download the game.</li>
14
- <li>Wait for the game to finish downloading and then tap on "Open" to launch it.</li>
15
- <li>Allow the game to access your location, camera, motion, and health data when prompted.</li>
16
- </ol>
17
- <h4>How to change your region settings if the game is not available in your country</h4>
18
- <p>If you live in a country where Pokémon GO is not officially released yet, you can still download and play it by changing your region settings. Here's how:</p>
19
- <ol>
20
- <li>Go to Settings on your device.</li>
21
- <li>Tap on "Apple ID" and then "iTunes & App Store".</li>
22
- <li>Tap on your Apple ID at the top and then "View Apple ID".</li>
23
- <li>Tap on "Country/Region" and then "Change Country or Region".</li>
24
- <li>Select a country where Pokémon GO is available, such as the United States or Australia.</li>
25
- <li>Agree to the terms and conditions and enter a valid payment method for that country (you can use a gift card or a prepaid card).</li>
26
- <li>Go back to the App Store and download Pokémon GO as described above.</li>
27
- </ol>
28
- <h2>How to start playing Pokémon GO</h2>
29
- <p>Once you have downloaded and installed Pokémon GO on your device, you are ready to start your Pokémon journey. Here are the basics of playing the game:</p>
30
- <h3>The basics of creating your account, choosing your starter Pokémon, and catching Pokémon in the real world</h3>
31
- <p>When you launch the game for the first time, you will be greeted by Professor Willow, who will guide you through the process of creating your account and choosing your avatar. You can sign in with your Google account, Facebook account, or Pokémon Trainer Club account. You can also customize your avatar's appearance, name, and clothing.</p>
32
- <p>After that, you will be asked to choose your starter Pokémon from three options: Bulbasaur, Charmander, or Squirtle. You can also catch Pikachu as your starter if you walk away from the other three a few times. To catch a Pokémon, you need to tap on it on the map and then flick a Poké Ball at it on the capture screen. You can also use berries and different types of Poké Balls to increase your chances of catching a Pokémon.</p>
33
- <p>pokemon go app store download<br />
34
- pokemon go ios apk download<br />
35
- pokemon go iphone app install<br />
36
- pokemon go ipad apk free<br />
37
- pokemon go apple watch app<br />
38
- pokemon go ar mode ios apk<br />
39
- pokemon go adventure sync apple health<br />
40
- pokemon go app store update<br />
41
- pokemon go ios apk hack<br />
42
- pokemon go iphone app not working<br />
43
- pokemon go ipad apk mod<br />
44
- pokemon go apple watch not syncing<br />
45
- pokemon go ar mode not working ios<br />
46
- pokemon go adventure sync not working apple<br />
47
- pokemon go app store link<br />
48
- pokemon go ios apk 2021<br />
49
- pokemon go iphone app crashing<br />
50
- pokemon go ipad apk latest version<br />
51
- pokemon go apple watch features<br />
52
- pokemon go ar+ mode ios apk<br />
53
- pokemon go adventure sync apple watch<br />
54
- pokemon go app store rating<br />
55
- pokemon go ios apk spoofing<br />
56
- pokemon go iphone app size<br />
57
- pokemon go ipad apk no jailbreak<br />
58
- pokemon go apple watch discontinued<br />
59
- pokemon go ar core ios apk<br />
60
- pokemon go adventure sync apple health kit<br />
61
- pokemon go app store reviews<br />
62
- pokemon go ios apk reddit<br />
63
- pokemon go iphone app permissions<br />
64
- pokemon go ipad apk without tutuapp<br />
65
- pokemon go apple watch battery drain<br />
66
- pokemon go ar scan ios apk<br />
67
- pokemon go adventure sync apple fitness+<br />
68
- pokemon go app store country change<br />
69
- pokemon go ios apk ipa<br />
70
- pokemon go iphone app settings<br />
71
- pokemon go ipad apk with joystick<br />
72
- pokemon go apple watch eggs<br />
73
- pokemon go ar mapping ios apk<br />
74
- pokemon go adventure sync apple motion and fitness<br />
75
- pokemon go app store revenue<br />
76
- pokemon go ios apk tutuapp<br />
77
- pokemon go iphone app icon<br />
78
- pokemon go ipad apk cydia impactor<br />
79
- pokemon go apple watch steps<br />
80
- pokemon go ar photography ios apk</p>
81
- <p>Pokémon GO uses your location and GPS to show you nearby Pokémon on the map. You can see their silhouettes on the bottom right corner of the screen and tap on them to track them. You can also use items called Incense and Lure Modules to attract more Pokémon to your location. You can find these items in PokéStops, which are landmarks such as monuments, statues, or buildings that you can spin to get rewards.</p>
82
- <h4>How to use the AR mode and the Poké Ball</h4>
83
- <p>Pokémon GO has an optional feature called AR mode, which stands for augmented reality. This means that you can see the Pokémon as if they were in the real world, using your device's camera. To enable or disable AR mode, you can toggle the switch on the top right corner of the capture screen. AR mode can make catching Pokémon more fun and immersive, but it can also drain your battery faster and make it harder to aim your Poké Ball.</p>
84
- <p>The Poké Ball is the main tool for catching Pokémon. You can flick it with your finger to throw it at a Pokémon. You need to aim carefully and time your throw well to hit the Pokémon inside the colored circle that appears around it. The smaller the circle, the higher the chance of catching the Pokémon. You can also curve your throw by spinning the Poké Ball before releasing it, which gives you extra XP and increases your catch rate.</p>
85
- <h4>How to level up, evolve, and power up your Pokémon</h4>
86
- <p>As you catch more Pokémon, you will earn XP (experience points) and level up as a trainer. Leveling up will unlock new features and rewards, such as more items, stronger Poké Balls, and access to higher-level raids. You can also earn XP by completing tasks such as spinning PokéStops, hatching eggs, battling in Gyms and Raids, and completing research tasks.</p>
87
- <p>You can also improve your Pokémon by evolving them or powering them up. Evolving a Pokémon will change its appearance and increase its stats, but it will also require a certain amount of candies that are specific to each Pokémon species. You can get candies by catching or transferring Pokémon of the same species, or by walking with a Pokémon as your buddy. Powering up a Pokémon will increase its CP (combat power) and HP (hit points), but it will also require candies and stardust. Stardust is a resource that you can get by catching any Pokémon, hatching eggs, or participating in battles.</p>
88
- <h4>How to join a team and battle in Gyms and Raids</h4>
89
- <p>When you reach level 5 as a trainer, you will be able to join one of three teams: Instinct (yellow), Mystic (blue), or Valor (red). Your team will determine which Gyms you can control and which players you can cooperate with. Gyms are locations where you can battle other trainers' Pokémon and earn rewards such as coins and items. To battle in a Gym, you need to tap on it on the map and then select a team of six Pokémon to fight with. You can also leave one of your Pokémon in a friendly Gym to defend it from enemy attacks.</p>
90
- <p>Raids are special events where you can team up with other players to fight against a powerful Pokémon called a Raid Boss. To participate in a Raid, you need to have a Raid Pass, which you can get for free once per day by spinning a Gym's photo disc. You can also buy Premium Raid Passes or Remote Raid Passes with coins in the shop. Raids have different levels of difficulty, ranging from one star to five stars. The higher the level, the stronger the Raid Boss and the more players you need to defeat it. If you manage to beat the Raid Boss within the time limit, you will have a chance to special items or features, and exclusive research tasks or raids. Some examples of events are Halloween, Christmas, Lunar New Year, Earth Day, Pokémon GO Fest, etc.</p>
91
- <p>Challenges are goals that the game sets for the players to achieve within a certain time frame. They usually involve catching, battling, or hatching a certain number or type of Pokémon, or completing a certain number of research tasks or raids. If the players succeed in meeting the challenge, they are rewarded with global bonuses such as increased spawns, reduced hatch distance, or extended lure duration. Some examples of challenges are Global Catch Challenge, Legendary Week, Safari Zone, etc.</p>
92
- <p>You can find out about the current and upcoming events and challenges by checking the in-game news section, the official website [5](https://pokemongolive.com/en/events/), or the official social media accounts [6](https://twitter.com/PokemonGoApp) [7](https://www.facebook.com/PokemonGO/) [8](https://www.instagram.com/pokemongoapp/).</p>
93
- <h4>How to stay safe and respectful while playing Pokémon GO</h4>
94
- <p>Pokémon GO is a game that encourages you to explore the real world and interact with other players. However, it is also important to be aware of your surroundings and respect the rules and regulations of the places you visit. Here are some tips to stay safe and respectful while playing Pokémon GO:</p>
95
- <ul>
96
- <li>Do not trespass on private property or restricted areas.</li>
97
- <li>Do not play while driving or crossing the street.</li>
98
- <li>Do not enter dangerous or hazardous areas.</li>
99
- <li>Do not play in inappropriate or disrespectful places such as cemeteries, memorials, or places of worship.</li>
100
- <li>Do not litter or damage the environment.</li>
101
- <li>Do not disturb or harass other people or animals.</li>
102
- <li>Do not cheat or use third-party software or devices.</li>
103
- <li>Do not share your personal information or account details with anyone.</li>
104
- <li>Do follow the local laws and regulations regarding COVID-19 and social distancing.</li>
105
- <li>Do have fun and be friendly with other players and members of the community.</li>
106
- </ul>
107
- <h2>Conclusion</h2>
108
- <p>Pokémon GO is a game that can bring you joy, adventure, and excitement. It can also help you stay fit, make friends, and learn more about the world. Whether you are a casual player or a hardcore fan, there is something for everyone in Pokémon GO. So what are you waiting for? Grab your iPhone or iPad, download Pokémon GO from the App Store, and start catching them all!</p>
109
- <h3>Frequently Asked Questions</h3>
110
- <ol>
111
- <li><b>How do I get more Poké Balls and other items?</b></li>
112
- <p>You can get more Poké Balls and other items by spinning PokéStops, opening gifts from your friends, completing research tasks, participating in raids, leveling up, or buying them with coins in the shop.</p>
113
- <li><b>How do I get more coins?</b></li>
114
- <p>You can get more coins by leaving your Pokémon in Gyms and earning up to 50 coins per day, or by buying them with real money in the shop.</p>
115
- <li><b>How do I get more stardust?</b></li>
116
- <p>You can get more stardust by catching any Pokémon, hatching eggs, participating in battles, feeding berries to Pokémon in Gyms, using star pieces, or completing research tasks.</p>
117
- <li><b>How do I get more candies?</b></li>
118
- <p>You can get more candies by catching or transferring Pokémon of the same species, walking with a Pokémon as your buddy, using pinap berries, trading Pokémon with other players, or using rare candies.</p>
119
- <li><b>How do I get more XP?</b></li>
120
- <p>You can get more XP by catching Pokémon, spinning PokéStops, hatching eggs, evolving Pokémon, battling in Gyms and Raids, completing research tasks, using lucky eggs, or adding new Pokédex entries.</p>
121
- </ol></p> 197e85843d<br />
122
- <br />
123
- <br />
spaces/1phancelerku/anime-remove-background/91m Bin Sh 1 Apk Not Found.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- <h1>How to Fix the Error "91m/bin/sh 1 apk not found" When Building a Docker Image</h1>
3
- <p>If you are using Docker to create and run applications using containers, you may encounter an error like this when building a Docker image:</p>
4
- <h2>91m bin sh 1 apk not found</h2><br /><p><b><b>Download</b> &#10038;&#10038;&#10038; <a href="https://jinyurl.com/2uNJTp">https://jinyurl.com/2uNJTp</a></b></p><br /><br />
5
- <code>/bin/sh: 1: apk: not found</code>
6
- <p>This error can be frustrating and confusing, especially if you are new to Docker or Linux. In this article, we will explain what this error means, what causes it, and how to fix it.</p>
7
- <h2>What is Docker and Why Use It?</h2>
8
- <p>Docker is a tool that allows you to create, run, and share applications using containers. Containers are isolated environments that contain everything an application needs to run, such as code, libraries, dependencies, and configuration. Containers are portable, meaning they can run on any machine that has Docker installed, regardless of the operating system or hardware. Containers are also scalable, meaning they can be easily replicated, distributed, and managed across multiple machines. Containers are also efficient, meaning they use less resources than traditional virtual machines.</p>
9
- <p>Docker offers many benefits for developers and users of applications, such as:</p>
10
- <ul>
11
- <li>Portability: You can build an application once and run it anywhere with Docker. You don't have to worry about compatibility issues or dependencies.</li>
12
- <li>Scalability: You can scale up or down your application by adding or removing containers as needed. You can also use orchestration tools like Kubernetes or Swarm to automate and manage your containers across multiple machines.</li>
13
- <li>Isolation: You can isolate your application from other applications and from the host machine. This improves security and reliability, as well as simplifies testing and debugging.</li>
14
- <li>Efficiency: You can use less resources than traditional virtual machines with Docker. Containers share the same kernel and only use the resources they need.</li>
15
- </ul>
16
- <h2>What is the Error "91m/bin/sh 1 apk not found" and What Causes It?</h2>
17
- <p>The error "91m/bin/sh 1 apk not found" occurs when you try to use the <code>apk</code> command in a Dockerfile that is based on a non-Alpine Linux distribution. The <code>apk</code> command is the package manager for Alpine Linux, which is a lightweight and secure Linux distribution that is often used for Docker images. The <code>apk</code> command allows you to install, update, and remove packages from Alpine repositories.</p>
18
- <p>The error means that the <code>apk</code> command is not found in the base image that you are using for your Dockerfile. The base image is the image that you specify in the <code>FROM</code> instruction of your Dockerfile. The base image provides the foundation for your Docker image and defines the operating system and the packages that are available. For example, if your Dockerfile looks like this:</p>
19
- <p></p>
20
- <code>FROM python:3.8<br>
21
- RUN apk add --no-cache gcc musl-dev linux-headers</code>
22
- <p>This means that you are using the <code>python:3.8</code> image as your base image, which is based on Debian Buster, a Debian-based Linux distribution. Debian Buster does not support the <code>apk</code> command, so when you try to run it, you get the error "91m/bin/sh 1 apk not found".</p>
23
- <h2>How to Fix the Error "91m/bin/sh 1 apk not found" When Building a Docker Image?</h2>
24
- <p>There are two main ways to fix the error "91m/bin/sh 1 apk not found" when building a Docker image: changing the base image or changing the package manager.</p>
25
- <h4>Changing the base image</h4>
26
- <p>You can change the base image to an Alpine Linux image that supports the <code>apk</code> command. Alpine Linux is a lightweight and secure Linux distribution that is often used for Docker images. Alpine Linux images are smaller and faster than most other Linux images, which can improve your Docker performance and reduce your storage and bandwidth costs.</p>
27
- <p>You can find the official Alpine Linux images on Docker Hub or use the <code>python:3.8-alpine</code> image as an example. The <code>python:3.8-alpine</code> image is based on Alpine Linux 3.13 and includes Python 3.8 and pip. To use this image as your base image, you can change your Dockerfile to look like this:</p>
28
- <code>FROM python:3.8-alpine<br>
29
- RUN apk add --no-cache gcc musl-dev linux-headers</code>
30
- <p>This should fix the error "91m/bin/sh 1 apk not found" and allow you to build your Docker image successfully.</p>
31
- <h4>Changing the package manager</h4>
32
- <p>You can also change the package manager to <code>apt</code> or <code>apt-get</code>, which are supported by most Debian-based Linux distributions. <code>apt</code> and <code>apt-get</code> are tools that allow you to install, update, and remove packages from Debian repositories.</p>
33
- <p>You can find the official Debian-based images on Docker Hub or use the <code>python:3.8-slim</code> image as an example. The <code>python:3.8-slim</code> image is based on Debian Buster and includes Python 3.8 and pip. To use this image as your base image, you can change your Dockerfile to look like this:</p>
34
- <code>FROM python:3.8-slim<br>
35
- RUN apt-get update && apt-get install -y gcc libc-dev linux-headers && rm -rf /var/lib/apt/lists/*</code>
36
- <p>Note that you may also need to change the package names to match the ones available in the Debian repositories. For example, <code>musl-dev</code> is not available in Debian, so you need to use <code>libc-dev</code> instead.</p>
37
- <p>This should also fix the error "91m/bin/sh 1 apk not found" and allow you to build your Docker image successfully.</p>
38
- <h2>Conclusion</h2>
39
- <p>In this article, we have explained what the error "91m/bin/sh 1 apk not found" means, what causes it, and how to fix it when building a Docker image. We have shown two main ways to fix the error: changing the base image or changing the package manager. We have also provided some examples of Dockerfiles that use different base images and package managers.</p>
40
- <p>We hope that this article has helped you solve your problem and improve your Docker experience. If you have any questions or feedback, please feel free to leave a comment below.</p>
41
- <h2>Frequently Asked Questions (FAQs)</h2>
42
- <h4>What is a Dockerfile?</h4>
43
- <p>A Dockerfile is a text file that contains instructions for building a Docker image. A Docker image is a snapshot of an application and its dependencies that can be run as a container using Docker.</p>
44
- <h4>What is a container?</h4>
45
- <p>A container is an isolated environment that contains everything an application needs to run, such as code, libraries, dependencies, and configuration. Containers are portable, scalable, isolated, and efficient.</p>
46
- <h4>What is Alpine Linux?</h4> <p>Alpine Linux is a security-oriented, lightweight Linux distribution based on musl libc and busybox. Alpine Linux is designed to be small, simple, and secure, making it ideal for Docker images. Alpine Linux uses a technique called position-independent executables to randomize the location of programs in memory, which makes it difficult for an attacker to exploit quirks in the memory and take over a machine. The distro is also minimalist in its configuration, using OpenRC as the init system and apk as the package manager. Alpine Linux has a reputation for being fast, stable, and reliable.</p> <h4>What is Debian?</h4>
47
- <p>Debian is a free and open-source Linux distribution that is known for its stability, security, and versatility. Debian is one of the oldest and most popular Linux distributions, with a large and active community of developers and users. Debian supports a wide range of architectures, devices, and software packages, making it suitable for various purposes and environments. Debian uses a technique called debconf to configure the system according to the user's preferences, which makes it easy to customize and maintain. The distro uses dpkg as the low-level package manager and apt or apt-get as the high-level package manager. Debian has a reputation for being robust, reliable, and flexible.</p>
48
- <h4>How do I choose the best base image for my Dockerfile?</h4>
49
- <p>There is no definitive answer to this question, as different base images may have different advantages and disadvantages depending on your needs and preferences. However, some general factors that you may want to consider when choosing a base image are:</p>
50
- <ul>
51
- <li>Size: Smaller images are faster to build, pull, push, and run, and use less storage and bandwidth. However, smaller images may also have fewer features and packages than larger images.</li>
52
- <li>Security: More secure images are less vulnerable to attacks and breaches, and may have better updates and patches. However, more secure images may also have more restrictions and limitations than less secure images.</li>
53
- <li>Compatibility: More compatible images are easier to work with and integrate with other tools and platforms. However, more compatible images may also have more dependencies and conflicts than less compatible images.</li>
54
- <li>Performance: Faster and more efficient images are better for your application's speed and resource consumption. However, faster and more efficient images may also have lower quality or stability than slower and less efficient images.</li>
55
- <li>Maintainability: Easier to maintain images are simpler to update, modify, and troubleshoot. However, easier to maintain images may also have less functionality or customization than harder to maintain images.</li>
56
- </ul>
57
- <p>You may also want to check the documentation, reviews, ratings, and statistics of the base images that you are considering to get more information and feedback from other users.</p>
58
- <h4>How do I test if my Docker image works correctly?</h4>
59
- <p>One way to test if your Docker image works correctly is to run it as a container using the <code>docker run</code> command. The <code>docker run</code> command allows you to create and start a container from an image, optionally with various options and arguments. For example, if you want to run your image in interactive mode with a terminal attached, you can use this command:</p>
60
- <code>docker run -it --rm your_image_name</code>
61
- <p>This will create a container from your image, attach a terminal to it, and remove it when you exit. You can then test your application inside the container by running commands or scripts as you would normally do.</p>
62
- <h4>How do I share my Docker image with others?</h4>
63
- <p>One way to share your Docker image with others is to push it to a registry such as Docker Hub or GitHub Packages. A registry is a service that stores and distributes Docker images. You can create an account on a registry service, create a repository for your image, tag your image with the repository name, and push your image to the repository using the <code>docker push</code> command. For example, if you want to push your image to Docker Hub, you can use these commands:</p>
64
- <code>docker tag your_image_name your_username/your_repository_name<br>
65
- docker push your_username/your_repository_name</code>
66
- <p>This will upload your image to your repository on Docker Hub. You can then share the repository URL with others who can pull your image using the <code>docker pull</code> command.</p> 197e85843d<br />
67
- <br />
68
- <br />
spaces/1phancelerku/anime-remove-background/Download Bleach VS Naruto Ultimate Edition and Experience the Ultimate Anime Crossover Game on PC and Android.md DELETED
@@ -1,93 +0,0 @@
1
-
2
- <h1>Download Bleach vs Naruto Ultimate Edition: A Guide for Anime Fans</h1>
3
- <p>If you are a fan of anime and fighting games, you might have heard of Bleach vs Naruto, a free online 2D flash game developed by the Chinese company 5Dplay. It is a crossover anime fighting game featuring characters from both Bleach and Naruto Shippuden with guest characters from other series such as Rurouni Kenshin, One Piece, Fairy Tail, and more. But did you know that there is a special modification for this game that adds more characters, stages, modes, and features? It is called Bleach vs Naruto Ultimate Edition and it is available for both PC and Android devices. In this article, we will tell you everything you need to know about this amazing mod-pack and how to download it.</p>
4
- <h2>What is Bleach vs Naruto Ultimate Edition?</h2>
5
- <p>Bleach vs Naruto Ultimate Edition is a special modification for Bleach vs Naruto game made by Yuxi in collaboration with original BVN author Jian, 5Dplay. It is not an official update or sequel to the original game, but rather a fan-made project that enhances the game with various new elements. Some of the features of this mod-pack are:</p>
6
- <h2>download bleach vs naruto ultimate edition</h2><br /><p><b><b>DOWNLOAD</b> &rArr; <a href="https://jinyurl.com/2uNN3v">https://jinyurl.com/2uNN3v</a></b></p><br /><br />
7
- <h3>A special modification for Bleach vs Naruto game</h3>
8
- <ul>
9
- <li>It has more than 370 characters and 89 assists on PC version, and 308 characters and 76 assists on Android version. The characters are from various anime series such as Bleach, Naruto, One Piece, Dragon Ball, Hunter x Hunter, My Hero Academia, Demon Slayer, Attack on Titan, and more. You can also find some original characters created by the modders.</li>
10
- <li>It has 102 stages from different anime worlds and locations. You can fight in Soul Society, Konoha Village, Marineford, Namek, Dark Continent, UA High School, Mugen Train, Shiganshina District, and more.</li>
11
- <li>It has two exclusive modes from the latest bvn version 3.6: Watch Mode and Musou Mode. Watch Mode allows you to watch the computer-controlled characters fight each other in various scenarios. Musou Mode allows you to play as one character against multiple enemies in a Dynasty Warriors style.</li>
12
- </ul>
13
- <h3>Available on PC and Android</h3>
14
- <ul>
15
- <li>The mod-pack is compatible with both PC and Android devices. You can download it from various links provided by the author or other sources. The PC version weighs 3.42GB and the Android version weighs 1.99GB.</li>
16
- <li>The mod-pack is also compatible with Android 12, the latest version of the operating system. You can enjoy the game on your new devices without any issues.</li>
17
- <li>The mod-pack also has a complete remake of the user interface, new game effects, new game sounds, general game optimization, and many other improvements.</li>
18
- </ul>
19
- <h2>Why should you download Bleach vs Naruto Ultimate Edition?</h2>
20
- <p>If you are still not convinced that this mod-pack is worth downloading, here are some reasons why you should give it a try:</p>
21
- <h3>Enjoy the crossover anime fighting game featuring characters from Bleach, Naruto, and other series</h3>
22
- <ul>
23
- <li>If you love anime and fighting games, this mod-pack is perfect for you. You can play as your favorite characters from different anime series and see how they match up against each other. You can also create your own team of characters and fight against other teams in various modes.</li <li>Make sure you have enough storage space on your device and a stable internet connection.</li>
24
- <li>Make sure you have the correct password to extract the files.</li>
25
- <li>Make sure you have the latest version of the game and update it if necessary.</li>
26
- <li>Make sure you have the compatible device and operating system for the game.</li>
27
- <li>Make sure you have the proper software or app to run the game such as WinRAR, 7-Zip, or ZArchiver for extracting files, and Flash Player, Adobe AIR, or GameLoop for running the game.</li>
28
- <li>If you have any questions or feedback about the game, you can contact the author Yuxi on his YouTube channel or his Discord server. You can also join the Bleach vs Naruto community on Facebook, Reddit, or other platforms to interact with other players and fans.</li>
29
- </ul>
30
- <h2>Conclusion</h2>
31
- <p>Bleach vs Naruto Ultimate Edition is a special modification for Bleach vs Naruto game that adds more characters, stages, modes, and features to the original game. It is a crossover anime fighting game featuring characters from Bleach, Naruto, and other series. It is available for both PC and Android devices and it is compatible with Android 12. It is a fan-made project that is not affiliated with the official game or the anime series. It is a free online game that you can download from various links provided by the author or other sources. You will need a password to extract the files and a keyboard or a controller to play the game. You can customize your own team and fight against other players online or offline. You can also enjoy the exclusive features from the latest bvn version 3.6 such as Watch Mode and Musou Mode. If you are a fan of anime and fighting games, you should definitely try this mod-pack and have fun.</p>
32
- <h2>FAQs</h2>
33
- <h4>What is the difference between Bleach vs Naruto and Bleach vs Naruto Ultimate Edition?</h4>
34
- <p>Bleach vs Naruto is the original game developed by 5Dplay that features characters from Bleach and Naruto series. Bleach vs Naruto Ultimate Edition is a special modification for Bleach vs Naruto game made by Yuxi that adds more characters, stages, modes, and features from other anime series such as One Piece, Dragon Ball, Demon Slayer, Attack on Titan, My Hero Academia, and more.</p>
35
- <h4>How many characters are there in Bleach vs Naruto Ultimate Edition?</h4>
36
- <p>There are more than 370 characters and 89 assists on PC version, and 308 characters and 76 assists on Android version. The characters are from various anime series such as Bleach, Naruto, One Piece, Dragon Ball, Hunter x Hunter, My Hero Academia, Demon Slayer, Attack on Titan, and more. You can also find some original characters created by the modders.</p>
37
- <p>download bleach vs naruto ultimate edition pc<br />
38
- download bleach vs naruto ultimate edition android<br />
39
- download bleach vs naruto ultimate edition 370+ characters<br />
40
- download bleach vs naruto ultimate edition mediafire<br />
41
- download bleach vs naruto ultimate edition google drive<br />
42
- download bleach vs naruto ultimate edition mega<br />
43
- download bleach vs naruto ultimate edition mod apk<br />
44
- download bleach vs naruto ultimate edition offline<br />
45
- download bleach vs naruto ultimate edition latest version<br />
46
- download bleach vs naruto ultimate edition youtube<br />
47
- download bleach vs naruto ultimate edition kizuma gaming<br />
48
- download bleach vs naruto ultimate edition yuxi<br />
49
- download bleach vs naruto ultimate edition 5dplay<br />
50
- download bleach vs naruto ultimate edition watch mode<br />
51
- download bleach vs naruto ultimate edition musou mode<br />
52
- download bleach vs naruto ultimate edition password<br />
53
- download bleach vs naruto ultimate edition tutorial<br />
54
- download bleach vs naruto ultimate edition free<br />
55
- download bleach vs naruto ultimate edition full game<br />
56
- download bleach vs naruto ultimate edition zip file<br />
57
- download bleach vs naruto ultimate edition for windows 10<br />
58
- download bleach vs naruto ultimate edition for mac<br />
59
- download bleach vs naruto ultimate edition for ios<br />
60
- download bleach vs naruto ultimate edition for linux<br />
61
- download bleach vs naruto ultimate edition for chromebook<br />
62
- download bleach vs naruto ultimate edition no ads<br />
63
- download bleach vs naruto ultimate edition no virus<br />
64
- download bleach vs naruto ultimate edition no survey<br />
65
- download bleach vs naruto ultimate edition no root<br />
66
- download bleach vs naruto ultimate edition no emulator<br />
67
- download bleach vs naruto ultimate edition with all characters unlocked<br />
68
- download bleach vs naruto ultimate edition with new maps and assists<br />
69
- download bleach vs naruto ultimate edition with new effects and sounds<br />
70
- download bleach vs naruto ultimate edition with new user interface and loading screen<br />
71
- download bleach vs naruto ultimate edition with compatibility with android 12<br />
72
- how to download bleach vs naruto ultimate edition on pc<br />
73
- how to download bleach vs naruto ultimate edition on android<br />
74
- how to download bleach vs naruto ultimate edition on phone<br />
75
- how to download bleach vs naruto ultimate edition on tablet<br />
76
- how to download bleach vs naruto ultimate edition on laptop<br />
77
- where to download bleach vs naruto ultimate edition safely and securely<br />
78
- where to download bleach vs naruto ultimate edition from original author's link<br />
79
- where to find the password for downloading bleach vs naruto ultimate edition <br />
80
- where to get the latest updates for downloading bleach vs naruto ultimate edition <br />
81
- where to report bugs or errors for downloading bleach vs naruto ultimate edition <br />
82
- why you should download bleach vs naruto ultimate edition game <br />
83
- why you should not miss the opportunity to play the best anime crossover game ever <br />
84
- why you should join the discord community of the fans of the game <br />
85
- why you should support the creators of the game by donating or subscribing</p>
86
- <h4>How can I play Bleach vs Naruto Ultimate Edition online with other players?</h4>
87
- <p>You can play online with other players using the multiplayer mode. You can join or create a room with up to four players and choose the game mode, stage, time limit, and other settings. You can also chat with other players using the chat box.</p>
88
- <h4>What are Watch Mode and Musou Mode in Bleach vs Naruto Ultimate Edition?</h4>
89
- <p>Watch Mode and Musou Mode are two exclusive modes from the latest bvn version 3.6. Watch Mode allows you to watch the computer-controlled characters fight each other in various scenarios. Musou Mode allows you to play as one character against multiple enemies in a Dynasty Warriors style.</p>
90
- <h4>Where can I find more information about Bleach vs Naruto Ultimate Edition?</h4>
91
- <p>You can find more information about Bleach vs Naruto Ultimate Edition on the author's YouTube channel or his Discord server. You can also join the Bleach vs Naruto community on Facebook, Reddit, or other platforms to interact with other players and fans.</p> 401be4b1e0<br />
92
- <br />
93
- <br />
spaces/1phancelerku/anime-remove-background/Download Dream League Soccer 2020 Mod APK Now and Get Unlimited Coins for Free.md DELETED
@@ -1,80 +0,0 @@
1
- <br />
2
- <h1>Download Dream League Soccer 2020 Mod APK Unlimited Coins</h1>
3
- <p>If you are a fan of soccer games, you must have heard of Dream League Soccer 2020, one of the most popular and realistic soccer games on Android. But what if you want to enjoy the game without any limitations or restrictions? Well, you can do that by downloading Dream League Soccer 2020 mod apk unlimited coins. In this article, we will tell you what is Dream League Soccer 2020, why you should download the mod apk version, and how to do it easily and safely.</p>
4
- <h2>What is Dream League Soccer 2020?</h2>
5
- <p>Dream League Soccer 2020 is a soccer simulation game developed by First Touch Games, a studio that specializes in creating high-quality soccer games for mobile devices. The game lets you create your own dream team, compete in various leagues and tournaments, and customize your stadium and kits. You can also play online with other players from around the world, or offline with friends using local multiplayer mode.</p>
6
- <h2>download dream league soccer 2020 mod apk unlimited coins</h2><br /><p><b><b>Download</b> &#127383; <a href="https://jinyurl.com/2uNTmZ">https://jinyurl.com/2uNTmZ</a></b></p><br /><br />
7
- <h3>Features of Dream League Soccer 2020</h3>
8
- <p>Dream League Soccer 2020 has many features that make it one of the best soccer games on Android. Here are some of them:</p>
9
- <h4>Build your own team</h4>
10
- <p>You can choose from over 4,000 licensed players from different clubs and countries, and create your own squad with your favorite stars. You can also train your players to improve their skills and abilities, and manage their transfers and contracts.</p>
11
- <h4>Play in different modes</h4>
12
- <p>You can play in various modes such as Career Mode, where you start from the bottom and work your way up to the top division; Season Mode, where you compete in a single season with different objectives; Online Mode, where you challenge other players from around the world; and Friendly Mode, where you play against your friends using local multiplayer.</p>
13
- <h4>Customize your stadium and kits</h4>
14
- <p>You can design your own stadium and upgrade it with different facilities and features. You can also customize your kits and logos with various colors and styles.</p>
15
- <h4>Enjoy realistic graphics and sound effects</h4>
16
- <p>The game has stunning graphics and animations that make the gameplay more immersive and realistic. You can also enjoy the authentic sound effects and commentary from professional commentators.</p>
17
- <h3>Why download Dream League Soccer 2020 mod apk unlimited coins?</h3>
18
- <p>While Dream League Soccer 2020 is a free game, it also has some in-app purchases that require real money. For example, you need coins to buy players, items, upgrades, and more. You can earn coins by playing the game, but it can be slow and tedious. That's why many players prefer to download Dream League Soccer 2020 mod apk unlimited coins, which gives them access to unlimited resources and features. Here are some benefits of downloading the mod apk version:</p>
19
- <p>download dls 2020 mod apk unlimited money and gold<br />
20
- how to get unlimited coins in dream league soccer 2020 mod<br />
21
- dream league soccer 2020 hack mod apk download free<br />
22
- dls 2020 mod apk unlimited levels and characters<br />
23
- download dream league soccer 2020 mod apk latest version<br />
24
- dream league soccer 2020 mod apk unlimited gems and coins<br />
25
- dls 2020 mod apk offline with unlimited money<br />
26
- download dream league soccer 2020 mod apk for android<br />
27
- dream league soccer 2020 cheats mod apk unlimited coins<br />
28
- dls 2020 mod apk online with unlimited players<br />
29
- download dream league soccer 2020 mod apk obb file<br />
30
- dream league soccer 2020 mod apk unlimited kits and logos<br />
31
- dls 2020 mod apk unlimited stamina and energy<br />
32
- download dream league soccer 2020 mod apk revdl<br />
33
- dream league soccer 2020 mod apk unlimited skills and abilities<br />
34
- dls 2020 mod apk unlimited transfers and signings<br />
35
- download dream league soccer 2020 mod apk rexdl<br />
36
- dream league soccer 2020 mod apk unlimited trophies and medals<br />
37
- dls 2020 mod apk unlimited coins no root<br />
38
- download dream league soccer 2020 mod apk hack version<br />
39
- dream league soccer 2020 mod apk unlimited diamonds and coins<br />
40
- dls 2020 mod apk unlimited coins and keys<br />
41
- download dream league soccer 2020 mod apk data file host<br />
42
- dream league soccer 2020 mod apk unlimited everything unlocked<br />
43
- dls 2020 mod apk unlimited coins and tickets<br />
44
- download dream league soccer 2020 mod apk for ios<br />
45
- dream league soccer 2020 mod apk unlimited coins and stars<br />
46
- dls 2020 mod apk unlimited coins and vip points<br />
47
- download dream league soccer 2020 mod apk for pc<br />
48
- dream league soccer 2020 mod apk unlimited coins and all players unlocked</p>
49
- <h4>Get unlimited coins and money</h4>
50
- <p>With the mod apk version, you don't have to worry about running out of coins or money. You can use them to buy anything you want in the game, such as players, items, upgrades, etc. You can also use them to skip ads and speed up the loading time.</p>
51
- <h4>Unlock all players and items</h4>
52
- <p>With the mod apk version, you don't have to wait for unlocking players or items. You can get them all for free without any restrictions or limitations. You can also upgrade them to the maximum level and make them more powerful and efficient.</p>
53
- <h4>Remove ads and enjoy faster loading</h4>
54
- <p>With the mod apk version, you don't have to deal with annoying ads that interrupt your gameplay and waste your time. You can also enjoy faster loading and smoother performance without any lags or glitches.</p>
55
- <h3>How to download Dream League Soccer 2020 mod apk unlimited coins?</h3>
56
- <p>Downloading Dream League Soccer 2020 mod apk unlimited coins is not difficult, but you need to follow some steps carefully to avoid any errors or problems. Here are the steps you need to follow:</p>
57
- <h4>Step 1: Download the mod apk file from a trusted source</h4>
58
- <p>The first thing you need to do is to find a reliable and safe website that offers the mod apk file for Dream League Soccer 2020. You can search on Google or use the link we have provided below. Make sure you download the latest version of the mod apk file that is compatible with your device.</p>
59
- <h4>Step 2: Enable unknown sources on your device settings</h4>
60
- <p>The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may see a warning message, but don't worry, it's safe.</p>
61
- <h4>Step 3: Install the mod apk file and launch the game</h4>
62
- <p>The final thing you need to do is to install the mod apk file and launch the game. To do this, go to your file manager, find the downloaded mod apk file, and tap on it. You may see a confirmation message, just tap on install and wait for a few seconds. Once the installation is done, you can open the game and enjoy it with unlimited coins and features.</p>
63
- <h3>Conclusion</h3>
64
- <p>Dream League Soccer 2020 is a great game for soccer lovers, but it can be even better with the mod apk version that gives you unlimited coins and features. You can download Dream League Soccer 2020 mod apk unlimited coins easily and safely by following the steps we have explained above. So what are you waiting for? Download it now and have fun!</p>
65
- <h3>FAQs</h3>
66
- <p>Here are some frequently asked questions about Dream League Soccer 2020 mod apk unlimited coins:</p>
67
- <ul>
68
- <li><b>Is Dream League Soccer 2020 mod apk unlimited coins safe?</b></li>
69
- <p>Yes, it is safe as long as you download it from a trusted source and enable unknown sources on your device settings. However, we recommend that you use it at your own risk and discretion, as we are not responsible for any damages or issues that may occur.</p>
70
- <li><b>Is Dream League Soccer 2020 mod apk unlimited coins legal?</b></li>
71
- <p>No, it is not legal, as it violates the terms and conditions of the original game. It may also result in a ban or suspension from the online mode or other features of the game. Therefore, we advise that you use it only for personal and educational purposes, and not for commercial or malicious purposes.</p>
72
- <li><b>Does Dream League Soccer 2020 mod apk unlimited coins require root access?</b></li>
73
- <p>No, it does not require root access, as it works on both rooted and non-rooted devices. However, some features may work better on rooted devices than on non-rooted devices.</p>
74
- <li><b>Does Dream League Soccer 2020 mod apk unlimited coins work offline?</b></li>
75
- <p>Yes, it works offline, as you can play the game without an internet connection. However, some features may require an internet connection, such as online mode, updates, etc.</p>
76
- <li><b>Can I update Dream League Soccer 2020 mod apk unlimited coins?</b></li>
77
- <p>No, you cannot update Dream League Soccer 2020 mod apk unlimited coins, as it may cause errors or problems with the game. You need to uninstall the mod apk version and install the original version from the Google Play Store if you want to update the game.</p>
78
- </ul>
79
- <br />
80
- <br />
spaces/52Hz/SUNet_AWGN_denoising/main_test_SUNet.py DELETED
@@ -1,143 +0,0 @@
1
- import argparse
2
- import cv2
3
- import glob
4
- import numpy as np
5
- from collections import OrderedDict
6
- from skimage import img_as_ubyte
7
- import os
- import shutil
8
- import torch
9
- import requests
10
- from PIL import Image
11
- import math
12
- import yaml
13
- import torchvision.transforms.functional as TF
14
- import torch.nn.functional as F
15
- from natsort import natsorted
16
- from model.SUNet import SUNet_model
17
-
18
- with open('training.yaml', 'r') as config:
19
- opt = yaml.safe_load(config)
20
-
21
- def clean_folder(folder):
22
- for filename in os.listdir(folder):
23
- file_path = os.path.join(folder, filename)
24
- try:
25
- if os.path.isfile(file_path) or os.path.islink(file_path):
26
- os.unlink(file_path)
27
- elif os.path.isdir(file_path):
28
- shutil.rmtree(file_path)
29
- except Exception as e:
30
- print('Failed to delete %s. Reason: %s' % (file_path, e))
31
-
32
- def main():
33
- parser = argparse.ArgumentParser(description='Demo Image Restoration')
34
- parser.add_argument('--input_dir', default='test/', type=str, help='Input images')
35
- parser.add_argument('--window_size', default=8, type=int, help='window size')
36
- parser.add_argument('--size', default=256, type=int, help='model image patch size')
37
- parser.add_argument('--stride', default=128, type=int, help='reconstruction stride')
38
- parser.add_argument('--result_dir', default='result/', type=str, help='Directory for results')
39
- parser.add_argument('--weights',
40
- default='experiments/pretrained_models/AWGN_denoising_SUNet.pth', type=str,
41
- help='Path to weights')
42
-
43
- args = parser.parse_args()
44
-
45
- inp_dir = args.input_dir
46
- out_dir = args.result_dir
47
-
48
- os.makedirs(out_dir, exist_ok=True)
49
-
50
- files = natsorted(glob.glob(os.path.join(inp_dir, '*')))
51
-
52
- if len(files) == 0:
53
- raise Exception(f"No files found at {inp_dir}")
54
-
55
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
56
-
57
- # Load corresponding models architecture and weights
58
- model = SUNet_model(opt)
59
- model = model.to(device)
60
- model.eval()
61
- load_checkpoint(model, args.weights)
62
- stride = args.stride
63
- model_img = args.size
64
-
65
- for file_ in files:
66
- img = Image.open(file_).convert('RGB')
67
- input_ = TF.to_tensor(img).unsqueeze(0).to(device)
68
- with torch.no_grad():
69
- # pad to multiple of 256
70
- square_input_, mask, max_wh = overlapped_square(input_.to(device), kernel=model_img, stride=stride)
71
- output_patch = torch.zeros(square_input_[0].shape).type_as(square_input_[0])
72
- for i, data in enumerate(square_input_):
73
- restored = model(square_input_[i])
74
- if i == 0:
75
- output_patch += restored
76
- else:
77
- output_patch = torch.cat([output_patch, restored], dim=0)
78
-
79
- B, C, PH, PW = output_patch.shape
80
- weight = torch.ones(B, C, PH, PH).type_as(output_patch) # weight_mask
81
-
82
- patch = output_patch.contiguous().view(B, C, -1, model_img*model_img)
83
- patch = patch.permute(2, 1, 3, 0) # B, C, K*K, #patches
84
- patch = patch.contiguous().view(1, C*model_img*model_img, -1)
85
-
86
- weight_mask = weight.contiguous().view(B, C, -1, model_img * model_img)
87
- weight_mask = weight_mask.permute(2, 1, 3, 0) # B, C, K*K, #patches
88
- weight_mask = weight_mask.contiguous().view(1, C * model_img * model_img, -1)
89
-
90
- restored = F.fold(patch, output_size=(max_wh, max_wh), kernel_size=model_img, stride=stride)
91
- we_mk = F.fold(weight_mask, output_size=(max_wh, max_wh), kernel_size=model_img, stride=stride)
92
- restored /= we_mk
93
-
94
- restored = torch.masked_select(restored, mask.bool()).reshape(input_.shape)
95
- restored = torch.clamp(restored, 0, 1)
96
-
97
- restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
98
- restored = img_as_ubyte(restored[0])
99
-
100
- f = os.path.splitext(os.path.split(file_)[-1])[0]
101
- save_img((os.path.join(out_dir, f + '.png')), restored)
102
- clean_folder(inp_dir)
103
-
104
- def save_img(filepath, img):
105
- cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
106
-
107
-
108
- def load_checkpoint(model, weights):
109
- checkpoint = torch.load(weights, map_location=torch.device('cpu'))
110
- try:
111
- model.load_state_dict(checkpoint["state_dict"])
112
- except:
113
- state_dict = checkpoint["state_dict"]
114
- new_state_dict = OrderedDict()
115
- for k, v in state_dict.items():
116
- name = k[7:] # remove `module.`
117
- new_state_dict[name] = v
118
- model.load_state_dict(new_state_dict)
119
-
120
- def overlapped_square(timg, kernel=256, stride=128):
121
- patch_images = []
122
- b, c, h, w = timg.size()
123
- # 321, 481
124
- X = int(math.ceil(max(h, w) / float(kernel)) * kernel)
125
- img = torch.zeros(1, 3, X, X).type_as(timg) # 3, h, w
126
- mask = torch.zeros(1, 1, X, X).type_as(timg)
127
-
128
- img[:, :, ((X - h) // 2):((X - h) // 2 + h), ((X - w) // 2):((X - w) // 2 + w)] = timg
129
- mask[:, :, ((X - h) // 2):((X - h) // 2 + h), ((X - w) // 2):((X - w) // 2 + w)].fill_(1.0)
130
-
131
- patch = img.unfold(3, kernel, stride).unfold(2, kernel, stride)
132
- patch = patch.contiguous().view(b, c, -1, kernel, kernel) # B, C, #patches, K, K
133
- patch = patch.permute(2, 0, 1, 4, 3) # patches, B, C, K, K
134
-
135
- for each in range(len(patch)):
136
- patch_images.append(patch[each])
137
-
138
- return patch_images, mask, X
139
-
140
-
141
-
142
- if __name__ == '__main__':
143
- main()
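
For reference, a minimal invocation sketch for the script above, based on its argparse defaults; it assumes the pretrained weights and `training.yaml` it loads are present locally. Note that the script calls `clean_folder(inp_dir)` at the end, so the input folder is emptied after inference.

```python
# Hypothetical invocation of main_test_SUNet.py using the defaults defined above.
import subprocess

subprocess.run(
    [
        "python", "main_test_SUNet.py",
        "--input_dir", "test/",        # folder of noisy input images (emptied afterwards)
        "--result_dir", "result/",     # denoised outputs are written here as .png
        "--weights", "experiments/pretrained_models/AWGN_denoising_SUNet.pth",
        "--size", "256",               # patch size the model expects
        "--stride", "128",             # overlap stride used when re-assembling patches
    ],
    check=True,
)
```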
spaces/7hao/bingo/src/app/loading.css DELETED
@@ -1,68 +0,0 @@
1
- ::-webkit-scrollbar {
2
- width: 10px;
3
- height: 10px;
4
- display: none;
5
- }
6
-
7
- ::-webkit-scrollbar-button:start:decrement,
8
- ::-webkit-scrollbar-button:end:increment {
9
- height: 30px;
10
- background-color: transparent;
11
- }
12
-
13
- ::-webkit-scrollbar-track-piece {
14
- background-color: #3b3b3b;
15
- -webkit-border-radius: 16px;
16
- }
17
-
18
- ::-webkit-scrollbar-thumb:vertical {
19
- height: 50px;
20
- background-color: #666;
21
- border: 1px solid #eee;
22
- -webkit-border-radius: 6px;
23
- }
24
-
25
- /* loading start */
26
- .loading-spinner {
27
- display: flex;
28
- justify-content: center;
29
- align-items: center;
30
- height: 100vh;
31
- opacity: 1;
32
- transition: opacity .8s ease-out;
33
- }
34
-
35
- .loading-spinner.hidden {
36
- opacity: 0;
37
- }
38
-
39
- .loading-spinner>div {
40
- width: 30px;
41
- height: 30px;
42
- background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%);
43
-
44
- border-radius: 100%;
45
- display: inline-block;
46
- animation: sk-bouncedelay 1.4s infinite ease-in-out both;
47
- }
48
-
49
- .loading-spinner .bounce1 {
50
- animation-delay: -0.32s;
51
- }
52
-
53
- .loading-spinner .bounce2 {
54
- animation-delay: -0.16s;
55
- }
56
-
57
- @keyframes sk-bouncedelay {
58
-
59
- 0%,
60
- 80%,
61
- 100% {
62
- transform: scale(0);
63
- }
64
-
65
- 40% {
66
- transform: scale(1.0);
67
- }
68
- }
spaces/AIDHD/GrammarCorrector/app.py DELETED
@@ -1,34 +0,0 @@
1
- import streamlit as st
2
-
3
-
4
- st.title("Correct Grammar with Transformers 🦄")
5
- st.write("")
6
- st.write("Input your text here!")
7
-
8
- default_value = "Mike and Anna is skiing"
9
- sent = st.text_area("Text", default_value, height = 50)
10
- num_return_sequences = st.sidebar.number_input('Number of Return Sequences', min_value=1, max_value=3, value=1, step=1)
11
-
12
- ### Run Model
13
- from transformers import T5ForConditionalGeneration, T5Tokenizer
14
- import torch
15
- torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
16
- tokenizer = T5Tokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
17
- model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)
18
-
19
- def correct_grammar(input_text,num_return_sequences=num_return_sequences):
20
- batch = tokenizer([input_text],truncation=True,padding='max_length',max_length=len(input_text), return_tensors="pt").to(torch_device)
21
- results = model.generate(**batch,max_length=len(input_text),num_beams=2, num_return_sequences=num_return_sequences, temperature=1.5)
22
- #answer = tokenizer.batch_decode(results[0], skip_special_tokens=True)
23
- return results
24
-
25
- ##Prompts
26
- results = correct_grammar(sent, num_return_sequences)
27
-
28
- generated_sequences = []
29
- for generated_sequence_idx, generated_sequence in enumerate(results):
30
- # Decode text
31
- text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, skip_special_tokens=True)
32
- generated_sequences.append(text)
33
-
34
- st.write(generated_sequences)
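
A minimal sketch of the same correction call without the Streamlit UI, using the public checkpoint the app loads; the fixed `max_length=64` token budget here is an assumption, replacing the app's character-count heuristic.

```python
# Standalone grammar-correction sketch mirroring the app above (assumed 64-token budget).
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = T5Tokenizer.from_pretrained("deep-learning-analytics/GrammarCorrector")
model = T5ForConditionalGeneration.from_pretrained("deep-learning-analytics/GrammarCorrector").to(device)

text = "Mike and Anna is skiing"
batch = tokenizer([text], truncation=True, padding="max_length", max_length=64, return_tensors="pt").to(device)
outputs = model.generate(**batch, max_length=64, num_beams=2, num_return_sequences=1)
print(tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
```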
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/logger.py DELETED
@@ -1,30 +0,0 @@
1
- import logging
2
-
3
-
4
- def setup_logging(log_file, level, include_host=False):
5
- if include_host:
6
- import socket
7
-
8
- hostname = socket.gethostname()
9
- formatter = logging.Formatter(
10
- f"%(asctime)s | {hostname} | %(levelname)s | %(message)s",
11
- datefmt="%Y-%m-%d,%H:%M:%S",
12
- )
13
- else:
14
- formatter = logging.Formatter(
15
- "%(asctime)s | %(levelname)s | %(message)s", datefmt="%Y-%m-%d,%H:%M:%S"
16
- )
17
-
18
- logging.root.setLevel(level)
19
- loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
20
- for logger in loggers:
21
- logger.setLevel(level)
22
-
23
- stream_handler = logging.StreamHandler()
24
- stream_handler.setFormatter(formatter)
25
- logging.root.addHandler(stream_handler)
26
-
27
- if log_file:
28
- file_handler = logging.FileHandler(filename=log_file)
29
- file_handler.setFormatter(formatter)
30
- logging.root.addHandler(file_handler)
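
A short usage sketch for `setup_logging` above, assuming the repo layout shown in the file path is importable; the log file name is an arbitrary example.

```python
import logging
from audioldm.clap.training.logger import setup_logging  # module path taken from the file above

setup_logging(log_file="train.log", level=logging.INFO, include_host=False)
logging.info("logger initialised")     # printed to stdout and appended to train.log
logging.debug("hidden at INFO level")  # filtered out by the configured level
```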
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/vocoder/hifigan.py DELETED
@@ -1,63 +0,0 @@
1
- import torch.nn.functional as F
2
- from torch import nn
3
-
4
- from text_to_speech.modules.vocoder.hifigan.hifigan import HifiGanGenerator, MultiPeriodDiscriminator, MultiScaleDiscriminator, \
5
- generator_loss, feature_loss, discriminator_loss
6
- from text_to_speech.modules.vocoder.hifigan.mel_utils import mel_spectrogram
7
- from text_to_speech.modules.vocoder.hifigan.stft_loss import MultiResolutionSTFTLoss
8
- from tasks.vocoder.vocoder_base import VocoderBaseTask
9
- from text_to_speech.utils.commons.hparams import hparams
10
- from text_to_speech.utils.nn.model_utils import print_arch
11
-
12
-
13
- class HifiGanTask(VocoderBaseTask):
14
- def build_model(self):
15
- self.model_gen = HifiGanGenerator(hparams)
16
- self.model_disc = nn.ModuleDict()
17
- self.model_disc['mpd'] = MultiPeriodDiscriminator()
18
- self.model_disc['msd'] = MultiScaleDiscriminator()
19
- self.stft_loss = MultiResolutionSTFTLoss()
20
- print_arch(self.model_gen)
21
- if hparams['load_ckpt'] != '':
22
- self.load_ckpt(hparams['load_ckpt'], 'model_gen', 'model_gen', force=True, strict=True)
23
- self.load_ckpt(hparams['load_ckpt'], 'model_disc', 'model_disc', force=True, strict=True)
24
- return self.model_gen
25
-
26
- def _training_step(self, sample, batch_idx, optimizer_idx):
27
- mel = sample['mels']
28
- y = sample['wavs']
29
- f0 = sample['f0']
30
- loss_output = {}
31
- if optimizer_idx == 0:
32
- #######################
33
- # Generator #
34
- #######################
35
- y_ = self.model_gen(mel, f0)
36
- y_mel = mel_spectrogram(y.squeeze(1), hparams).transpose(1, 2)
37
- y_hat_mel = mel_spectrogram(y_.squeeze(1), hparams).transpose(1, 2)
38
- loss_output['mel'] = F.l1_loss(y_hat_mel, y_mel) * hparams['lambda_mel']
39
- _, y_p_hat_g, fmap_f_r, fmap_f_g = self.model_disc['mpd'](y, y_, mel)
40
- _, y_s_hat_g, fmap_s_r, fmap_s_g = self.model_disc['msd'](y, y_, mel)
41
- loss_output['a_p'] = generator_loss(y_p_hat_g) * hparams['lambda_adv']
42
- loss_output['a_s'] = generator_loss(y_s_hat_g) * hparams['lambda_adv']
43
- if hparams['use_fm_loss']:
44
- loss_output['fm_f'] = feature_loss(fmap_f_r, fmap_f_g)
45
- loss_output['fm_s'] = feature_loss(fmap_s_r, fmap_s_g)
46
- if hparams['use_ms_stft']:
47
- loss_output['sc'], loss_output['mag'] = self.stft_loss(y.squeeze(1), y_.squeeze(1))
48
- self.y_ = y_.detach()
49
- self.y_mel = y_mel.detach()
50
- self.y_hat_mel = y_hat_mel.detach()
51
- else:
52
- #######################
53
- # Discriminator #
54
- #######################
55
- y_ = self.y_
56
- # MPD
57
- y_p_hat_r, y_p_hat_g, _, _ = self.model_disc['mpd'](y, y_.detach(), mel)
58
- loss_output['r_p'], loss_output['f_p'] = discriminator_loss(y_p_hat_r, y_p_hat_g)
59
- # MSD
60
- y_s_hat_r, y_s_hat_g, _, _ = self.model_disc['msd'](y, y_.detach(), mel)
61
- loss_output['r_s'], loss_output['f_s'] = discriminator_loss(y_s_hat_r, y_s_hat_g)
62
- total_loss = sum(loss_output.values())
63
- return total_loss, loss_output
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/nn/seq_utils.py DELETED
@@ -1,311 +0,0 @@
1
- from collections import defaultdict
2
- import torch
3
- import torch.nn.functional as F
4
-
5
-
6
- def make_positions(tensor, padding_idx):
7
- """Replace non-padding symbols with their position numbers.
8
-
9
- Position numbers begin at padding_idx+1. Padding symbols are ignored.
10
- """
11
- # The series of casts and type-conversions here are carefully
12
- # balanced to both work with ONNX export and XLA. In particular XLA
13
- # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
14
- # how to handle the dtype kwarg in cumsum.
15
- mask = tensor.ne(padding_idx).int()
16
- return (
17
- torch.cumsum(mask, dim=1).type_as(mask) * mask
18
- ).long() + padding_idx
19
-
20
-
21
- def softmax(x, dim):
22
- return F.softmax(x, dim=dim, dtype=torch.float32)
23
-
24
-
25
- def sequence_mask(lengths, maxlen, dtype=torch.bool):
26
- if maxlen is None:
27
- maxlen = lengths.max()
28
- mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
29
- mask.type(dtype)
30
- return mask
31
-
32
-
33
- def weights_nonzero_speech(target):
34
- # target : B x T x mel
35
- # Assign weight 1.0 to all labels except for padding (id=0).
36
- dim = target.size(-1)
37
- return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
38
-
39
-
40
- INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
41
-
42
-
43
- def _get_full_incremental_state_key(module_instance, key):
44
- module_name = module_instance.__class__.__name__
45
-
46
- # assign a unique ID to each module instance, so that incremental state is
47
- # not shared across module instances
48
- if not hasattr(module_instance, '_instance_id'):
49
- INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
50
- module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
51
-
52
- return '{}.{}.{}'.format(module_name, module_instance._instance_id, key)
53
-
54
-
55
- def get_incremental_state(module, incremental_state, key):
56
- """Helper for getting incremental state for an nn.Module."""
57
- full_key = _get_full_incremental_state_key(module, key)
58
- if incremental_state is None or full_key not in incremental_state:
59
- return None
60
- return incremental_state[full_key]
61
-
62
-
63
- def set_incremental_state(module, incremental_state, key, value):
64
- """Helper for setting incremental state for an nn.Module."""
65
- if incremental_state is not None:
66
- full_key = _get_full_incremental_state_key(module, key)
67
- incremental_state[full_key] = value
68
-
69
-
70
- def fill_with_neg_inf(t):
71
- """FP16-compatible function that fills a tensor with -inf."""
72
- return t.float().fill_(float('-inf')).type_as(t)
73
-
74
-
75
- def fill_with_neg_inf2(t):
76
- """FP16-compatible function that fills a tensor with -inf."""
77
- return t.float().fill_(-1e8).type_as(t)
78
-
79
-
80
- def select_attn(attn_logits, type='best'):
81
- """
82
-
83
- :param attn_logits: [n_layers, B, n_head, T_sp, T_txt]
84
- :return:
85
- """
86
- encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2)
87
- # [n_layers * n_head, B, T_sp, T_txt]
88
- encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1)
89
- if type == 'best':
90
- indices = encdec_attn.max(-1).values.sum(-1).argmax(0)
91
- encdec_attn = encdec_attn.gather(
92
- 0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0]
93
- return encdec_attn
94
- elif type == 'mean':
95
- return encdec_attn.mean(0)
96
-
97
-
98
- def make_pad_mask(lengths, xs=None, length_dim=-1):
99
- """Make mask tensor containing indices of padded part.
100
- Args:
101
- lengths (LongTensor or List): Batch of lengths (B,).
102
- xs (Tensor, optional): The reference tensor.
103
- If set, masks will be the same shape as this tensor.
104
- length_dim (int, optional): Dimension indicator of the above tensor.
105
- See the example.
106
- Returns:
107
- Tensor: Mask tensor containing indices of padded part.
108
- dtype=torch.uint8 in PyTorch 1.2-
109
- dtype=torch.bool in PyTorch 1.2+ (including 1.2)
110
- Examples:
111
- With only lengths.
112
- >>> lengths = [5, 3, 2]
113
- >>> make_non_pad_mask(lengths)
114
- masks = [[0, 0, 0, 0 ,0],
115
- [0, 0, 0, 1, 1],
116
- [0, 0, 1, 1, 1]]
117
- With the reference tensor.
118
- >>> xs = torch.zeros((3, 2, 4))
119
- >>> make_pad_mask(lengths, xs)
120
- tensor([[[0, 0, 0, 0],
121
- [0, 0, 0, 0]],
122
- [[0, 0, 0, 1],
123
- [0, 0, 0, 1]],
124
- [[0, 0, 1, 1],
125
- [0, 0, 1, 1]]], dtype=torch.uint8)
126
- >>> xs = torch.zeros((3, 2, 6))
127
- >>> make_pad_mask(lengths, xs)
128
- tensor([[[0, 0, 0, 0, 0, 1],
129
- [0, 0, 0, 0, 0, 1]],
130
- [[0, 0, 0, 1, 1, 1],
131
- [0, 0, 0, 1, 1, 1]],
132
- [[0, 0, 1, 1, 1, 1],
133
- [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
134
- With the reference tensor and dimension indicator.
135
- >>> xs = torch.zeros((3, 6, 6))
136
- >>> make_pad_mask(lengths, xs, 1)
137
- tensor([[[0, 0, 0, 0, 0, 0],
138
- [0, 0, 0, 0, 0, 0],
139
- [0, 0, 0, 0, 0, 0],
140
- [0, 0, 0, 0, 0, 0],
141
- [0, 0, 0, 0, 0, 0],
142
- [1, 1, 1, 1, 1, 1]],
143
- [[0, 0, 0, 0, 0, 0],
144
- [0, 0, 0, 0, 0, 0],
145
- [0, 0, 0, 0, 0, 0],
146
- [1, 1, 1, 1, 1, 1],
147
- [1, 1, 1, 1, 1, 1],
148
- [1, 1, 1, 1, 1, 1]],
149
- [[0, 0, 0, 0, 0, 0],
150
- [0, 0, 0, 0, 0, 0],
151
- [1, 1, 1, 1, 1, 1],
152
- [1, 1, 1, 1, 1, 1],
153
- [1, 1, 1, 1, 1, 1],
154
- [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
155
- >>> make_pad_mask(lengths, xs, 2)
156
- tensor([[[0, 0, 0, 0, 0, 1],
157
- [0, 0, 0, 0, 0, 1],
158
- [0, 0, 0, 0, 0, 1],
159
- [0, 0, 0, 0, 0, 1],
160
- [0, 0, 0, 0, 0, 1],
161
- [0, 0, 0, 0, 0, 1]],
162
- [[0, 0, 0, 1, 1, 1],
163
- [0, 0, 0, 1, 1, 1],
164
- [0, 0, 0, 1, 1, 1],
165
- [0, 0, 0, 1, 1, 1],
166
- [0, 0, 0, 1, 1, 1],
167
- [0, 0, 0, 1, 1, 1]],
168
- [[0, 0, 1, 1, 1, 1],
169
- [0, 0, 1, 1, 1, 1],
170
- [0, 0, 1, 1, 1, 1],
171
- [0, 0, 1, 1, 1, 1],
172
- [0, 0, 1, 1, 1, 1],
173
- [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
174
- """
175
- if length_dim == 0:
176
- raise ValueError("length_dim cannot be 0: {}".format(length_dim))
177
-
178
- if not isinstance(lengths, list):
179
- lengths = lengths.tolist()
180
- bs = int(len(lengths))
181
- if xs is None:
182
- maxlen = int(max(lengths))
183
- else:
184
- maxlen = xs.size(length_dim)
185
-
186
- seq_range = torch.arange(0, maxlen, dtype=torch.int64)
187
- seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
188
- seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
189
- mask = seq_range_expand >= seq_length_expand
190
-
191
- if xs is not None:
192
- assert xs.size(0) == bs, (xs.size(0), bs)
193
-
194
- if length_dim < 0:
195
- length_dim = xs.dim() + length_dim
196
- # ind = (:, None, ..., None, :, , None, ..., None)
197
- ind = tuple(
198
- slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
199
- )
200
- mask = mask[ind].expand_as(xs).to(xs.device)
201
- return mask
202
-
203
-
204
- def make_non_pad_mask(lengths, xs=None, length_dim=-1):
205
- """Make mask tensor containing indices of non-padded part.
206
- Args:
207
- lengths (LongTensor or List): Batch of lengths (B,).
208
- xs (Tensor, optional): The reference tensor.
209
- If set, masks will be the same shape as this tensor.
210
- length_dim (int, optional): Dimension indicator of the above tensor.
211
- See the example.
212
- Returns:
213
- ByteTensor: mask tensor containing indices of padded part.
214
- dtype=torch.uint8 in PyTorch 1.2-
215
- dtype=torch.bool in PyTorch 1.2+ (including 1.2)
216
- Examples:
217
- With only lengths.
218
- >>> lengths = [5, 3, 2]
219
- >>> make_non_pad_mask(lengths)
220
- masks = [[1, 1, 1, 1 ,1],
221
- [1, 1, 1, 0, 0],
222
- [1, 1, 0, 0, 0]]
223
- With the reference tensor.
224
- >>> xs = torch.zeros((3, 2, 4))
225
- >>> make_non_pad_mask(lengths, xs)
226
- tensor([[[1, 1, 1, 1],
227
- [1, 1, 1, 1]],
228
- [[1, 1, 1, 0],
229
- [1, 1, 1, 0]],
230
- [[1, 1, 0, 0],
231
- [1, 1, 0, 0]]], dtype=torch.uint8)
232
- >>> xs = torch.zeros((3, 2, 6))
233
- >>> make_non_pad_mask(lengths, xs)
234
- tensor([[[1, 1, 1, 1, 1, 0],
235
- [1, 1, 1, 1, 1, 0]],
236
- [[1, 1, 1, 0, 0, 0],
237
- [1, 1, 1, 0, 0, 0]],
238
- [[1, 1, 0, 0, 0, 0],
239
- [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
240
- With the reference tensor and dimension indicator.
241
- >>> xs = torch.zeros((3, 6, 6))
242
- >>> make_non_pad_mask(lengths, xs, 1)
243
- tensor([[[1, 1, 1, 1, 1, 1],
244
- [1, 1, 1, 1, 1, 1],
245
- [1, 1, 1, 1, 1, 1],
246
- [1, 1, 1, 1, 1, 1],
247
- [1, 1, 1, 1, 1, 1],
248
- [0, 0, 0, 0, 0, 0]],
249
- [[1, 1, 1, 1, 1, 1],
250
- [1, 1, 1, 1, 1, 1],
251
- [1, 1, 1, 1, 1, 1],
252
- [0, 0, 0, 0, 0, 0],
253
- [0, 0, 0, 0, 0, 0],
254
- [0, 0, 0, 0, 0, 0]],
255
- [[1, 1, 1, 1, 1, 1],
256
- [1, 1, 1, 1, 1, 1],
257
- [0, 0, 0, 0, 0, 0],
258
- [0, 0, 0, 0, 0, 0],
259
- [0, 0, 0, 0, 0, 0],
260
- [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
261
- >>> make_non_pad_mask(lengths, xs, 2)
262
- tensor([[[1, 1, 1, 1, 1, 0],
263
- [1, 1, 1, 1, 1, 0],
264
- [1, 1, 1, 1, 1, 0],
265
- [1, 1, 1, 1, 1, 0],
266
- [1, 1, 1, 1, 1, 0],
267
- [1, 1, 1, 1, 1, 0]],
268
- [[1, 1, 1, 0, 0, 0],
269
- [1, 1, 1, 0, 0, 0],
270
- [1, 1, 1, 0, 0, 0],
271
- [1, 1, 1, 0, 0, 0],
272
- [1, 1, 1, 0, 0, 0],
273
- [1, 1, 1, 0, 0, 0]],
274
- [[1, 1, 0, 0, 0, 0],
275
- [1, 1, 0, 0, 0, 0],
276
- [1, 1, 0, 0, 0, 0],
277
- [1, 1, 0, 0, 0, 0],
278
- [1, 1, 0, 0, 0, 0],
279
- [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
280
- """
281
- return ~make_pad_mask(lengths, xs, length_dim)
282
-
283
-
284
- def get_mask_from_lengths(lengths):
285
- max_len = torch.max(lengths).item()
286
- ids = torch.arange(0, max_len).to(lengths.device)
287
- mask = (ids < lengths.unsqueeze(1)).bool()
288
- return mask
289
-
290
-
291
- def group_hidden_by_segs(h, seg_ids, max_len):
292
- """
293
-
294
- :param h: [B, T, H]
295
- :param seg_ids: [B, T]
296
- :return: h_ph: [B, T_ph, H]
297
- """
298
- B, T, H = h.shape
299
- h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
300
- all_ones = h.new_ones(h.shape[:2])
301
- cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
302
- h_gby_segs = h_gby_segs[:, 1:]
303
- cnt_gby_segs = cnt_gby_segs[:, 1:]
304
- h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
305
- return h_gby_segs, cnt_gby_segs
306
-
307
- def expand_word2ph(word_encoding, ph2word):
308
- word_encoding = F.pad(word_encoding,[0,0,1,0])
309
- ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]])
310
- out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H]
311
- return out
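
A small sanity-check sketch for `group_hidden_by_segs` above: frames sharing a segment id are mean-pooled, with id 0 reserved for padding. The toy tensors are illustrative only.

```python
import torch
from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs  # module path from the file above

h = torch.arange(10, dtype=torch.float32).reshape(1, 5, 2)  # [B=1, T=5, H=2]
seg_ids = torch.tensor([[1, 1, 2, 2, 2]])                   # segment id per frame (0 = padding)
h_segs, counts = group_hidden_by_segs(h, seg_ids, max_len=2)

print(counts)        # tensor([[2., 3.]]) -> frames per segment
print(h_segs[0, 0])  # mean of h[0, :2]
print(h_segs[0, 1])  # mean of h[0, 2:]
```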
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/app.py DELETED
@@ -1,160 +0,0 @@
1
- import os
2
- import numpy as np
3
- import torch
4
- from torch import no_grad, LongTensor
5
- import argparse
6
- import commons
7
- from mel_processing import spectrogram_torch
8
- import utils
9
- from models import SynthesizerTrn
10
- import gradio as gr
11
- import librosa
12
- import webbrowser
13
-
14
- from text import text_to_sequence, _clean_text
15
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
16
- language_marks = {
17
- "English": "[EN]",
18
- "Japanese": "",
19
- "日本語": "[JA]",
20
- "简体中文": "[ZH]",
21
- "Mix": "",
22
- }
23
- lang = ['English','日本語', '简体中文','Mix']
24
- def get_text(text, hps, is_symbol):
25
- text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
26
- if hps.data.add_blank:
27
- text_norm = commons.intersperse(text_norm, 0)
28
- text_norm = LongTensor(text_norm)
29
- return text_norm
30
-
31
- def create_tts_fn(model, hps, speaker_ids):
32
- def tts_fn(text, speaker, language, speed):
33
- if language is not None:
34
- text = language_marks[language] + text + language_marks[language]
35
- speaker_id = speaker_ids[speaker]
36
- stn_tst = get_text(text, hps, False)
37
- with no_grad():
38
- x_tst = stn_tst.unsqueeze(0).to(device)
39
- x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
40
- sid = LongTensor([speaker_id]).to(device)
41
- audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
42
- length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
43
- del stn_tst, x_tst, x_tst_lengths, sid
44
- return "Success", (hps.data.sampling_rate, audio)
45
-
46
- return tts_fn
47
-
48
- def create_vc_fn(model, hps, speaker_ids):
49
- def vc_fn(original_speaker, target_speaker, record_audio, upload_audio):
50
- input_audio = record_audio if record_audio is not None else upload_audio
51
- if input_audio is None:
52
- return "You need to record or upload an audio", None
53
- sampling_rate, audio = input_audio
54
- original_speaker_id = speaker_ids[original_speaker]
55
- target_speaker_id = speaker_ids[target_speaker]
56
-
57
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
58
- if len(audio.shape) > 1:
59
- audio = librosa.to_mono(audio.transpose(1, 0))
60
- if sampling_rate != hps.data.sampling_rate:
61
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
62
- with no_grad():
63
- y = torch.FloatTensor(audio)
64
- y = y / max(-y.min(), y.max()) / 0.99
65
- y = y.to(device)
66
- y = y.unsqueeze(0)
67
- spec = spectrogram_torch(y, hps.data.filter_length,
68
- hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
69
- center=False).to(device)
70
- spec_lengths = LongTensor([spec.size(-1)]).to(device)
71
- sid_src = LongTensor([original_speaker_id]).to(device)
72
- sid_tgt = LongTensor([target_speaker_id]).to(device)
73
- audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][
74
- 0, 0].data.cpu().float().numpy()
75
- del y, spec, spec_lengths, sid_src, sid_tgt
76
- return "Success", (hps.data.sampling_rate, audio)
77
-
78
- return vc_fn
79
- if __name__ == "__main__":
80
- parser = argparse.ArgumentParser()
81
- parser.add_argument("--model_dir", default="./inference/G_latest.pth", help="directory to your fine-tuned model")
82
- parser.add_argument("--config_dir", default="./inference/finetune_speaker.json", help="directory to your model config file")
83
- parser.add_argument("--share", default=False, help="make link public (used in colab)")
84
-
85
- args = parser.parse_args()
86
- hps = utils.get_hparams_from_file(args.config_dir)
87
-
88
-
89
- net_g = SynthesizerTrn(
90
- len(hps.symbols),
91
- hps.data.filter_length // 2 + 1,
92
- hps.train.segment_size // hps.data.hop_length,
93
- n_speakers=hps.data.n_speakers,
94
- **hps.model).to(device)
95
- _ = net_g.eval()
96
-
97
- _ = utils.load_checkpoint(args.model_dir, net_g, None)
98
- speaker_ids = hps.speakers
99
- speakers = list(hps.speakers.keys())
100
- tts_fn = create_tts_fn(net_g, hps, speaker_ids)
101
- vc_fn = create_vc_fn(net_g, hps, speaker_ids)
102
- app = gr.Blocks()
103
-
104
- with app:
105
-
106
- gr.Markdown(
107
- """# League of Legends Yuumi Text to Speech Demo 魔法猫咪 悠米 TTS
108
-
109
- League of Legends Yuumi Text to Speech model trained with Yuumi's English in-game audio.
110
-
111
- 魔法猫咪 悠米 TTS模型训练数据为游戏内英文语音
112
-
113
- ## 👍Give original author stars & likes if you liked the project 如果喜欢给原作者一个星星和赞吧!
114
-
115
- https://github.com/Plachtaa/VITS-fast-fine-tuning
116
-
117
- https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer
118
-
119
- https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai
120
-
121
- ## ❓How to fine-tune your own model 如何调试自己的模型
122
-
123
- Follow the directions in this repo: https://github.com/Plachtaa/VITS-fast-fine-tuning
124
-
125
- 按照 https://github.com/Plachtaa/VITS-fast-fine-tuning 操作
126
-
127
- ## ⚠
128
- Use of the model should respect https://www.riotgames.com/en/legal
129
- 用该模型请遵守 https://www.riotgames.com/en/legal
130
-
131
- Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.
132
- 请不要生成会对个人以及组织造成侵害的内容
133
-
134
- ⚠Disclaimer: Not legally responsible for anything the model generates
135
- ⚠免责声明: 不对该模型任何输出负责"""
136
-
137
- )
138
-
139
- with gr.Tab("Text-to-Speech"):
140
- with gr.Row():
141
- with gr.Column():
142
- textbox = gr.TextArea(label="Text",
143
- placeholder="Type your sentence here",
144
- value="Hello...... I am Yuumi... Please play me next game! Thank you!", elem_id=f"tts-input")
145
- # select character
146
- char_dropdown = gr.Dropdown(choices=speakers, value=speakers[0], label='character')
147
- language_dropdown = gr.Dropdown(choices=lang, value=lang[0], label='language')
148
- duration_slider = gr.Slider(minimum=0.1, maximum=5, value=1, step=0.1,
149
- label='速度 Speed')
150
- with gr.Column():
151
- text_output = gr.Textbox(label="Message")
152
- audio_output = gr.Audio(label="Output Audio", elem_id="tts-audio")
153
- btn = gr.Button("Generate!")
154
- btn.click(tts_fn,
155
- inputs=[textbox, char_dropdown, language_dropdown, duration_slider,],
156
- outputs=[text_output, audio_output])
157
-
158
- app.queue(concurrency_count=1, api_open=False).launch(share=args.share)
159
-
160
-
spaces/AlexKorGKLT/webui-cpua/app.py DELETED
@@ -1,155 +0,0 @@
1
- import os
2
- from sys import executable as pyexecutable
3
- import subprocess
4
- import pathlib
5
- import gc
6
-
7
- def Gitclone(URI:str,ClonePath:str = "") -> int :
8
- if(ClonePath == "") :
9
- while True:
10
- i=subprocess.run([r"git",r"clone",URI])
11
- if(i.returncode == 0 ):
12
- del i
13
- gc.collect()
14
- return 0
15
- else :
16
- del i
17
- else:
18
- while True:
19
- i=subprocess.run([r"git",r"clone",URI,ClonePath])
20
- if(i.returncode == 0 ):
21
- del i
22
- gc.collect()
23
- return 0
24
- else :
25
- del i
26
- def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int:
27
- while (True):
28
- i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]);
29
- if(i.returncode == 0 ):
30
- del i
31
- gc.collect()
32
- return 0
33
- else :
34
- del i
35
- user_home =pathlib.Path.home().resolve()
36
- os.chdir(str(user_home))
37
- #clone stable-diffusion-webui repo
38
- print("cloning stable-diffusion-webui repo")
39
- Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui"))
40
- os.chdir(str(user_home / r"stable-diffusion-webui"))
41
- os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045")
42
- #
43
-
44
- #install extensions
45
- print("installing extensions")
46
- Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative"))
47
- Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive"))
48
- DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth")
49
- while True:
50
- if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0):
51
- break
52
- Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" ))
53
- Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser"))
54
- Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface"))
55
- Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser"))
56
- Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks"))
57
- Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet"))
58
- Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor"))
59
- Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib"))
60
- Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex"))
61
- Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor"))
62
- # To enable Chinese (zh_CN) localization, uncomment the next line
63
- #Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN"))
64
- Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete"))
65
- Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels"))
66
- Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui"))
67
- Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin"))
68
-
69
- #Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" ))
70
- Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg"))
71
- Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot"))
72
- Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo"))
73
-
74
- os.chdir(user_home / r"stable-diffusion-webui")
75
-
76
- #download ControlNet models
77
- print("extensions dolwnload done .\ndownloading ControlNet models")
78
- dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors",
79
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors",
80
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors",
81
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors",
82
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors",
83
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors",
84
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors",
85
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors",
86
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors",
87
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors",
88
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors",
89
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors",
90
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors",
91
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors",
92
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml",
93
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml",
94
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml",
95
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml",
96
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml",
97
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml",
98
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml",
99
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml",
100
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml",
101
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml",
102
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml",
103
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml",
104
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml",
105
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml",
106
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth",
107
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth",
108
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth",
109
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth",
110
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth",
111
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth",
112
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth",
113
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth",
114
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth",
115
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth",
116
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"]
117
- for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name)
118
- del dList
119
-
120
- #download model
121
- #you can change model download address here
122
- print("ControlNet models download done.\ndownloading model")
123
- DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt")
124
- DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt")
125
- DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors")
126
- DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors")
127
- DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors")
128
- DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors")
129
-
130
- # Additional custom models
131
- DownLoad(r"https://civitai.com/api/download/models/105674?", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"realisticVisionV30_v30VAE.safetensors")
132
- DownLoad(r"https://civitai.com/api/download/models/94640", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"majicmixRealistic_v6.safetensors")
133
- DownLoad(r"https://civitai.com/api/download/models/109123", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"dreamshaper_7.safetensors")
134
- DownLoad(r"https://civitai.com/api/download/models/27392", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"openjourney_V4.ckpt")
135
- DownLoad(r"https://civitai.com/api/download/models/95489", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anyloraCheckpoint_bakedvaeBlessedFp16.safetensors")
136
- DownLoad(r"https://civitai.com/api/download/models/90854", str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AnythingV5Ink_ink.safetensors")
137
-
138
- #LoRa ?
139
- DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors")
140
- DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors")
141
- DownLoad(r"https://civitai.com/api/download/models/62833",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"add_detail.safetensors")
142
-
143
- # start webui
144
-
145
- print("Done\nStarting Webui...")
146
- os.chdir(user_home / r"stable-diffusion-webui")
147
- while True:
148
- ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")])
149
- if(ret.returncode == 0 ):
150
- del ret
151
- gc.collect()
152
- else :
153
- del ret
154
-
155
- del os ,user_home ,pyexecutable ,subprocess
spaces/AlexWang/lama/fetch_data/sampler.py DELETED
@@ -1,39 +0,0 @@
1
- import os
2
- import random
3
-
4
- test_files_path = os.path.abspath('.') + '/places_standard_dataset/original/test/'
5
- test_files = [test_files_path + image for image in os.listdir(test_files_path)]
6
- print(f'found {len(test_files)} images in {test_files_path}')
7
-
8
- random.shuffle(test_files)
9
- test_files_random = test_files[0:2000]
10
- #print(test_files_random[0:10])
11
-
12
- list_of_random_test_files = os.path.abspath('.') \
13
- + '/places_standard_dataset/original/test_random_files.txt'
14
-
15
- print(f'writing {len(test_files_random)} random image paths to {list_of_random_test_files}')
16
- with open(list_of_random_test_files, 'w') as fw:
17
- for filename in test_files_random:
18
- fw.write(filename+'\n')
19
- print('...done')
20
-
21
- # ----------------------------------------------------------------------------------
22
-
23
-
24
- val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/'
25
- val_files = [val_files_path + image for image in os.listdir(val_files_path)]
26
- print(f'found {len(val_files)} images in {val_files_path}')
27
-
28
- random.shuffle(val_files)
29
- val_files_random = val_files[0:100]
30
-
31
- list_of_random_val_files = os.path.abspath('.') \
32
- + '/places_standard_dataset/original/val_random_files.txt'
33
-
34
- print(f'writing {len(val_files_random)} random image paths to {list_of_random_val_files}')
35
- with open(list_of_random_val_files, 'w') as fw:
36
- for filename in val_files_random:
37
- fw.write(filename+'\n')
38
- print('...done')
39
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/cm_stochastic_iterative.md DELETED
@@ -1,11 +0,0 @@
1
- # Consistency Model Multistep Scheduler
2
-
3
- ## Overview
4
-
5
- Multistep and one-step scheduler (Algorithm 1) introduced alongside consistency models in the paper [Consistency Models](https://arxiv.org/abs/2303.01469) by Yang Song, Prafulla Dhariwal, Mark Chen, and Ilya Sutskever.
6
- Based on the [original consistency models implementation](https://github.com/openai/consistency_models).
7
- It should generate good samples from [`ConsistencyModelPipeline`] in one or a small number of steps.
8
-
9
- ## CMStochasticIterativeScheduler
10
- [[autodoc]] CMStochasticIterativeScheduler
11
-
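
A hedged usage sketch to accompany the overview above: one-step sampling with `ConsistencyModelPipeline`, which pairs with `CMStochasticIterativeScheduler`. The checkpoint name is an assumed example, not something defined by this doc.

```python
import torch
from diffusers import ConsistencyModelPipeline

# Assumed example checkpoint; any consistency-distilled checkpoint should work.
pipe = ConsistencyModelPipeline.from_pretrained(
    "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe(num_inference_steps=1).images[0]  # one-step sampling
image.save("consistency_model_sample.png")
```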
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/text_to_image/README.md DELETED
@@ -1,74 +0,0 @@
1
- # Stable Diffusion text-to-image fine-tuning
2
-
3
- The `train_text_to_image.py` script shows how to fine-tune stable diffusion model on your own dataset.
4
-
5
- ___Note___:
6
-
7
- ___This script is experimental. The script fine-tunes the whole model, and often the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___
8
-
9
-
10
- ## Running locally with PyTorch
11
- ### Installing the dependencies
12
-
13
- Before running the scripts, make sure to install the library's training dependencies:
14
-
15
- **Important**
16
-
17
- To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
18
- ```bash
19
- git clone https://github.com/huggingface/diffusers
20
- cd diffusers
21
- pip install .
22
- ```
23
-
24
- Then cd in the example folder and run
25
- ```bash
26
- pip install -r requirements.txt
27
- ```
28
-
29
- And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
30
-
31
- ```bash
32
- accelerate config
33
- ```
34
-
35
- ### Pokemon example
36
-
37
- You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-4`, so you'll need to visit [its card](https://huggingface.co/CompVis/stable-diffusion-v1-4), read the license and tick the checkbox if you agree.
38
-
39
- You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).
40
-
41
- Run the following command to authenticate your token
42
-
43
- ```bash
44
- huggingface-cli login
45
- ```
46
-
47
- If you have already cloned the repo, then you won't need to go through these steps.
48
-
49
- <br>
50
-
51
- ## Use ONNXRuntime to accelerate training
52
- To leverage ONNX Runtime to accelerate training, use the `train_text_to_image.py` script in this directory.
53
-
54
- The command to fine-tune the `UNet2DConditionModel` on the Pokemon dataset with ONNX Runtime:
55
-
56
- ```bash
57
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
58
- export dataset_name="lambdalabs/pokemon-blip-captions"
59
- accelerate launch --mixed_precision="fp16" train_text_to_image.py \
60
- --pretrained_model_name_or_path=$MODEL_NAME \
61
- --dataset_name=$dataset_name \
62
- --use_ema \
63
- --resolution=512 --center_crop --random_flip \
64
- --train_batch_size=1 \
65
- --gradient_accumulation_steps=4 \
66
- --gradient_checkpointing \
67
- --max_train_steps=15000 \
68
- --learning_rate=1e-05 \
69
- --max_grad_norm=1 \
70
- --lr_scheduler="constant" --lr_warmup_steps=0 \
71
- --output_dir="sd-pokemon-model"
72
- ```
73
-
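Once training completes, the fine-tuned weights in `--output_dir` can be loaded back for inference. A minimal sketch, assuming the `sd-pokemon-model` output directory from the command above:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the pipeline saved by the training script.
pipe = StableDiffusionPipeline.from_pretrained("sd-pokemon-model", torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-pokemon.png")
```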
74
- Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py DELETED
@@ -1,1016 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import PIL.Image
20
- import torch
21
- from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
22
-
23
- from ...image_processor import VaeImageProcessor
24
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
- from ...models import AutoencoderKL, UNet2DConditionModel
26
- from ...models.attention_processor import (
27
- AttnProcessor2_0,
28
- LoRAAttnProcessor2_0,
29
- LoRAXFormersAttnProcessor,
30
- XFormersAttnProcessor,
31
- )
32
- from ...schedulers import KarrasDiffusionSchedulers
33
- from ...utils import (
34
- is_accelerate_available,
35
- is_accelerate_version,
36
- is_invisible_watermark_available,
37
- logging,
38
- randn_tensor,
39
- replace_example_docstring,
40
- )
41
- from ..pipeline_utils import DiffusionPipeline
42
- from . import StableDiffusionXLPipelineOutput
43
-
44
-
45
- if is_invisible_watermark_available():
46
- from .watermark import StableDiffusionXLWatermarker
47
-
48
-
49
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
-
51
- EXAMPLE_DOC_STRING = """
52
- Examples:
53
- ```py
54
- >>> import torch
55
- >>> from diffusers import StableDiffusionXLImg2ImgPipeline
56
- >>> from diffusers.utils import load_image
57
-
58
- >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
59
- ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
60
- ... )
61
- >>> pipe = pipe.to("cuda")
62
- >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
63
-
64
- >>> init_image = load_image(url).convert("RGB")
65
- >>> prompt = "a photo of an astronaut riding a horse on mars"
66
- >>> image = pipe(prompt, image=init_image).images[0]
67
- ```
68
- """
69
-
70
-
71
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
72
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
73
- """
74
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
75
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
76
- """
77
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
78
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
79
- # rescale the results from guidance (fixes overexposure)
80
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
81
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
82
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
83
- return noise_cfg
84
-
85
-
86
- class StableDiffusionXLImg2ImgPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
87
- r"""
88
- Pipeline for text-guided image-to-image generation using Stable Diffusion XL.
89
-
90
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
91
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
92
-
93
- In addition the pipeline inherits the following loading methods:
94
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
95
- - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
96
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
97
-
98
- as well as the following saving methods:
99
- - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
100
-
101
- Args:
102
- vae ([`AutoencoderKL`]):
103
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
104
- text_encoder ([`CLIPTextModel`]):
105
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
106
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
107
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
108
- text_encoder_2 ([`CLIPTextModelWithProjection`]):
109
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
110
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
111
- specifically the
112
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
113
- variant.
114
- tokenizer (`CLIPTokenizer`):
115
- Tokenizer of class
116
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
117
- tokenizer_2 (`CLIPTokenizer`):
118
- Second Tokenizer of class
119
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
120
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
121
- scheduler ([`SchedulerMixin`]):
122
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
123
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
124
- """
125
- _optional_components = ["tokenizer", "text_encoder"]
126
-
127
- def __init__(
128
- self,
129
- vae: AutoencoderKL,
130
- text_encoder: CLIPTextModel,
131
- text_encoder_2: CLIPTextModelWithProjection,
132
- tokenizer: CLIPTokenizer,
133
- tokenizer_2: CLIPTokenizer,
134
- unet: UNet2DConditionModel,
135
- scheduler: KarrasDiffusionSchedulers,
136
- requires_aesthetics_score: bool = False,
137
- force_zeros_for_empty_prompt: bool = True,
138
- add_watermarker: Optional[bool] = None,
139
- ):
140
- super().__init__()
141
-
142
- self.register_modules(
143
- vae=vae,
144
- text_encoder=text_encoder,
145
- text_encoder_2=text_encoder_2,
146
- tokenizer=tokenizer,
147
- tokenizer_2=tokenizer_2,
148
- unet=unet,
149
- scheduler=scheduler,
150
- )
151
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
152
- self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
153
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
154
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
155
-
156
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
157
-
158
- if add_watermarker:
159
- self.watermark = StableDiffusionXLWatermarker()
160
- else:
161
- self.watermark = None
162
-
163
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
164
- def enable_vae_slicing(self):
165
- r"""
166
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
167
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
168
- """
169
- self.vae.enable_slicing()
170
-
171
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
172
- def disable_vae_slicing(self):
173
- r"""
174
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
175
- computing decoding in one step.
176
- """
177
- self.vae.disable_slicing()
178
-
179
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
180
- def enable_vae_tiling(self):
181
- r"""
182
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
183
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
184
- processing larger images.
185
- """
186
- self.vae.enable_tiling()
187
-
188
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
189
- def disable_vae_tiling(self):
190
- r"""
191
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
192
- computing decoding in one step.
193
- """
194
- self.vae.disable_tiling()
195
-
196
- def enable_model_cpu_offload(self, gpu_id=0):
197
- r"""
198
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
199
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
200
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
201
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
202
- """
203
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
204
- from accelerate import cpu_offload_with_hook
205
- else:
206
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
207
-
208
- device = torch.device(f"cuda:{gpu_id}")
209
-
210
- if self.device.type != "cpu":
211
- self.to("cpu", silence_dtype_warnings=True)
212
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
213
-
214
- model_sequence = (
215
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
216
- )
217
- model_sequence.extend([self.unet, self.vae])
218
-
219
- hook = None
220
- for cpu_offloaded_model in model_sequence:
221
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
222
-
223
- # We'll offload the last model manually.
224
- self.final_offload_hook = hook
225
-
226
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
227
- def encode_prompt(
228
- self,
229
- prompt: str,
230
- prompt_2: Optional[str] = None,
231
- device: Optional[torch.device] = None,
232
- num_images_per_prompt: int = 1,
233
- do_classifier_free_guidance: bool = True,
234
- negative_prompt: Optional[str] = None,
235
- negative_prompt_2: Optional[str] = None,
236
- prompt_embeds: Optional[torch.FloatTensor] = None,
237
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
238
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
239
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
240
- lora_scale: Optional[float] = None,
241
- ):
242
- r"""
243
- Encodes the prompt into text encoder hidden states.
244
-
245
- Args:
246
- prompt (`str` or `List[str]`, *optional*):
247
- prompt to be encoded
248
- prompt_2 (`str` or `List[str]`, *optional*):
249
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
250
- used in both text-encoders
251
- device: (`torch.device`):
252
- torch device
253
- num_images_per_prompt (`int`):
254
- number of images that should be generated per prompt
255
- do_classifier_free_guidance (`bool`):
256
- whether to use classifier free guidance or not
257
- negative_prompt (`str` or `List[str]`, *optional*):
258
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
259
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
260
- less than `1`).
261
- negative_prompt_2 (`str` or `List[str]`, *optional*):
262
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
263
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
264
- prompt_embeds (`torch.FloatTensor`, *optional*):
265
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
266
- provided, text embeddings will be generated from `prompt` input argument.
267
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
268
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
269
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
270
- argument.
271
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
272
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
273
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
274
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
275
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
276
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
277
- input argument.
278
- lora_scale (`float`, *optional*):
279
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
280
- """
281
- device = device or self._execution_device
282
-
283
- # set lora scale so that monkey patched LoRA
284
- # function of text encoder can correctly access it
285
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
286
- self._lora_scale = lora_scale
287
-
288
- if prompt is not None and isinstance(prompt, str):
289
- batch_size = 1
290
- elif prompt is not None and isinstance(prompt, list):
291
- batch_size = len(prompt)
292
- else:
293
- batch_size = prompt_embeds.shape[0]
294
-
295
- # Define tokenizers and text encoders
296
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
297
- text_encoders = (
298
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
299
- )
300
-
301
- if prompt_embeds is None:
302
- prompt_2 = prompt_2 or prompt
303
- # textual inversion: process multi-vector tokens if necessary
304
- prompt_embeds_list = []
305
- prompts = [prompt, prompt_2]
306
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
307
- if isinstance(self, TextualInversionLoaderMixin):
308
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
309
-
310
- text_inputs = tokenizer(
311
- prompt,
312
- padding="max_length",
313
- max_length=tokenizer.model_max_length,
314
- truncation=True,
315
- return_tensors="pt",
316
- )
317
-
318
- text_input_ids = text_inputs.input_ids
319
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
320
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
321
-
322
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
323
- text_input_ids, untruncated_ids
324
- ):
325
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
326
- logger.warning(
327
- "The following part of your input was truncated because CLIP can only handle sequences up to"
328
- f" {tokenizer.model_max_length} tokens: {removed_text}"
329
- )
330
-
331
- prompt_embeds = text_encoder(
332
- text_input_ids.to(device),
333
- output_hidden_states=True,
334
- )
335
-
336
- # We are only ALWAYS interested in the pooled output of the final text encoder
337
- pooled_prompt_embeds = prompt_embeds[0]
338
- prompt_embeds = prompt_embeds.hidden_states[-2]
339
-
340
- prompt_embeds_list.append(prompt_embeds)
341
-
342
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
343
-
344
- # get unconditional embeddings for classifier free guidance
345
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
346
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
347
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
348
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
349
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
350
- negative_prompt = negative_prompt or ""
351
- negative_prompt_2 = negative_prompt_2 or negative_prompt
352
-
353
- uncond_tokens: List[str]
354
- if prompt is not None and type(prompt) is not type(negative_prompt):
355
- raise TypeError(
356
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
357
- f" {type(prompt)}."
358
- )
359
- elif isinstance(negative_prompt, str):
360
- uncond_tokens = [negative_prompt, negative_prompt_2]
361
- elif batch_size != len(negative_prompt):
362
- raise ValueError(
363
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
364
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
365
- " the batch size of `prompt`."
366
- )
367
- else:
368
- uncond_tokens = [negative_prompt, negative_prompt_2]
369
-
370
- negative_prompt_embeds_list = []
371
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
372
- if isinstance(self, TextualInversionLoaderMixin):
373
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
374
-
375
- max_length = prompt_embeds.shape[1]
376
- uncond_input = tokenizer(
377
- negative_prompt,
378
- padding="max_length",
379
- max_length=max_length,
380
- truncation=True,
381
- return_tensors="pt",
382
- )
383
-
384
- negative_prompt_embeds = text_encoder(
385
- uncond_input.input_ids.to(device),
386
- output_hidden_states=True,
387
- )
388
- # We are only ALWAYS interested in the pooled output of the final text encoder
389
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
390
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
391
-
392
- negative_prompt_embeds_list.append(negative_prompt_embeds)
393
-
394
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
395
-
396
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
397
- bs_embed, seq_len, _ = prompt_embeds.shape
398
- # duplicate text embeddings for each generation per prompt, using mps friendly method
399
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
400
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
401
-
402
- if do_classifier_free_guidance:
403
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
404
- seq_len = negative_prompt_embeds.shape[1]
405
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
406
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
407
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
408
-
409
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
410
- bs_embed * num_images_per_prompt, -1
411
- )
412
- if do_classifier_free_guidance:
413
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
414
- bs_embed * num_images_per_prompt, -1
415
- )
416
-
417
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
418
-
419
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
420
- def prepare_extra_step_kwargs(self, generator, eta):
421
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
422
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
423
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
424
- # and should be between [0, 1]
425
-
426
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
427
- extra_step_kwargs = {}
428
- if accepts_eta:
429
- extra_step_kwargs["eta"] = eta
430
-
431
- # check if the scheduler accepts generator
432
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
433
- if accepts_generator:
434
- extra_step_kwargs["generator"] = generator
435
- return extra_step_kwargs
436
-
437
- def check_inputs(
438
- self,
439
- prompt,
440
- prompt_2,
441
- strength,
442
- num_inference_steps,
443
- callback_steps,
444
- negative_prompt=None,
445
- negative_prompt_2=None,
446
- prompt_embeds=None,
447
- negative_prompt_embeds=None,
448
- ):
449
- if strength < 0 or strength > 1:
450
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
451
- if num_inference_steps is None:
452
- raise ValueError("`num_inference_steps` cannot be None.")
453
- elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
454
- raise ValueError(
455
- f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
456
- f" {type(num_inference_steps)}."
457
- )
458
- if (callback_steps is None) or (
459
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
460
- ):
461
- raise ValueError(
462
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
463
- f" {type(callback_steps)}."
464
- )
465
-
466
- if prompt is not None and prompt_embeds is not None:
467
- raise ValueError(
468
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
469
- " only forward one of the two."
470
- )
471
- elif prompt_2 is not None and prompt_embeds is not None:
472
- raise ValueError(
473
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
474
- " only forward one of the two."
475
- )
476
- elif prompt is None and prompt_embeds is None:
477
- raise ValueError(
478
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
479
- )
480
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
481
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
482
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
483
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
484
-
485
- if negative_prompt is not None and negative_prompt_embeds is not None:
486
- raise ValueError(
487
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
488
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
489
- )
490
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
491
- raise ValueError(
492
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
493
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
494
- )
495
-
496
- if prompt_embeds is not None and negative_prompt_embeds is not None:
497
- if prompt_embeds.shape != negative_prompt_embeds.shape:
498
- raise ValueError(
499
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
500
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
501
- f" {negative_prompt_embeds.shape}."
502
- )
503
-
504
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
505
- # get the original timestep using init_timestep
506
- if denoising_start is None:
507
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
508
- t_start = max(num_inference_steps - init_timestep, 0)
509
- else:
510
- t_start = 0
511
-
512
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
513
-
514
- # Strength is irrelevant if we directly request a timestep to start at;
515
- # that is, strength is determined by the denoising_start instead.
516
- if denoising_start is not None:
517
- discrete_timestep_cutoff = int(
518
- round(
519
- self.scheduler.config.num_train_timesteps
520
- - (denoising_start * self.scheduler.config.num_train_timesteps)
521
- )
522
- )
523
- timesteps = list(filter(lambda ts: ts < discrete_timestep_cutoff, timesteps))
524
- return torch.tensor(timesteps), len(timesteps)
525
-
526
- return timesteps, num_inference_steps - t_start
527
-
528
- def prepare_latents(
529
- self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
530
- ):
531
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
532
- raise ValueError(
533
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
534
- )
535
-
536
- # Offload text encoder if `enable_model_cpu_offload` was enabled
537
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
538
- self.text_encoder_2.to("cpu")
539
- torch.cuda.empty_cache()
540
-
541
- image = image.to(device=device, dtype=dtype)
542
-
543
- batch_size = batch_size * num_images_per_prompt
544
-
545
- if image.shape[1] == 4:
546
- init_latents = image
547
-
548
- else:
549
- # make sure the VAE is in float32 mode, as it overflows in float16
550
- if self.vae.config.force_upcast:
551
- image = image.float()
552
- self.vae.to(dtype=torch.float32)
553
-
554
- if isinstance(generator, list) and len(generator) != batch_size:
555
- raise ValueError(
556
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
557
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
558
- )
559
-
560
- elif isinstance(generator, list):
561
- init_latents = [
562
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
563
- ]
564
- init_latents = torch.cat(init_latents, dim=0)
565
- else:
566
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
567
-
568
- if self.vae.config.force_upcast:
569
- self.vae.to(dtype)
570
-
571
- init_latents = init_latents.to(dtype)
572
- init_latents = self.vae.config.scaling_factor * init_latents
573
-
574
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
575
- # expand init_latents for batch_size
576
- additional_image_per_prompt = batch_size // init_latents.shape[0]
577
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
578
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
579
- raise ValueError(
580
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
581
- )
582
- else:
583
- init_latents = torch.cat([init_latents], dim=0)
584
-
585
- if add_noise:
586
- shape = init_latents.shape
587
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
588
- # get latents
589
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
590
-
591
- latents = init_latents
592
-
593
- return latents
594
-
595
- def _get_add_time_ids(
596
- self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype
597
- ):
598
- if self.config.requires_aesthetics_score:
599
- add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
600
- add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
601
- else:
602
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
603
- add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
604
-
605
- passed_add_embed_dim = (
606
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
607
- )
608
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
609
-
610
- if (
611
- expected_add_embed_dim > passed_add_embed_dim
612
- and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
613
- ):
614
- raise ValueError(
615
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
616
- )
617
- elif (
618
- expected_add_embed_dim < passed_add_embed_dim
619
- and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
620
- ):
621
- raise ValueError(
622
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
623
- )
624
- elif expected_add_embed_dim != passed_add_embed_dim:
625
- raise ValueError(
626
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
627
- )
628
-
629
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
630
- add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
631
-
632
- return add_time_ids, add_neg_time_ids
633
-
634
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
635
- def upcast_vae(self):
636
- dtype = self.vae.dtype
637
- self.vae.to(dtype=torch.float32)
638
- use_torch_2_0_or_xformers = isinstance(
639
- self.vae.decoder.mid_block.attentions[0].processor,
640
- (
641
- AttnProcessor2_0,
642
- XFormersAttnProcessor,
643
- LoRAXFormersAttnProcessor,
644
- LoRAAttnProcessor2_0,
645
- ),
646
- )
647
- # if xformers or torch_2_0 is used attention block does not need
648
- # to be in float32 which can save lots of memory
649
- if use_torch_2_0_or_xformers:
650
- self.vae.post_quant_conv.to(dtype)
651
- self.vae.decoder.conv_in.to(dtype)
652
- self.vae.decoder.mid_block.to(dtype)
653
-
654
- @torch.no_grad()
655
- @replace_example_docstring(EXAMPLE_DOC_STRING)
656
- def __call__(
657
- self,
658
- prompt: Union[str, List[str]] = None,
659
- prompt_2: Optional[Union[str, List[str]]] = None,
660
- image: Union[
661
- torch.FloatTensor,
662
- PIL.Image.Image,
663
- np.ndarray,
664
- List[torch.FloatTensor],
665
- List[PIL.Image.Image],
666
- List[np.ndarray],
667
- ] = None,
668
- strength: float = 0.3,
669
- num_inference_steps: int = 50,
670
- denoising_start: Optional[float] = None,
671
- denoising_end: Optional[float] = None,
672
- guidance_scale: float = 5.0,
673
- negative_prompt: Optional[Union[str, List[str]]] = None,
674
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
675
- num_images_per_prompt: Optional[int] = 1,
676
- eta: float = 0.0,
677
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
678
- latents: Optional[torch.FloatTensor] = None,
679
- prompt_embeds: Optional[torch.FloatTensor] = None,
680
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
681
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
682
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
683
- output_type: Optional[str] = "pil",
684
- return_dict: bool = True,
685
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
686
- callback_steps: int = 1,
687
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
688
- guidance_rescale: float = 0.0,
689
- original_size: Tuple[int, int] = None,
690
- crops_coords_top_left: Tuple[int, int] = (0, 0),
691
- target_size: Tuple[int, int] = None,
692
- aesthetic_score: float = 6.0,
693
- negative_aesthetic_score: float = 2.5,
694
- ):
695
- r"""
696
- Function invoked when calling the pipeline for generation.
697
-
698
- Args:
699
- prompt (`str` or `List[str]`, *optional*):
700
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
701
- instead.
702
- prompt_2 (`str` or `List[str]`, *optional*):
703
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
704
- used in both text-encoders
705
- image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`):
706
- The image(s) to modify with the pipeline.
707
- strength (`float`, *optional*, defaults to 0.3):
708
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
709
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
710
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
711
- be maximum and the denoising process will run for the full number of iterations specified in
712
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of
713
- `denoising_start` being declared as an integer, the value of `strength` will be ignored.
714
- num_inference_steps (`int`, *optional*, defaults to 50):
715
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
716
- expense of slower inference.
717
- denoising_start (`float`, *optional*):
718
- When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
719
- bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
720
- it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
721
- strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
722
- is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
723
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
724
- denoising_end (`float`, *optional*):
725
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
726
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
727
- still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
728
- denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
729
- final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
730
- forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
731
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
732
- guidance_scale (`float`, *optional*, defaults to 5.0):
733
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
734
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
735
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
736
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
737
- usually at the expense of lower image quality.
738
- negative_prompt (`str` or `List[str]`, *optional*):
739
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
740
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
741
- less than `1`).
742
- negative_prompt_2 (`str` or `List[str]`, *optional*):
743
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
744
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
745
- num_images_per_prompt (`int`, *optional*, defaults to 1):
746
- The number of images to generate per prompt.
747
- eta (`float`, *optional*, defaults to 0.0):
748
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
749
- [`schedulers.DDIMScheduler`], will be ignored for others.
750
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
751
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
752
- to make generation deterministic.
753
- latents (`torch.FloatTensor`, *optional*):
754
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
755
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
756
- tensor will be generated by sampling using the supplied random `generator`.
757
- prompt_embeds (`torch.FloatTensor`, *optional*):
758
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
759
- provided, text embeddings will be generated from `prompt` input argument.
760
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
761
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
762
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
763
- argument.
764
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
765
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
766
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
767
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
768
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
769
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
770
- input argument.
771
- output_type (`str`, *optional*, defaults to `"pil"`):
772
- The output format of the generated image. Choose between
773
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
774
- return_dict (`bool`, *optional*, defaults to `True`):
775
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
776
- plain tuple.
777
- callback (`Callable`, *optional*):
778
- A function that will be called every `callback_steps` steps during inference. The function will be
779
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
780
- callback_steps (`int`, *optional*, defaults to 1):
781
- The frequency at which the `callback` function will be called. If not specified, the callback will be
782
- called at every step.
783
- cross_attention_kwargs (`dict`, *optional*):
784
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
785
- `self.processor` in
786
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
787
- guidance_rescale (`float`, *optional*, defaults to 0.0):
788
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
789
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
790
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
791
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
792
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
793
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
794
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
795
- explained in section 2.2 of
796
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
797
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
798
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
799
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
800
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
801
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
802
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
803
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
804
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
805
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
806
- aesthetic_score (`float`, *optional*, defaults to 6.0):
807
- Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
808
- Part of SDXL's micro-conditioning as explained in section 2.2 of
809
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
810
- negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
811
- Part of SDXL's micro-conditioning as explained in section 2.2 of
812
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
813
- simulate an aesthetic score of the generated image by influencing the negative text condition.
814
-
815
- Examples:
816
-
817
- Returns:
818
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
819
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
820
- `tuple`. When returning a tuple, the first element is a list with the generated images.
821
- """
822
- # 1. Check inputs. Raise error if not correct
823
- self.check_inputs(
824
- prompt,
825
- prompt_2,
826
- strength,
827
- num_inference_steps,
828
- callback_steps,
829
- negative_prompt,
830
- negative_prompt_2,
831
- prompt_embeds,
832
- negative_prompt_embeds,
833
- )
834
-
835
- # 2. Define call parameters
836
- if prompt is not None and isinstance(prompt, str):
837
- batch_size = 1
838
- elif prompt is not None and isinstance(prompt, list):
839
- batch_size = len(prompt)
840
- else:
841
- batch_size = prompt_embeds.shape[0]
842
-
843
- device = self._execution_device
844
-
845
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
846
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
847
- # corresponds to doing no classifier free guidance.
848
- do_classifier_free_guidance = guidance_scale > 1.0
849
-
850
- # 3. Encode input prompt
851
- text_encoder_lora_scale = (
852
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
853
- )
854
- (
855
- prompt_embeds,
856
- negative_prompt_embeds,
857
- pooled_prompt_embeds,
858
- negative_pooled_prompt_embeds,
859
- ) = self.encode_prompt(
860
- prompt=prompt,
861
- prompt_2=prompt_2,
862
- device=device,
863
- num_images_per_prompt=num_images_per_prompt,
864
- do_classifier_free_guidance=do_classifier_free_guidance,
865
- negative_prompt=negative_prompt,
866
- negative_prompt_2=negative_prompt_2,
867
- prompt_embeds=prompt_embeds,
868
- negative_prompt_embeds=negative_prompt_embeds,
869
- pooled_prompt_embeds=pooled_prompt_embeds,
870
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
871
- lora_scale=text_encoder_lora_scale,
872
- )
873
-
874
- # 4. Preprocess image
875
- image = self.image_processor.preprocess(image)
876
-
877
- # 5. Prepare timesteps
878
- def denoising_value_valid(dnv):
879
- return type(dnv) == float and 0 < dnv < 1
880
-
881
- self.scheduler.set_timesteps(num_inference_steps, device=device)
882
- timesteps, num_inference_steps = self.get_timesteps(
883
- num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid(denoising_start) else None
884
- )
885
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
886
-
887
- add_noise = True if denoising_start is None else False
888
- # 6. Prepare latent variables
889
- latents = self.prepare_latents(
890
- image,
891
- latent_timestep,
892
- batch_size,
893
- num_images_per_prompt,
894
- prompt_embeds.dtype,
895
- device,
896
- generator,
897
- add_noise,
898
- )
899
- # 7. Prepare extra step kwargs.
900
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
901
-
902
- height, width = latents.shape[-2:]
903
- height = height * self.vae_scale_factor
904
- width = width * self.vae_scale_factor
905
-
906
- original_size = original_size or (height, width)
907
- target_size = target_size or (height, width)
908
-
909
- # 8. Prepare added time ids & embeddings
910
- add_text_embeds = pooled_prompt_embeds
911
- add_time_ids, add_neg_time_ids = self._get_add_time_ids(
912
- original_size,
913
- crops_coords_top_left,
914
- target_size,
915
- aesthetic_score,
916
- negative_aesthetic_score,
917
- dtype=prompt_embeds.dtype,
918
- )
919
- add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
920
-
921
- if do_classifier_free_guidance:
922
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
923
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
924
- add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
925
- add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
926
-
927
- prompt_embeds = prompt_embeds.to(device)
928
- add_text_embeds = add_text_embeds.to(device)
929
- add_time_ids = add_time_ids.to(device)
930
-
931
- # 9. Denoising loop
932
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
933
-
934
- # 9.1 Apply denoising_end
935
- if (
936
- denoising_end is not None
937
- and denoising_start is not None
938
- and denoising_value_valid(denoising_end)
939
- and denoising_value_valid(denoising_start)
940
- and denoising_start >= denoising_end
941
- ):
942
- raise ValueError(
943
- f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
944
- + f" {denoising_end} when using type float."
945
- )
946
- elif denoising_end is not None and denoising_value_valid(denoising_end):
947
- discrete_timestep_cutoff = int(
948
- round(
949
- self.scheduler.config.num_train_timesteps
950
- - (denoising_end * self.scheduler.config.num_train_timesteps)
951
- )
952
- )
953
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
954
- timesteps = timesteps[:num_inference_steps]
955
-
956
- with self.progress_bar(total=num_inference_steps) as progress_bar:
957
- for i, t in enumerate(timesteps):
958
- # expand the latents if we are doing classifier free guidance
959
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
960
-
961
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
962
-
963
- # predict the noise residual
964
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
965
- noise_pred = self.unet(
966
- latent_model_input,
967
- t,
968
- encoder_hidden_states=prompt_embeds,
969
- cross_attention_kwargs=cross_attention_kwargs,
970
- added_cond_kwargs=added_cond_kwargs,
971
- return_dict=False,
972
- )[0]
973
-
974
- # perform guidance
975
- if do_classifier_free_guidance:
976
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
977
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
978
-
979
- if do_classifier_free_guidance and guidance_rescale > 0.0:
980
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
981
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
982
-
983
- # compute the previous noisy sample x_t -> x_t-1
984
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
985
-
986
- # call the callback, if provided
987
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
988
- progress_bar.update()
989
- if callback is not None and i % callback_steps == 0:
990
- callback(i, t, latents)
991
-
992
- # make sure the VAE is in float32 mode, as it overflows in float16
993
- if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
994
- self.upcast_vae()
995
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
996
-
997
- if not output_type == "latent":
998
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
999
- else:
1000
- image = latents
1001
- return StableDiffusionXLPipelineOutput(images=image)
1002
-
1003
- # apply watermark if available
1004
- if self.watermark is not None:
1005
- image = self.watermark.apply_watermark(image)
1006
-
1007
- image = self.image_processor.postprocess(image, output_type=output_type)
1008
-
1009
- # Offload last model to CPU
1010
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1011
- self.final_offload_hook.offload()
1012
-
1013
- if not return_dict:
1014
- return (image,)
1015
-
1016
- return StableDiffusionXLPipelineOutput(images=image)
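In practice, this pipeline is most commonly used as the refiner stage of an SDXL base + refiner setup, where the base pipeline stops early via `denoising_end` and this img2img pipeline resumes from the same point via `denoising_start`. A minimal sketch, assuming the public SDXL 1.0 base and refiner checkpoints:

```python
import torch
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,  # share modules with the base to save memory
    vae=base.vae,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
# The base pipeline denoises the first 80% of the schedule and returns latents...
latents = base(prompt=prompt, num_inference_steps=40, denoising_end=0.8, output_type="latent").images
# ...and the refiner picks up at the same point to finish the last 20%.
image = refiner(prompt=prompt, num_inference_steps=40, denoising_start=0.8, image=latents).images[0]
image.save("astronaut.png")
```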
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py'
2
- model = dict(
3
- backbone=dict(
4
- norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 22])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/README.md DELETED
@@ -1,47 +0,0 @@
1
- # Dual Attention Network for Scene Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{fu2018dual,
9
- title={Dual Attention Network for Scene Segmentation},
10
- author={Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu},
11
- booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
12
- year={2019}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- ### Cityscapes
19
-
20
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
21
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
22
- | DANet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.66 | 78.74 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json) |
23
- | DANet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.99 | 80.52 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json) |
24
- | DANet | R-50-D8 | 769x769 | 40000 | 8.8 | 1.56 | 78.88 | 80.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json) |
25
- | DANet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.07 | 79.88 | 81.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json) |
26
- | DANet | R-50-D8 | 512x1024 | 80000 | - | - | 79.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json) |
27
- | DANet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json) |
28
- | DANet | R-50-D8 | 769x769 | 80000 | - | - | 79.27 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json) |
29
- | DANet | R-101-D8 | 769x769 | 80000 | - | - | 80.47 | 82.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json) |
30
-
31
- ### ADE20K
32
-
33
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
34
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
35
- | DANet | R-50-D8 | 512x512 | 80000 | 11.5 | 21.20 | 41.66 | 42.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json) |
36
- | DANet | R-101-D8 | 512x512 | 80000 | 15 | 14.18 | 43.64 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json) |
37
- | DANet | R-50-D8 | 512x512 | 160000 | - | - | 42.45 | 43.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json) |
38
- | DANet | R-101-D8 | 512x512 | 160000 | - | - | 44.17 | 45.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json) |
39
-
40
- ### Pascal VOC 2012 + Aug
41
-
42
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
43
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
44
- | DANet | R-50-D8 | 512x512 | 20000 | 6.5 | 20.94 | 74.45 | 75.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json) |
45
- | DANet | R-101-D8 | 512x512 | 20000 | 9.9 | 13.76 | 76.02 | 77.23 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json) |
46
- | DANet | R-50-D8 | 512x512 | 40000 | - | - | 76.37 | 77.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json) |
47
- | DANet | R-101-D8 | 512x512 | 40000 | - | - | 76.51 | 77.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json) |
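Every row in these tables pairs a config with a released checkpoint; a minimal sketch of using one such pair for inference with the mmsegmentation 0.x API (the input image name is a placeholder):

```python
# Sketch only: assumes mmsegmentation (v0.x) is installed and both files were downloaded.
from mmseg.apis import init_segmentor, inference_segmentor

config = 'configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint = 'danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth'

model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'street_scene.jpg')  # hypothetical input image
# result is a list with one per-pixel class-index map per input image
```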
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/danet_r50-d8.py',
3
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_80k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(align_corners=True),
8
- auxiliary_head=dict(align_corners=True),
9
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
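`align_corners=True` together with sliding-window testing is the standard 769x769 setup; the 513-pixel stride means neighbouring crops overlap by 256 pixels. A quick sanity check of the tiling on a full-resolution 1024x2048 Cityscapes frame (a sketch of the usual ceil-based window count):

```python
import math

def num_windows(size, crop, stride):
    """Sliding-window positions needed to cover one image dimension."""
    return max(math.ceil((size - crop) / stride) + 1, 1)

h_grids = num_windows(1024, 769, 513)  # 2 rows of crops
w_grids = num_windows(2048, 769, 513)  # 4 columns of crops
print(h_grids * w_grids)               # 8 forward passes per test image
```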
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/__init__.py DELETED
File without changes
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/optflow.py DELETED
@@ -1,112 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from __future__ import division
3
-
4
- import numpy as np
5
-
6
- from annotator.uniformer.mmcv.image import rgb2bgr
7
- from annotator.uniformer.mmcv.video import flowread
8
- from .image import imshow
9
-
10
-
11
- def flowshow(flow, win_name='', wait_time=0):
12
- """Show optical flow.
13
-
14
- Args:
15
- flow (ndarray or str): The optical flow to be displayed.
16
- win_name (str): The window name.
17
- wait_time (int): Value of waitKey param.
18
- """
19
- flow = flowread(flow)
20
- flow_img = flow2rgb(flow)
21
- imshow(rgb2bgr(flow_img), win_name, wait_time)
22
-
23
-
24
- def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
25
- """Convert flow map to RGB image.
26
-
27
- Args:
28
- flow (ndarray): Array of optical flow.
29
- color_wheel (ndarray or None): Color wheel used to map flow field to
30
- RGB colorspace. Default color wheel will be used if not specified.
31
- unknown_thr (str): Values above this threshold will be marked as
32
- unknown and thus ignored.
33
-
34
- Returns:
35
- ndarray: RGB image that can be visualized.
36
- """
37
- assert flow.ndim == 3 and flow.shape[-1] == 2
38
- if color_wheel is None:
39
- color_wheel = make_color_wheel()
40
- assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
41
- num_bins = color_wheel.shape[0]
42
-
43
- dx = flow[:, :, 0].copy()
44
- dy = flow[:, :, 1].copy()
45
-
46
- ignore_inds = (
47
- np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
48
- (np.abs(dy) > unknown_thr))
49
- dx[ignore_inds] = 0
50
- dy[ignore_inds] = 0
51
-
52
- rad = np.sqrt(dx**2 + dy**2)
53
- if np.any(rad > np.finfo(float).eps):
54
- max_rad = np.max(rad)
55
- dx /= max_rad
56
- dy /= max_rad
57
-
58
- rad = np.sqrt(dx**2 + dy**2)
59
- angle = np.arctan2(-dy, -dx) / np.pi
60
-
61
- bin_real = (angle + 1) / 2 * (num_bins - 1)
62
- bin_left = np.floor(bin_real).astype(int)
63
- bin_right = (bin_left + 1) % num_bins
64
- w = (bin_real - bin_left.astype(np.float32))[..., None]
65
- flow_img = (1 -
66
- w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
67
- small_ind = rad <= 1
68
- flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
69
- flow_img[np.logical_not(small_ind)] *= 0.75
70
-
71
- flow_img[ignore_inds, :] = 0
72
-
73
- return flow_img
74
-
75
-
76
- def make_color_wheel(bins=None):
77
- """Build a color wheel.
78
-
79
- Args:
80
- bins(list or tuple, optional): Specify the number of bins for each
81
- color range, corresponding to six ranges: red -> yellow,
82
- yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
83
- magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
84
- (see Middlebury).
85
-
86
- Returns:
87
- ndarray: Color wheel of shape (total_bins, 3).
88
- """
89
- if bins is None:
90
- bins = [15, 6, 4, 11, 13, 6]
91
- assert len(bins) == 6
92
-
93
- RY, YG, GC, CB, BM, MR = tuple(bins)
94
-
95
- ry = [1, np.arange(RY) / RY, 0]
96
- yg = [1 - np.arange(YG) / YG, 1, 0]
97
- gc = [0, 1, np.arange(GC) / GC]
98
- cb = [0, 1 - np.arange(CB) / CB, 1]
99
- bm = [np.arange(BM) / BM, 0, 1]
100
- mr = [1, 0, 1 - np.arange(MR) / MR]
101
-
102
- num_bins = RY + YG + GC + CB + BM + MR
103
-
104
- color_wheel = np.zeros((3, num_bins), dtype=np.float32)
105
-
106
- col = 0
107
- for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
108
- for j in range(3):
109
- color_wheel[j, col:col + bins[i]] = color[j]
110
- col += bins[i]
111
-
112
- return color_wheel.T
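For reference, the deleted helpers map flow direction to hue and magnitude to saturation via the Middlebury color wheel. A small sketch of exercising `flow2rgb` on a synthetic field (assuming the same module is importable, e.g. from a stock `mmcv` install, which ships an equivalent implementation):

```python
import numpy as np
from mmcv.visualization import flow2rgb  # same logic as the vendored module above

# Synthetic 64x64 flow: constant rightward motion, downward motion growing with the row.
flow = np.zeros((64, 64, 2), dtype=np.float32)
flow[..., 0] = 3.0                             # dx
flow[..., 1] = np.linspace(0, 5, 64)[:, None]  # dy
rgb = flow2rgb(flow)
print(rgb.shape, float(rgb.min()), float(rgb.max()))  # (64, 64, 3), values within [0, 1]
```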
 
spaces/ArkanDash/rvc-models/infer_pack/modules.py DELETED
@@ -1,522 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- from infer_pack import commons
13
- from infer_pack.commons import init_weights, get_padding
14
- from infer_pack.transforms import piecewise_rational_quadratic_transform
15
-
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
-
20
- class LayerNorm(nn.Module):
21
- def __init__(self, channels, eps=1e-5):
22
- super().__init__()
23
- self.channels = channels
24
- self.eps = eps
25
-
26
- self.gamma = nn.Parameter(torch.ones(channels))
27
- self.beta = nn.Parameter(torch.zeros(channels))
28
-
29
- def forward(self, x):
30
- x = x.transpose(1, -1)
31
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
- return x.transpose(1, -1)
33
-
34
-
35
- class ConvReluNorm(nn.Module):
36
- def __init__(
37
- self,
38
- in_channels,
39
- hidden_channels,
40
- out_channels,
41
- kernel_size,
42
- n_layers,
43
- p_dropout,
44
- ):
45
- super().__init__()
46
- self.in_channels = in_channels
47
- self.hidden_channels = hidden_channels
48
- self.out_channels = out_channels
49
- self.kernel_size = kernel_size
50
- self.n_layers = n_layers
51
- self.p_dropout = p_dropout
52
- assert n_layers > 1, "Number of layers should be larger than 1."
53
-
54
- self.conv_layers = nn.ModuleList()
55
- self.norm_layers = nn.ModuleList()
56
- self.conv_layers.append(
57
- nn.Conv1d(
58
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
- )
60
- )
61
- self.norm_layers.append(LayerNorm(hidden_channels))
62
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
- for _ in range(n_layers - 1):
64
- self.conv_layers.append(
65
- nn.Conv1d(
66
- hidden_channels,
67
- hidden_channels,
68
- kernel_size,
69
- padding=kernel_size // 2,
70
- )
71
- )
72
- self.norm_layers.append(LayerNorm(hidden_channels))
73
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
- self.proj.weight.data.zero_()
75
- self.proj.bias.data.zero_()
76
-
77
- def forward(self, x, x_mask):
78
- x_org = x
79
- for i in range(self.n_layers):
80
- x = self.conv_layers[i](x * x_mask)
81
- x = self.norm_layers[i](x)
82
- x = self.relu_drop(x)
83
- x = x_org + self.proj(x)
84
- return x * x_mask
85
-
86
-
87
- class DDSConv(nn.Module):
88
- """
89
- Dilated and Depth-Separable Convolution
90
- """
91
-
92
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
- super().__init__()
94
- self.channels = channels
95
- self.kernel_size = kernel_size
96
- self.n_layers = n_layers
97
- self.p_dropout = p_dropout
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.convs_sep = nn.ModuleList()
101
- self.convs_1x1 = nn.ModuleList()
102
- self.norms_1 = nn.ModuleList()
103
- self.norms_2 = nn.ModuleList()
104
- for i in range(n_layers):
105
- dilation = kernel_size**i
106
- padding = (kernel_size * dilation - dilation) // 2
107
- self.convs_sep.append(
108
- nn.Conv1d(
109
- channels,
110
- channels,
111
- kernel_size,
112
- groups=channels,
113
- dilation=dilation,
114
- padding=padding,
115
- )
116
- )
117
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
- self.norms_1.append(LayerNorm(channels))
119
- self.norms_2.append(LayerNorm(channels))
120
-
121
- def forward(self, x, x_mask, g=None):
122
- if g is not None:
123
- x = x + g
124
- for i in range(self.n_layers):
125
- y = self.convs_sep[i](x * x_mask)
126
- y = self.norms_1[i](y)
127
- y = F.gelu(y)
128
- y = self.convs_1x1[i](y)
129
- y = self.norms_2[i](y)
130
- y = F.gelu(y)
131
- y = self.drop(y)
132
- x = x + y
133
- return x * x_mask
134
-
135
-
136
- class WN(torch.nn.Module):
137
- def __init__(
138
- self,
139
- hidden_channels,
140
- kernel_size,
141
- dilation_rate,
142
- n_layers,
143
- gin_channels=0,
144
- p_dropout=0,
145
- ):
146
- super(WN, self).__init__()
147
- assert kernel_size % 2 == 1
148
- self.hidden_channels = hidden_channels
149
- self.kernel_size = (kernel_size,)
150
- self.dilation_rate = dilation_rate
151
- self.n_layers = n_layers
152
- self.gin_channels = gin_channels
153
- self.p_dropout = p_dropout
154
-
155
- self.in_layers = torch.nn.ModuleList()
156
- self.res_skip_layers = torch.nn.ModuleList()
157
- self.drop = nn.Dropout(p_dropout)
158
-
159
- if gin_channels != 0:
160
- cond_layer = torch.nn.Conv1d(
161
- gin_channels, 2 * hidden_channels * n_layers, 1
162
- )
163
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
-
165
- for i in range(n_layers):
166
- dilation = dilation_rate**i
167
- padding = int((kernel_size * dilation - dilation) / 2)
168
- in_layer = torch.nn.Conv1d(
169
- hidden_channels,
170
- 2 * hidden_channels,
171
- kernel_size,
172
- dilation=dilation,
173
- padding=padding,
174
- )
175
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
- self.in_layers.append(in_layer)
177
-
178
- # last one is not necessary
179
- if i < n_layers - 1:
180
- res_skip_channels = 2 * hidden_channels
181
- else:
182
- res_skip_channels = hidden_channels
183
-
184
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
- self.res_skip_layers.append(res_skip_layer)
187
-
188
- def forward(self, x, x_mask, g=None, **kwargs):
189
- output = torch.zeros_like(x)
190
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
-
192
- if g is not None:
193
- g = self.cond_layer(g)
194
-
195
- for i in range(self.n_layers):
196
- x_in = self.in_layers[i](x)
197
- if g is not None:
198
- cond_offset = i * 2 * self.hidden_channels
199
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
- else:
201
- g_l = torch.zeros_like(x_in)
202
-
203
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
- acts = self.drop(acts)
205
-
206
- res_skip_acts = self.res_skip_layers[i](acts)
207
- if i < self.n_layers - 1:
208
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
- x = (x + res_acts) * x_mask
210
- output = output + res_skip_acts[:, self.hidden_channels :, :]
211
- else:
212
- output = output + res_skip_acts
213
- return output * x_mask
214
-
215
- def remove_weight_norm(self):
216
- if self.gin_channels != 0:
217
- torch.nn.utils.remove_weight_norm(self.cond_layer)
218
- for l in self.in_layers:
219
- torch.nn.utils.remove_weight_norm(l)
220
- for l in self.res_skip_layers:
221
- torch.nn.utils.remove_weight_norm(l)
222
-
223
-
224
- class ResBlock1(torch.nn.Module):
225
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
226
- super(ResBlock1, self).__init__()
227
- self.convs1 = nn.ModuleList(
228
- [
229
- weight_norm(
230
- Conv1d(
231
- channels,
232
- channels,
233
- kernel_size,
234
- 1,
235
- dilation=dilation[0],
236
- padding=get_padding(kernel_size, dilation[0]),
237
- )
238
- ),
239
- weight_norm(
240
- Conv1d(
241
- channels,
242
- channels,
243
- kernel_size,
244
- 1,
245
- dilation=dilation[1],
246
- padding=get_padding(kernel_size, dilation[1]),
247
- )
248
- ),
249
- weight_norm(
250
- Conv1d(
251
- channels,
252
- channels,
253
- kernel_size,
254
- 1,
255
- dilation=dilation[2],
256
- padding=get_padding(kernel_size, dilation[2]),
257
- )
258
- ),
259
- ]
260
- )
261
- self.convs1.apply(init_weights)
262
-
263
- self.convs2 = nn.ModuleList(
264
- [
265
- weight_norm(
266
- Conv1d(
267
- channels,
268
- channels,
269
- kernel_size,
270
- 1,
271
- dilation=1,
272
- padding=get_padding(kernel_size, 1),
273
- )
274
- ),
275
- weight_norm(
276
- Conv1d(
277
- channels,
278
- channels,
279
- kernel_size,
280
- 1,
281
- dilation=1,
282
- padding=get_padding(kernel_size, 1),
283
- )
284
- ),
285
- weight_norm(
286
- Conv1d(
287
- channels,
288
- channels,
289
- kernel_size,
290
- 1,
291
- dilation=1,
292
- padding=get_padding(kernel_size, 1),
293
- )
294
- ),
295
- ]
296
- )
297
- self.convs2.apply(init_weights)
298
-
299
- def forward(self, x, x_mask=None):
300
- for c1, c2 in zip(self.convs1, self.convs2):
301
- xt = F.leaky_relu(x, LRELU_SLOPE)
302
- if x_mask is not None:
303
- xt = xt * x_mask
304
- xt = c1(xt)
305
- xt = F.leaky_relu(xt, LRELU_SLOPE)
306
- if x_mask is not None:
307
- xt = xt * x_mask
308
- xt = c2(xt)
309
- x = xt + x
310
- if x_mask is not None:
311
- x = x * x_mask
312
- return x
313
-
314
- def remove_weight_norm(self):
315
- for l in self.convs1:
316
- remove_weight_norm(l)
317
- for l in self.convs2:
318
- remove_weight_norm(l)
319
-
320
-
321
- class ResBlock2(torch.nn.Module):
322
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
323
- super(ResBlock2, self).__init__()
324
- self.convs = nn.ModuleList(
325
- [
326
- weight_norm(
327
- Conv1d(
328
- channels,
329
- channels,
330
- kernel_size,
331
- 1,
332
- dilation=dilation[0],
333
- padding=get_padding(kernel_size, dilation[0]),
334
- )
335
- ),
336
- weight_norm(
337
- Conv1d(
338
- channels,
339
- channels,
340
- kernel_size,
341
- 1,
342
- dilation=dilation[1],
343
- padding=get_padding(kernel_size, dilation[1]),
344
- )
345
- ),
346
- ]
347
- )
348
- self.convs.apply(init_weights)
349
-
350
- def forward(self, x, x_mask=None):
351
- for c in self.convs:
352
- xt = F.leaky_relu(x, LRELU_SLOPE)
353
- if x_mask is not None:
354
- xt = xt * x_mask
355
- xt = c(xt)
356
- x = xt + x
357
- if x_mask is not None:
358
- x = x * x_mask
359
- return x
360
-
361
- def remove_weight_norm(self):
362
- for l in self.convs:
363
- remove_weight_norm(l)
364
-
365
-
366
- class Log(nn.Module):
367
- def forward(self, x, x_mask, reverse=False, **kwargs):
368
- if not reverse:
369
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
370
- logdet = torch.sum(-y, [1, 2])
371
- return y, logdet
372
- else:
373
- x = torch.exp(x) * x_mask
374
- return x
375
-
376
-
377
- class Flip(nn.Module):
378
- def forward(self, x, *args, reverse=False, **kwargs):
379
- x = torch.flip(x, [1])
380
- if not reverse:
381
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
382
- return x, logdet
383
- else:
384
- return x
385
-
386
-
387
- class ElementwiseAffine(nn.Module):
388
- def __init__(self, channels):
389
- super().__init__()
390
- self.channels = channels
391
- self.m = nn.Parameter(torch.zeros(channels, 1))
392
- self.logs = nn.Parameter(torch.zeros(channels, 1))
393
-
394
- def forward(self, x, x_mask, reverse=False, **kwargs):
395
- if not reverse:
396
- y = self.m + torch.exp(self.logs) * x
397
- y = y * x_mask
398
- logdet = torch.sum(self.logs * x_mask, [1, 2])
399
- return y, logdet
400
- else:
401
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
402
- return x
403
-
404
-
405
- class ResidualCouplingLayer(nn.Module):
406
- def __init__(
407
- self,
408
- channels,
409
- hidden_channels,
410
- kernel_size,
411
- dilation_rate,
412
- n_layers,
413
- p_dropout=0,
414
- gin_channels=0,
415
- mean_only=False,
416
- ):
417
- assert channels % 2 == 0, "channels should be divisible by 2"
418
- super().__init__()
419
- self.channels = channels
420
- self.hidden_channels = hidden_channels
421
- self.kernel_size = kernel_size
422
- self.dilation_rate = dilation_rate
423
- self.n_layers = n_layers
424
- self.half_channels = channels // 2
425
- self.mean_only = mean_only
426
-
427
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
428
- self.enc = WN(
429
- hidden_channels,
430
- kernel_size,
431
- dilation_rate,
432
- n_layers,
433
- p_dropout=p_dropout,
434
- gin_channels=gin_channels,
435
- )
436
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
437
- self.post.weight.data.zero_()
438
- self.post.bias.data.zero_()
439
-
440
- def forward(self, x, x_mask, g=None, reverse=False):
441
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
442
- h = self.pre(x0) * x_mask
443
- h = self.enc(h, x_mask, g=g)
444
- stats = self.post(h) * x_mask
445
- if not self.mean_only:
446
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
447
- else:
448
- m = stats
449
- logs = torch.zeros_like(m)
450
-
451
- if not reverse:
452
- x1 = m + x1 * torch.exp(logs) * x_mask
453
- x = torch.cat([x0, x1], 1)
454
- logdet = torch.sum(logs, [1, 2])
455
- return x, logdet
456
- else:
457
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
458
- x = torch.cat([x0, x1], 1)
459
- return x
460
-
461
- def remove_weight_norm(self):
462
- self.enc.remove_weight_norm()
463
-
464
-
465
- class ConvFlow(nn.Module):
466
- def __init__(
467
- self,
468
- in_channels,
469
- filter_channels,
470
- kernel_size,
471
- n_layers,
472
- num_bins=10,
473
- tail_bound=5.0,
474
- ):
475
- super().__init__()
476
- self.in_channels = in_channels
477
- self.filter_channels = filter_channels
478
- self.kernel_size = kernel_size
479
- self.n_layers = n_layers
480
- self.num_bins = num_bins
481
- self.tail_bound = tail_bound
482
- self.half_channels = in_channels // 2
483
-
484
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
485
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
486
- self.proj = nn.Conv1d(
487
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
488
- )
489
- self.proj.weight.data.zero_()
490
- self.proj.bias.data.zero_()
491
-
492
- def forward(self, x, x_mask, g=None, reverse=False):
493
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
494
- h = self.pre(x0)
495
- h = self.convs(h, x_mask, g=g)
496
- h = self.proj(h) * x_mask
497
-
498
- b, c, t = x0.shape
499
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
500
-
501
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
502
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
503
- self.filter_channels
504
- )
505
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
506
-
507
- x1, logabsdet = piecewise_rational_quadratic_transform(
508
- x1,
509
- unnormalized_widths,
510
- unnormalized_heights,
511
- unnormalized_derivatives,
512
- inverse=reverse,
513
- tails="linear",
514
- tail_bound=self.tail_bound,
515
- )
516
-
517
- x = torch.cat([x0, x1], 1) * x_mask
518
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
519
- if not reverse:
520
- return x, logdet
521
- else:
522
- return x
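The coupling layers above are exactly invertible by construction, which is easy to verify with dummy tensors. A minimal sketch (assuming the `infer_pack` package and its `commons`/`transforms` helpers are importable, since `WN` depends on them):

```python
import torch
from infer_pack.modules import ResidualCouplingLayer

x = torch.randn(2, 192, 100)    # batch of 2, 192 channels, 100 frames
x_mask = torch.ones(2, 1, 100)  # no frames masked out

flow = ResidualCouplingLayer(
    channels=192, hidden_channels=192, kernel_size=5,
    dilation_rate=1, n_layers=3, mean_only=True,
)
z, logdet = flow(x, x_mask)                  # forward: returns latent and log-determinant
x_rec = flow(z, x_mask, reverse=True)        # reverse: inverts the transform exactly
print(torch.allclose(x, x_rec, atol=1e-5))   # True
```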
 
spaces/Axolotlily/Interpolate/app.py DELETED
@@ -1,63 +0,0 @@
1
- import os
2
- os.system("git clone https://github.com/google-research/frame-interpolation")
3
- import sys
4
- sys.path.append("frame-interpolation")
5
- import numpy as np
6
- import tensorflow as tf
7
- import mediapy
8
- from PIL import Image
9
- from eval import interpolator, util
10
- import gradio as gr
11
-
12
- from huggingface_hub import snapshot_download
13
-
14
- from image_tools.sizes import resize_and_crop
15
-
16
-
17
- model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
18
-
19
- interpolator = interpolator.Interpolator(model, None)
20
-
21
- ffmpeg_path = util.get_ffmpeg_path()
22
- mediapy.set_ffmpeg(ffmpeg_path)
23
-
24
- def resize(width,img):
25
- basewidth = width
26
- img = Image.open(img)
27
- wpercent = (basewidth/float(img.size[0]))
28
- hsize = int((float(img.size[1])*float(wpercent)))
29
- img = img.resize((basewidth,hsize), Image.ANTIALIAS)
30
- return img
31
-
32
-
33
- def resize_img(img1,img2):
34
- img_target_size = Image.open(img1)
35
- img_to_resize = resize_and_crop(
36
- img2,
37
- (img_target_size.size[0],img_target_size.size[1]), #set width and height to match img1
38
- crop_origin="middle"
39
- )
40
- img_to_resize.save('resized_img2.png')
41
-
42
- def predict(frame1, frame2, times_to_interpolate):
43
-
44
- frame1 = resize(512,frame1)
45
- frame2 = resize(512,frame2)
46
-
47
- frame1.save("test1.png")
48
- frame2.save("test2.png")
49
-
50
- resize_img("test1.png","test2.png")
51
- input_frames = ["test1.png", "resized_img2.png"]
52
-
53
- frames = list(
54
- util.interpolate_recursively_from_files(
55
- input_frames, times_to_interpolate, interpolator))
56
-
57
- mediapy.write_video("out.mp4", frames, fps=15)
58
- return "out.mp4"
59
- article=""
60
- description="Using AI to guess the frames between two separate images."
61
- title="Frame Interpolation"
62
- examples=[['cat3.jpeg','cat4.jpeg',2]]
63
- gr.Interface(predict,[gr.inputs.Image(type='filepath'),gr.inputs.Image(type='filepath'),gr.inputs.Slider(minimum=2,maximum=8,step=1)],"playable_video",title=title,description=description,article=article,examples=examples).launch(enable_queue=True)
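For context, `times_to_interpolate` doubles the frame count recursively, so output length grows exponentially with the slider value. A rough sketch of the arithmetic as typically implemented by the FILM reference code (treat the exact formula as an assumption):

```python
def total_frames(times_to_interpolate: int, n_inputs: int = 2) -> int:
    """Frames produced by recursive midpoint interpolation between consecutive inputs."""
    in_between = 2 ** times_to_interpolate - 1       # new frames per input pair
    return n_inputs + (n_inputs - 1) * in_between

print(total_frames(2))  # 5 frames from two stills -> 1/3 s at the 15 fps used above
print(total_frames(8))  # 257 frames -> roughly 17 s of video from a single pair
```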
 
spaces/BRICS/README/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 💻
4
- colorFrom: yellow
5
- colorTo: blue
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Edit this `README.md` markdown file to author your organization card.
spaces/BasToTheMax/tensor/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Tensor
3
- emoji: 🐨
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.38.0
8
- app_file: app.py
9
- pinned: false
10
- license: creativeml-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benson/text-generation/Examples/Coche De Playa Carreras Ruedas Calientes Apk.md DELETED
@@ -1,50 +0,0 @@
1
-
2
- <h1>Beach Buggy Racing Hot Wheels APK Descargar: Un divertido y rápido juego de carreras de karts</h1>
3
- <p>Si usted está buscando un juego de carreras de karts divertido y de ritmo rápido que se puede jugar en su dispositivo Android, entonces usted debe comprobar Beach Buggy Racing Hot Wheels APK. Este es un juego gratuito que te permite conducir en un mundo lleno de acción y lleno de sorpresas de caos de carreras de karts todoterreno. Puedes competir contra un campo de pilotos rivales, cada uno con personalidades únicas y habilidades especiales. También puedes crear una colección de poderes locos, como Dodgeball Frenzy, Fireball y oíl Slick. También puede desbloquear y actualizar una variedad de coches, desde buggies de dunas a camiones monstruo a rovers lunares. También puedes poner a prueba tus habilidades en 6 modos de juego diferentes en 15 imaginativas pistas de carreras en 3D, contra un grupo de rivales amantes de lo tropical con un caso serio de furia en el camino. Esta es la secuela oficial de Beach Buggy Blitz, el juego de conducción gratuita con más de 30 millones de jugadores en todo el mundo. Rápido, furioso, divertido y gratis, Beach Buggy Racing Hot Wheels APK es una aventura de kart-racing isla para todas las edades. </p>
4
- <h2>coche de playa carreras ruedas calientes apk</h2><br /><p><b><b>Download Zip</b> &hArr; <a href="https://bltlly.com/2v6IAu">https://bltlly.com/2v6IAu</a></b></p><br /><br />
5
- <h2>Características de Beach Buggy Racing Hot Wheels APK</h2>
6
- <p>Beach Buggy Racing Hot Wheels APK tiene muchas características que lo convierten en un juego emocionante y agradable. Estos son algunos de ellos:</p>
7
- <ul>
8
- <li><b>Emocionante acción de carreras de karts con potenciadores creativos y jugabilidad basada en la física</b>: Puedes utilizar tus habilidades de conducción y una colección de potenciadores creativos para luchar hasta la meta. No es solo un gran juego de carreras en 3D, es una batalla épica con un juego espectacular basado en la física. </li>
9
- <li><b>Coches geniales para personalizar, desde buggies de dunas a camiones monstruosos a rovers lunares</b>: Puedes usar tus ganancias para recoger y actualizar un garaje lleno de coches únicos, desde camiones monstruosos hasta coches musculosos y rovers lunares. </li>
10
-
11
- <li><b>15 espectaculares pistas de carreras, desde selvas infestadas de dinosaurios a volcanes que arrojan lava a hermosas playas</b>: Puedes explorar selvas, volcanes, playas y más en un mundo lleno de acción de sorpresas. Verás cangrejos gigantes, yetis enojados, dragones voladores y más. </li>
12
- <li><b>Reúne un equipo de corredores, cada uno con un poder especial único</b>: Puedes reclutar un equipo de conductores para jugar, cada uno con un poder especial único como la teletransportación, pistas de fuego en llamas y hechizos de confusión. </li>
13
- <li><b>Modo multijugador de pantalla dividida para hasta 4 amigos</b>: Puedes retar a tus amigos en emocionantes carreras multijugador de pantalla dividida. Puedes jugar con hasta 4 amigos en un dispositivo. </li>
14
- <li><b>Integración de servicios de juego de Google Play para tablas de clasificación, logros, almacenamiento en la nube y sincronización</b>: Puedes competir con tus amigos en tablas de clasificación, obtener logros, hacer copias de seguridad de tu juego en la nube y mantener varios dispositivos sincronizados con tu cuenta de Google. </li>
15
- <li><b>Juega de la manera que quieras con la dirección de inclinación, la pantalla táctil o los controles del gamepad</b>: Puedes elegir entre varias opciones de control y personalizar la configuración de gráficos 3D para optimizar tu experiencia de juego. </li>
16
- <li><b>Personalice la configuración de gráficos 3D para optimizar su experiencia de juego</b>: Puede ajustar la calidad de los gráficos y la configuración de rendimiento para adaptarse a su dispositivo y preferencia. </li>
17
- </ul>
18
- <h2>Cómo descargar e instalar Beach Buggy Racing Hot Wheels APK</h2>
19
- <p>Si desea descargar e instalar Beach Buggy Racing Hot Wheels APK en su dispositivo Android, puede seguir estos sencillos pasos:</p>
20
- <ol>
21
- <li><b>Ir a la página web oficial de Beach Buggy Racing Hot Wheels APK y haga clic en el botón de descarga</b>: Esto iniciará el proceso de descarga del archivo APK en su dispositivo. Asegúrese de tener suficiente espacio de almacenamiento y una conexión a Internet estable. </li>
22
-
23
- <li><b>Si ves un mensaje de advertencia sobre la instalación de aplicaciones desde fuentes desconocidas, ve a la configuración de tu dispositivo y habilita la opción para permitirlo</b>: Algunos dispositivos pueden bloquear la instalación de aplicaciones desde fuentes distintas de Google Play Store por razones de seguridad. Si ves tal mensaje, necesitas ir a la configuración de tu dispositivo, encontrar la opción de seguridad o privacidad y habilitar la opción para permitir la instalación de aplicaciones desde fuentes desconocidas. Esto le permitirá instalar Beach Buggy Racing Hot Wheels APK sin ningún problema. </li>
24
- <li><b>Siga las instrucciones en pantalla para instalar el juego y disfrutar</b>: Después de habilitar la opción de instalar aplicaciones de fuentes desconocidas, puede seguir las instrucciones en pantalla para instalar Beach Buggy Racing Hot Wheels APK en su dispositivo. Tardará unos segundos o minutos dependiendo de la velocidad de tu dispositivo. Una vez finalizada la instalación, puedes iniciar el juego y disfrutarlo. </li>
25
- </ol>
26
- <h2> Consejos y trucos para jugar Beach Buggy Racing Hot Wheels APK</h2>
27
- <p>Beach Buggy Racing Hot Wheels APK es un juego divertido y adictivo que te mantendrá entretenido durante horas. Sin embargo, si quieres mejorar tus habilidades y rendimiento en el juego, puedes utilizar estos consejos y trucos:</p>
28
- <ul>
29
- <li><b>Utilice sus powerups sabiamente y estratégicamente para ganar una ventaja sobre sus oponentes</b>: Powerups son uno de los aspectos más importantes de Beach Buggy Racing Hot Wheels APK. Pueden ayudarte a acelerar, ralentizar, atacar o defenderte de otros corredores. Sin embargo, necesitas usarlos sabiamente y estratégicamente. Por ejemplo, puedes usar una bola de fuego para disparar a un oponente delante de ti, o una mancha de aceite para que se deslice detrás de ti. También puedes usar un escudo para protegerte de ataques entrantes, o un turbo para acercarte más allá de todos. Usted necesita saber cuándo y cómo utilizar cada powerup con eficacia. </li>
30
-
31
- <li><b>Pruebe diferentes modos de juego y pistas de carreras para desafiarse y divertirse</b>: Beach Buggy Racing Hot Wheels APK tiene seis modos de juego diferentes y 15 pistas de carreras 3D imaginativas para desafiarse y divertirse. Puedes probar el modo Campeonato, donde compites en una serie de carreras para convertirte en el campeón definitivo. También puedes probar el modo Carrera rápida, donde puedes elegir cualquier pista y cualquier coche y carrera por diversión. También puedes probar el modo Desafío diario, donde puedes ganar recompensas adicionales completando una tarea específica cada día. También puedes probar el modo Eventos especiales, donde puedes participar en eventos de tiempo limitado con reglas y premios especiales. También puedes probar el modo Hot Wheels, donde puedes competir con coches y pistas de Hot Wheels. También puedes probar el modo Boss Battle, donde puedes enfrentarte a los jefes de carreras y sus poderes especiales. Cada modo de juego y pista de carreras tiene sus propios desafíos, sorpresas y diversión. </li>
32
- <li><b>Reclutar nuevos corredores y utilizar sus poderes especiales para su ventaja</b>: Beach Buggy Racing Hot Wheels APK tiene una lista de 12 corredores, cada uno con un poder especial único. Puedes reclutarlos ganando carreras o comprándolas con gemas. También puedes cambiar entre ellas antes de cada carrera. Cada corredor tiene una personalidad, estilo y poder diferentes que pueden ayudarte en diferentes situaciones. Por ejemplo, Rez tiene el poder de teletransportación, lo que le permite avanzar por delante de la manada. Beach Bro tiene el poder de las pistas de fuego en llamas, que incendia el suelo detrás de él. Tiki tiene el poder de los hechizos de confusión, lo que hace que otros corredores pierdan el control de sus coches. Necesitas experimentar con diferentes corredores y sus poderes para encontrar la mejor combinación para cada carrera. </li>
33
-
34
- </ul>
35
- <h2>Conclusión</h2>
36
- <p>Beach Buggy Racing Hot Wheels APK es un divertido y rápido juego de carreras de karts que se puede descargar de forma gratuita y disfrutar en su dispositivo Android. Tiene muchas características que lo convierten en un juego emocionante y agradable, como powerups creativos, coches frescos, pistas de carreras espectaculares, corredores únicos y diferentes modos de juego. También tiene un proceso de instalación simple y fácil, así como consejos y trucos para ayudarle a mejorar sus habilidades y rendimiento en el juego. También tiene un modo multijugador de pantalla dividida y un modo online que te permite jugar con tus amigos y divertirte más. </p>
37
- <p></p>
38
- <p>Si usted está buscando una aventura isla de carreras de karts para todas las edades, entonces usted debe descargar Beach Buggy Racing Hot Wheels APK hoy y unirse al caos de carreras. </p>
39
- <h2>Preguntas frecuentes</h2>
40
- <p>Aquí hay algunas preguntas frecuentes sobre Beach Buggy Racing Hot Wheels APK:</p>
41
- <ol>
42
- <li><b> ¿Es seguro descargar e instalar Beach Buggy Racing Hot Wheels APK? </b>: Sí, Beach Buggy Racing Hot Wheels APK es seguro para descargar e instalar en su dispositivo Android. Es desarrollado por Vector Unit, un desarrollador de juegos de buena reputación que ha creado muchos juegos populares como Riptide GP, Shine Runner, MouseBot, etc. También es verificado por Google Play Protect, que escanea aplicaciones de malware y otras amenazas antes de instalarlos en su dispositivo. </li>
43
- <li><b>Es Beach Buggy Racing Hot Wheels APK libre para jugar? </b>: Sí, Beach Buggy Racing Hot Wheels APK es gratis para jugar en su dispositivo Android. Sin embargo, contiene algunas compras opcionales en la aplicación que pueden mejorar su experiencia de juego. Por ejemplo, puedes comprar gemas para desbloquear nuevos coches y corredores más rápido, o comprar monedas para mejorar tus coches más fácilmente. También puedes ver anuncios para ganar monedas y gemas extra gratis. </li>
44
-
45
- <li><b> ¿Cuáles son los requisitos mínimos del sistema para Beach Buggy Racing Hot Wheels APK? </b>: Beach Buggy Racing Hot Wheels APK requiere Android 4.1 o superior para funcionar sin problemas en su dispositivo. También requiere al menos 1 GB de RAM y un procesador decente para manejar los gráficos 3D y el juego basado en la física. </li>
46
- <li><b> ¿Cómo puedo contactar con el desarrollador de Beach Buggy Racing Hot Wheels APK? </b>: Si usted tiene alguna pregunta, comentarios, o problemas con respecto a Beach Buggy Racing Hot Wheels APK, puede ponerse en contacto con el desarrollador del juego por correo electrónico a [email protected]. También puede visitar su sitio web o seguirlos en Facebook o Twitter para obtener más información y actualizaciones sobre el juego. </li>
47
- </ol>
48
- : https://beach-buggy-racing-hotwheels.en.uptodown.com/android. : https://www.vectorunit.com/ : https://www.facebook.com/VectorUnit : https:/twitter.com/VectorUnit</p> 64aa2da5cf<br />
49
- <br />
50
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_headers.py DELETED
@@ -1,45 +0,0 @@
1
- """distutils.command.install_headers
2
-
3
- Implements the Distutils 'install_headers' command, to install C/C++ header
4
- files to the Python include directory."""
5
-
6
- from distutils.core import Command
7
-
8
-
9
- # XXX force is never used
10
- class install_headers(Command):
11
-
12
- description = "install C/C++ header files"
13
-
14
- user_options = [
15
- ('install-dir=', 'd', "directory to install header files to"),
16
- ('force', 'f', "force installation (overwrite existing files)"),
17
- ]
18
-
19
- boolean_options = ['force']
20
-
21
- def initialize_options(self):
22
- self.install_dir = None
23
- self.force = 0
24
- self.outfiles = []
25
-
26
- def finalize_options(self):
27
- self.set_undefined_options(
28
- 'install', ('install_headers', 'install_dir'), ('force', 'force')
29
- )
30
-
31
- def run(self):
32
- headers = self.distribution.headers
33
- if not headers:
34
- return
35
-
36
- self.mkpath(self.install_dir)
37
- for header in headers:
38
- (out, _) = self.copy_file(header, self.install_dir)
39
- self.outfiles.append(out)
40
-
41
- def get_inputs(self):
42
- return self.distribution.headers or []
43
-
44
- def get_outputs(self):
45
- return self.outfiles
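`install_headers` is a no-op unless the distribution actually declares headers; a minimal sketch of a `setup.py` that would give it something to copy (file names are hypothetical):

```python
# setup.py -- sketch only; include/myextension.h is a hypothetical header in the source tree.
from distutils.core import setup

setup(
    name='mypkg',
    version='0.1',
    headers=['include/myextension.h'],  # installed by `python setup.py install_headers`
)
```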
 
spaces/BilalSardar/karlo-cpu-api/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Karlo Cpu Api
3
- emoji: 🦀
4
- colorFrom: green
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.16.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py DELETED
@@ -1,498 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import torch
4
- from fvcore.nn import smooth_l1_loss
5
- from torch import nn
6
- from torch.nn import functional as F
7
-
8
- from detectron2.config import configurable
9
- from detectron2.layers import Linear, ShapeSpec, batched_nms, cat
10
- from detectron2.modeling.box_regression import Box2BoxTransform
11
- from detectron2.structures import Boxes, Instances
12
- from detectron2.utils.events import get_event_storage
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
- """
17
- Shape shorthand in this module:
18
-
19
- N: number of images in the minibatch
20
- R: number of ROIs, combined over all images, in the minibatch
21
- Ri: number of ROIs in image i
22
- K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
23
-
24
- Naming convention:
25
-
26
- deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
27
- transform (see :class:`box_regression.Box2BoxTransform`).
28
-
29
- pred_class_logits: predicted class scores in [-inf, +inf]; use
30
- softmax(pred_class_logits) to estimate P(class).
31
-
32
- gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
33
- foreground object classes and K represents the background class.
34
-
35
- pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
36
- to detection box predictions.
37
-
38
- gt_proposal_deltas: ground-truth box2box transform deltas
39
- """
40
-
41
-
42
- def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image):
43
- """
44
- Call `fast_rcnn_inference_single_image` for all images.
45
-
46
- Args:
47
- boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
48
- boxes for each image. Element i has shape (Ri, K * 4) if doing
49
- class-specific regression, or (Ri, 4) if doing class-agnostic
50
- regression, where Ri is the number of predicted objects for image i.
51
- This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
52
- scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
53
- Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
54
- for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
55
- image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
56
- score_thresh (float): Only return detections with a confidence score exceeding this
57
- threshold.
58
- nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
59
- topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
60
- all detections.
61
-
62
- Returns:
63
- instances: (list[Instances]): A list of N instances, one for each image in the batch,
64
- that stores the topk most confidence detections.
65
- kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
66
- the corresponding boxes/scores index in [0, Ri) from the input, for image i.
67
- """
68
- result_per_image = [
69
- fast_rcnn_inference_single_image(
70
- boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
71
- )
72
- for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
73
- ]
74
- return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
75
-
76
-
77
- def fast_rcnn_inference_single_image(
78
- boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
79
- ):
80
- """
81
- Single-image inference. Return bounding-box detection results by thresholding
82
- on scores and applying non-maximum suppression (NMS).
83
-
84
- Args:
85
- Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
86
- per image.
87
-
88
- Returns:
89
- Same as `fast_rcnn_inference`, but for only one image.
90
- """
91
- valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
92
- if not valid_mask.all():
93
- boxes = boxes[valid_mask]
94
- scores = scores[valid_mask]
95
-
96
- scores = scores[:, :-1]
97
- num_bbox_reg_classes = boxes.shape[1] // 4
98
- # Convert to Boxes to use the `clip` function ...
99
- boxes = Boxes(boxes.reshape(-1, 4))
100
- boxes.clip(image_shape)
101
- boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
102
-
103
- # Filter results based on detection scores
104
- filter_mask = scores > score_thresh # R x K
105
- # R' x 2. First column contains indices of the R predictions;
106
- # Second column contains indices of classes.
107
- filter_inds = filter_mask.nonzero()
108
- if num_bbox_reg_classes == 1:
109
- boxes = boxes[filter_inds[:, 0], 0]
110
- else:
111
- boxes = boxes[filter_mask]
112
- scores = scores[filter_mask]
113
-
114
- # Apply per-class NMS
115
- keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
116
- if topk_per_image >= 0:
117
- keep = keep[:topk_per_image]
118
- boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
119
-
120
- result = Instances(image_shape)
121
- result.pred_boxes = Boxes(boxes)
122
- result.scores = scores
123
- result.pred_classes = filter_inds[:, 1]
124
- return result, filter_inds[:, 0]
125
-
126
-
127
- class FastRCNNOutputs(object):
128
- """
129
- A class that stores information about outputs of a Fast R-CNN head.
130
- It provides methods that are used to decode the outputs of a Fast R-CNN head.
131
- """
132
-
133
- def __init__(
134
- self,
135
- box2box_transform,
136
- pred_class_logits,
137
- pred_proposal_deltas,
138
- proposals,
139
- smooth_l1_beta=0,
140
- ):
141
- """
142
- Args:
143
- box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
144
- box2box transform instance for proposal-to-detection transformations.
145
- pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
146
- logits for all R predicted object instances.
147
- Each row corresponds to a predicted object instance.
148
- pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
149
- class-specific or class-agnostic regression. It stores the predicted deltas that
150
- transform proposals into final box detections.
151
- B is the box dimension (4 or 5).
152
- When B is 4, each row is [dx, dy, dw, dh (, ....)].
153
- When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
154
- proposals (list[Instances]): A list of N Instances, where Instances i stores the
155
- proposals for image i, in the field "proposal_boxes".
156
- When training, each Instances must have ground-truth labels
157
- stored in the field "gt_classes" and "gt_boxes".
158
- The total number of all instances must be equal to R.
159
- smooth_l1_beta (float): The transition point between L1 and L2 loss in
160
- the smooth L1 loss function. When set to 0, the loss becomes L1. When
161
- set to +inf, the loss becomes constant 0.
162
- """
163
- self.box2box_transform = box2box_transform
164
- self.num_preds_per_image = [len(p) for p in proposals]
165
- self.pred_class_logits = pred_class_logits
166
- self.pred_proposal_deltas = pred_proposal_deltas
167
- self.smooth_l1_beta = smooth_l1_beta
168
- self.image_shapes = [x.image_size for x in proposals]
169
-
170
- if len(proposals):
171
- box_type = type(proposals[0].proposal_boxes)
172
- # cat(..., dim=0) concatenates over all images in the batch
173
- self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
174
- assert (
175
- not self.proposals.tensor.requires_grad
176
- ), "Proposals should not require gradients!"
177
-
178
- # The following fields should exist only when training.
179
- if proposals[0].has("gt_boxes"):
180
- self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
181
- assert proposals[0].has("gt_classes")
182
- self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
183
- else:
184
- self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
185
- self._no_instances = len(proposals) == 0 # no instances found
186
-
187
- def _log_accuracy(self):
188
- """
189
- Log the accuracy metrics to EventStorage.
190
- """
191
- num_instances = self.gt_classes.numel()
192
- pred_classes = self.pred_class_logits.argmax(dim=1)
193
- bg_class_ind = self.pred_class_logits.shape[1] - 1
194
-
195
- fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)
196
- num_fg = fg_inds.nonzero().numel()
197
- fg_gt_classes = self.gt_classes[fg_inds]
198
- fg_pred_classes = pred_classes[fg_inds]
199
-
200
- num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
201
- num_accurate = (pred_classes == self.gt_classes).nonzero().numel()
202
- fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
203
-
204
- storage = get_event_storage()
205
- if num_instances > 0:
206
- storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances)
207
- if num_fg > 0:
208
- storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg)
209
- storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg)
210
-
211
- def softmax_cross_entropy_loss(self):
212
- """
213
- Compute the softmax cross entropy loss for box classification.
214
-
215
- Returns:
216
- scalar Tensor
217
- """
218
- if self._no_instances:
219
- return 0.0 * F.cross_entropy(
220
- self.pred_class_logits,
221
- torch.zeros(0, dtype=torch.long, device=self.pred_class_logits.device),
222
- reduction="sum",
223
- )
224
- else:
225
- self._log_accuracy()
226
- return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")
227
-
228
- def smooth_l1_loss(self):
229
- """
230
- Compute the smooth L1 loss for box regression.
231
-
232
- Returns:
233
- scalar Tensor
234
- """
235
- if self._no_instances:
236
- return 0.0 * smooth_l1_loss(
237
- self.pred_proposal_deltas,
238
- torch.zeros_like(self.pred_proposal_deltas),
239
- 0.0,
240
- reduction="sum",
241
- )
242
- gt_proposal_deltas = self.box2box_transform.get_deltas(
243
- self.proposals.tensor, self.gt_boxes.tensor
244
- )
245
- box_dim = gt_proposal_deltas.size(1) # 4 or 5
246
- cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
247
- device = self.pred_proposal_deltas.device
248
-
249
- bg_class_ind = self.pred_class_logits.shape[1] - 1
250
-
251
- # Box delta loss is only computed between the prediction for the gt class k
252
- # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
253
- # for non-gt classes and background.
254
- # Empty fg_inds produces a valid loss of zero as long as the size_average
255
- # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
256
- # and would produce a nan loss).
257
- fg_inds = torch.nonzero((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)).squeeze(
258
- 1
259
- )
260
- if cls_agnostic_bbox_reg:
261
- # pred_proposal_deltas only corresponds to foreground class for agnostic
262
- gt_class_cols = torch.arange(box_dim, device=device)
263
- else:
264
- fg_gt_classes = self.gt_classes[fg_inds]
265
- # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
266
- # where b is the dimension of box representation (4 or 5)
267
- # Note that compared to Detectron1,
268
- # we do not perform bounding box regression for background classes.
269
- gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device)
270
-
271
- loss_box_reg = smooth_l1_loss(
272
- self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
273
- gt_proposal_deltas[fg_inds],
274
- self.smooth_l1_beta,
275
- reduction="sum",
276
- )
277
- # The loss is normalized using the total number of regions (R), not the number
278
- # of foreground regions even though the box regression loss is only defined on
279
- # foreground regions. Why? Because doing so gives equal training influence to
280
- # each foreground example. To see how, consider two different minibatches:
281
- # (1) Contains a single foreground region
282
- # (2) Contains 100 foreground regions
283
- # If we normalize by the number of foreground regions, the single example in
284
- # minibatch (1) will be given 100 times as much influence as each foreground
285
- # example in minibatch (2). Normalizing by the total number of regions, R,
286
- # means that the single example in minibatch (1) and each of the 100 examples
287
- # in minibatch (2) are given equal influence.
288
- loss_box_reg = loss_box_reg / self.gt_classes.numel()
289
- return loss_box_reg
290
-
291
- def _predict_boxes(self):
292
- """
293
- Returns:
294
- Tensor: A Tensors of predicted class-specific or class-agnostic boxes
295
- for all images in a batch. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
296
- the number of predicted objects for image i and B is the box dimension (4 or 5)
297
- """
298
- num_pred = len(self.proposals)
299
- B = self.proposals.tensor.shape[1]
300
- K = self.pred_proposal_deltas.shape[1] // B
301
- boxes = self.box2box_transform.apply_deltas(
302
- self.pred_proposal_deltas.view(num_pred * K, B),
303
- self.proposals.tensor.unsqueeze(1).expand(num_pred, K, B).reshape(-1, B),
304
- )
305
- return boxes.view(num_pred, K * B)
306
-
307
- """
308
- A subclass is expected to have the following methods because
309
- they are used to query information about the head predictions.
310
- """
311
-
312
- def losses(self):
313
- """
314
- Compute the default losses for box head in Fast(er) R-CNN,
315
- with softmax cross entropy loss and smooth L1 loss.
316
-
317
- Returns:
318
- A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg".
319
- """
320
- return {
321
- "loss_cls": self.softmax_cross_entropy_loss(),
322
- "loss_box_reg": self.smooth_l1_loss(),
323
- }
324
-
325
- def predict_boxes(self):
326
- """
327
- Returns:
328
- list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
329
- for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
330
- the number of predicted objects for image i and B is the box dimension (4 or 5)
331
- """
332
- return self._predict_boxes().split(self.num_preds_per_image, dim=0)
333
-
334
- def predict_boxes_for_gt_classes(self):
335
- """
336
- Returns:
337
- list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
338
- class-specific box head. Element i of the list has shape (Ri, B), where Ri is
339
- the number of predicted objects for image i and B is the box dimension (4 or 5)
340
- """
341
- predicted_boxes = self._predict_boxes()
342
- B = self.proposals.tensor.shape[1]
343
- # If the box head is class-agnostic, then the method is equivalent to `predicted_boxes`.
344
- if predicted_boxes.shape[1] > B:
345
- num_pred = len(self.proposals)
346
- num_classes = predicted_boxes.shape[1] // B
347
- # Some proposals are ignored or have a background class. Their gt_classes
348
- # cannot be used as index.
349
- gt_classes = torch.clamp(self.gt_classes, 0, num_classes - 1)
350
- predicted_boxes = predicted_boxes.view(num_pred, num_classes, B)[
351
- torch.arange(num_pred, dtype=torch.long, device=predicted_boxes.device), gt_classes
352
- ]
353
- return predicted_boxes.split(self.num_preds_per_image, dim=0)
354
-
355
- def predict_probs(self):
356
- """
357
- Returns:
358
- list[Tensor]: A list of Tensors of predicted class probabilities for each image.
359
- Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
360
- for image i.
361
- """
362
- probs = F.softmax(self.pred_class_logits, dim=-1)
363
- return probs.split(self.num_preds_per_image, dim=0)
364
-
365
- def inference(self, score_thresh, nms_thresh, topk_per_image):
366
- """
367
- Args:
368
- score_thresh (float): same as fast_rcnn_inference.
369
- nms_thresh (float): same as fast_rcnn_inference.
370
- topk_per_image (int): same as fast_rcnn_inference.
371
- Returns:
372
- list[Instances]: same as fast_rcnn_inference.
373
- list[Tensor]: same as fast_rcnn_inference.
374
- """
375
- boxes = self.predict_boxes()
376
- scores = self.predict_probs()
377
- image_shapes = self.image_shapes
378
-
379
- return fast_rcnn_inference(
380
- boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
381
- )
382
-
383
-
384
- class FastRCNNOutputLayers(nn.Module):
385
- """
386
- Two linear layers for predicting Fast R-CNN outputs:
387
- (1) proposal-to-detection box regression deltas
388
- (2) classification scores
389
- """
390
-
391
- @configurable
392
- def __init__(
393
- self,
394
- input_shape,
395
- box2box_transform,
396
- num_classes,
397
- cls_agnostic_bbox_reg=False,
398
- smooth_l1_beta=0.0,
399
- test_score_thresh=0.0,
400
- test_nms_thresh=0.5,
401
- test_topk_per_image=100,
402
- ):
403
- """
404
- Args:
405
- input_shape (ShapeSpec): shape of the input feature to this module
406
- box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
407
- num_classes (int): number of foreground classes
408
- cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
409
- smooth_l1_beta (float): transition point from L1 to L2 loss.
410
- test_score_thresh (float): threshold to filter predictions results.
411
- test_nms_thresh (float): NMS threshold for prediction results.
412
- test_topk_per_image (int): number of top predictions to produce per image.
413
- """
414
- super().__init__()
415
- if isinstance(input_shape, int): # some backward compatbility
416
- input_shape = ShapeSpec(channels=input_shape)
417
- input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)
418
- # The prediction layer for num_classes foreground classes and one background class
419
- # (hence + 1)
420
- self.cls_score = Linear(input_size, num_classes + 1)
421
- num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
422
- box_dim = len(box2box_transform.weights)
423
- self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)
424
-
425
- nn.init.normal_(self.cls_score.weight, std=0.01)
426
- nn.init.normal_(self.bbox_pred.weight, std=0.001)
427
- for l in [self.cls_score, self.bbox_pred]:
428
- nn.init.constant_(l.bias, 0)
429
-
430
- self.box2box_transform = box2box_transform
431
- self.smooth_l1_beta = smooth_l1_beta
432
- self.test_score_thresh = test_score_thresh
433
- self.test_nms_thresh = test_nms_thresh
434
- self.test_topk_per_image = test_topk_per_image
435
-
436
- @classmethod
437
- def from_config(cls, cfg, input_shape):
438
- return {
439
- "input_shape": input_shape,
440
- "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
441
- # fmt: off
442
- "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,
443
- "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
444
- "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
445
- "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
446
- "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
447
- "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE
448
- # fmt: on
449
- }
450
-
451
- def forward(self, x):
452
- """
453
- Returns:
454
- Tensor: Nx(K+1) scores for each box
455
- Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
456
- """
457
- if x.dim() > 2:
458
- x = torch.flatten(x, start_dim=1)
459
- scores = self.cls_score(x)
460
- proposal_deltas = self.bbox_pred(x)
461
- return scores, proposal_deltas
462
-
463
- # TODO: move the implementation to this class.
464
- def losses(self, predictions, proposals):
465
- """
466
- Args:
467
- predictions: return values of :meth:`forward()`.
468
- proposals (list[Instances]): proposals that match the features
469
- that were used to compute predictions.
470
- """
471
- scores, proposal_deltas = predictions
472
- return FastRCNNOutputs(
473
- self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
474
- ).losses()
475
-
476
- def inference(self, predictions, proposals):
477
- scores, proposal_deltas = predictions
478
- return FastRCNNOutputs(
479
- self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
480
- ).inference(self.test_score_thresh, self.test_nms_thresh, self.test_topk_per_image)
481
-
482
- def predict_boxes_for_gt_classes(self, predictions, proposals):
483
- scores, proposal_deltas = predictions
484
- return FastRCNNOutputs(
485
- self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
486
- ).predict_boxes_for_gt_classes()
487
-
488
- def predict_boxes(self, predictions, proposals):
489
- scores, proposal_deltas = predictions
490
- return FastRCNNOutputs(
491
- self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
492
- ).predict_boxes()
493
-
494
- def predict_probs(self, predictions, proposals):
495
- scores, proposal_deltas = predictions
496
- return FastRCNNOutputs(
497
- self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta
498
- ).predict_probs()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_config.py DELETED
@@ -1,240 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
-
5
- import os
6
- import tempfile
7
- import unittest
8
- import torch
9
-
10
- from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config
11
- from detectron2.layers import ShapeSpec
12
-
13
- _V0_CFG = """
14
- MODEL:
15
- RPN_HEAD:
16
- NAME: "TEST"
17
- VERSION: 0
18
- """
19
-
20
- _V1_CFG = """
21
- MODEL:
22
- WEIGHT: "/path/to/weight"
23
- """
24
-
25
-
26
- class TestConfigVersioning(unittest.TestCase):
27
- def test_upgrade_downgrade_consistency(self):
28
- cfg = get_cfg()
29
- # check that custom is preserved
30
- cfg.USER_CUSTOM = 1
31
-
32
- down = downgrade_config(cfg, to_version=0)
33
- up = upgrade_config(down)
34
- self.assertTrue(up == cfg)
35
-
36
- def _merge_cfg_str(self, cfg, merge_str):
37
- f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False)
38
- try:
39
- f.write(merge_str)
40
- f.close()
41
- cfg.merge_from_file(f.name)
42
- finally:
43
- os.remove(f.name)
44
- return cfg
45
-
46
- def test_auto_upgrade(self):
47
- cfg = get_cfg()
48
- latest_ver = cfg.VERSION
49
- cfg.USER_CUSTOM = 1
50
-
51
- self._merge_cfg_str(cfg, _V0_CFG)
52
-
53
- self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST")
54
- self.assertEqual(cfg.VERSION, latest_ver)
55
-
56
- def test_guess_v1(self):
57
- cfg = get_cfg()
58
- latest_ver = cfg.VERSION
59
- self._merge_cfg_str(cfg, _V1_CFG)
60
- self.assertEqual(cfg.VERSION, latest_ver)
61
-
62
-
63
- class _TestClassA(torch.nn.Module):
64
- @configurable
65
- def __init__(self, arg1, arg2, arg3=3):
66
- super().__init__()
67
- self.arg1 = arg1
68
- self.arg2 = arg2
69
- self.arg3 = arg3
70
- assert arg1 == 1
71
- assert arg2 == 2
72
- assert arg3 == 3
73
-
74
- @classmethod
75
- def from_config(cls, cfg):
76
- args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
77
- return args
78
-
79
-
80
- class _TestClassB(_TestClassA):
81
- @configurable
82
- def __init__(self, input_shape, arg1, arg2, arg3=3):
83
- """
84
- Doc of _TestClassB
85
- """
86
- assert input_shape == "shape"
87
- super().__init__(arg1, arg2, arg3)
88
-
89
- @classmethod
90
- def from_config(cls, cfg, input_shape): # test extra positional arg in from_config
91
- args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
92
- args["input_shape"] = input_shape
93
- return args
94
-
95
-
96
- class _LegacySubClass(_TestClassB):
97
- # an old subclass written in cfg style
98
- def __init__(self, cfg, input_shape, arg4=4):
99
- super().__init__(cfg, input_shape)
100
- assert self.arg1 == 1
101
- assert self.arg2 == 2
102
- assert self.arg3 == 3
103
-
104
-
105
- class _NewSubClassNewInit(_TestClassB):
106
- # test new subclass with a new __init__
107
- @configurable
108
- def __init__(self, input_shape, arg4=4, **kwargs):
109
- super().__init__(input_shape, **kwargs)
110
- assert self.arg1 == 1
111
- assert self.arg2 == 2
112
- assert self.arg3 == 3
113
-
114
-
115
- class _LegacySubClassNotCfg(_TestClassB):
116
- # an old subclass written in cfg style, but argument is not called "cfg"
117
- def __init__(self, config, input_shape):
118
- super().__init__(config, input_shape)
119
- assert self.arg1 == 1
120
- assert self.arg2 == 2
121
- assert self.arg3 == 3
122
-
123
-
124
- class _TestClassC(_TestClassB):
125
- @classmethod
126
- def from_config(cls, cfg, input_shape, **kwargs): # test extra kwarg overwrite
127
- args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
128
- args["input_shape"] = input_shape
129
- args.update(kwargs)
130
- return args
131
-
132
-
133
- class _TestClassD(_TestClassA):
134
- @configurable
135
- def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
136
- assert input_shape == "shape"
137
- super().__init__(arg1, arg2, arg3)
138
-
139
- # _TestClassA.from_config does not have input_shape args.
140
- # Test whether input_shape will be forwarded to __init__
141
-
142
-
143
- class TestConfigurable(unittest.TestCase):
144
- def testInitWithArgs(self):
145
- _ = _TestClassA(arg1=1, arg2=2, arg3=3)
146
- _ = _TestClassB("shape", arg1=1, arg2=2)
147
- _ = _TestClassC("shape", arg1=1, arg2=2)
148
- _ = _TestClassD("shape", arg1=1, arg2=2, arg3=3)
149
-
150
- def testPatchedAttr(self):
151
- self.assertTrue("Doc" in _TestClassB.__init__.__doc__)
152
- self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int)
153
-
154
- def testInitWithCfg(self):
155
- cfg = get_cfg()
156
- cfg.ARG1 = 1
157
- cfg.ARG2 = 2
158
- cfg.ARG3 = 3
159
- _ = _TestClassA(cfg)
160
- _ = _TestClassB(cfg, input_shape="shape")
161
- _ = _TestClassC(cfg, input_shape="shape")
162
- _ = _TestClassD(cfg, input_shape="shape")
163
- _ = _LegacySubClass(cfg, input_shape="shape")
164
- _ = _NewSubClassNewInit(cfg, input_shape="shape")
165
- _ = _LegacySubClassNotCfg(cfg, input_shape="shape")
166
- with self.assertRaises(TypeError):
167
- # disallow forwarding positional args to __init__ since it's prone to errors
168
- _ = _TestClassD(cfg, "shape")
169
-
170
- # call with kwargs instead
171
- _ = _TestClassA(cfg=cfg)
172
- _ = _TestClassB(cfg=cfg, input_shape="shape")
173
- _ = _TestClassC(cfg=cfg, input_shape="shape")
174
- _ = _TestClassD(cfg=cfg, input_shape="shape")
175
- _ = _LegacySubClass(cfg=cfg, input_shape="shape")
176
- _ = _NewSubClassNewInit(cfg=cfg, input_shape="shape")
177
- _ = _LegacySubClassNotCfg(config=cfg, input_shape="shape")
178
-
179
- def testInitWithCfgOverwrite(self):
180
- cfg = get_cfg()
181
- cfg.ARG1 = 1
182
- cfg.ARG2 = 999 # wrong config
183
- with self.assertRaises(AssertionError):
184
- _ = _TestClassA(cfg, arg3=3)
185
-
186
- # overwrite arg2 with correct config later:
187
- _ = _TestClassA(cfg, arg2=2, arg3=3)
188
- _ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3)
189
- _ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3)
190
- _ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3)
191
-
192
- # call with kwargs cfg=cfg instead
193
- _ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
194
- _ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
195
- _ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
196
- _ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
197
-
198
- def testInitWithCfgWrongArgs(self):
199
- cfg = get_cfg()
200
- cfg.ARG1 = 1
201
- cfg.ARG2 = 2
202
- with self.assertRaises(TypeError):
203
- _ = _TestClassB(cfg, "shape", not_exist=1)
204
- with self.assertRaises(TypeError):
205
- _ = _TestClassC(cfg, "shape", not_exist=1)
206
- with self.assertRaises(TypeError):
207
- _ = _TestClassD(cfg, "shape", not_exist=1)
208
-
209
- def testBadClass(self):
210
- class _BadClass1:
211
- @configurable
212
- def __init__(self, a=1, b=2):
213
- pass
214
-
215
- class _BadClass2:
216
- @configurable
217
- def __init__(self, a=1, b=2):
218
- pass
219
-
220
- def from_config(self, cfg): # noqa
221
- pass
222
-
223
- class _BadClass3:
224
- @configurable
225
- def __init__(self, a=1, b=2):
226
- pass
227
-
228
- # bad name: must be cfg
229
- @classmethod
230
- def from_config(cls, config): # noqa
231
- pass
232
-
233
- with self.assertRaises(AttributeError):
234
- _ = _BadClass1(a=1)
235
-
236
- with self.assertRaises(TypeError):
237
- _ = _BadClass2(a=1)
238
-
239
- with self.assertRaises(TypeError):
240
- _ = _BadClass3(get_cfg())
spaces/CVPR/LIVE/pybind11/tests/test_operator_overloading.cpp DELETED
@@ -1,226 +0,0 @@
1
- /*
2
- tests/test_operator_overloading.cpp -- operator overloading
3
-
4
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include "constructor_stats.h"
12
- #include <pybind11/operators.h>
13
- #include <functional>
14
-
15
- class Vector2 {
16
- public:
17
- Vector2(float x, float y) : x(x), y(y) { print_created(this, toString()); }
18
- Vector2(const Vector2 &v) : x(v.x), y(v.y) { print_copy_created(this); }
19
- Vector2(Vector2 &&v) : x(v.x), y(v.y) { print_move_created(this); v.x = v.y = 0; }
20
- Vector2 &operator=(const Vector2 &v) { x = v.x; y = v.y; print_copy_assigned(this); return *this; }
21
- Vector2 &operator=(Vector2 &&v) { x = v.x; y = v.y; v.x = v.y = 0; print_move_assigned(this); return *this; }
22
- ~Vector2() { print_destroyed(this); }
23
-
24
- std::string toString() const { return "[" + std::to_string(x) + ", " + std::to_string(y) + "]"; }
25
-
26
- Vector2 operator-() const { return Vector2(-x, -y); }
27
- Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); }
28
- Vector2 operator-(const Vector2 &v) const { return Vector2(x - v.x, y - v.y); }
29
- Vector2 operator-(float value) const { return Vector2(x - value, y - value); }
30
- Vector2 operator+(float value) const { return Vector2(x + value, y + value); }
31
- Vector2 operator*(float value) const { return Vector2(x * value, y * value); }
32
- Vector2 operator/(float value) const { return Vector2(x / value, y / value); }
33
- Vector2 operator*(const Vector2 &v) const { return Vector2(x * v.x, y * v.y); }
34
- Vector2 operator/(const Vector2 &v) const { return Vector2(x / v.x, y / v.y); }
35
- Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; }
36
- Vector2& operator-=(const Vector2 &v) { x -= v.x; y -= v.y; return *this; }
37
- Vector2& operator*=(float v) { x *= v; y *= v; return *this; }
38
- Vector2& operator/=(float v) { x /= v; y /= v; return *this; }
39
- Vector2& operator*=(const Vector2 &v) { x *= v.x; y *= v.y; return *this; }
40
- Vector2& operator/=(const Vector2 &v) { x /= v.x; y /= v.y; return *this; }
41
-
42
- friend Vector2 operator+(float f, const Vector2 &v) { return Vector2(f + v.x, f + v.y); }
43
- friend Vector2 operator-(float f, const Vector2 &v) { return Vector2(f - v.x, f - v.y); }
44
- friend Vector2 operator*(float f, const Vector2 &v) { return Vector2(f * v.x, f * v.y); }
45
- friend Vector2 operator/(float f, const Vector2 &v) { return Vector2(f / v.x, f / v.y); }
46
-
47
- bool operator==(const Vector2 &v) const {
48
- return x == v.x && y == v.y;
49
- }
50
- bool operator!=(const Vector2 &v) const {
51
- return x != v.x || y != v.y;
52
- }
53
- private:
54
- float x, y;
55
- };
56
-
57
- class C1 { };
58
- class C2 { };
59
-
60
- int operator+(const C1 &, const C1 &) { return 11; }
61
- int operator+(const C2 &, const C2 &) { return 22; }
62
- int operator+(const C2 &, const C1 &) { return 21; }
63
- int operator+(const C1 &, const C2 &) { return 12; }
64
-
65
- // Note: Specializing explicit within `namespace std { ... }` is done due to a
66
- // bug in GCC<7. If you are supporting compilers later than this, consider
67
- // specializing `using template<> struct std::hash<...>` in the global
68
- // namespace instead, per this recommendation:
69
- // https://en.cppreference.com/w/cpp/language/extending_std#Adding_template_specializations
70
- namespace std {
71
- template<>
72
- struct hash<Vector2> {
73
- // Not a good hash function, but easy to test
74
- size_t operator()(const Vector2 &) { return 4; }
75
- };
76
- }
77
-
78
- // Not a good abs function, but easy to test.
79
- std::string abs(const Vector2&) {
80
- return "abs(Vector2)";
81
- }
82
-
83
- // MSVC warns about unknown pragmas, and warnings are errors.
84
- #ifndef _MSC_VER
85
- #pragma GCC diagnostic push
86
- // clang 7.0.0 and Apple LLVM 10.0.1 introduce `-Wself-assign-overloaded` to
87
- // `-Wall`, which is used here for overloading (e.g. `py::self += py::self `).
88
- // Here, we suppress the warning using `#pragma diagnostic`.
89
- // Taken from: https://github.com/RobotLocomotion/drake/commit/aaf84b46
90
- // TODO(eric): This could be resolved using a function / functor (e.g. `py::self()`).
91
- #if (__APPLE__) && (__clang__)
92
- #if (__clang_major__ >= 10) && (__clang_minor__ >= 0) && (__clang_patchlevel__ >= 1)
93
- #pragma GCC diagnostic ignored "-Wself-assign-overloaded"
94
- #endif
95
- #elif (__clang__)
96
- #if (__clang_major__ >= 7)
97
- #pragma GCC diagnostic ignored "-Wself-assign-overloaded"
98
- #endif
99
- #endif
100
- #endif
101
-
102
- TEST_SUBMODULE(operators, m) {
103
-
104
- // test_operator_overloading
105
- py::class_<Vector2>(m, "Vector2")
106
- .def(py::init<float, float>())
107
- .def(py::self + py::self)
108
- .def(py::self + float())
109
- .def(py::self - py::self)
110
- .def(py::self - float())
111
- .def(py::self * float())
112
- .def(py::self / float())
113
- .def(py::self * py::self)
114
- .def(py::self / py::self)
115
- .def(py::self += py::self)
116
- .def(py::self -= py::self)
117
- .def(py::self *= float())
118
- .def(py::self /= float())
119
- .def(py::self *= py::self)
120
- .def(py::self /= py::self)
121
- .def(float() + py::self)
122
- .def(float() - py::self)
123
- .def(float() * py::self)
124
- .def(float() / py::self)
125
- .def(-py::self)
126
- .def("__str__", &Vector2::toString)
127
- .def("__repr__", &Vector2::toString)
128
- .def(py::self == py::self)
129
- .def(py::self != py::self)
130
- .def(py::hash(py::self))
131
- // N.B. See warning about usage of `py::detail::abs(py::self)` in
132
- // `operators.h`.
133
- .def("__abs__", [](const Vector2& v) { return abs(v); })
134
- ;
135
-
136
- m.attr("Vector") = m.attr("Vector2");
137
-
138
- // test_operators_notimplemented
139
- // #393: need to return NotSupported to ensure correct arithmetic operator behavior
140
- py::class_<C1>(m, "C1")
141
- .def(py::init<>())
142
- .def(py::self + py::self);
143
-
144
- py::class_<C2>(m, "C2")
145
- .def(py::init<>())
146
- .def(py::self + py::self)
147
- .def("__add__", [](const C2& c2, const C1& c1) { return c2 + c1; })
148
- .def("__radd__", [](const C2& c2, const C1& c1) { return c1 + c2; });
149
-
150
- // test_nested
151
- // #328: first member in a class can't be used in operators
152
- struct NestABase { int value = -2; };
153
- py::class_<NestABase>(m, "NestABase")
154
- .def(py::init<>())
155
- .def_readwrite("value", &NestABase::value);
156
-
157
- struct NestA : NestABase {
158
- int value = 3;
159
- NestA& operator+=(int i) { value += i; return *this; }
160
- };
161
- py::class_<NestA>(m, "NestA")
162
- .def(py::init<>())
163
- .def(py::self += int())
164
- .def("as_base", [](NestA &a) -> NestABase& {
165
- return (NestABase&) a;
166
- }, py::return_value_policy::reference_internal);
167
- m.def("get_NestA", [](const NestA &a) { return a.value; });
168
-
169
- struct NestB {
170
- NestA a;
171
- int value = 4;
172
- NestB& operator-=(int i) { value -= i; return *this; }
173
- };
174
- py::class_<NestB>(m, "NestB")
175
- .def(py::init<>())
176
- .def(py::self -= int())
177
- .def_readwrite("a", &NestB::a);
178
- m.def("get_NestB", [](const NestB &b) { return b.value; });
179
-
180
- struct NestC {
181
- NestB b;
182
- int value = 5;
183
- NestC& operator*=(int i) { value *= i; return *this; }
184
- };
185
- py::class_<NestC>(m, "NestC")
186
- .def(py::init<>())
187
- .def(py::self *= int())
188
- .def_readwrite("b", &NestC::b);
189
- m.def("get_NestC", [](const NestC &c) { return c.value; });
190
-
191
-
192
- // test_overriding_eq_reset_hash
193
- // #2191 Overriding __eq__ should set __hash__ to None
194
- struct Comparable {
195
- int value;
196
- bool operator==(const Comparable& rhs) const {return value == rhs.value;}
197
- };
198
-
199
- struct Hashable : Comparable {
200
- explicit Hashable(int value): Comparable{value}{};
201
- size_t hash() const { return static_cast<size_t>(value); }
202
- };
203
-
204
- struct Hashable2 : Hashable {
205
- using Hashable::Hashable;
206
- };
207
-
208
- py::class_<Comparable>(m, "Comparable")
209
- .def(py::init<int>())
210
- .def(py::self == py::self);
211
-
212
- py::class_<Hashable>(m, "Hashable")
213
- .def(py::init<int>())
214
- .def(py::self == py::self)
215
- .def("__hash__", &Hashable::hash);
216
-
217
- // define __hash__ before __eq__
218
- py::class_<Hashable2>(m, "Hashable2")
219
- .def("__hash__", &Hashable::hash)
220
- .def(py::init<int>())
221
- .def(py::self == py::self);
222
- }
223
-
224
- #ifndef _MSC_VER
225
- #pragma GCC diagnostic pop
226
- #endif
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/remove.h DELETED
@@ -1,202 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file remove.h
19
- * \brief Sequential implementations of remove functions.
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/function.h>
26
- #include <thrust/system/detail/sequential/execution_policy.h>
27
-
28
- namespace thrust
29
- {
30
- namespace system
31
- {
32
- namespace detail
33
- {
34
- namespace sequential
35
- {
36
-
37
-
38
- __thrust_exec_check_disable__
39
- template<typename DerivedPolicy,
40
- typename ForwardIterator,
41
- typename Predicate>
42
- __host__ __device__
43
- ForwardIterator remove_if(sequential::execution_policy<DerivedPolicy> &,
44
- ForwardIterator first,
45
- ForwardIterator last,
46
- Predicate pred)
47
- {
48
- // wrap pred
49
- thrust::detail::wrapped_function<
50
- Predicate,
51
- bool
52
- > wrapped_pred(pred);
53
-
54
- // advance iterators until wrapped_pred(*first) is true or we reach the end of input
55
- while(first != last && !wrapped_pred(*first))
56
- ++first;
57
-
58
- if(first == last)
59
- return first;
60
-
61
- // result always trails first
62
- ForwardIterator result = first;
63
-
64
- ++first;
65
-
66
- while(first != last)
67
- {
68
- if(!wrapped_pred(*first))
69
- {
70
- *result = *first;
71
- ++result;
72
- }
73
- ++first;
74
- }
75
-
76
- return result;
77
- }
78
-
79
-
80
- __thrust_exec_check_disable__
81
- template<typename DerivedPolicy,
82
- typename ForwardIterator,
83
- typename InputIterator,
84
- typename Predicate>
85
- __host__ __device__
86
- ForwardIterator remove_if(sequential::execution_policy<DerivedPolicy> &,
87
- ForwardIterator first,
88
- ForwardIterator last,
89
- InputIterator stencil,
90
- Predicate pred)
91
- {
92
- // wrap pred
93
- thrust::detail::wrapped_function<
94
- Predicate,
95
- bool
96
- > wrapped_pred(pred);
97
-
98
- // advance iterators until wrapped_pred(*stencil) is true or we reach the end of input
99
- while(first != last && !wrapped_pred(*stencil))
100
- {
101
- ++first;
102
- ++stencil;
103
- }
104
-
105
- if(first == last)
106
- return first;
107
-
108
- // result always trails first
109
- ForwardIterator result = first;
110
-
111
- ++first;
112
- ++stencil;
113
-
114
- while(first != last)
115
- {
116
- if(!wrapped_pred(*stencil))
117
- {
118
- *result = *first;
119
- ++result;
120
- }
121
- ++first;
122
- ++stencil;
123
- }
124
-
125
- return result;
126
- }
127
-
128
-
129
- __thrust_exec_check_disable__
130
- template<typename DerivedPolicy,
131
- typename InputIterator,
132
- typename OutputIterator,
133
- typename Predicate>
134
- __host__ __device__
135
- OutputIterator remove_copy_if(sequential::execution_policy<DerivedPolicy> &,
136
- InputIterator first,
137
- InputIterator last,
138
- OutputIterator result,
139
- Predicate pred)
140
- {
141
- // wrap pred
142
- thrust::detail::wrapped_function<
143
- Predicate,
144
- bool
145
- > wrapped_pred(pred);
146
-
147
- while (first != last)
148
- {
149
- if (!wrapped_pred(*first))
150
- {
151
- *result = *first;
152
- ++result;
153
- }
154
-
155
- ++first;
156
- }
157
-
158
- return result;
159
- }
160
-
161
-
162
- __thrust_exec_check_disable__
163
- template<typename DerivedPolicy,
164
- typename InputIterator1,
165
- typename InputIterator2,
166
- typename OutputIterator,
167
- typename Predicate>
168
- __host__ __device__
169
- OutputIterator remove_copy_if(sequential::execution_policy<DerivedPolicy> &,
170
- InputIterator1 first,
171
- InputIterator1 last,
172
- InputIterator2 stencil,
173
- OutputIterator result,
174
- Predicate pred)
175
- {
176
- // wrap pred
177
- thrust::detail::wrapped_function<
178
- Predicate,
179
- bool
180
- > wrapped_pred(pred);
181
-
182
- while (first != last)
183
- {
184
- if (!wrapped_pred(*stencil))
185
- {
186
- *result = *first;
187
- ++result;
188
- }
189
-
190
- ++first;
191
- ++stencil;
192
- }
193
-
194
- return result;
195
- }
196
-
197
-
198
- } // end namespace sequential
199
- } // end namespace detail
200
- } // end namespace system
201
- } // end namespace thrust
202
-
spaces/Cecil8352/vits-models/text/__init__.py DELETED
@@ -1,57 +0,0 @@
- """ from https://github.com/keithito/tacotron """
- from text import cleaners
- from text.symbols import symbols
-
-
- # Mappings from symbol to numeric ID and vice versa:
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- _id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
- def text_to_sequence(text, symbols, cleaner_names):
-   '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-     Args:
-       text: string to convert to a sequence
-       cleaner_names: names of the cleaner functions to run the text through
-     Returns:
-       List of integers corresponding to the symbols in the text
-   '''
-   _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-   sequence = []
-
-   clean_text = _clean_text(text, cleaner_names)
-   for symbol in clean_text:
-     if symbol not in _symbol_to_id.keys():
-       continue
-     symbol_id = _symbol_to_id[symbol]
-     sequence += [symbol_id]
-   return sequence, clean_text
-
-
- def cleaned_text_to_sequence(cleaned_text):
-   '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-     Args:
-       text: string to convert to a sequence
-     Returns:
-       List of integers corresponding to the symbols in the text
-   '''
-   sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
-   return sequence
-
-
- def sequence_to_text(sequence):
-   '''Converts a sequence of IDs back to a string'''
-   result = ''
-   for symbol_id in sequence:
-     s = _id_to_symbol[symbol_id]
-     result += s
-   return result
-
-
- def _clean_text(text, cleaner_names):
-   for name in cleaner_names:
-     cleaner = getattr(cleaners, name)
-     if not cleaner:
-       raise Exception('Unknown cleaner: %s' % name)
-     text = cleaner(text)
-   return text
spaces/Chomkwoy/Nilkessye/cpool_new/src/right_pool.cpp DELETED
@@ -1,91 +0,0 @@
- // #include <torch/torch.h>
- #include <torch/extension.h>
-
- #include <vector>
-
- std::vector<torch::Tensor> pool_forward(
-     torch::Tensor input
- ) {
-     // Initialize output
-     torch::Tensor output = torch::zeros_like(input);
-
-     // Get width
-     int64_t width = input.size(3);
-
-     // Copy the last column
-     torch::Tensor input_temp = input.select(3, 0);
-     torch::Tensor output_temp = output.select(3, 0);
-     output_temp.copy_(input_temp);
-
-     torch::Tensor max_temp;
-     for (int64_t ind = 0; ind < width - 1; ++ind) {
-         input_temp = input.select(3, ind + 1);
-         output_temp = output.select(3, ind);
-         max_temp = output.select(3, ind + 1);
-
-         torch::max_out(max_temp, input_temp, output_temp);
-     }
-
-     return {
-         output
-     };
- }
-
- std::vector<torch::Tensor> pool_backward(
-     torch::Tensor input,
-     torch::Tensor grad_output
- ) {
-     torch::Tensor output = torch::zeros_like(input);
-
-     int32_t batch = input.size(0);
-     int32_t channel = input.size(1);
-     int32_t height = input.size(2);
-     int32_t width = input.size(3);
-
-     // auto max_val = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, height});
-     // auto max_ind = torch::zeros(torch::CUDA(torch::kLong), {batch, channel, height});
-     auto max_val = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
-     auto max_ind = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA));
-
-     auto input_temp = input.select(3, 0);
-     max_val.copy_(input_temp);
-
-     max_ind.fill_(0);
-
-     auto output_temp = output.select(3, 0);
-     auto grad_output_temp = grad_output.select(3, 0);
-     output_temp.copy_(grad_output_temp);
-
-     auto un_max_ind = max_ind.unsqueeze(3);
-     // auto gt_mask = torch::zeros(torch::CUDA(torch::kByte), {batch, channel, height});
-     // auto max_temp = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, height});
-     auto gt_mask = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA));
-     auto max_temp = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
-
-     for (int32_t ind = 0; ind < width - 1; ++ind) {
-         input_temp = input.select(3, ind + 1);
-         torch::gt_out(gt_mask, input_temp, max_val);
-
-         torch::masked_select_out(max_temp, input_temp, gt_mask);
-         max_val.masked_scatter_(gt_mask, max_temp);
-         max_ind.masked_fill_(gt_mask, ind + 1);
-
-         grad_output_temp = grad_output.select(3, ind + 1).unsqueeze(3);
-         output.scatter_add_(3, un_max_ind, grad_output_temp);
-     }
-
-     return {
-         output
-     };
- }
-
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-     m.def(
-         "forward", &pool_forward, "Right Pool Forward",
-         py::call_guard<py::gil_scoped_release>()
-     );
-     m.def(
-         "backward", &pool_backward, "Right Pool Backward",
-         py::call_guard<py::gil_scoped_release>()
-     );
- }
spaces/CikeyQI/Yunzai/Yunzai/plugins/adapter/GSUIDCore.js DELETED
@@ -1,249 +0,0 @@
1
- import { randomUUID } from "crypto"
2
- import path from "node:path"
3
- import fs from "node:fs"
4
-
5
- Bot.adapter.push(new class GSUIDCoreAdapter {
6
- constructor() {
7
- this.id = "GSUIDCore"
8
- this.name = "早柚核心"
9
- this.path = this.id
10
- }
11
-
12
- toStr(data) {
13
- switch (typeof data) {
14
- case "string":
15
- return data
16
- case "number":
17
- return String(data)
18
- case "object":
19
- if (Buffer.isBuffer(data))
20
- return Buffer.from(data, "utf8").toString()
21
- else
22
- return JSON.stringify(data)
23
- }
24
- return data
25
- }
26
-
27
- makeLog(msg) {
28
- return this.toStr(msg).replace(/base64:\/\/.*?"/g, "base64://...\"")
29
- }
30
-
31
- makeMsg(msg) {
32
- if (!Array.isArray(msg))
33
- msg = [msg]
34
- const msgs = []
35
- for (let i of msg) {
36
- if (typeof i != "object")
37
- i = { type: "text", text: i }
38
-
39
- switch (i.type) {
40
- case "text":
41
- i = { type: "text", data: i.text }
42
- break
43
- case "image":
44
- i = { type: "image", data: i.file }
45
- break
46
- case "record":
47
- i = { type: "file", data: i.file }
48
- break
49
- case "video":
50
- i = { type: "file", data: i.file }
51
- break
52
- case "file":
53
- i = { type: "file", data: i.file }
54
- break
55
- case "at":
56
- i = { type: "at", data: i.qq }
57
- break
58
- case "reply":
59
- i = { type: "reply", data: i.id }
60
- break
61
- case "node": {
62
- const array = []
63
- for (const { message } of i.data)
64
- array.push(...this.makeMsg(message))
65
- i.data = array
66
- break
67
- } default:
68
- i = { type: "text", data: JSON.stringify(i) }
69
- }
70
- msgs.push(i)
71
- }
72
- return msgs
73
- }
74
-
75
- sendFriendMsg(data, msg) {
76
- const content = this.makeMsg(msg)
77
- logger.info(`${logger.blue(`[${data.self_id} => ${data.user_id}]`)} 发送好友消息:${this.makeLog(content)}`)
78
- data.bot.sendApi({
79
- bot_id: data.bot.bot_id,
80
- bot_self_id: data.bot.bot_self_id,
81
- target_type: "direct",
82
- target_id: data.user_id,
83
- content,
84
- })
85
- return { message_id: Date.now() }
86
- }
87
-
88
- sendGroupMsg(data, msg) {
89
- const target = data.group_id.split("-")
90
- const content = this.makeMsg(msg)
91
- logger.info(`${logger.blue(`[${data.self_id} => ${data.group_id}]`)} 发送群消息:${this.makeLog(content)}`)
92
- data.bot.sendApi({
93
- bot_id: data.bot.bot_id,
94
- bot_self_id: data.bot.bot_self_id,
95
- target_type: target[0],
96
- target_id: target[1],
97
- content,
98
- })
99
- return { message_id: Date.now() }
100
- }
101
-
102
- pickFriend(id, user_id) {
103
- const i = {
104
- ...Bot[id].fl.get(user_id),
105
- self_id: id,
106
- bot: Bot[id],
107
- user_id: user_id,
108
- }
109
- return {
110
- ...i,
111
- sendMsg: msg => this.sendFriendMsg(i, msg),
112
- }
113
- }
114
-
115
- pickMember(id, group_id, user_id) {
116
- const i = {
117
- ...Bot[id].fl.get(user_id),
118
- self_id: id,
119
- bot: Bot[id],
120
- group_id: group_id,
121
- user_id: user_id,
122
- }
123
- return {
124
- ...this.pickFriend(id, user_id),
125
- ...i,
126
- }
127
- }
128
-
129
- pickGroup(id, group_id) {
130
- const i = {
131
- ...Bot[id].gl.get(group_id),
132
- self_id: id,
133
- bot: Bot[id],
134
- group_id: group_id,
135
- }
136
- return {
137
- ...i,
138
- sendMsg: msg => this.sendGroupMsg(i, msg),
139
- pickMember: user_id => this.pickMember(id, group_id, user_id),
140
- }
141
- }
142
-
143
- makeBot(data, ws) {
144
- Bot[data.self_id] = {
145
- adapter: this,
146
- ws: ws,
147
- get sendApi() { return this.ws.sendMsg },
148
- uin: data.self_id,
149
- bot_id: data.bot_id,
150
- bot_self_id: data.bot_self_id,
151
- stat: { start_time: Date.now()/1000 },
152
- version: {
153
- id: this.id,
154
- name: this.name,
155
- },
156
- pickFriend: user_id => this.pickFriend(data.self_id, user_id),
157
- get pickUser() { return this.pickFriend },
158
- pickMember: (group_id, user_id) => this.pickMember(data.self_id, group_id, user_id),
159
- pickGroup: group_id => this.pickGroup(data.self_id, group_id),
160
- fl: new Map,
161
- gl: new Map,
162
- gml: new Map,
163
- }
164
-
165
- logger.mark(`${logger.blue(`[${data.self_id}]`)} ${this.name}(${this.id}) 已连接`)
166
- Bot.em(`connect.${data.self_id}`, data)
167
- }
168
-
169
- message(data, ws) {
170
- try {
171
- data = JSON.parse(data)
172
- } catch (err) {
173
- return logger.error(`解码数据失败:${logger.red(err)}`)
174
- }
175
-
176
- data.self_id = data.bot_self_id
177
- if (Bot[data.self_id]) {
178
- data.bot = Bot[data.self_id]
179
- data.bot.ws = ws
180
- } else {
181
- this.makeBot(data, ws)
182
- }
183
-
184
- data.post_type = "message"
185
- data.message_id = data.msg_id
186
- data.user_id = data.user_id
187
- data.sender = {
188
- user_id: data.user_id,
189
- user_pm: data.user_pm,
190
- }
191
- if (!data.bot.fl.has(data.user_id))
192
- data.bot.fl.set(data.user_id, data.sender)
193
-
194
- data.message = []
195
- data.raw_message = ""
196
- for (const i of data.content) {
197
- switch (i.type) {
198
- case "text":
199
- data.message.push({ type: "text", text: i.data })
200
- data.raw_message += i.data
201
- break
202
- case "image":
203
- data.message.push({ type: "image", url: i.data })
204
- data.raw_message += `[图片:${i.data}]`
205
- break
206
- case "file":
207
- data.message.push({ type: "file", url: i.data })
208
- data.raw_message += `[文件:${i.data}]`
209
- break
210
- case "at":
211
- data.message.push({ type: "at", qq: i.data })
212
- data.raw_message += `[提及:${i.data}]`
213
- break
214
- case "reply":
215
- data.message.push({ type: "reply", id: i.data })
216
- data.raw_message += `[回复:${i.data}]`
217
- break
218
- case "node":
219
- data.message.push({ type: "node", data: i.data })
220
- data.raw_message += `[合并转发:${JSON.stringify(i.data)}]`
221
- break
222
- default:
223
- data.message.push(i)
224
- data.raw_message += JSON.stringify(i)
225
- }
226
- }
227
-
228
- if (data.user_type == "direct") {
229
- data.message_type = "private"
230
- logger.info(`${logger.blue(`[${data.self_id}]`)} 好友消息:[${data.user_id}] ${data.raw_message}`)
231
- } else {
232
- data.message_type = "group"
233
- data.group_id = `${data.user_type}-${data.group_id}`
234
- if (!data.bot.gl.has(data.group_id))
235
- data.bot.gl.set(data.group_id, { group_id: data.group_id })
236
- logger.info(`${logger.blue(`[${data.self_id}]`)} 群消息:[${data.group_id}, ${data.user_id}] ${data.raw_message}`)
237
- }
238
-
239
- Bot.em(`${data.post_type}.${data.message_type}`, data)
240
- }
241
-
242
- load() {
243
- if (!Array.isArray(Bot.wsf[this.path]))
244
- Bot.wsf[this.path] = []
245
- Bot.wsf[this.path].push((ws, ...args) =>
246
- ws.on("message", data => this.message(data, ws, ...args))
247
- )
248
- }
249
- })
spaces/CikeyQI/meme-api/meme_generator/memes/call_110/__init__.py DELETED
@@ -1,20 +0,0 @@
- from typing import List
-
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
-
-
- def call_110(images: List[BuildImage], texts, args):
-     img1 = images[0].convert("RGBA").square().resize((250, 250))
-     img0 = images[1].convert("RGBA").square().resize((250, 250))
-
-     frame = BuildImage.new("RGB", (900, 500), "white")
-     frame.draw_text((0, 0, 900, 200), "遇到困难请拨打", max_fontsize=100)
-     frame.paste(img1, (50, 200), alpha=True)
-     frame.paste(img1, (325, 200), alpha=True)
-     frame.paste(img0, (600, 200), alpha=True)
-     return frame.save_jpg()
-
-
- add_meme("call_110", call_110, min_images=2, max_images=2, keywords=["遇到困难请拨打"])
spaces/CognitiveLabs/Research-Assistant/config/singleton.py DELETED
@@ -1,24 +0,0 @@
- """The singleton metaclass for ensuring only one instance of a class."""
- import abc
-
-
- class Singleton(abc.ABCMeta, type):
-     """
-     Singleton metaclass for ensuring only one instance of a class.
-     """
-
-     _instances = {}
-
-     def __call__(cls, *args, **kwargs):
-         """Call method for the singleton metaclass."""
-         if cls not in cls._instances:
-             cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
-         return cls._instances[cls]
-
-
- class AbstractSingleton(abc.ABC, metaclass=Singleton):
-     """
-     Abstract singleton class for ensuring only one instance of a class.
-     """
-
-     pass
spaces/Cpp4App/Cpp4App/CDM/detect_classify/classification.py DELETED
@@ -1,380 +0,0 @@
1
- from CDM.detect_merge.Element import Element
2
- import CDM.detect_compo.lib_ip.ip_preprocessing as pre
3
- import time
4
- import cv2
5
- import torch
6
- import numpy as np
7
- from torchvision import models
8
- from torch import nn
9
- import pandas as pd
10
- import re
11
- import openai
12
- import random
13
- import os
14
- from CDM.detect_merge.merge import reassign_ids
15
- import CDM.detect_merge.merge as merge
16
- from os.path import join as pjoin, exists
17
-
18
- label_dic ={'72':'Location', '42':'Photos', '77':'Social media', '91':'Voices', '6':'Email', '89':'Social media', '40':'Location', '43':'Phone', '82':'Photos',
19
- '3':'Contacts', '68':'Contacts', '49':'Profile', '56':'Photos'}
20
-
21
- keyword_list = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
22
- 'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday', 'birth year'],
23
- 'Address':['mailing address', 'physical address', 'postal address', 'billing address', 'shipping address', 'delivery address', 'residence', 'collect address', 'personal address', 'residential address'],
24
- 'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
25
- 'Email':['email', 'e-mail', 'email address', 'e-mail address'],
26
- 'Contacts':['contacts', 'phone-book', 'phone book', 'phonebook', 'contact list', 'phone contacts', 'address book'],
27
- 'Location':['location', 'locate', 'geography', 'geo', 'geo-location', 'precision location', 'nearby'],
28
- 'Photos':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video', 'scanner', 'photograph'],
29
- 'Voices':['microphone', 'voice', 'mic', 'speech', 'talk'],
30
- 'Financial info':['credit card', 'pay', 'payment', 'debit card', 'mastercard', 'wallet'],
31
- 'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
32
- 'Cookies':['cookies', 'cookie'],
33
- 'Social media':['facebook', 'twitter', 'socialmedia', 'social media'],
34
- 'Profile':['profile', 'account'],
35
- 'Gender':['gender']}
36
-
37
- def get_data_type(sentence, keywords, use_gpt=True):
38
-
39
- sent_data_type = "others"
40
-
41
- if use_gpt:
42
- openai.api_key = os.environ["OPENAI_API_KEY"]
43
-
44
- prompt = f"Is this piece of texts \"{sentence}\" related to any following privacy information data types? Or not relevant to any of them? ONLY answer the data type or \"not relevant\". ONLY use following data type list. Data types and their Description:\n" \
45
- f"Name: How a user refers to themselves," \
46
- f" Birthday: A user’s birthday," \
47
- f" Address: A user’s address," \
48
- f" Phone: A user’s phone number," \
49
- f" Email: A user’s email address," \
50
- f" Contacts: A user’s contact information, or the access to the contact permission," \
51
- f" Location: A user’s location information, or the access to the location permission," \
52
- f" Photos: A user’s photos, videos, or the access to the camera permission," \
53
- f" Voices: A user’s voices, recordings, or the access to the microphone permission," \
54
- f" Financial Info: Information about a user’s financial accounts, purchases, or transactions," \
55
- f" Profile: A user’s account information," \
56
- f"Social Media: A user's social media information, or the access to social media accounts"
57
-
58
- response = openai.ChatCompletion.create(
59
- # engine="text-davinci-002",
60
- model="gpt-3.5-turbo",
61
- messages=[
62
- # {"role": "system", "content": "You are a helpful assistant."},
63
- {"role": "user", "content": prompt}
64
- ],
65
- max_tokens=100,
66
- n=1,
67
- stop=None,
68
- temperature=0,
69
- )
70
-
71
- # response_full_text = response.choices[0].text.strip()
72
- response_full_text = response.choices[0].message['content']
73
- for k in keywords.keys():
74
- if k == "Financial info" or k == "Social media":
75
- if k.lower() in response_full_text.lower():
76
- sent_data_type = k
77
- break
78
- else:
79
- words = re.split(r'\W+', response_full_text.lower())
80
- if k.lower() in words:
81
- sent_data_type = k
82
- break
83
-
84
- # print("----------------------")
85
- # print("sentence: ", sentence)
86
- # print("prompt: ", prompt)
87
- # print("response: ", response_full_text)
88
- # print("sent_data_type: ", sent_data_type)
89
-
90
- else:
91
- for k in keywords.keys():
92
- for w in keywords[k]:
93
- words = re.split(r'\W+', sentence.lower())
94
- if w.lower() in words:
95
- sent_data_type = k
96
- break
97
- if sent_data_type != "others":
98
- break
99
-
100
- return sent_data_type
101
-
102
- # def get_clf_model(use_resnet18=True, use_gpu=False):
103
- #
104
- # device = 'cpu'
105
- # if use_gpu:
106
- # device = 'cuda:0'
107
- #
108
- # if use_resnet18:
109
- # model = models.resnet18().to(device)
110
- # in_feature_num = model.fc.in_features
111
- # model.fc = nn.Linear(in_feature_num, 99)
112
- # model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
113
- # bias=False)
114
- #
115
- # PATH = "./CDM/model/model-99-resnet18.pkl"
116
- # model.load_state_dict(torch.load(PATH, map_location=torch.device(device)))
117
- #
118
- # model.eval()
119
- # else:
120
- # # replace with your own model
121
- # None
122
- #
123
- # return model
124
-
125
- def get_clf_model(clf_model="ResNet18", use_gpu=False):
126
-
127
- device = 'cpu'
128
- if use_gpu:
129
- device = 'cuda:0'
130
-
131
- if clf_model == "ResNet18":
132
- model = models.resnet18().to(device)
133
- in_feature_num = model.fc.in_features
134
- model.fc = nn.Linear(in_feature_num, 99)
135
- model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
136
- bias=False)
137
-
138
- PATH = "./CDM/model/model-99-resnet18.pkl"
139
- model.load_state_dict(torch.load(PATH, map_location=torch.device(device)))
140
-
141
- model.eval()
142
- elif clf_model == "ViT":
143
- model = torch.load('./CDM/model/model-99-ViT-entire.pkl', map_location=torch.device(device))
144
- model = model.to(device)
145
- model.eval()
146
-
147
- else:
148
- # replace with your own model
149
- None
150
-
151
- return model
152
-
153
- def compo_classification(input_img, output_root, segment_root, merge_json, output_data, resize_by_height=800, clf_model="ResNet18"):
154
- # load text and non-text compo
155
- ele_id = 0
156
- compos = []
157
- texts = []
158
- elements = []
159
-
160
- for compo in merge_json['compos']:
161
- if compo['class'] == 'Text':
162
- element = Element(ele_id,
163
- (compo["position"]['column_min'], compo["position"]['row_min'],
164
- compo["position"]['column_max'], compo["position"]['row_max']),
165
- 'Text', text_content=compo['text_content'])
166
- texts.append(element)
167
- ele_id += 1
168
- else:
169
- element = Element(ele_id,
170
- (compo["position"]['column_min'], compo["position"]['row_min'],
171
- compo["position"]['column_max'], compo["position"]['row_max']),
172
- compo['class'])
173
- compos.append(element)
174
- ele_id += 1
175
-
176
- org, grey = pre.read_img(input_img, resize_by_height)
177
-
178
- grey = grey.astype('float32')
179
- grey = grey / 255
180
-
181
- # grey = (grey - grey.mean()) / grey.std()
182
-
183
- # --------- classification ----------
184
-
185
- classification_start_time = time.process_time()
186
-
187
- for compo in compos:
188
-
189
- # comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]
190
- #
191
- # comp_crop = cv2.resize(comp_grey, (32, 32))
192
- #
193
- # comp_crop = comp_crop.reshape(1, 1, 32, 32)
194
- #
195
- # comp_tensor = torch.tensor(comp_crop)
196
- # comp_tensor = comp_tensor.permute(0, 1, 3, 2)
197
- #
198
- # model = get_clf_model()
199
- # pred_label = model(comp_tensor)
200
- #
201
- # if str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0]) in label_dic.keys():
202
- # compo.label = label_dic[str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])]
203
- # elements.append(compo)
204
- # else:
205
- # compo.label = str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])
206
-
207
- if clf_model == "ResNet18":
208
-
209
- comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]
210
-
211
- comp_crop = cv2.resize(comp_grey, (32, 32))
212
-
213
- comp_crop = comp_crop.reshape(1, 1, 32, 32)
214
-
215
- comp_tensor = torch.tensor(comp_crop)
216
- comp_tensor = comp_tensor.permute(0, 1, 3, 2)
217
-
218
- model = get_clf_model(clf_model)
219
- pred_label = model(comp_tensor)
220
-
221
- if str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0]) in label_dic.keys():
222
- compo.label = label_dic[str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])]
223
- elements.append(compo)
224
- else:
225
- compo.label = str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])
226
-
227
- elif clf_model == "ViT":
228
-
229
- comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]
230
-
231
- comp_crop = cv2.resize(comp_grey, (224, 224))
232
-
233
- # Convert the image to tensor
234
- comp_tensor = torch.from_numpy(comp_crop)
235
-
236
- # Reshape and repeat along the channel dimension to convert to RGB
237
- comp_tensor = comp_tensor.view(1, 224, 224).repeat(3, 1, 1)
238
-
239
- # comp_tensor = comp_tensor.permute(0, 2, 1)
240
-
241
- comp_tensor = comp_tensor.unsqueeze(0) # add a batch dimension
242
-
243
- model = get_clf_model(clf_model)
244
- # pred_label = model(comp_tensor)
245
-
246
- # Forward pass through the model
247
- with torch.no_grad():
248
- output = model(comp_tensor)
249
-
250
- # Get the predicted label
251
- _, predicted = torch.max(output.logits, 1)
252
-
253
- # print("predicted_label: ", predicted.cpu().numpy())
254
-
255
- if str(predicted.cpu().numpy()[0]) in label_dic.keys():
256
- compo.label = label_dic[str(predicted.cpu().numpy()[0])]
257
- elements.append(compo)
258
- else:
259
- compo.label = str(predicted.cpu().numpy()[0])
260
-
261
- else:
262
- print("clf_model has to be ResNet18 or ViT")
263
-
264
- time_cost_ic = time.process_time() - classification_start_time
265
- print("time cost for icon classification: %2.2f s" % time_cost_ic)
266
- # ic_time_cost_all.append(time_cost_ic)
267
-
268
- # --------- end classification ----------
269
-
270
- text_selection_time = time.process_time()
271
-
272
- for this_text in texts:
273
- # found_flag = 0
274
- #
275
- # for key in keyword_list:
276
- # for w in keyword_list[key]:
277
- # words = re.split(r'\W+', this_text.text_content.lower())
278
- # if w.lower() in words:
279
- # this_text.label = key
280
- # elements.append(this_text)
281
- # found_flag = 1
282
- # break
283
- #
284
- # if found_flag == 0:
285
- # this_text.label = 'others'
286
-
287
- retries = 10
288
- for i in range(retries):
289
- try:
290
- text_label = get_data_type(this_text.text_content.lower(), keyword_list, use_gpt=False)
291
- break
292
- except openai.error.RateLimitError as e:
293
- if "overloaded" in str(e):
294
- # Exponential backoff with jitter
295
- sleep_time = 2 * (2 ** i) + random.uniform(0, 0.1)
296
- time.sleep(sleep_time)
297
- else:
298
- raise
299
- except Exception as e:
300
- raise
301
-
302
- this_text.label = text_label
303
-
304
- if this_text.label != "others":
305
- elements.append(this_text)
306
-
307
- time_cost_ts = time.process_time() - text_selection_time
308
- print("time cost for text selection: %2.2f s" % time_cost_ts)
309
- # ts_time_cost_all.append(time_cost_ts)
310
-
311
- # ---------- end -------------------------------
312
-
313
- full_size_org, full_size_grey = pre.read_img(input_img)
314
- ratio = full_size_org.shape[0]/org.shape[0]
315
-
316
- show = False
317
- wait_key = 0
318
-
319
- reassign_ids(elements)
320
- board = merge.show_elements(full_size_org, elements, ratio, show=show, win_name='elements after merging', wait_key=wait_key, line=3)
321
- board_one_element = merge.show_one_element(full_size_org, elements, ratio, show=show, win_name='elements after merging', wait_key=wait_key, line=3)
322
-
323
- classification_root = pjoin(output_root, 'classification')
324
-
325
- # save all merged elements, clips and blank background
326
- name = input_img.replace('\\', '/').split('/')[-1][:-4]
327
- components = merge.save_elements(pjoin(classification_root, name + '.json'), elements, full_size_org.shape, ratio)
328
- cv2.imwrite(pjoin(classification_root, name + '.jpg'), board)
329
-
330
- print("len(board_one_element): ", len(board_one_element))
331
-
332
- for i in range(len(elements)):
333
- e_name = str(int(elements[i].id) + 1)
334
- cv2.imwrite(pjoin(classification_root + '/GUI', name + '-' + e_name + '.jpg'), board_one_element[i])
335
-
336
- print('[Classification Completed] Input: %s Output: %s' % (input_img, pjoin(classification_root, name + '.jpg')))
337
-
338
- # ---------- matching result -----------
339
-
340
- index = input_img.split('/')[-1][:-4]
341
- app_id = str(index).split('-')[0]
342
-
343
- index_path = pjoin(segment_root, app_id, 'classified_sentences/keyword_index.txt')
344
- dict_index = {}
345
- if exists(index_path):
346
- with open(index_path, 'r') as g:
347
- for line in g:
348
- key, value = line.strip().split(':', 1)
349
- dict_index[key] = value
350
-
351
- for item in elements:
352
- complete_path = pjoin(segment_root, app_id, 'classified_sentences', item.label + '.txt')
353
- print("complete_path: ", complete_path)
354
-
355
- if exists(complete_path):
356
-
357
- with open(complete_path, 'r', encoding='utf-8') as file:
358
- content = file.read()
359
-
360
- # Replace line breaks with spaces and strip any extra whitespace
361
- this_text = ' '.join(content.splitlines()).strip()
362
-
363
- lines = content.splitlines()
364
- non_empty_lines = [line for line in lines if line.strip() != ""]
365
- for i in range(len(non_empty_lines)):
366
- if non_empty_lines[i][0].isalpha():
367
- non_empty_lines[i] = non_empty_lines[i][0].upper() + non_empty_lines[i][1:]
368
-
369
- # output_data = output_data.append({'screenshot': 's' + str(index), 'id': item.id + 1, 'label': item.label, 'index': dict_index[item.label], 'text': this_text, 'sentences': non_empty_lines}, ignore_index=True)
370
- output_data = pd.concat([output_data, pd.DataFrame([{'screenshot': 's' + str(index), 'id': item.id + 1,
371
- 'label': item.label, 'index': dict_index[item.label],
372
- 'text': this_text, 'sentences': non_empty_lines}])])
373
-
374
- else:
375
- # output_data = output_data.append({'screenshot': 's' + str(index), 'id': item.id + 1, 'label': item.label, 'index': "None", 'text': "No information!", 'sentences': "None"},
376
- # ignore_index=True)
377
- output_data = pd.concat([output_data, pd.DataFrame([{'screenshot': 's' + str(index), 'id': item.id + 1,
378
- 'label': item.label, 'index': "None",
379
- 'text': "No information!", 'sentences': "None"}])])
380
- return time_cost_ic, time_cost_ts, output_data, board
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/optims.py DELETED
@@ -1,119 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import math
-
-from video_llama.common.registry import registry
-
-
-@registry.register_lr_scheduler("linear_warmup_step_lr")
-class LinearWarmupStepLRScheduler:
-    def __init__(
-        self,
-        optimizer,
-        max_epoch,
-        min_lr,
-        init_lr,
-        decay_rate=1,
-        warmup_start_lr=-1,
-        warmup_steps=0,
-        **kwargs
-    ):
-        self.optimizer = optimizer
-
-        self.max_epoch = max_epoch
-        self.min_lr = min_lr
-
-        self.decay_rate = decay_rate
-
-        self.init_lr = init_lr
-        self.warmup_steps = warmup_steps
-        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
-
-    def step(self, cur_epoch, cur_step):
-        if cur_epoch == 0:
-            warmup_lr_schedule(
-                step=cur_step,
-                optimizer=self.optimizer,
-                max_step=self.warmup_steps,
-                init_lr=self.warmup_start_lr,
-                max_lr=self.init_lr,
-            )
-        else:
-            step_lr_schedule(
-                epoch=cur_epoch,
-                optimizer=self.optimizer,
-                init_lr=self.init_lr,
-                min_lr=self.min_lr,
-                decay_rate=self.decay_rate,
-            )
-
-
-@registry.register_lr_scheduler("linear_warmup_cosine_lr")
-class LinearWarmupCosineLRScheduler:
-    def __init__(
-        self,
-        optimizer,
-        max_epoch,
-        iters_per_epoch,
-        min_lr,
-        init_lr,
-        warmup_steps=0,
-        warmup_start_lr=-1,
-        **kwargs
-    ):
-        self.optimizer = optimizer
-
-        self.max_epoch = max_epoch
-        self.iters_per_epoch = iters_per_epoch
-        self.min_lr = min_lr
-
-        self.init_lr = init_lr
-        self.warmup_steps = warmup_steps
-        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
-
-    def step(self, cur_epoch, cur_step):
-        total_cur_step = cur_epoch * self.iters_per_epoch + cur_step
-        if total_cur_step < self.warmup_steps:
-            warmup_lr_schedule(
-                step=cur_step,
-                optimizer=self.optimizer,
-                max_step=self.warmup_steps,
-                init_lr=self.warmup_start_lr,
-                max_lr=self.init_lr,
-            )
-        else:
-            cosine_lr_schedule(
-                epoch=total_cur_step,
-                optimizer=self.optimizer,
-                max_epoch=self.max_epoch * self.iters_per_epoch,
-                init_lr=self.init_lr,
-                min_lr=self.min_lr,
-            )
-
-
-def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
-    """Decay the learning rate"""
-    lr = (init_lr - min_lr) * 0.5 * (
-        1.0 + math.cos(math.pi * epoch / max_epoch)
-    ) + min_lr
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
-
-
-def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
-    """Warmup the learning rate"""
-    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
-
-
-def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
-    """Decay the learning rate"""
-    lr = max(min_lr, init_lr * (decay_rate**epoch))
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/__init__.py DELETED
@@ -1,132 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-"""
-Classes Without Boilerplate
-"""
-
-from functools import partial
-from typing import Callable
-
-from . import converters, exceptions, filters, setters, validators
-from ._cmp import cmp_using
-from ._config import get_run_validators, set_run_validators
-from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
-from ._make import (
-    NOTHING,
-    Attribute,
-    Factory,
-    attrib,
-    attrs,
-    fields,
-    fields_dict,
-    make_class,
-    validate,
-)
-from ._next_gen import define, field, frozen, mutable
-from ._version_info import VersionInfo
-
-
-s = attributes = attrs
-ib = attr = attrib
-dataclass = partial(attrs, auto_attribs=True)  # happy Easter ;)
-
-
-class AttrsInstance:
-    pass
-
-
-__all__ = [
-    "Attribute",
-    "AttrsInstance",
-    "Factory",
-    "NOTHING",
-    "asdict",
-    "assoc",
-    "astuple",
-    "attr",
-    "attrib",
-    "attributes",
-    "attrs",
-    "cmp_using",
-    "converters",
-    "define",
-    "evolve",
-    "exceptions",
-    "field",
-    "fields",
-    "fields_dict",
-    "filters",
-    "frozen",
-    "get_run_validators",
-    "has",
-    "ib",
-    "make_class",
-    "mutable",
-    "resolve_types",
-    "s",
-    "set_run_validators",
-    "setters",
-    "validate",
-    "validators",
-]
-
-
-def _make_getattr(mod_name: str) -> Callable:
-    """
-    Create a metadata proxy for packaging information that uses *mod_name* in
-    its warnings and errors.
-    """
-
-    def __getattr__(name: str) -> str:
-        dunder_to_metadata = {
-            "__title__": "Name",
-            "__copyright__": "",
-            "__version__": "version",
-            "__version_info__": "version",
-            "__description__": "summary",
-            "__uri__": "",
-            "__url__": "",
-            "__author__": "",
-            "__email__": "",
-            "__license__": "license",
-        }
-        if name not in dunder_to_metadata.keys():
-            raise AttributeError(f"module {mod_name} has no attribute {name}")
-
-        import sys
-        import warnings
-
-        if sys.version_info < (3, 8):
-            from importlib_metadata import metadata
-        else:
-            from importlib.metadata import metadata
-
-        if name != "__version_info__":
-            warnings.warn(
-                f"Accessing {mod_name}.{name} is deprecated and will be "
-                "removed in a future release. Use importlib.metadata directly "
-                "to query for attrs's packaging metadata.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
-
-        meta = metadata("attrs")
-        if name == "__license__":
-            return "MIT"
-        elif name == "__copyright__":
-            return "Copyright (c) 2015 Hynek Schlawack"
-        elif name in ("__uri__", "__url__"):
-            return meta["Project-URL"].split(" ", 1)[-1]
-        elif name == "__version_info__":
-            return VersionInfo._from_version_string(meta["version"])
-        elif name == "__author__":
-            return meta["Author-email"].rsplit(" ", 1)[0]
-        elif name == "__email__":
-            return meta["Author-email"].rsplit("<", 1)[1][:-1]
-
-        return meta[dunder_to_metadata[name]]
-
-    return __getattr__
-
-
-__getattr__ = _make_getattr(__name__)
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .path import SVGPath, parse_path
-
-__all__ = ["SVGPath", "parse_path"]
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/reload.py DELETED
@@ -1,91 +0,0 @@
-"""
-
-Contains the functions that run when `gradio` is called from the command line. Specifically, allows
-
-$ gradio app.py, to run app.py in reload mode where any changes in the app.py file or Gradio library reloads the demo.
-$ gradio app.py my_demo.app, to use variable names other than "demo"
-"""
-import inspect
-import os
-import sys
-from pathlib import Path
-
-from uvicorn import Config
-from uvicorn.supervisors import ChangeReload
-
-import gradio
-from gradio import networking, utils
-
-
-def _setup_config():
-    args = sys.argv[1:]
-    if len(args) == 0:
-        raise ValueError("No file specified.")
-    if len(args) == 1 or args[1].startswith("--"):
-        demo_name = "demo.app"
-    else:
-        demo_name = args[1]
-        if "." not in demo_name:
-            print(
-                "\nWARNING: As of Gradio 3.31, the parameter after the file path must be the name of the FastAPI app, not the Gradio demo. In most cases, this just means you should add '.app' after the name of your demo, e.g. 'demo' -> 'demo.app'."
-            )
-
-    original_path = args[0]
-    abs_original_path = utils.abspath(original_path)
-    path = os.path.normpath(original_path)
-    path = path.replace("/", ".")
-    path = path.replace("\\", ".")
-    filename = os.path.splitext(path)[0]
-
-    gradio_folder = Path(inspect.getfile(gradio)).parent
-
-    port = networking.get_first_available_port(
-        networking.INITIAL_PORT_VALUE,
-        networking.INITIAL_PORT_VALUE + networking.TRY_NUM_PORTS,
-    )
-    print(
-        f"\nLaunching in *reload mode* on: http://{networking.LOCALHOST_NAME}:{port} (Press CTRL+C to quit)\n"
-    )
-
-    gradio_app = f"{filename}:{demo_name}"
-    message = "Watching:"
-    message_change_count = 0
-
-    watching_dirs = []
-    if str(gradio_folder).strip():
-        watching_dirs.append(gradio_folder)
-        message += f" '{gradio_folder}'"
-        message_change_count += 1
-
-    abs_parent = abs_original_path.parent
-    if str(abs_parent).strip():
-        watching_dirs.append(abs_parent)
-        if message_change_count == 1:
-            message += ","
-        message += f" '{abs_parent}'"
-
-    print(message + "\n")
-
-    # guaranty access to the module of an app
-    sys.path.insert(0, os.getcwd())
-
-    # uvicorn.run blocks the execution (looping) which makes it hard to test
-    return Config(
-        gradio_app,
-        reload=True,
-        port=port,
-        log_level="warning",
-        reload_dirs=watching_dirs,
-    )
-
-
-def main():
-    # default execution pattern to start the server and watch changes
-    config = _setup_config()
-    server = networking.Server(config)
-    sock = config.bind_socket()
-    ChangeReload(config, target=server.run, sockets=[sock]).run()
-
-
-if __name__ == "__main__":
-    main()
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/visualization/grad.py DELETED
@@ -1,117 +0,0 @@
-"""
-@Date: 2021/11/06
-@description:
-"""
-import cv2
-import numpy as np
-import torch
-import matplotlib.pyplot as plt
-
-from utils.conversion import depth2xyz
-
-
-def convert_img(value, h, need_nor=True, cmap=None):
-    value = value.clone().detach().cpu().numpy()[None]
-    if need_nor:
-        value -= value.min()
-        value /= value.max() - value.min()
-    grad_img = value.repeat(int(h), axis=0)
-
-    if cmap is None:
-        grad_img = grad_img[..., np.newaxis].repeat(3, axis=-1)
-    elif cmap == cv2.COLORMAP_PLASMA:
-        grad_img = cv2.applyColorMap((grad_img * 255).astype(np.uint8), colormap=cmap)
-        grad_img = grad_img[..., ::-1]
-        grad_img = grad_img.astype(np.float) / 255.0
-    elif cmap == 'HSV':
-        grad_img = np.round(grad_img * 1000) / 1000.0
-        grad_img = grad_img[..., np.newaxis].repeat(3, axis=-1)
-        grad_img[..., 0] = grad_img[..., 0] * 180
-        grad_img[..., 1] = 255
-        grad_img[..., 2] = 255
-        grad_img = grad_img.astype(np.uint8)
-        grad_img = cv2.cvtColor(grad_img, cv2.COLOR_HSV2RGB)
-        grad_img = grad_img.astype(np.float) / 255.0
-    return grad_img
-
-
-def show_grad(depth, grad_conv, h=5, show=False):
-    """
-    :param h:
-    :param depth: [patch_num]
-    :param grad_conv:
-    :param show:
-    :return:
-    """
-
-    direction, angle, grad = get_all(depth[None], grad_conv)
-
-    # depth_img = convert_img(depth, h)
-    # angle_img = convert_img(angle[0], h)
-    # grad_img = convert_img(grad[0], depth.shape[-1] // 4 - h * 2)
-    depth_img = convert_img(depth, h, cmap=cv2.COLORMAP_PLASMA)
-    angle_img = convert_img(angle[0], h, cmap='HSV')
-
-    # vis_grad = grad[0] / grad[0].max() / 2 + 0.5
-    grad_img = convert_img(grad[0], h)
-    img = np.concatenate([depth_img, angle_img, grad_img], axis=0)
-    if show:
-        plt.imshow(img)
-        plt.show()
-    return img
-
-
-def get_grad(direction):
-    """
-    :param direction: [b patch_num]
-    :return:[b patch_num]
-    """
-    a = torch.roll(direction, -1, dims=1)  # xz[i+1]
-    b = torch.roll(direction, 1, dims=1)  # xz[i-1]
-    grad = torch.acos(torch.clip(a[..., 0] * b[..., 0] + a[..., 1] * b[..., 1], -1+1e-6, 1-1e-6))
-    return grad
-
-
-def get_grad2(angle, grad_conv):
-    """
-    :param angle: [b patch_num]
-    :param grad_conv:
-    :return:[b patch_num]
-    """
-    angle = torch.sin(angle)
-    angle = angle + 1
-
-    angle = torch.cat([angle[..., -1:], angle, angle[..., :1]], dim=-1)
-    grad = grad_conv(angle[:, None])  # [b, patch_num] -> [b, 1, patch_num]
-    # grad = torch.abs(grad)
-    return grad.reshape(angle.shape[0], -1)
-
-
-def get_edge_angle(direction):
-    """
-    :param direction: [b patch_num 2]
-    :return:
-    """
-    angle = torch.atan2(direction[..., 1], direction[..., 0])
-    return angle
-
-
-def get_edge_direction(depth):
-    xz = depth2xyz(depth)[..., ::2]
-    direction = torch.roll(xz, -1, dims=1) - xz  # direct[i] = xz[i+1] - xz[i]
-    direction = direction / direction.norm(p=2, dim=-1)[..., None]
-    return direction
-
-
-def get_all(depth, grad_conv):
-    """
-
-    :param grad_conv:
-    :param depth: [b patch_num]
-    :return:
-    """
-    direction = get_edge_direction(depth)
-    angle = get_edge_angle(direction)
-    # angle_grad = get_grad(direction)
-    angle_grad = get_grad2(angle, grad_conv)  # signed gradient
-    return direction, angle, angle_grad
 
spaces/Detomo/ai-comic-generation/src/app/store/index.ts DELETED
@@ -1,203 +0,0 @@
-"use client"
-
-import { create } from "zustand"
-
-import { FontName } from "@/lib/fonts"
-import { Preset, PresetName, defaultPreset, getPreset, getRandomPreset } from "@/app/engine/presets"
-import { LayoutName, defaultLayout, getRandomLayoutName, getRandomLayoutNames } from "../layouts"
-import html2canvas from "html2canvas"
-import { RenderedScene } from "@/types"
-
-export const useStore = create<{
-  prompt: string
-  font: FontName
-  preset: Preset
-  nbFrames: number
-  panels: string[]
-  captions: string[]
-  upscaleQueue: Record<string, RenderedScene>
-  showCaptions: boolean
-  renderedScenes: Record<string, RenderedScene>
-  layout: LayoutName
-  layouts: LayoutName[]
-  zoomLevel: number
-  page: HTMLDivElement
-  isGeneratingStory: boolean
-  panelGenerationStatus: Record<number, boolean>
-  isGeneratingText: boolean
-  atLeastOnePanelIsBusy: boolean
-  setRendered: (panelId: string, renderedScene: RenderedScene) => void
-  addToUpscaleQueue: (panelId: string, renderedScene: RenderedScene) => void
-  removeFromUpscaleQueue: (panelId: string) => void
-  setPrompt: (prompt: string) => void
-  setFont: (font: FontName) => void
-  setPreset: (preset: Preset) => void
-  setPanels: (panels: string[]) => void
-  setShowCaptions: (showCaptions: boolean) => void
-  setLayout: (layout: LayoutName) => void
-  setLayouts: (layouts: LayoutName[]) => void
-  setCaptions: (captions: string[]) => void
-  setZoomLevel: (zoomLevel: number) => void
-  setPage: (page: HTMLDivElement) => void
-  setGeneratingStory: (isGeneratingStory: boolean) => void
-  setGeneratingImages: (panelId: string, value: boolean) => void
-  setGeneratingText: (isGeneratingText: boolean) => void
-  pageToImage: () => Promise<string>
-  download: () => Promise<void>
-  generate: (prompt: string, presetName: PresetName, layoutName: LayoutName) => void
-}>((set, get) => ({
-  prompt: "",
-  font: "actionman",
-  preset: getPreset(defaultPreset),
-  nbFrames: 1,
-  panels: [],
-  captions: [],
-  upscaleQueue: {} as Record<string, RenderedScene>,
-  renderedScenes: {} as Record<string, RenderedScene>,
-  showCaptions: false,
-  layout: defaultLayout,
-  layouts: [defaultLayout, defaultLayout],
-  zoomLevel: 60,
-  page: undefined as unknown as HTMLDivElement,
-  isGeneratingStory: false,
-  panelGenerationStatus: {},
-  isGeneratingText: false,
-  atLeastOnePanelIsBusy: false,
-  setRendered: (panelId: string, renderedScene: RenderedScene) => {
-    const { renderedScenes } = get()
-    set({
-      renderedScenes: {
-        ...renderedScenes,
-        [panelId]: renderedScene
-      }
-    })
-  },
-  addToUpscaleQueue: (panelId: string, renderedScene: RenderedScene) => {
-    const { upscaleQueue } = get()
-    set({
-      upscaleQueue: {
-        ...upscaleQueue,
-        [panelId]: renderedScene
-      },
-    })
-  },
-  removeFromUpscaleQueue: (panelId: string) => {
-    const upscaleQueue = { ...get().upscaleQueue }
-    delete upscaleQueue[panelId]
-    set({
-      upscaleQueue,
-    })
-  },
-  setPrompt: (prompt: string) => {
-    const existingPrompt = get().prompt
-    if (prompt === existingPrompt) { return }
-    set({
-      prompt,
-    })
-  },
-  setFont: (font: FontName) => {
-    const existingFont = get().font
-    if (font === existingFont) { return }
-    set({
-      font,
-    })
-  },
-  setPreset: (preset: Preset) => {
-    const existingPreset = get().preset
-    if (preset.label === existingPreset.label) { return }
-    set({
-      preset,
-    })
-  },
-  setNbFrames: (nbFrames: number) => {
-    const existingNbFrames = get().nbFrames
-    if (nbFrames === existingNbFrames) { return }
-    set({
-      nbFrames,
-    })
-  },
-  setPanels: (panels: string[]) => set({ panels }),
-  setCaptions: (captions: string[]) => {
-    set({
-      captions,
-    })
-  },
-  setShowCaptions: (showCaptions: boolean) => {
-    set({
-      showCaptions,
-    })
-  },
-  setLayout: (layoutName: LayoutName) => {
-    const layout = layoutName === "random"
-      ? getRandomLayoutName()
-      : layoutName
-
-    set({
-      layout,
-      layouts: [layout, layout]
-    })
-  },
-  setLayouts: (layouts: LayoutName[]) => set({ layouts }),
-  setZoomLevel: (zoomLevel: number) => set({ zoomLevel }),
-  setPage: (page: HTMLDivElement) => {
-    if (!page) { return }
-    set({ page })
-  },
-  setGeneratingStory: (isGeneratingStory: boolean) => set({ isGeneratingStory }),
-  setGeneratingImages: (panelId: string, value: boolean) => {
-    const panelGenerationStatus: Record<string, boolean> = {
-      ...get().panelGenerationStatus,
-      [panelId]: value
-    }
-
-    const atLeastOnePanelIsBusy = Object.values(panelGenerationStatus).includes(true)
-
-    set({
-      panelGenerationStatus,
-      atLeastOnePanelIsBusy
-    })
-  },
-  setGeneratingText: (isGeneratingText: boolean) => set({ isGeneratingText }),
-  pageToImage: async () => {
-    const { page } = get()
-    if (!page) { return "" }
-
-
-    const canvas = await html2canvas(page)
-    console.log("canvas:", canvas)
-
-    const data = canvas.toDataURL('image/jpeg', 0.5)
-    return data
-  },
-  download: async () => {
-    const { pageToImage } = get()
-    const data = await pageToImage()
-
-    const link = document.createElement('a')
-
-    if (typeof link.download === 'string') {
-      link.href = data
-      link.download = 'comic.jpg'
-      document.body.appendChild(link)
-      link.click()
-      document.body.removeChild(link)
-    } else {
-      window.open(data)
-    }
-  },
-  generate: (prompt: string, presetName: PresetName, layoutName: LayoutName) => {
-    const layout = layoutName === "random"
-      ? getRandomLayoutName()
-      : layoutName
-    set({
-      prompt,
-      panels: [],
-      captions: [],
-      preset: presetName === "random"
-        ? getRandomPreset()
-        : getPreset(presetName),
-      layout,
-      layouts: [layout, layout],
-    })
-  }
-}))
 
spaces/DiViorg/categories_error_analysis/README.md DELETED
@@ -1,12 +0,0 @@
----
-title: Categories Error Analysis
-emoji: 🐛
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference