parquet-converter committed
Commit 9f18c88 · Parent: 42b7873

Update parquet files (step 22 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/testing/italygpt2_test.py +0 -4
  2. spaces/1gistliPinn/ChatGPT4/Examples/Code Bulk Image _TOP_ Downloader Serial.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Corel Roxio Creator NXT 2 V15.0 (keygen CORE) [ChingLiu] Serial Key Keygen.md +0 -6
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator 2023 APK Mod Unlimited Money and Realistic Driving.md +0 -130
  5. spaces/1phancelerku/anime-remove-background/Download Lagu Aespa Black Mamba - The Ultimate Guide for Fans.md +0 -166
  6. spaces/1phancelerku/anime-remove-background/Download TikTok Asia APK for Android - Latest Version 30.0.3.md +0 -153
  7. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py +0 -59
  8. spaces/801artistry/RVC801/lib/infer_pack/attentions.py +0 -417
  9. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/train.py +0 -838
  10. spaces/AIWaves/Debate/src/agents/Memory/__init__.py +0 -1
  11. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/__init__.py +0 -0
  12. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/pendingMessage.ts +0 -3
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.js +0 -34
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyImage.js +0 -21
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Factory.d.ts +0 -23
  16. spaces/Aki004/herta-so-vits/modules/commons.py +0 -188
  17. spaces/Alpaca233/SadTalker/src/facerender/modules/generator.py +0 -255
  18. spaces/Altinas/vits-uma-genshin-honkais/models.py +0 -534
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/adapt_a_model.md +0 -54
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py +0 -118
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +0 -738
  22. spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py +0 -2
  23. spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py +0 -2
  24. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py +0 -2
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +0 -35
  26. spaces/AriaMei/TTSdemo/text/__init__.py +0 -56
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/sources.py +0 -223
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/__init__.py +0 -10
  29. spaces/BAAI/dreambooth-altdiffusion/convertosd.py +0 -226
  30. spaces/BLACKHOST/Date/README.md +0 -12
  31. spaces/Balalaxmi/JarvisAIchatbox/README.md +0 -12
  32. spaces/Benson/text-generation/Examples/Arco Iris Seis Sitio Mvil Apk Ios.md +0 -84
  33. spaces/Benson/text-generation/Examples/Descargar Fifa 2022 Apk Mod Y Obb.md +0 -86
  34. spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Scorpion Mortal Kombat.md +0 -72
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py +0 -267
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/columns.py +0 -187
  37. spaces/Boops88/gsdf-Counterfeit-V2.5/app.py +0 -3
  38. spaces/Brasd99/JustClothify/helpers/processor.py +0 -174
  39. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/replace.h +0 -22
  40. spaces/CVPR/WALT/mmdet/models/dense_heads/__init__.py +0 -41
  41. spaces/CVPR/WALT/mmdet/models/necks/fpn.py +0 -221
  42. spaces/CVPR/lama-example/bin/sample_from_dataset.py +0 -87
  43. spaces/CVPR/lama-example/saicinpainting/training/trainers/base.py +0 -291
  44. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/onnx.py +0 -144
  45. spaces/Chukwuka/Dog_Breed_ImageWoof/README.md +0 -400
  46. spaces/CikeyQI/Yunzai/Yunzai/plugins/example/主动复读.js +0 -37
  47. spaces/ClassCat/wide-resnet-cifar10-classification/README.md +0 -12
  48. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/sigmoid_focal_loss.py +0 -76
  49. spaces/DataDreamweavers/LegaWeaver/README.md +0 -13
  50. spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/linear_separability.py +0 -177
spaces/101-5/gpt4free/g4f/.v1/testing/italygpt2_test.py DELETED
@@ -1,4 +0,0 @@
- from gpt4free import italygpt2
- account_data=italygpt2.Account.create()
- for chunk in italygpt2.Completion.create(account_data=account_data,prompt="Who are you?"):
-     print(chunk, end="", flush=True)
 
spaces/1gistliPinn/ChatGPT4/Examples/Code Bulk Image _TOP_ Downloader Serial.md DELETED
@@ -1,6 +0,0 @@
- <h2>code bulk image downloader serial</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uy15K">https://imgfil.com/2uy15K</a></b></p><br /><br />
-
- Bulk Image Downloader 5 Full Serial Key — When you get Crack Bulk Image Downloader Serial Key 5 Generator, you'd wish to activate it ... 1fdad05405<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Corel Roxio Creator NXT 2 V15.0 (keygen CORE) [ChingLiu] Serial Key Keygen.md DELETED
@@ -1,6 +0,0 @@
- <h2>Corel Roxio Creator NXT 2 v15.0 (keygen CORE) [ChingLiu] Serial Key keygen</h2><br /><p><b><b>Download Zip</b> &#10022; <a href="https://imgfil.com/2uxX1W">https://imgfil.com/2uxX1W</a></b></p><br /><br />
- <br />
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator 2023 APK Mod Unlimited Money and Realistic Driving.md DELETED
@@ -1,130 +0,0 @@
-
- <h1>Bus Simulator 2023 Mod Apk Rexdl: How to Download and Install the Latest Version</h1>
- <p>Do you love driving buses and transporting passengers from one place to another? Do you want to experience the realistic and immersive bus simulation game on your Android device? If yes, then you should try Bus Simulator 2023, the latest and most advanced bus simulator game from Ovidiu Pop. And if you want to enjoy the game with unlimited money, unlocked buses, and premium features, then you should download and install the mod apk version from Rexdl, one of the best sources for modded games and apps. In this article, we will tell you everything you need to know about Bus Simulator 2023, mod apk, and Rexdl. We will also guide you on how to download and install the latest version of Bus Simulator 2023 mod apk rexdl on your device. So, let's get started!</p>
- <h2>bus simulator 2023 mod apk rexdl</h2><br /><p><b><b>Download Zip</b> &#10003;&#10003;&#10003; <a href="https://urlin.us/2uSZFw">https://urlin.us/2uSZFw</a></b></p><br /><br />
- <h2>What is Bus Simulator 2023?</h2>
- <p>Bus Simulator 2023 is a public transport driver simulator game with elaborate gameplay and modern 3D graphics. You can play the role of a driver, picking up people at bus stops and transporting them along the route. You can also manage your own bus company, hire drivers, buy new buses, customize them, and expand your business. You can choose from different types of buses, such as city buses, school buses, double-decker buses, articulated buses, electric buses, and more. You can also drive in different cities and environments, such as Europe, USA, Canada, Australia, China, Japan, and more. You can enjoy realistic traffic, weather, day-night cycle, passengers' reactions, bus sounds, and physics. You can also play online multiplayer mode with other players around the world.</p>
- <h3>Features of Bus Simulator 2023</h3>
- <p>Some of the main features of Bus Simulator 2023 are:</p>
- <ul>
- <li>Over 40 different buses to drive and customize</li>
- <li>Over 10 realistic cities and environments to explore</li>
- <li>Over 250 challenging missions to complete</li>
- <li>Realistic traffic, weather, day-night cycle, passengers' reactions, bus sounds, and physics</li>
- <li>Online multiplayer mode with leaderboards and achievements</li>
- <li>User-friendly interface and controls</li>
- <li>Regular updates with new content and improvements</li>
- </ul>
- <h3>Gameplay of Bus Simulator 2023</h3>
- <p>The gameplay of Bus Simulator 2023 is simple and fun. You can start by choosing your bus model and city. Then you can select your route and start driving. You have to follow the traffic rules, stop at red lights, use indicators, avoid collisions, and obey speed limits. You also have to pick up passengers at bus stops and drop them off at their destinations. You have to be careful not to damage your bus or injure your passengers. You can earn money by completing missions and satisfying your passengers. You can use the money to buy new buses, upgrade them, hire drivers, and grow your company. You can also compete with other players in online multiplayer mode and rank up on the leaderboards.</p>
- <h2>What is Mod Apk?</h2>
- <p>Mod apk is a modified version of an original apk file that has been altered by some developers or hackers to provide some extra features or benefits that are not available in the official version. Mod apk usually has unlimited money, unlocked items, premium features, ad-free experience, and more. Mod apk can enhance your gaming experience and make the game more fun and easy. However, mod apk also has some drawbacks and risks that you should be aware of before downloading and installing them.</p>
- <p>bus simulator 2023 hack apk download<br />
- bus simulator 2023 unlimited money mod<br />
- bus simulator 2023 mod apk latest version<br />
- bus simulator 2023 mod apk android 1<br />
- bus simulator 2023 mod apk revdl<br />
- bus simulator 2023 mod apk offline<br />
- bus simulator 2023 mod apk free shopping<br />
- bus simulator 2023 mod apk obb<br />
- bus simulator 2023 mod apk no ads<br />
- bus simulator 2023 mod apk unlimited xp<br />
- bus simulator 2023 mod apk all buses unlocked<br />
- bus simulator 2023 mod apk an1<br />
- bus simulator 2023 mod apk happymod<br />
- bus simulator 2023 mod apk unlimited fuel<br />
- bus simulator 2023 mod apk data<br />
- bus simulator 2023 mod apk rexdl.com<br />
- bus simulator 2023 pro mod apk<br />
- bus simulator 2023 premium mod apk<br />
- bus simulator 2023 vip mod apk<br />
- bus simulator 2023 mega mod apk<br />
- bus simulator 2023 full mod apk<br />
- bus simulator 2023 cracked mod apk<br />
- bus simulator 2023 cheat mod apk<br />
- bus simulator 2023 real mod apk<br />
- bus simulator 2023 new mod apk<br />
- download game bus simulator 2023 mod apk rexdl<br />
- download bus simulator 2023 original + mod + obb rexdl<br />
- download bus simulator 2023 v1.4.3 (mod money) rexdl<br />
- download link for bus simulator 2023 mod apk rexdl<br />
- how to install bus simulator 2023 mod apk rexdl<br />
- how to download bus simulator 2023 mod apk rexdl<br />
- how to update bus simulator 2023 mod apk rexdl<br />
- how to play bus simulator 2023 mod apk rexdl online<br />
- how to get unlimited coins in bus simulator 2023 mod apk rexdl<br />
- how to unlock all buses in bus simulator 2023 mod apk rexdl<br />
- how to fix lag in bus simulator 2023 mod apk rexdl<br />
- how to remove ads in bus simulator 2023 mod apk rexdl<br />
- how to backup data in bus simulator 2023 mod apk rexdl<br />
- how to restore data in bus simulator 2023 mod apk rexdl<br />
- how to transfer data in bus simulator 2023 mod apk rexdl<br />
- best settings for bus simulator 2023 mod apk rexdl<br />
- best graphics for bus simulator 2023 mod apk rexdl<br />
- best routes for bus simulator 2023 mod apk rexdl<br />
- best tips and tricks for bus simulator 2023 mod apk rexdl<br />
- best cheats and hacks for bus simulator 2023 mod apk rexdl<br />
- best reviews and ratings for bus simulator 2023 mod apk rexdl<br />
- best alternatives for bus simulator 2023 mod apk rexdl<br />
- best features and updates for bus simulator 2023 mod apk rexdl</p>
- <h3>Benefits of Mod Apk</h3>
- <p>Some of the benefits of mod apk are:</p>
- <ul>
- <li>You can access unlimited money, coins, gems, diamonds, or other in-game currencies that you can use to buy new buses, upgrade them, hire drivers, and more.</li>
- <li>You can unlock all the buses, cities, routes, missions, and features that are otherwise locked or require real money to purchase.</li>
- <li>You can enjoy the premium features, such as no ads, no watermark, no root, no verification, and more.</li>
- <li>You can have more fun and excitement by playing the game with enhanced graphics, sound effects, animations, and gameplay.</li>
- </ul>
- <h3>Risks of Mod Apk</h3>
- <p>Some of the risks of mod apk are:</p>
- <ul>
- <li>You may download a fake or malicious mod apk file that may contain viruses, malware, spyware, or other harmful programs that can damage your device or steal your personal information.</li>
- <li>You may violate the terms and conditions of the original game developer or publisher and face legal actions or penalties.</li>
- <li>You may lose your progress, data, or account if the mod apk file is not compatible with the latest version of the game or if the game detects that you are using a modified version.</li>
- <li>You may encounter bugs, glitches, errors, crashes, or performance issues that may ruin your gaming experience or cause data loss.</li>
- </ul>
- <h2>What is Rexdl?</h2>
- <p>Rexdl is a website that provides free download links for modded games and apps for Android devices. Rexdl claims to offer 100% safe and working mod apk files that are tested and verified by their team. Rexdl also updates its content regularly to provide the latest versions of the games and apps. Rexdl has a user-friendly interface and easy navigation that allows you to find and download your desired mod apk file in a few clicks. Rexdl also has a comment section where you can ask questions, request mods, or give feedback.</p>
- <h3>Advantages of Rexdl</h3>
- <p>Some of the advantages of Rexdl are:</p>
- <ul>
- <li>You can download mod apk files for free without any registration or subscription.</li>
- <li>You can download mod apk files from direct links without any redirection or pop-up ads.</li>
- <li>You can download mod apk files from various categories and genres, such as action, adventure, arcade, racing, simulation, sports, strategy, puzzle, role-playing, casual, and more.</li>
- <li>You can download mod apk files that are compatible with most Android devices and versions.</li>
- </ul> <h3>Disadvantages of Rexdl</h3>
- <p>Some of the disadvantages of Rexdl are:</p>
- <ul>
- <li>You may not find the mod apk file that you are looking for, as Rexdl does not have all the games and apps available on the market.</li>
- <li>You may download a mod apk file that is outdated, corrupted, or incompatible with your device or game version.</li>
- <li>You may face security risks, as Rexdl does not guarantee the safety and quality of the mod apk files that they provide.</li>
- <li>You may lose the original features and functions of the game or app, as the mod apk file may alter or remove them.</li>
- </ul>
- <h2>How to Download and Install Bus Simulator 2023 Mod Apk Rexdl?</h2>
- <p>If you want to download and install Bus Simulator 2023 mod apk rexdl on your device, you need to follow these steps:</p>
- <h3>Step 1: Enable Unknown Sources</h3>
- <p>Before you can install any mod apk file on your device, you need to enable the unknown sources option in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but you can ignore it and proceed.</p>
- <h3>Step 2: Visit Rexdl Website</h3>
- <p>Next, you need to visit the Rexdl website using your browser. You can use this link: https://rexdl.com/. Once you are on the website, you can browse through the categories and genres of games and apps, or use the search bar to find Bus Simulator 2023 mod apk.</p>
- <h3>Step 3: Search for Bus Simulator 2023 Mod Apk</h3>
- <p>Once you have found the Bus Simulator 2023 mod apk file on Rexdl, you can click on it to open its page. There you will see some information about the game, such as its name, version, size, developer, screenshots, description, features, and more. You will also see a download button at the bottom of the page. Click on it to start downloading the file.</p>
- <h3>Step 4: Download and Install the File</h3>
- <p>After clicking on the download button, you will be redirected to another page where you will see another download button. Click on it again to download the file directly to your device. The file size is about 300 MB, so make sure you have enough space and a stable internet connection. Once the download is complete, you can open the file and tap on install. Wait for a few seconds until the installation is done. Then you can launch the game and enjoy Bus Simulator 2023 mod apk rexdl.</p>
- <h2>Conclusion</h2>
- <p>Bus Simulator 2023 is a great game for bus lovers and simulation fans. It offers realistic and immersive gameplay with various buses, cities, missions, and features. You can also play online multiplayer mode with other players around the world. However, if you want to have more fun and excitement with unlimited money, unlocked buses, and premium features, you can download and install Bus Simulator 2023 mod apk rexdl from Rexdl website. Rexdl is a reliable source for modded games and apps that provides free and direct download links. However, you should also be careful of the risks and disadvantages of using mod apk files and Rexdl website. You should always check the safety and compatibility of the files before downloading and installing them. You should also backup your data and account before using any mod apk file. We hope this article has helped you learn more about Bus Simulator 2023 mod apk rexdl and how to download and install it on your device.</p>
- <h4>Frequently Asked Questions</h4>
- <p>Here are some of the frequently asked questions about Bus Simulator 2023 mod apk rexdl:</p>
- <ol>
- <li>Is Bus Simulator 2023 mod apk rexdl safe to use?</li>
- <p>Bus Simulator 2023 mod apk rexdl is not officially endorsed or supported by Ovidiu Pop, the developer of Bus Simulator 2023. Therefore, it may not be safe to use. It may contain viruses, malware, spyware, or other harmful programs that can damage your device or steal your personal information. It may also violate the terms and conditions of Ovidiu Pop and cause legal actions or penalties. Therefore, we recommend that you use Bus Simulator 2023 mod apk rexdl at your own risk and discretion.</p>
- <li>Does Bus Simulator 2023 mod apk rexdl require root access?</li>
- <p>No, Bus Simulator 2023 mod apk rexdl does not require root access to work on your device. You can install it without rooting your device.</p>
- <li>Can I play online multiplayer mode with Bus Simulator 2023 mod apk rexdl?</li>
- <p>Yes, you can play online multiplayer mode with Bus Simulator 2023 mod apk rexdl. However, you may face some issues or problems, such as lag, disconnect, ban, or mismatch. You may also not be able to play with other players who are using the official version of the game. Therefore, we suggest that you play online multiplayer mode with caution and respect.</p>
- <li>How can I update Bus Simulator 2023 mod apk rexdl?</li>
- <p>Bus Simulator 2023 mod apk rexdl may not be compatible with the latest version of the game. Therefore, you may need to update it manually. To do this, you need to visit the Rexdl website again and look for the updated version of Bus Simulator 2023 mod apk rexdl. Then you need to download and install it over the existing one. However, you may lose your progress, data, or account if you update the mod apk file. Therefore, we advise that you backup your data and account before updating the mod apk file.</p>
- <li>Where can I find more mod apk files like Bus Simulator 2023 mod apk rexdl?</li>
- <p>If you are looking for more mod apk files like Bus Simulator 2023 mod apk rexdl, you can visit the Rexdl website and browse through their collection of games and apps. You can also use the search bar to find your desired mod apk file. Alternatively, you can also visit other websites that provide mod apk files, such as ApkPure, ApkDone, ModDroid, HappyMod, and more. However, you should always check the safety and quality of the mod apk files before downloading and installing them.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Lagu Aespa Black Mamba - The Ultimate Guide for Fans.md DELETED
@@ -1,166 +0,0 @@
-
- <h1>Download Lagu Aespa Black Mamba: How to Enjoy the Debut Single of SM's New Girl Group</h1>
- <h2>Introduction</h2>
- <p>If you are a fan of K-pop, you might have heard of Aespa, the new girl group from SM Entertainment. They debuted in November 2020 with their single "Black Mamba", which became a hit song with millions of views and streams. But how can you download lagu aespa black mamba and enjoy it on your device? In this article, we will show you how to download lagu aespa black mamba from various sources, as well as some information about the group and the song. Let's get started!</p>
- <h3>Who are Aespa?</h3>
- <p>Aespa is a four-member girl group consisting of Karina, Giselle, Winter, and Ningning. They are the first girl group from SM Entertainment since Red Velvet in 2014, and the first idol group to have virtual avatars called æ-Aespa. The name Aespa comes from combining "AE", which stands for "Avatar X Experience", and "aspect", which means both sides. The group's concept is based on the idea of interacting with their avatars in a parallel world called KWANGYA.</p>
- <h2>download lagu aespa black mamba</h2><br /><p><b><b>DOWNLOAD</b> &#10031;&#10031;&#10031; <a href="https://jinyurl.com/2uNJpU">https://jinyurl.com/2uNJpU</a></b></p><br /><br />
- <h3>What is Black Mamba?</h3>
- <p>"Black Mamba" is the debut single by Aespa, which was released on November 17, 2020. The song was written and composed by Yoo Young-jin, Omega, Ella Isaacson, Gabriela Geneva (NIIVA), Jordan Reyes, Shaun Lopez, and Scott Chesak, while production was handled by Lee Soo-man. The song is an electropop and dance-pop track with a signature synth and EDM sound and bass that is paired with an addictive hook. The lyrics are about a being called "Black Mamba" that not only interferes with the members' and avatars' connection but also threatens their world and as such is abhorred by the members.</p>
- <h3>Why should you download lagu aespa black mamba?</h3>
- <p>There are many reasons why you should download lagu aespa black mamba and listen to it on your device. Here are some of them:</p>
- <ul>
- <li>You can support the group and their music by downloading their song legally.</li>
- <li>You can enjoy the song anytime and anywhere without relying on internet connection or data usage.</li>
- <li>You can appreciate the high-quality sound and production of the song better than streaming it online.</li>
- <li>You can discover more about the group's concept and story through the song and its music video.</li>
- <li>You can join the fandom of Aespa and connect with other fans who love the song and the group.</li>
- </ul>
- <h2>How to Download Lagu Aespa Black Mamba</h2>
- <p>Now that you know why you should download lagu aespa black mamba, let's see how you can do it. There are two main options that you can choose from: streaming platforms and MP3 download sites. We will explain each option in detail below.</p>
- <h3>Option 1: Streaming Platforms</h3>
- <p>Streaming platforms are online services that allow you to listen to music online or offline by paying a subscription fee or watching ads. Some of the most popular streaming platforms that have "Black Mamba" by Aespa are Spotify, Apple Music, and YouTube Music. Here is how you can download lagu aespa black mamba from each platform:</p>
- <h4>Spotify</h4>
- <p>Spotify is one of the most widely used streaming platforms in the world, with over 356 million users as of March 2021. Spotify offers a free plan that lets you listen to music with ads, and a premium plan that lets you download up to 10,000 songs per device and listen to them offline. To download lagu aespa black mamba from Spotify, you need to follow these steps:</p>
- <ol>
- <li>Download the Spotify app on your device or go to the Spotify web player on your browser.</li>
- <li>Sign up or log in to your Spotify account.</li>
- <li>Search for "Black Mamba" by Aespa on the search bar.</li>
- <li>Select the song and tap or click on the heart icon to add it to your library.</li>
- <li>Go to your library and find the song under the "Liked Songs" playlist.</li>
- <li>Tap or click on the download button next to the song title. The song will start downloading and a green arrow will appear when it is done.</li>
- <li>Enjoy listening to the song offline!</li>
- </ol>
- <h4>Apple Music</h4>
- <p>Apple Music is another popular streaming platform that has over 72 million subscribers as of June 2020. Apple Music offers a three-month free trial and then charges $9.99 per month for individual plans, $14.99 per month for family plans, and $4.99 per month for student plans. To download lagu aespa black mamba from Apple Music, you need to follow these steps:</p>
- <ol>
- <li>Download the Apple Music app on your device or go to the Apple Music web player on your browser.</li>
- <li>Sign up or log in to your Apple Music account with your Apple ID.</li>
- <li>Search for "Black Mamba" by Aespa on the search bar.</li>
- <li>Select the song and tap or click on the plus icon to add it to your library.</li>
- <li>Go to your library and find the song under the "Recently Added" section.</li>
- <li>Tap or click on the cloud icon next to the song title. The song will start downloading and a checkmark will appear when it is done.</li>
- <li>Enjoy listening to the song offline!</li>
- </ol>
- <h4>YouTube Music</h4>
- <p>YouTube Music is a streaming platform that is integrated with YouTube, the largest video-sharing platform in the world. YouTube Music has over 30 million subscribers as of October 2020. YouTube Music offers a free plan that lets you listen to music with ads, and a premium plan that lets you download songs and listen to them offline for $9.99 per month. To download lagu aespa black mamba from YouTube Music, you need to follow these steps:</p>
- <p>download lagu aespa black mamba mp3<br />
- download lagu aespa black mamba 320kbps<br />
- download lagu aespa black mamba ilkpop<br />
- download lagu aespa black mamba matikiri<br />
- download lagu aespa black mamba wapka<br />
- download lagu aespa black mamba planetlagu<br />
- download lagu aespa black mamba metrolagu<br />
- download lagu aespa black mamba uyeshare<br />
- download lagu aespa black mamba stafaband<br />
- download lagu aespa black mamba soundcloud<br />
- download lagu aespa black mamba mp4<br />
- download lagu aespa black mamba mv<br />
- download lagu aespa black mamba video<br />
- download lagu aespa black mamba 3gp<br />
- download lagu aespa black mamba 4k<br />
- download lagu aespa black mamba 1080p<br />
- download lagu aespa black mamba 720p<br />
- download lagu aespa black mamba 480p<br />
- download lagu aespa black mamba lyrics<br />
- download lagu aespa black mamba english version<br />
- download lagu aespa black mamba instrumental<br />
- download lagu aespa black mamba karaoke<br />
- download lagu aespa black mamba remix<br />
- download lagu aespa black mamba cover<br />
- download lagu aespa black mamba acoustic<br />
- download lagu aespa black mamba live performance<br />
- download lagu aespa black mamba dance practice<br />
- download lagu aespa black mamba choreography video<br />
- download lagu aespa black mamba behind the scenes<br />
- download lagu aespa black mamba reaction video<br />
- download lagu aespa black mamba audio only<br />
- download lagu aespa black mamba ringtone<br />
- download lagu aespa black mamba album zip file<br />
- download lagu aespa black mamba full album mp3<br />
- download lagu aespa black mamba mini album tracklist<br />
- download lagu aespa black mamba teaser photos<br />
- download lagu aespa black mamba concept photos<br />
- download lagu aespa black mamba wallpaper hd<br />
- download lagu aespa black mamba lockscreen images<br />
- download lagu aespa black mamba fan art pictures</p>
- <ol>
- <li>Download the YouTube Music app on your device or go to the YouTube Music web player on your browser.</li>
- <li>Sign up or log in to your YouTube Music account with your Google account.</li>
- <li>Search for "Black Mamba" by Aespa on the search bar.</li>
- <li>Select the song and tap or click on the three-dot menu icon next to the song title.</li>
- <li>Select "Download" from the menu. The song will start downloading and a blue circle will appear when it is done.</li>
- <li>Go to your library and find the song under the "Downloads" section.</li>
- <li>Enjoy listening to the song offline!</li>
- </ol>
- <h3>Option 2: MP3 Download Sites</h3>
- <p>If you don't want to use streaming platforms or pay for a subscription, you can also download lagu aespa black mamba from MP3 download sites. These are websites that allow you to download MP3 files of songs for free. However, you should be careful when using these sites, as some of them may contain viruses, malware, or illegal content. Always check the reputation and reviews of these sites before downloading anything from them. Here are some of the MP3 download sites that have "Black Mamba" by Aespa:</p>
- <h4>Internet Archive</h4>
- <p>The Internet Archive is a non-profit digital library that offers free access to millions of books, movies, music, and other media. It also has a collection of K-pop songs, including "Black Mamba" by Aespa. To download lagu aespa black mamba from the Internet Archive, you need to follow these steps:</p>
- <ol>
- <li>Go to <a href="">https://archive.org/details/kpop_20201117_0000</a>.</li>
- <li>Scroll down until you find "Black Mamba" by Aespa under the "Tracklist" section.</li>
- <li>Select "VBR MP3" from the drop-down menu next to the song title.</li>
- <li>The song will start downloading and a pop-up window will appear when it is done.</li>
- <li>Save the file to your device and enjoy listening to the song offline!</li>
- </ol>
- <h4>KUYOU.id</h4>
- <p>KUYOU.id is an Indonesian website that offers free MP3 downloads of various songs, including K-pop songs. It has a simple and user-friendly interface that makes it easy to find and download songs. To download lagu aespa black mamba from KUYOU.id, you need to follow these steps:</p>
- <ol>
- <li>Go to <a href="">https://kuyou.id/download-lagu-aespa-black-mamba-mp3/</a>.</li>
- <li>Scroll down until you see the "Download Lagu Aespa Black Mamba MP3" button.</li>
- <li>Click on the button and wait for a few seconds until the download link appears.</li>
- <li>Click on the download link and the song will start downloading.</li>
- <li>Save the file to your device and enjoy listening to the song offline!</li>
- </ol>
- <h4>WAPQAW</h4>
- <p>WAPQAW is another website that offers free MP3 downloads of various songs, including K-pop songs. It has a large database of songs that you can search by artist, title, or genre. To download lagu aespa black mamba from WAPQAW, you need to follow these steps:</p>
- <ol>
- <li>Go to <a href="">https://wapqaw.com/</a>.</li>
- <li>Type "Black Mamba" by Aespa on the search bar and click on the search icon.</li>
- <li>Select the song from the list of results and click on the "Download" button.</li>
- <li>Select the quality of the MP3 file that you want to download and click on the "Download" button again.</li>
- <li>The song will start downloading and a pop-up window will appear when it is done.</li>
- <li>Save the file to your device and enjoy listening to the song offline!</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download lagu aespa black mamba from various sources, as well as some information about the group and the song. We hope that you have enjoyed reading this article and that you have learned something new. Now, you can download lagu aespa black mamba and listen to it anytime and anywhere you want. You can also share it with your friends and family who love K-pop and Aespa. Don't forget to support the group and their music by streaming their song online or buying their album. Thank you for reading this article and have a great day!</p>
- <h3>FAQs</h3>
- <p>Here are some of the frequently asked questions about downloading lagu aespa black mamba:</p>
- <ol>
- <li><b>Is downloading lagu aespa black mamba legal?</b></li>
- <p>Downloading lagu aespa black mamba from streaming platforms is legal as long as you have a valid subscription or permission from the platform. However, downloading lagu aespa black mamba from MP3 download sites may not be legal, as some of them may violate the copyright laws or contain illegal content. Therefore, you should be careful when using these sites and always check their reputation and reviews before downloading anything from them.</p>
- <li><b>What are the benefits of downloading lagu aespa black mamba?</b></li>
- <p>Some of the benefits of downloading lagu aespa black mamba are:</p>
- <ul>
- <li>You can support the group and their music by downloading their song legally.</li>
- <li>You can enjoy the song anytime and anywhere without relying on internet connection or data usage.</li>
- <li>You can appreciate the high-quality sound and production of the song better than streaming it online.</li>
- <li>You can discover more about the group's concept and story through the song and its music video.</li>
- <li>You can join the fandom of Aespa and connect with other fans who love the song and the group.</li>
- </ul>
- <li><b>What are some other songs by Aespa?</b></li>
- <p>Some other songs by Aespa are:</p>
- <ul>
- <li>"Forever", a remake of Yoo Young-jin's 2000 song, which was released as a winter single on February 5, 2021.</li>
- <li>"Next Level", a remake of A$ton Wyld's 2019 song from the Fast & Furious Presents: Hobbs & Shaw soundtrack, which was released as their second single on May 17, 2021.</li>
- <li>"Savage", their first mini-album, which was released on October 5, 2021. It contains six tracks, including "Savage", "Dream Catcher", "Whiplash", "Energy", and "YEPPI YEPPI".</li>
- </ul>
- <li><b>How can I watch the music video of "Black Mamba" by Aespa?</b></li>
- <p>You can watch the music video of "Black Mamba" by Aespa on YouTube, where it has over 200 million views as of October 2021. You can also watch it on the official website of Aespa, where you can interact with their avatars and explore their world. Here are the links to watch the music video of "Black Mamba" by Aespa:</p>
- <ul>
- <li><a href="">https://www.youtube.com/watch?v=ZeerrnuLi5E</a></li>
- <li><a href="">https://aespa.smtown.com/</a></li>
- </ul>
- <li><b>How can I follow Aespa on social media?</b></li>
- <p>You can follow Aespa on various social media platforms, where they share their updates, photos, videos, and more. Here are some of the social media accounts of Aespa:</p>
- <ul>
- <li>Twitter: <a href="">https://twitter.com/aespa_official</a></li>
- <li>Instagram: <a href="">https://www.instagram.com/aespa_official/</a></li>
- <li>Facebook: <a href="">https://www.facebook.com/aespa.official/</a></li>
- <li>TikTok: <a href="">https://www.tiktok.com/@aespa_official</a></li>
- <li>Weibo: <a href="">https://weibo.com/u/7581456869</a></li>
- <li>V LIVE: <a href="">https://www.vlive.tv/channel/9C1B23</a></li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download TikTok Asia APK for Android - Latest Version 30.0.3.md DELETED
@@ -1,153 +0,0 @@
-
- <h1>How to Use TikTok for Fun and Profit</h1>
- <p>TikTok is a video-sharing app that allows you to create and share short-form videos on any topic. It’s mainly mobile-based, although you can still watch TikTok videos using the web app. The platform allows users to get creative with their content using filters, stickers, voiceovers, sound effects, and background music.</p>
- <h2>tiktok asia 2016 version apk download</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNTqn">https://jinyurl.com/2uNTqn</a></b></p><br /><br />
- <p>But TikTok is not just for entertainment. You can also use it to market your business, earn money from sponsorships and ads, or just have fun with your friends and followers. In this article, we’ll show you how to use TikTok for fun and profit in 2023.</p>
- <h2>What is TikTok and why should you use it?</h2>
- <p>TikTok is a popular social media app based in China that’s grown immensely in popularity over the last couple of years. On the app, creators can use a ton of sound effects, filters, and music to record short clips. User content ranges from DIY and craft videos to sketches and dance routines. To discover new content, users can follow specific creators and use hashtags.</p>
- <p>Since videos can only be between 15 and 60 seconds long, entertainment and engagement are both optimized — which is part of the reason that TikTok is so popular. In the United States alone, TikTok currently has close to 95 million users. By 2025, TikTok is expected to have an audience of 103 million U.S. users, which means roughly one in three Americans will have a TikTok account.</p>
- <p>TikTok is the first Chinese social media app to do this well. Since the release of the app, however, there have been concerns about privacy and users’ data, especially considering the fact that teenagers make up a large part of TikTok’s user base.</p>
- <h3>TikTok's features and benefits</h3>
- <p>What makes TikTok a popular platform for influencers and content creators? Check out these amazing features.</p>
- <ul>
- <li>Access to thousands of popular free music</li>
- <li>Hundreds of free emoji, stickers, and filters to make your video stand-out</li>
- <li>Built-in free and easy-to-use video editing tool</li>
- <li>Tons of free videos to inspire you with your next one</li>
- <li>Available in 154 countries and 39 languages creating a global community</li>
- <li>TikTok challenges and duets that help your video get quickly discovered</li>
- <li>A powerful algorithm that shows you more of the same videos you like</li>
- </ul>
- <h3>TikTok's One of the best ways to discover and enjoy TikTok videos is to browse the For You page and the Following page. These are two different feeds that show you videos based on your preferences and interests. Here's how they work and how to access them: The For You page The For You page is a personalized feed of videos that TikTok recommends for you based on your interactions with the app. You can find the For You page by tapping the Home icon at the bottom left corner of the screen, and then tapping For You at the top. The For You page shows you videos from a variety of creators, topics, and trends that match your taste. The more you use TikTok, the more the app learns about what you like and dislike, and the better it can tailor the recommendations for you. You can also influence the For You page by liking, commenting, sharing, and following the videos and creators that you enjoy. This will help TikTok understand your preferences and show you more of what you want to see. If you come across a video that you're not interested in, you can long-press on it and tap Not Interested. This will tell TikTok to show you less of that type of content in the future. You can also hide videos from a certain creator or with a certain sound by tapping More and choosing Hide videos from this user or Hide videos with this sound. The Following page The Following page is a feed of videos from the creators that you follow on TikTok. You can find the Following page by tapping the Home icon at the bottom left corner of the screen, and then tapping Following at the top. The Following page shows you the latest videos from your favorite creators in chronological order. You can also see when they go live by tapping the Live button next to their profile picture. You can follow any creator on TikTok by tapping their profile picture on their video, or by searching for their username in the Discover tab. You can also find new creators to follow by browsing hashtags, sounds, effects, and trending topics on TikTok. To unfollow a creator, simply tap their profile picture on their video, or go to their profile page, and tap Following. You can also remove followers from your own account by going to your profile page, tapping Followers, and tapping Remove next to their name. I hope this helps you understand how to browse videos on TikTok and enjoy the app to its fullest. If you have any questions, feel free to ask me. ? Now that you know how to browse videos on TikTok, let's learn how to make and edit your own videos. TikTok offers a variety of tools and features that allow you to create engaging and creative videos with ease. Here are some steps to help you get started: How to record a video using your phone or TikTok's native recorder You can record a video using your phone's camera or TikTok's native recorder. To use your phone's camera, simply open the app and tap the + icon at the bottom center of the screen. This will open your phone's camera and allow you to record a video as you normally would. To use TikTok's native recorder, tap the + icon at the bottom center of the screen, and then tap Templates at the top right corner. This will show you a list of templates that you can use to create different types of videos, such as music videos, slideshows, montages, and more. To use a template, tap on it and follow the instructions on the screen. You can also customize the template by adding your own photos, videos, text, stickers, and music. To preview your video, tap the Play button at the bottom right corner. 
To save your video, tap the Next button at the top right corner. How to use filters, stickers, music, and effects TikTok offers a variety of filters, stickers, music, and effects that you can use to enhance your video and make it more fun and attractive. To access these features, tap the + icon at the bottom center of the screen, and then tap Effects at the bottom left corner. This will open a menu of different categories of effects that you can choose from, such as Trending, Beauty, Funny, Animal, and more. To apply an effect, simply tap on it and it will appear on your screen. You can also adjust the intensity and duration of the effect by dragging the slider at the bottom. To add stickers to your video, tap Stickers at the bottom left corner. This will open a menu of different categories of stickers that you can choose from, such as Emoji, Text, GIFs, and more. To add a sticker, simply tap on it and it will appear on your screen. You can also resize, rotate, and move the sticker by using your fingers. To add music to your video, tap Sounds at the bottom center. This will open a menu of different categories of music that you can choose from, such as Popular, New Releases, Genres, Playlists, and more. You can also search for a specific song or artist by using the search bar at the top. To add music to your video, simply tap on it and it will start playing. You can also adjust the volume and trim the music by using the sliders at the bottom. To add filters to your video, tap Filters at the bottom right corner. This will open a menu of different categories of filters that you can choose from, such as Portrait, Landscape, Food, Vibe, and more. To apply a filter, simply swipe left or right on your screen until you find one that you like. You can also adjust the intensity of the filter by dragging the slider at the bottom. How to edit your video using TikTok's built-in editing tools TikTok also offers a built-in editing tool that allows you to edit your video after recording it. To access this tool, tap Next after recording or selecting a video. This will open a screen where you can edit your video in various ways. Some of the editing options that you can use are: - Trim: This allows you to cut out unwanted parts of your video by dragging the handles at both ends of the timeline. - Adjust clips: This allows you to rearrange or delete clips in your video by tapping and holding them on the timeline. - Voiceover: This allows you to record your own voice over your video by tapping and holding the microphone icon at the bottom. - Volume: This allows you to adjust the volume of your original sound or added music by dragging the sliders at the bottom. - Text: This allows you to add text to your video by tapping Text at the bottom. You can also change the font, color, size, alignment, and animation of the text by tapping on it and using the options at the bottom. - Stickers: This allows you to add stickers to your video by tapping Stickers at the bottom. You can also resize, rotate, and move the stickers by using your fingers. - Effects: This allows you to add effects to your video by tapping Effects at the bottom. You can also adjust the intensity and duration of the effects by dragging the slider at the bottom. - Filters: This allows you to add filters to your video by tapping Filters at the bottom. You can also adjust the intensity of the filters by dragging the slider at the bottom. 
After editing your video, you can tap Next to proceed to the next screen, where you can add a caption, hashtags, tags, and other settings to your video. You can also choose who can view, comment, duet, stitch, and react to your video by tapping Who can view this video at the bottom. When you're ready to post your video, tap Post at the top right corner. I hope this helps you understand how to make and edit TikTok videos and unleash your creativity on the app. If you have any questions, feel free to ask me. ? Now that you know how to make and edit TikTok videos, let's learn how to discover and engage with TikTok content. TikTok offers a variety of ways to interact with other users and their videos, such as hashtags, challenges, duets, and stitches. Here are some tips to help you get the most out of TikTok's social features: How to use hashtags, challenges, duets, and stitches Hashtags are keywords or phrases that you can add to your caption to categorize your video and make it easier for other users to find it. You can use hashtags that are relevant to your video's topic, genre, style, or mood. You can also use hashtags that are trending or popular on TikTok, such as #fyp (for you page), #viral, #funny, #dance, etc. Challenges are viral trends or activities that users can participate in by creating their own videos using a specific hashtag. Challenges can be fun, creative, educational, or social. Some examples of popular challenges on TikTok are #wipeitdown, #savagelove, #learnontiktok, #blindinglights, etc. Duets are videos that allow you to create a split-screen video with another user's video. You can use duets to react to, collaborate with, or parody another user's video. To create a duet, tap the Share button on the video that you want to duet with, and then tap Duet. Stitches are videos that allow you to clip and integrate another user's video into your own video. You can use stitches to add your own commentary, perspective, or twist to another user's video. To create a stitch, tap the Share button on the video that you want to stitch with, and then tap Stitch. How to like, comment, share, and save videos One of the simplest ways to engage with TikTok content is to like, comment, share, and save videos that you enjoy. These actions not only show your appreciation and support for the creators, but also help TikTok's algorithm to recommend more videos that suit your taste. To like a video, simply tap the heart icon at the bottom right corner of the screen. You can also double-tap the video to like it. To unlike a video, tap the heart icon again. To comment on a video, tap the speech bubble icon at the bottom right corner of the screen. This will open a comment section where you can type your comment and send it. You can also reply to other users' comments by tapping Reply under their comment. To share a video, tap the arrow icon at the bottom right corner of the screen. This will open a menu of different options that you can use to share the video with others. You can share the video via message, email, social media, or copy the link. To save a video, tap the arrow icon at the bottom right corner of the screen, and then tap Save Video. This will download the video to your device's gallery or camera roll. You can also save a video by long-pressing on it and tapping Save Video. I hope this helps you understand how to discover and engage with TikTok content and have fun on the app. If you have any questions, feel free to ask me. ? 
Now that you know how to discover and engage with TikTok content, let's learn how to grow your TikTok audience and influence. TikTok is a competitive platform where millions of creators are vying for attention and recognition. To stand out from the crowd and attract loyal fans, you need to have a clear and consistent content strategy that showcases your unique value and personality. Here are some tips to help you grow your TikTok audience and influence in 2023: How to identify your target audience and content strategy Before you start creating content on TikTok, you need to have a clear idea of who your target audience is and what kind of content they want to see from you. This will help you tailor your content to their needs, preferences, and interests, and increase your chances of getting views, likes, comments, shares, and follows. To identify your target audience, you need to do some research and analysis. You can use tools like TikTok Analytics, Google Trends, or Social Blade to find out more about your potential audience's demographics, behaviors, preferences, and trends. You can also look at other successful creators in your niche and see what kind of content they create, how they interact with their fans, and what hashtags they use. To identify your content strategy, you need to define your niche, your value proposition, your tone of voice, and your posting schedule. Your niche is the specific topic or category that you focus on in your content. Your value proposition is the unique benefit or solution that you offer to your audience through your content. Your tone of voice is the way you communicate with your audience through your words, expressions, and emotions. Your posting schedule is the frequency and timing of your content uploads. For example, if you are a fitness instructor who wants to target young women who want to lose weight and tone their bodies, your niche could be fitness tips and workouts for women. Your value proposition could be that you offer simple, effective, and fun exercises that can be done at home with minimal equipment. Your tone of voice could be friendly, motivational, and humorous. Your posting schedule could be three times a week at 9 am. How to use analytics to track your performance and optimize your content Once you have identified your target audience and content strategy, you need to monitor and measure how well your content is performing on TikTok. This will help you understand what works and what doesn't work for your audience, and how you can improve your content quality and reach. To use analytics on TikTok, you need to switch to a Pro account by going to your profile page, tapping the three dots at the top right corner, tapping Manage account, and tapping Switch to Pro account. This will give you access to a dashboard where you can see various metrics and insights about your account and content. Some of the metrics that you can track on TikTok are: - Profile views: The number of times users viewed your profile page. - Video views: The number of times users viewed your videos. - Followers: The number of users who followed your account. - Likes: The number of times users liked your videos. - Comments: The number of times users commented on your videos. - Shares: The number of times users shared your videos. - Average watch time: The average amount of time users spent watching your videos. - Traffic source: The sources from which users discovered your videos, such as For You page, Following page, hashtags, sounds, etc. 
- Audience territories: The countries or regions where most of your audience is located. - Audience demographics: The age and gender distribution of your audience. - Audience interests: The topics or categories that most interest your audience. By analyzing these metrics, you can find out which videos performed the best and why, which videos performed the worst and why, which times and days are the best for posting, which hashtags and sounds are the most effective for reaching more users, which countries or regions are the most engaged with your content, which age groups and genders are the most interested in your content, and which topics or categories are the most appealing to your audience. You can then use this information to optimize your content strategy by creating more of the content that resonates with your audience, improving the quality and relevance of your content, experimenting with different formats and styles of content, testing different posting times and frequencies , and using different hashtags and sounds to reach more users. How to collaborate with other creators and brands Another way to grow your TikTok audience and influence is to collaborate with other creators and brands that share your niche, values, and goals. Collaboration can help you expand your reach, increase your credibility, and create more value for your audience. To collaborate with other creators, you can use features like duets, stitches, live streams, or group chats to create joint content, cross-promote each other, or interact with each other's fans. You can also join or create a TikTok collective, which is a group of creators who work together to support each other and grow their influence. To collaborate with brands, you can use platforms like FameBit, AspireIQ, or Upfluence to find and connect with brands that are looking for influencers to promote their products or services. You can also pitch directly to brands that you like and want to work with by sending them an email or a direct message on TikTok. When collaborating with brands, you need to make sure that you follow the guidelines and best practices for creating sponsored content on TikTok. Some of these are: - Disclose the sponsorship by using hashtags like #ad, #sponsored, or #partner in your caption. - Be authentic and honest about your opinion and experience with the product or service. - Be creative and original in your content and avoid copying or imitating other creators or brands. - Be respectful and professional in your communication and interaction with the brand and the audience. - Follow the terms and conditions of the agreement and deliver the content on time and as agreed. I hope this helps you understand how to grow your TikTok audience and influence in 2023. If you have any questions, feel free to ask me. ? <h2>How to make money on TikTok</h2>
<h2>How to make money on TikTok</h2>

<p>TikTok is not only a platform for fun and entertainment, but also a platform for making money. There are several ways that you can monetize your TikTok account and earn income from your content and influence. Here are some of the most common and effective ways to make money on TikTok in 2023:</p>
<h3>How to join the TikTok Creator Fund and get paid for views</h3>

<p>The TikTok Creator Fund is a program that pays eligible creators for their video views on TikTok. The program was launched in 2020 and has since expanded to several countries, including the US, UK, Germany, France, Italy, and Spain.</p>

<p>To join the TikTok Creator Fund, you need to meet the following requirements:</p>

<ul>
<li>Be at least 18 years old</li>
<li>Have at least 10K followers</li>
<li>Have at least 100K video views in the last 30 days</li>
<li>Follow the TikTok Community Guidelines and Terms of Service</li>
<li>Be located in an eligible country</li>
</ul>

<p>To apply for the TikTok Creator Fund, go to your profile page, tap the three dots at the top right corner, tap Creator Tools, and tap Creator Fund. You will then need to fill out some information and agree to the terms and conditions of the program.</p>

<p>Once you join the Creator Fund, you will start earning money based on your video views and engagement. The amount you earn depends on factors such as the number of views, the location of the viewers, the quality of the content, and the current market rates. You can check your earnings and balance in the Creator Fund dashboard.</p>

<p>To withdraw your money, link your PayPal or bank account to your TikTok account by going to your profile page, tapping the three dots at the top right corner, tapping Wallet, and tapping Link Account. You can then request a withdrawal once your balance reaches the minimum threshold of $50. The withdrawal process may take up to 15 days to complete.</p>

<h3>How to partner with brands and create sponsored content</h3>
<p>Another way to make money on TikTok is to partner with brands and create sponsored content for them. Sponsored content promotes a brand's product or service in exchange for a fee or a commission, and can take the form of product reviews, tutorials, testimonials, challenges, giveaways, or any other creative format that showcases the brand's value and benefits.</p>

<p>To land these partnerships, you need a large and engaged audience that matches the brand's target market, plus a professional, attractive profile that showcases your niche, personality, and portfolio. As with the collaborations described earlier, you can use platforms like FameBit, AspireIQ, or Upfluence to find brands that are looking for influencers, or pitch directly to brands you want to work with by email or direct message on TikTok.</p>

<p>The same sponsored-content guidelines listed above apply here: disclose the sponsorship with hashtags like #ad, #sponsored, or #partner, be honest about your experience with the product or service, keep your content original, stay respectful and professional with the brand and your audience, and deliver the content on time and as agreed.</p>
<h3>How to promote your own products or services on TikTok</h3>

<p>If you have your own products or services to sell or promote on TikTok, you can do so by creating engaging and informative content that showcases their value and benefits. You can also use features like TikTok Shop or TikTok Live Shopping to sell directly on the app.</p>

<p>TikTok Shop lets you create a mini-store within your profile page where you can display your products or services for sale. To use it, you need a verified business account: go to your profile page, tap the three dots at the top right corner, tap Manage account, tap Switch to Pro account, choose Business account, and fill out some information about your business. Once you have a business account, create a shop by going to your profile page, tapping the three dots, tapping Creator Tools, and tapping TikTok Shop. You will then need to link your shop to a third-party e-commerce platform, such as Shopify, WooCommerce, or BigCommerce, and add your products or services by uploading their images, titles, prices, and descriptions.</p>

<p>Once your shop is set up, promote your products or services with videos that showcase their features, benefits, reviews, testimonials, or tutorials. You can also add a Shop Now button to your videos that directs viewers to your shop, where they can make a purchase.</p>

<p>TikTok Live Shopping lets you sell your products or services live on TikTok. It requires a verified business account and a TikTok Shop. To start, tap the + icon at the bottom center of the screen and tap Live, then select the products or services you want to feature from your shop and display them on your live stream. During the stream you can talk about your products, answer questions from viewers, and encourage them to buy. You can also see how many viewers are watching, how many items have been sold, and how much revenue you have generated, and interact with viewers through gifts, stickers, or messages.</p>

<h3>How to cross-promote your TikTok content on other platforms</h3>

<p>Another way to make money on TikTok is to cross-promote your TikTok content on other platforms where you already have an audience or a presence. This can drive more traffic to your TikTok account, increase your exposure and reach, and generate revenue from different sources.</p>

<p>Some of the platforms that you can cross-promote your TikTok content on are:</p>

<ul>
<li>YouTube: Upload your TikTok videos as short-form content or as part of a longer video, or create YouTube videos related to your TikTok niche. You can monetize with ads, memberships, merchandise, or super chats.</li>
<li>Instagram: Share your TikTok videos as posts or stories, or create similar Instagram reels. You can monetize with sponsored posts, affiliate links, or shoppable posts.</li>
<li>Facebook: Post your TikTok videos as posts or stories, or create related Facebook videos. You can monetize with ads, fan subscriptions, stars, or branded content.</li>
<li>Twitter: Tweet your TikTok videos, or create related Twitter videos. You can monetize with sponsored tweets, affiliate links, or tips.</li>
<li>Pinterest: Pin your TikTok videos as pins or stories, or create related Pinterest videos. You can monetize with ads, sponsored pins, or affiliate links.</li>
</ul>

<h2>Conclusion</h2>
<p>TikTok is a powerful and popular platform that allows you to create and share short-form videos on any topic. It's also a platform for marketing your business, earning money from sponsorships and ads, or just having fun with your friends and followers. In this article, we showed you how to use TikTok for fun and profit in 2023, covering the following topics:</p>

<ul>
<li>What is TikTok and why should you use it?</li>
<li>How to create a TikTok account and profile</li>
<li>How to make and edit TikTok videos</li>
<li>How to discover and engage with TikTok content</li>
<li>How to grow your TikTok audience and influence</li>
<li>How to make money on TikTok</li>
</ul>

<p>If you want to learn more about TikTok, you can check out these resources:</p>

<ul>
<li>TikTok Help Center: the official site with answers to common questions, tips and tricks, and updates on TikTok.</li>
<li>TikTok Newsroom: the official blog with the latest news, announcements, and stories about TikTok.</li>
<li>TikTok Academy: an online learning platform with courses and tutorials on using TikTok for education, entertainment, or business.</li>
</ul>

<h2>FAQs</h2>

<p>Here are some frequently asked questions about TikTok that you might find useful:</p>
<h3>Is TikTok safe to use?</h3>

<p>TikTok is generally safe to use, as long as you follow some basic safety precautions:</p>

<ul>
<li>Use a strong password and enable two-factor authentication for your account.</li>
<li>Adjust your privacy settings to control who can view, comment, duet, stitch, and react to your videos.</li>
<li>Be careful about what personal information you share on your profile and videos.</li>
<li>Report and block any users who harass, bully, or spam you.</li>
<li>Avoid clicking on suspicious links or downloading unknown files from other users.</li>
<li>Follow the TikTok Community Guidelines and Terms of Service.</li>
</ul>

<h3>How do I get verified on TikTok?</h3>

<p>TikTok verifies accounts that belong to authentic, notable, and active creators or brands. To get verified on TikTok, you need to meet the following criteria:</p>

<ul>
<li>You have a unique and original content style that stands out from others.</li>
<li>You have a large and loyal fan base that engages with your content regularly.</li>
<li>You have a consistent and high-quality content output that follows the trends and best practices on TikTok.</li>
<li>You have a positive and influential impact on the TikTok community.</li>
</ul>

<p>If you think you meet these criteria, you can apply for verification by contacting TikTok's support team via email or the feedback form. However, there is no guarantee that your application will be accepted, as verification is granted at TikTok's discretion.</p>

<h3>How do I delete my TikTok account?</h3>

<p>If you want to delete your TikTok account permanently, follow these steps:</p>

<ol>
<li>Go to your profile page and tap the three dots at the top right corner.</li>
<li>Tap Manage account and tap Delete account at the bottom.</li>
<li>Follow the instructions on the screen and confirm your deletion request.</li>
</ol>

<p>Note that deleting your account will remove all your videos, likes, comments, messages, followers, and other data from TikTok. You will also lose access to any services or features that require a TikTok account, and you will not be able to recover the account once it is deleted.</p>

<h3>How do I download TikTok videos?</h3>

<p>If you want to download TikTok videos to your device, follow these steps:</p>

<ol>
<li>Find the video that you want to download and tap the Share button at the bottom right corner.</li>
<li>Tap Save Video and wait for the download to finish.</li>
<li>Go to your device's gallery or camera roll and find the downloaded video.</li>
</ol>

<p>Note that some videos may not be available for download due to the creator's or the platform's settings. You can also use third-party apps or websites to download TikTok videos, but be careful about their security and legality.</p>

<h3>How do I go live on TikTok?</h3>

<p>If you want to go live on TikTok and broadcast your video in real time, follow these steps:</p>

<ol>
<li>Tap the + icon at the bottom center of the screen and tap Live.</li>
<li>Enter a title for your live stream and choose a category for it.</li>
<li>Tap Go Live and start your live stream.</li>
</ol>

<p>Note that you need at least 1,000 followers to go live on TikTok. You can invite other users to join your live stream by tapping the + icon at the bottom left corner and selecting a user from your following list, and you can interact with your viewers by sending them gifts, stickers, or messages.</p>
<p>That's it for this article. I hope you learned something new and useful about how to use TikTok for fun and profit in 2023. If you liked it, please share it with your friends and family, and if you have any feedback or questions, let me know in the comments below. I would love to hear from you.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py DELETED
@@ -1,59 +0,0 @@
import numpy as np
import onnx
import torch


def convert_onnx(net, path_module, output, opset=11, simplify=False):
    assert isinstance(net, torch.nn.Module)
    img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
    img = img.astype(np.float32)  # np.float was removed in NumPy 1.24; use an explicit dtype
    img = (img / 255. - 0.5) / 0.5  # torch style norm
    img = img.transpose((2, 0, 1))
    img = torch.from_numpy(img).unsqueeze(0).float()

    weight = torch.load(path_module)
    net.load_state_dict(weight)
    net.eval()
    torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
    model = onnx.load(output)
    graph = model.graph
    graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
    if simplify:
        from onnxsim import simplify  # rebinds the name locally; the boolean flag was already checked above
        model, check = simplify(model)
        assert check, "Simplified ONNX model could not be validated"
    onnx.save(model, output)


if __name__ == '__main__':
    import os
    import argparse
    from backbones import get_model

    parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
    parser.add_argument('input', type=str, help='input backbone.pth file or path')
    parser.add_argument('--output', type=str, default=None, help='output onnx path')
    parser.add_argument('--network', type=str, default=None, help='backbone network')
    parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
    args = parser.parse_args()
    input_file = args.input
    if os.path.isdir(input_file):
        input_file = os.path.join(input_file, "backbone.pth")
    assert os.path.exists(input_file)
    model_name = os.path.basename(os.path.dirname(input_file)).lower()
    params = model_name.split("_")
    if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
        if args.network is None:
            args.network = params[2]
    assert args.network is not None
    print(args)
    backbone_onnx = get_model(args.network, dropout=0)

    output_path = args.output
    if output_path is None:
        output_path = os.path.join(os.path.dirname(__file__), 'onnx')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    assert os.path.isdir(output_path)
    output_file = os.path.join(output_path, "%s.onnx" % model_name)
    convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify)
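For context, here is a minimal sketch of driving `convert_onnx` programmatically rather than through the CLI above. The checkpoint path, output path, and backbone name are hypothetical, and it assumes the script is run next to the arcface_torch sources so that `backbones.get_model` and `torch2onnx` are importable.

```python
# Hypothetical paths and model name, for illustration only.
from backbones import get_model        # assumes the arcface_torch package layout shown above
from torch2onnx import convert_onnx

net = get_model("r50", dropout=0)      # "r50" is an assumed backbone name
convert_onnx(
    net,
    path_module="work_dirs/ms1mv3_arcface_r50/backbone.pth",  # hypothetical checkpoint
    output="onnx/ms1mv3_arcface_r50.onnx",
    opset=11,
    simplify=False,
)
```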
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/lib/infer_pack/attentions.py DELETED
@@ -1,417 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import torch
5
- from torch import nn
6
- from torch.nn import functional as F
7
-
8
- from lib.infer_pack import commons
9
- from lib.infer_pack import modules
10
- from lib.infer_pack.modules import LayerNorm
11
-
12
-
13
- class Encoder(nn.Module):
14
- def __init__(
15
- self,
16
- hidden_channels,
17
- filter_channels,
18
- n_heads,
19
- n_layers,
20
- kernel_size=1,
21
- p_dropout=0.0,
22
- window_size=10,
23
- **kwargs
24
- ):
25
- super().__init__()
26
- self.hidden_channels = hidden_channels
27
- self.filter_channels = filter_channels
28
- self.n_heads = n_heads
29
- self.n_layers = n_layers
30
- self.kernel_size = kernel_size
31
- self.p_dropout = p_dropout
32
- self.window_size = window_size
33
-
34
- self.drop = nn.Dropout(p_dropout)
35
- self.attn_layers = nn.ModuleList()
36
- self.norm_layers_1 = nn.ModuleList()
37
- self.ffn_layers = nn.ModuleList()
38
- self.norm_layers_2 = nn.ModuleList()
39
- for i in range(self.n_layers):
40
- self.attn_layers.append(
41
- MultiHeadAttention(
42
- hidden_channels,
43
- hidden_channels,
44
- n_heads,
45
- p_dropout=p_dropout,
46
- window_size=window_size,
47
- )
48
- )
49
- self.norm_layers_1.append(LayerNorm(hidden_channels))
50
- self.ffn_layers.append(
51
- FFN(
52
- hidden_channels,
53
- hidden_channels,
54
- filter_channels,
55
- kernel_size,
56
- p_dropout=p_dropout,
57
- )
58
- )
59
- self.norm_layers_2.append(LayerNorm(hidden_channels))
60
-
61
- def forward(self, x, x_mask):
62
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
63
- x = x * x_mask
64
- for i in range(self.n_layers):
65
- y = self.attn_layers[i](x, x, attn_mask)
66
- y = self.drop(y)
67
- x = self.norm_layers_1[i](x + y)
68
-
69
- y = self.ffn_layers[i](x, x_mask)
70
- y = self.drop(y)
71
- x = self.norm_layers_2[i](x + y)
72
- x = x * x_mask
73
- return x
74
-
75
-
76
- class Decoder(nn.Module):
77
- def __init__(
78
- self,
79
- hidden_channels,
80
- filter_channels,
81
- n_heads,
82
- n_layers,
83
- kernel_size=1,
84
- p_dropout=0.0,
85
- proximal_bias=False,
86
- proximal_init=True,
87
- **kwargs
88
- ):
89
- super().__init__()
90
- self.hidden_channels = hidden_channels
91
- self.filter_channels = filter_channels
92
- self.n_heads = n_heads
93
- self.n_layers = n_layers
94
- self.kernel_size = kernel_size
95
- self.p_dropout = p_dropout
96
- self.proximal_bias = proximal_bias
97
- self.proximal_init = proximal_init
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.self_attn_layers = nn.ModuleList()
101
- self.norm_layers_0 = nn.ModuleList()
102
- self.encdec_attn_layers = nn.ModuleList()
103
- self.norm_layers_1 = nn.ModuleList()
104
- self.ffn_layers = nn.ModuleList()
105
- self.norm_layers_2 = nn.ModuleList()
106
- for i in range(self.n_layers):
107
- self.self_attn_layers.append(
108
- MultiHeadAttention(
109
- hidden_channels,
110
- hidden_channels,
111
- n_heads,
112
- p_dropout=p_dropout,
113
- proximal_bias=proximal_bias,
114
- proximal_init=proximal_init,
115
- )
116
- )
117
- self.norm_layers_0.append(LayerNorm(hidden_channels))
118
- self.encdec_attn_layers.append(
119
- MultiHeadAttention(
120
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
121
- )
122
- )
123
- self.norm_layers_1.append(LayerNorm(hidden_channels))
124
- self.ffn_layers.append(
125
- FFN(
126
- hidden_channels,
127
- hidden_channels,
128
- filter_channels,
129
- kernel_size,
130
- p_dropout=p_dropout,
131
- causal=True,
132
- )
133
- )
134
- self.norm_layers_2.append(LayerNorm(hidden_channels))
135
-
136
- def forward(self, x, x_mask, h, h_mask):
137
- """
138
- x: decoder input
139
- h: encoder output
140
- """
141
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
142
- device=x.device, dtype=x.dtype
143
- )
144
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
145
- x = x * x_mask
146
- for i in range(self.n_layers):
147
- y = self.self_attn_layers[i](x, x, self_attn_mask)
148
- y = self.drop(y)
149
- x = self.norm_layers_0[i](x + y)
150
-
151
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
152
- y = self.drop(y)
153
- x = self.norm_layers_1[i](x + y)
154
-
155
- y = self.ffn_layers[i](x, x_mask)
156
- y = self.drop(y)
157
- x = self.norm_layers_2[i](x + y)
158
- x = x * x_mask
159
- return x
160
-
161
-
162
- class MultiHeadAttention(nn.Module):
163
- def __init__(
164
- self,
165
- channels,
166
- out_channels,
167
- n_heads,
168
- p_dropout=0.0,
169
- window_size=None,
170
- heads_share=True,
171
- block_length=None,
172
- proximal_bias=False,
173
- proximal_init=False,
174
- ):
175
- super().__init__()
176
- assert channels % n_heads == 0
177
-
178
- self.channels = channels
179
- self.out_channels = out_channels
180
- self.n_heads = n_heads
181
- self.p_dropout = p_dropout
182
- self.window_size = window_size
183
- self.heads_share = heads_share
184
- self.block_length = block_length
185
- self.proximal_bias = proximal_bias
186
- self.proximal_init = proximal_init
187
- self.attn = None
188
-
189
- self.k_channels = channels // n_heads
190
- self.conv_q = nn.Conv1d(channels, channels, 1)
191
- self.conv_k = nn.Conv1d(channels, channels, 1)
192
- self.conv_v = nn.Conv1d(channels, channels, 1)
193
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
194
- self.drop = nn.Dropout(p_dropout)
195
-
196
- if window_size is not None:
197
- n_heads_rel = 1 if heads_share else n_heads
198
- rel_stddev = self.k_channels**-0.5
199
- self.emb_rel_k = nn.Parameter(
200
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
201
- * rel_stddev
202
- )
203
- self.emb_rel_v = nn.Parameter(
204
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
205
- * rel_stddev
206
- )
207
-
208
- nn.init.xavier_uniform_(self.conv_q.weight)
209
- nn.init.xavier_uniform_(self.conv_k.weight)
210
- nn.init.xavier_uniform_(self.conv_v.weight)
211
- if proximal_init:
212
- with torch.no_grad():
213
- self.conv_k.weight.copy_(self.conv_q.weight)
214
- self.conv_k.bias.copy_(self.conv_q.bias)
215
-
216
- def forward(self, x, c, attn_mask=None):
217
- q = self.conv_q(x)
218
- k = self.conv_k(c)
219
- v = self.conv_v(c)
220
-
221
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
222
-
223
- x = self.conv_o(x)
224
- return x
225
-
226
- def attention(self, query, key, value, mask=None):
227
- # reshape [b, d, t] -> [b, n_h, t, d_k]
228
- b, d, t_s, t_t = (*key.size(), query.size(2))
229
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
230
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
231
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
232
-
233
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
234
- if self.window_size is not None:
235
- assert (
236
- t_s == t_t
237
- ), "Relative attention is only available for self-attention."
238
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
239
- rel_logits = self._matmul_with_relative_keys(
240
- query / math.sqrt(self.k_channels), key_relative_embeddings
241
- )
242
- scores_local = self._relative_position_to_absolute_position(rel_logits)
243
- scores = scores + scores_local
244
- if self.proximal_bias:
245
- assert t_s == t_t, "Proximal bias is only available for self-attention."
246
- scores = scores + self._attention_bias_proximal(t_s).to(
247
- device=scores.device, dtype=scores.dtype
248
- )
249
- if mask is not None:
250
- scores = scores.masked_fill(mask == 0, -1e4)
251
- if self.block_length is not None:
252
- assert (
253
- t_s == t_t
254
- ), "Local attention is only available for self-attention."
255
- block_mask = (
256
- torch.ones_like(scores)
257
- .triu(-self.block_length)
258
- .tril(self.block_length)
259
- )
260
- scores = scores.masked_fill(block_mask == 0, -1e4)
261
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
262
- p_attn = self.drop(p_attn)
263
- output = torch.matmul(p_attn, value)
264
- if self.window_size is not None:
265
- relative_weights = self._absolute_position_to_relative_position(p_attn)
266
- value_relative_embeddings = self._get_relative_embeddings(
267
- self.emb_rel_v, t_s
268
- )
269
- output = output + self._matmul_with_relative_values(
270
- relative_weights, value_relative_embeddings
271
- )
272
- output = (
273
- output.transpose(2, 3).contiguous().view(b, d, t_t)
274
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
275
- return output, p_attn
276
-
277
- def _matmul_with_relative_values(self, x, y):
278
- """
279
- x: [b, h, l, m]
280
- y: [h or 1, m, d]
281
- ret: [b, h, l, d]
282
- """
283
- ret = torch.matmul(x, y.unsqueeze(0))
284
- return ret
285
-
286
- def _matmul_with_relative_keys(self, x, y):
287
- """
288
- x: [b, h, l, d]
289
- y: [h or 1, m, d]
290
- ret: [b, h, l, m]
291
- """
292
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
293
- return ret
294
-
295
- def _get_relative_embeddings(self, relative_embeddings, length):
296
- max_relative_position = 2 * self.window_size + 1
297
- # Pad first before slice to avoid using cond ops.
298
- pad_length = max(length - (self.window_size + 1), 0)
299
- slice_start_position = max((self.window_size + 1) - length, 0)
300
- slice_end_position = slice_start_position + 2 * length - 1
301
- if pad_length > 0:
302
- padded_relative_embeddings = F.pad(
303
- relative_embeddings,
304
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
305
- )
306
- else:
307
- padded_relative_embeddings = relative_embeddings
308
- used_relative_embeddings = padded_relative_embeddings[
309
- :, slice_start_position:slice_end_position
310
- ]
311
- return used_relative_embeddings
312
-
313
- def _relative_position_to_absolute_position(self, x):
314
- """
315
- x: [b, h, l, 2*l-1]
316
- ret: [b, h, l, l]
317
- """
318
- batch, heads, length, _ = x.size()
319
- # Concat columns of pad to shift from relative to absolute indexing.
320
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
321
-
322
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
323
- x_flat = x.view([batch, heads, length * 2 * length])
324
- x_flat = F.pad(
325
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
326
- )
327
-
328
- # Reshape and slice out the padded elements.
329
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
330
- :, :, :length, length - 1 :
331
- ]
332
- return x_final
333
-
334
- def _absolute_position_to_relative_position(self, x):
335
- """
336
- x: [b, h, l, l]
337
- ret: [b, h, l, 2*l-1]
338
- """
339
- batch, heads, length, _ = x.size()
340
- # padd along column
341
- x = F.pad(
342
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
343
- )
344
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
345
- # add 0's in the beginning that will skew the elements after reshape
346
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
347
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
348
- return x_final
349
-
350
- def _attention_bias_proximal(self, length):
351
- """Bias for self-attention to encourage attention to close positions.
352
- Args:
353
- length: an integer scalar.
354
- Returns:
355
- a Tensor with shape [1, 1, length, length]
356
- """
357
- r = torch.arange(length, dtype=torch.float32)
358
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
359
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
360
-
361
-
362
- class FFN(nn.Module):
363
- def __init__(
364
- self,
365
- in_channels,
366
- out_channels,
367
- filter_channels,
368
- kernel_size,
369
- p_dropout=0.0,
370
- activation=None,
371
- causal=False,
372
- ):
373
- super().__init__()
374
- self.in_channels = in_channels
375
- self.out_channels = out_channels
376
- self.filter_channels = filter_channels
377
- self.kernel_size = kernel_size
378
- self.p_dropout = p_dropout
379
- self.activation = activation
380
- self.causal = causal
381
-
382
- if causal:
383
- self.padding = self._causal_padding
384
- else:
385
- self.padding = self._same_padding
386
-
387
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
388
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
389
- self.drop = nn.Dropout(p_dropout)
390
-
391
- def forward(self, x, x_mask):
392
- x = self.conv_1(self.padding(x * x_mask))
393
- if self.activation == "gelu":
394
- x = x * torch.sigmoid(1.702 * x)
395
- else:
396
- x = torch.relu(x)
397
- x = self.drop(x)
398
- x = self.conv_2(self.padding(x * x_mask))
399
- return x * x_mask
400
-
401
- def _causal_padding(self, x):
402
- if self.kernel_size == 1:
403
- return x
404
- pad_l = self.kernel_size - 1
405
- pad_r = 0
406
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
407
- x = F.pad(x, commons.convert_pad_shape(padding))
408
- return x
409
-
410
- def _same_padding(self, x):
411
- if self.kernel_size == 1:
412
- return x
413
- pad_l = (self.kernel_size - 1) // 2
414
- pad_r = self.kernel_size // 2
415
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
416
- x = F.pad(x, commons.convert_pad_shape(padding))
417
- return x
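As a quick sanity check of the tensor shapes these modules expect, here is a minimal, self-contained sketch. The channel, layer, and frame counts are illustrative assumptions rather than values taken from any RVC configuration; the point is only that the encoder consumes `[batch, channels, time]` features plus a `[batch, 1, time]` validity mask and returns a tensor of the same shape.

```python
import torch
from lib.infer_pack.attentions import Encoder  # assumes the repo layout shown above

b, d, t = 2, 192, 100                      # batch, hidden channels, frames (illustrative)
enc = Encoder(hidden_channels=d, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)

x = torch.randn(b, d, t)                   # [batch, channels, time]
x_mask = torch.ones(b, 1, t)               # 1 = valid frame, 0 = padding
with torch.no_grad():
    y = enc(x, x_mask)                     # masked self-attention + FFN stack
print(y.shape)                             # torch.Size([2, 192, 100])
```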
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/train.py DELETED
@@ -1,838 +0,0 @@
1
- import json
2
- import logging
3
- import math
4
- import os
5
- import time
6
- from contextlib import suppress
7
-
8
- import numpy as np
9
- import torch
10
- import torch.nn.functional as F
11
-
12
- try:
13
- import wandb
14
- except ImportError:
15
- wandb = None
16
-
17
- from open_clip import ClipLoss, gather_features
18
- from .distributed import is_master
19
- from .zero_shot import zero_shot_eval
20
-
21
-
22
- class AverageMeter(object):
23
- """Computes and stores the average and current value"""
24
-
25
- def __init__(self):
26
- self.reset()
27
-
28
- def reset(self):
29
- self.val = 0
30
- self.avg = 0
31
- self.sum = 0
32
- self.count = 0
33
-
34
- def update(self, val, n=1):
35
- self.val = val
36
- self.sum += val * n
37
- self.count += n
38
- self.avg = self.sum / self.count
39
-
40
-
41
- def unwrap_model(model):
42
- if hasattr(model, "module"):
43
- return model.module
44
- else:
45
- return model
46
-
47
-
48
- def train_one_epoch(
49
- model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None
50
- ):
51
- device = torch.device(args.device)
52
- autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
53
- model.train()
54
- loss = ClipLoss(
55
- local_loss=args.local_loss,
56
- gather_with_grad=args.gather_with_grad,
57
- cache_labels=True,
58
- rank=args.rank,
59
- world_size=args.world_size,
60
- use_horovod=args.horovod,
61
- mlp_loss=args.clap_mlploss,
62
- weight_loss_kappa=args.kappa,
63
- )
64
-
65
- dataloader, sampler = data["train"].dataloader, data["train"].sampler
66
- if args.distributed and sampler is not None:
67
- sampler.set_epoch(epoch)
68
- num_batches_per_epoch = dataloader.num_batches
69
- sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))
70
-
71
- # for toy dataset
72
- if args.dataset_type == "toy":
73
- dataloader.dataset.generate_queue()
74
-
75
- loss_m = AverageMeter()
76
- batch_time_m = AverageMeter()
77
- data_time_m = AverageMeter()
78
- end = time.time()
79
-
80
- for i, batch in enumerate(dataloader):
81
- # logging.info(f"batch {i} of {num_batches_per_epoch}")
82
- step = num_batches_per_epoch * epoch + i
83
- if isinstance(scheduler, dict):
84
- for s in scheduler.values():
85
- s(step)
86
- else:
87
- scheduler(step)
88
- audios = batch # contains mel_spec, wavform, and longer list
89
- texts = batch["text"]
90
- # audios = audios.to(device=device, non_blocking=True)
91
- # texts = texts.to(device=device, non_blocking=True)
92
-
93
- data_time_m.update(time.time() - end)
94
- if isinstance(optimizer, dict):
95
- for o_ in optimizer.values():
96
- o_.zero_grad()
97
- else:
98
- optimizer.zero_grad()
99
-
100
- with autocast():
101
- (
102
- audio_features,
103
- text_features,
104
- audio_features_mlp,
105
- text_features_mlp,
106
- logit_scale_a,
107
- logit_scale_t,
108
- ) = model(audios, texts, device)
109
-
110
- if args.clap_mlploss:
111
- total_loss = loss(
112
- audio_features=audio_features,
113
- text_features=text_features,
114
- logit_scale_a=logit_scale_a,
115
- logit_scale_t=logit_scale_t,
116
- audio_features_mlp=audio_features_mlp,
117
- text_features_mlp=text_features_mlp,
118
- )
119
- else:
120
- total_loss = loss(
121
- audio_features=audio_features,
122
- text_features=text_features,
123
- logit_scale_a=logit_scale_a,
124
- )
125
- if isinstance(optimizer, dict):
126
- if scaler is not None:
127
- scaler.scale(total_loss).backward()
128
- for o_ in optimizer.values():
129
- if args.horovod:
130
- o_.synchronize()
131
- scaler.unscale_(o_)
132
- with o_.skip_synchronize():
133
- scaler.step(o_)
134
- else:
135
- scaler.step(o_)
136
- scaler.update()
137
- else:
138
- total_loss.backward()
139
- for o_ in optimizer.values():
140
- o_.step()
141
- else:
142
- if scaler is not None:
143
- scaler.scale(total_loss).backward()
144
- if args.horovod:
145
- optimizer.synchronize()
146
- scaler.unscale_(optimizer)
147
- with optimizer.skip_synchronize():
148
- scaler.step(optimizer)
149
- else:
150
- scaler.step(optimizer)
151
- scaler.update()
152
- else:
153
- total_loss.backward()
154
- optimizer.step()
155
-
156
- # Note: we clamp to 4.6052 = ln(100), as in the original paper.
157
- with torch.no_grad():
158
- unwrap_model(model).logit_scale_a.clamp_(0, math.log(100))
159
- if args.clap_mlploss:
160
- unwrap_model(model).logit_scale_t.clamp_(0, math.log(100))
161
-
162
- batch_time_m.update(time.time() - end)
163
- end = time.time()
164
- batch_count = i + 1
165
- if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch):
166
- if isinstance(audios, dict):
167
- batch_size = len(audios["waveform"])
168
- else:
169
- batch_size = len(audios)
170
- num_samples = batch_count * batch_size * args.world_size
171
- samples_per_epoch = dataloader.num_samples
172
- percent_complete = 100.0 * batch_count / num_batches_per_epoch
173
-
174
- # NOTE loss is coarsely sampled, just master node and per log update
175
- loss_m.update(total_loss.item(), batch_size)
176
- logit_scale_scalar_a = logit_scale_a.item()
177
- logit_scale_scalar_t = logit_scale_t.item()
178
- if isinstance(optimizer, dict):
179
- if args.clap_mlploss:
180
- logging.info(
181
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
182
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
183
- f"Data (t): {data_time_m.avg:.3f} "
184
- f"Batch (t): {batch_time_m.avg:.3f} "
185
- f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]} "
186
- f"Logit Scale Audio: {logit_scale_scalar_a:.3f}"
187
- f"Logit Scale Text: {logit_scale_scalar_t:.3f}"
188
- )
189
- log_data = {
190
- "loss": loss_m.val,
191
- "data_time": data_time_m.val,
192
- "batch_time": batch_time_m.val,
193
- "scale_audio": logit_scale_scalar_a,
194
- "scale_text": logit_scale_scalar_t,
195
- "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()],
196
- }
197
- else:
198
- logging.info(
199
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
200
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
201
- f"Data (t): {data_time_m.avg:.3f} "
202
- f"Batch (t): {batch_time_m.avg:.3f} "
203
- f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]} "
204
- f"Logit Scale Audio: {logit_scale_scalar_a:.3f}"
205
- )
206
- log_data = {
207
- "loss": loss_m.val,
208
- "data_time": data_time_m.val,
209
- "batch_time": batch_time_m.val,
210
- "scale_audio": logit_scale_scalar_a,
211
- "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()],
212
- }
213
-
214
- else:
215
- if args.clap_mlploss:
216
- logging.info(
217
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
218
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
219
- f"Data (t): {data_time_m.avg:.3f} "
220
- f"Batch (t): {batch_time_m.avg:.3f} "
221
- f"LR: {optimizer.param_groups[0]['lr']:5f} "
222
- f"Logit Scale Audio: {logit_scale_scalar_a:.3f}"
223
- f"Logit Scale Text: {logit_scale_scalar_t:.3f}"
224
- )
225
-
226
- # Save train loss / etc. Using non avg meter values as loggers have their own smoothing
227
- log_data = {
228
- "loss": loss_m.val,
229
- "data_time": data_time_m.val,
230
- "batch_time": batch_time_m.val,
231
- "scale_audio": logit_scale_scalar_a,
232
- "scale_text": logit_scale_scalar_t,
233
- "lr": optimizer.param_groups[0]["lr"],
234
- }
235
- else:
236
- logging.info(
237
- f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
238
- f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
239
- f"Data (t): {data_time_m.avg:.3f} "
240
- f"Batch (t): {batch_time_m.avg:.3f} "
241
- f"LR: {optimizer.param_groups[0]['lr']:5f} "
242
- f"Logit Scale Audio: {logit_scale_scalar_a:.3f}"
243
- )
244
-
245
- # Save train loss / etc. Using non avg meter values as loggers have their own smoothing
246
- log_data = {
247
- "loss": loss_m.val,
248
- "data_time": data_time_m.val,
249
- "batch_time": batch_time_m.val,
250
- "scale_audio": logit_scale_scalar_a,
251
- "lr": optimizer.param_groups[0]["lr"],
252
- }
253
- for name, val in log_data.items():
254
- name = "train/" + name
255
- if tb_writer is not None:
256
- tb_writer.add_scalar(name, val, step)
257
- if args.wandb:
258
- assert wandb is not None, "Please install wandb."
259
- wandb.log({name: val, "step": step})
260
-
261
- # resetting batch / data time meters per log window
262
- batch_time_m.reset()
263
- data_time_m.reset()
264
- # end for
265
-
266
-
267
- def evaluate(model, data, epoch, args, tb_writer=None):
268
- metrics = {}
269
- if not args.parallel_eval:
270
- if not is_master(args):
271
- return metrics
272
- device = torch.device(args.device)
273
- model.eval()
274
-
275
- # CHANGE
276
- # zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
277
- # metrics.update(zero_shot_metrics)
278
- if is_master(args):
279
- print("Evaluating...")
280
- autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
281
- if args.val_dataset_names == ["Clotho", "audiocaps"]:
282
- # if only clotho and audiocaps are used, then we will use a different evaluation function.
283
- # This is because in the Clotho and audiocaps valid and test set, there are 5 text for 1 audio.
284
- if args.parallel_eval:
285
- # (yusong): just a hack here. Don't use parallel eval when evaluating only clotho and audiocaps.
286
- raise NotImplementedError(
287
- "Parallel evaluation not supported for eval only Clotho and audiocaps."
288
- )
289
- val_metrics_per_dataset = evaluate_clotho_audiocaps(
290
- model, data, epoch, args, autocast, device, tb_writer
291
- )
292
- for m in val_metrics_per_dataset.values():
293
- metrics.update(m)
294
- if "epoch" not in metrics.keys():
295
- metrics.update({"epoch": epoch})
296
- metrics = select_top_metric_clotho_audiocaps(
297
- metrics, val_metrics_per_dataset, args
298
- )
299
- elif "val" in data and (
300
- args.val_frequency
301
- and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)
302
- ):
303
- dataloader = data["val"].dataloader
304
- num_samples = 0
305
- samples_per_val = dataloader.num_samples
306
-
307
- # FIXME this does not scale past small eval datasets
308
- # all_audio_features @ all_text_features will blow up memory and compute very quickly
309
- eval_info = {}
310
- if args.clap_mlploss:
311
- eval_info["all"] = {
312
- "cumulative_loss": 0.0,
313
- "num_samples": 0,
314
- "all_audio_features": [],
315
- "all_text_features": [],
316
- "all_audio_features_mlp": [],
317
- "all_text_features_mlp": [],
318
- } # cumulative_loss = 0.0
319
- else:
320
- eval_info["all"] = {
321
- "cumulative_loss": 0.0,
322
- "num_samples": 0,
323
- "all_audio_features": [],
324
- "all_text_features": [],
325
- } # cumu
326
- # all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp = [], [], [], []
327
- with torch.no_grad():
328
- for i, batch in enumerate(dataloader):
329
- audios = batch # contains mel_spec, wavform, and longer list
330
- texts = batch["text"]
331
- # audios = audios.to(device=device, non_blocking=True)
332
-
333
- all_names = list(
334
- set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]])
335
- )
336
- for name in all_names:
337
- if name not in eval_info.keys():
338
- if args.clap_mlploss:
339
- eval_info[name] = {
340
- "cumulative_loss": 0.0,
341
- "num_samples": 0,
342
- "all_audio_features": [],
343
- "all_text_features": [],
344
- "all_audio_features_mlp": [],
345
- "all_text_features_mlp": [],
346
- }
347
- else:
348
- eval_info[name] = {
349
- "cumulative_loss": 0.0,
350
- "num_samples": 0,
351
- "all_audio_features": [],
352
- "all_text_features": [],
353
- }
354
- with autocast():
355
- (
356
- audio_features,
357
- text_features,
358
- audio_features_mlp,
359
- text_features_mlp,
360
- logit_scale_a,
361
- logit_scale_t,
362
- ) = model(audios, texts, device)
363
-
364
- if args.parallel_eval:
365
- # multi-GPU eval
366
- if args.clap_mlploss:
367
- (
368
- audio_features,
369
- text_features,
370
- audio_features_mlp,
371
- text_features_mlp,
372
- ) = gather_features(
373
- audio_features=audio_features,
374
- text_features=text_features,
375
- audio_features_mlp=audio_features_mlp,
376
- text_features_mlp=text_features_mlp,
377
- local_loss=False,
378
- gather_with_grad=False,
379
- rank=args.rank,
380
- world_size=args.world_size,
381
- use_horovod=args.horovod,
382
- mlp_loss=args.clap_mlploss,
383
- )
384
- else:
385
- (audio_features, text_features,) = gather_features(
386
- audio_features=audio_features,
387
- text_features=text_features,
388
- local_loss=False,
389
- gather_with_grad=False,
390
- rank=args.rank,
391
- world_size=args.world_size,
392
- use_horovod=args.horovod,
393
- mlp_loss=args.clap_mlploss,
394
- )
395
-
396
- if is_master(args):
397
- num_samples += audio_features.shape[0]
398
- for n in [*all_names, "all"]:
399
- if n == "all":
400
- eval_info[n]["all_audio_features"].append(
401
- audio_features.cpu()
402
- )
403
- eval_info[n]["all_text_features"].append(
404
- text_features.cpu()
405
- )
406
- if args.clap_mlploss:
407
- eval_info[n]["all_audio_features_mlp"].append(
408
- audio_features_mlp.cpu()
409
- )
410
- eval_info[n]["all_text_features_mlp"].append(
411
- text_features_mlp.cpu()
412
- )
413
- else:
414
- idx = np.where(
415
- np.array(
416
- [
417
- "-".join(b.split("/")[-3:-1])
418
- for b in batch["__url__"]
419
- ]
420
- )
421
- == n
422
- )[0]
423
- eval_info[n]["all_audio_features"].append(
424
- audio_features.cpu().index_select(
425
- 0, torch.tensor(idx).long()
426
- )
427
- )
428
- eval_info[n]["all_text_features"].append(
429
- text_features.cpu().index_select(
430
- 0, torch.tensor(idx).long()
431
- )
432
- )
433
- if args.clap_mlploss:
434
- eval_info[n]["all_audio_features_mlp"].append(
435
- audio_features_mlp.cpu().index_select(
436
- 0, torch.tensor(idx).long()
437
- )
438
- )
439
- eval_info[n]["all_text_features_mlp"].append(
440
- text_features_mlp.cpu().index_select(
441
- 0, torch.tensor(idx).long()
442
- )
443
- )
444
- # print(f'eval step {i}') # (yusong): for debug
445
-
446
- # cumulative_loss += total_loss * batch_size
447
- # num_samples += batch_size
448
- if is_master(args) and (i % 100) == 0: # and i != 0:
449
- logging.info(
450
- f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]"
451
- )
452
- if is_master(args):
453
- val_metrics_per_dataset = {}
454
- for n in eval_info.keys():
455
- if args.clap_mlploss:
456
- metrics_single_dataset = get_metrics(
457
- audio_features=torch.cat(
458
- eval_info[n]["all_audio_features"]
459
- ),
460
- text_features=torch.cat(eval_info[n]["all_text_features"]),
461
- logit_scale_a=logit_scale_a.cpu(),
462
- audio_features_mlp=torch.cat(
463
- eval_info[n]["all_audio_features_mlp"]
464
- ),
465
- text_features_mlp=torch.cat(
466
- eval_info[n]["all_text_features_mlp"]
467
- ),
468
- logit_scale_t=logit_scale_t.cpu(),
469
- mlp_loss=args.clap_mlploss,
470
- )
471
- else:
472
- metrics_single_dataset = get_metrics(
473
- audio_features=torch.cat(
474
- eval_info[n]["all_audio_features"]
475
- ),
476
- text_features=torch.cat(eval_info[n]["all_text_features"]),
477
- logit_scale_a=logit_scale_a.cpu(),
478
- mlp_loss=args.clap_mlploss,
479
- )
480
- val_metrics_per_dataset[n] = {
481
- n + "/" + k: v for k, v in metrics_single_dataset.items()
482
- }
483
- metrics.update(val_metrics_per_dataset[n])
484
- if "epoch" not in metrics.keys():
485
- metrics.update({"epoch": epoch})
486
- if is_master(args):
487
- if not metrics:
488
- return metrics
489
-
490
- logging.info(
491
- f"Eval Epoch: {epoch} "
492
- + "\n".join(
493
- [
494
- "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in m.items()])
495
- for m in val_metrics_per_dataset.values()
496
- ]
497
- )
498
- )
499
-
500
- if args.save_logs:
501
- for name, val in metrics.items():
502
- if tb_writer is not None:
503
- tb_writer.add_scalar(f"val/{name}", val, epoch)
504
-
505
- with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
506
- f.write(json.dumps(metrics))
507
- f.write("\n")
508
-
509
- if args.wandb:
510
- assert wandb is not None, "Please install wandb."
511
- for name, val in metrics.items():
512
- wandb.log({f"val/{name}": val, "epoch": epoch})
513
-
514
- return metrics
515
- else:
516
- return metrics
517
-
518
-
519
- def get_metrics(
520
- audio_features,
521
- text_features,
522
- logit_scale_a,
523
- audio_features_mlp=None,
524
- text_features_mlp=None,
525
- logit_scale_t=None,
526
- mlp_loss=False,
527
- ):
528
- metrics = {}
529
- if mlp_loss:
530
- # Set up audio-to-text & text-to-audio similarity matrices
531
- a_logits_per_audio = (
532
- (logit_scale_a * audio_features @ text_features_mlp.t()).detach().cpu()
533
- )
534
- a_logits_per_text = a_logits_per_audio.t().detach().cpu()
535
- t_logits_per_audio = (
536
- (logit_scale_t * audio_features_mlp @ text_features.t()).detach().cpu()
537
- )
538
- t_logits_per_text = t_logits_per_audio.t().detach().cpu()
539
-
540
- labels = torch.arange(audio_features.shape[0]).long()
541
- # Change the loss from two terms into four terms with 2x2 combined CE loss
542
- total_loss = (
543
- F.cross_entropy(a_logits_per_audio, labels)
544
- + F.cross_entropy(a_logits_per_text, labels)
545
- + F.cross_entropy(t_logits_per_audio, labels)
546
- + F.cross_entropy(t_logits_per_text, labels)
547
- ) / 4
548
-
549
- metrics[f"cumulative_loss"] = total_loss.item()
550
- metrics[f"num_samples"] = audio_features.shape[0]
551
-
552
- logits = {
553
- "audio_to_text": (a_logits_per_audio + t_logits_per_audio) / 2,
554
- "text_to_audio": (a_logits_per_text + t_logits_per_text) / 2,
555
- }
556
- ground_truth = torch.arange(len(text_features)).view(-1, 1)
557
-
558
- else:
559
- # print("text_features", text_features)
560
- # print("text_features.shape", text_features.shape)
561
- logits_per_audio = (
562
- (logit_scale_a * audio_features @ text_features.t()).detach().cpu()
563
- )
564
- logits_per_text = logits_per_audio.t().detach().cpu()
565
-
566
- labels = torch.arange(audio_features.shape[0]).long()
567
- # Change the loss from two terms into four terms with 2x2 combined CE loss
568
- total_loss = (
569
- F.cross_entropy(logits_per_audio, labels)
570
- + F.cross_entropy(logits_per_text, labels)
571
- ) / 2
572
-
573
- metrics[f"cumulative_loss"] = total_loss.item()
574
- metrics[f"num_samples"] = audio_features.shape[0]
575
-
576
- logits = {"audio_to_text": logits_per_audio, "text_to_audio": logits_per_text}
577
-
578
- ground_truth = torch.arange(len(text_features)).view(-1, 1)
579
-
580
- for name, logit in logits.items():
581
- ranking = torch.argsort(logit, descending=True)
582
- preds = torch.where(ranking == ground_truth)[
583
- 1
584
- ] # (yusong) this line is slow because it uses single thread
585
- preds = preds.detach().cpu().numpy()
586
- metrics[f"{name}_mean_rank"] = preds.mean() + 1
587
- metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
588
- for k in [1, 5, 10]:
589
- metrics[f"{name}_R@{k}"] = np.mean(preds < k)
590
- # map@10
591
- metrics[f"{name}_mAP@10"] = np.mean(np.where(preds < 10, 1 / (preds + 1), 0.0))
592
-
593
- return metrics
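To make the ranking arithmetic in `get_metrics` easier to follow, here is a small standalone sketch of how R@k and mAP@10 fall out of the rank positions. It uses a dummy similarity matrix and the same argsort/where pattern as the function above; the 4x4 size is purely illustrative.

```python
import numpy as np
import torch

# Dummy 4x4 audio-text similarity matrix; row i should ideally rank column i first.
logits = torch.randn(4, 4)
ground_truth = torch.arange(4).view(-1, 1)

ranking = torch.argsort(logits, descending=True)          # candidate indices, best first
preds = torch.where(ranking == ground_truth)[1].numpy()   # 0-based rank of the true match per row

print("R@1   =", np.mean(preds < 1))
print("R@5   =", np.mean(preds < 5))
print("mAP@10 =", np.mean(np.where(preds < 10, 1.0 / (preds + 1), 0.0)))
```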
594
-
595
-
596
- def evaluate_clotho_audiocaps(
597
- model, data, epoch, args, autocast, device, tb_writer=None
598
- ):
599
- """
600
- Adapted from https://github.com/XinhaoMei/audio-text_retrieval/blob/main/tools/utils.py.
601
- 1. for text-to-audio retrieval, do 5 times and average the results
602
- 2. for R@1, R@5, R@10 in audio-to-text retrieval, take the best rank among 5 text
603
- 3. for map@10 in audio-to-text retrieval:
604
- 3.1: sort the rank of 5 text
605
- 3.2: exclude the rank >=10 (0-index)
606
- 3.3: compute the map regarding the remaining ranks: np.mean(np.arange(1, len(ranks)+1) / ranks).
607
- (3.3) That is, take the top ranks of 5 text that is < 10, and assign the descending number as ground truth.
608
- (3.3) E.g.: the ground truth of first rank of the 5 text should be 1, the second rank should be 2, etc.
609
- """
610
- # TODO: (yusong) only support single GPU evaluation and only support non-mlp case for now.
611
- dataloader = data["val"].dataloader
612
- with torch.no_grad():
613
- eval_info = {}
614
- for i, batch in enumerate(dataloader):
615
- audios = batch # contains mel_spec, wavform, and longer list
616
-
617
- # each item in the list has 5 texts
618
- if args.tmodel == "transformer":
619
- from open_clip import tokenize
620
-
621
- texts = [tokenize(t) for t in batch["full_text"]]
622
- texts = torch.cat(texts)
623
- else:
624
- from .data import tokenizer
625
-
626
- texts = [
627
- tokenizer(t) for t in batch["full_text"]
628
- ] # 5 texts for each audio
629
- texts = {
630
- k: torch.cat([t[k] for t in texts]) for k in texts[0].keys()
631
- } # 5 x batch
632
-
633
- # audios = audios.to(device=device, non_blocking=True)
634
-
635
- all_names = list(
636
- set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]])
637
- )
638
- for name in all_names:
639
- if name not in eval_info.keys():
640
- # we will not use mlp outputs even if args.clap_mlploss=True
641
- eval_info[name] = {
642
- "cumulative_loss": 0.0,
643
- "num_samples": 0,
644
- "all_audio_features": [],
645
- "all_text_features": [],
646
- }
647
- with autocast():
648
- audio_features = model(audios, None, device)
649
- text_features = model(None, texts, device)
650
- audio_features = F.normalize(audio_features, dim=-1)
651
- text_features = F.normalize(text_features, dim=-1)
652
-
653
- all_names = list(
654
- set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]])
655
- )
656
- for n in all_names:
657
- idx = np.where(
658
- np.array(
659
- ["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]]
660
- )
661
- == n
662
- )[0]
663
- eval_info[n]["all_audio_features"].append(
664
- audio_features.cpu().index_select(0, torch.tensor(idx).long())
665
- )
666
- # (yusong) please double-check. This is for selecting 5 text features at once.
667
- # because idx is a list of indices in size of num_samples,
668
- # and text_features is a tensor of size (5*num_samples, dim)
669
- # so we need to select 5 consecutive indices at once for a single index in idx.
670
- eval_info[n]["all_text_features"].append(
671
- text_features.cpu()
672
- .reshape([-1, 5, text_features.shape[1]])
673
- .index_select(0, torch.tensor(idx).long())
674
- .reshape([-1, text_features.shape[1]])
675
- )
676
-
677
- val_metrics_all = {}
678
-
679
- for n in eval_info.keys():
680
- logit_scale_a, logit_scale_t = model(None, None, device)
681
- logit_scale_a = logit_scale_a.cpu()
682
-
683
- audio_features = torch.cat(eval_info[n]["all_audio_features"], dim=0)
684
- text_features = torch.cat(eval_info[n]["all_text_features"], dim=0)
685
-
686
- logits_per_audio = (
687
- (logit_scale_a * audio_features @ text_features.t()).detach().cpu()
688
- )
689
- logits_per_text = logits_per_audio.t().detach().cpu()
690
-
691
- # logits_per_audio shape: [num_samples, num_samples*5]
692
- # logits_per_text shape: [num_samples*5, num_samples]
693
-
694
- logging.info(
695
- f"dataset {n}, logits_per_audio shape: {logits_per_audio.shape}, "
696
- f"logits_per_text shape: {logits_per_text.shape}"
697
- )
698
-
699
- metrics = {}
700
- num_samples = audio_features.shape[0]
701
- metrics[f"num_samples"] = num_samples
702
-
703
- # (yusong) the following code is very important, please double-check:
704
- # logits_per_audio.reshape(num_samples, num_samples, 5)[:, :, d]
705
- # logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :]
706
- # Those two are retrieving one of the 5 text for each audio.
707
- labels = torch.arange(audio_features.shape[0]).long()
708
- audio_to_text_loss = [
709
- F.cross_entropy(
710
- logits_per_audio.reshape(num_samples, num_samples, 5)[:, :, d],
711
- labels,
712
- )
713
- for d in range(5)
714
- ]
715
- text_to_audio_loss = [
716
- F.cross_entropy(
717
- logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :],
718
- labels,
719
- )
720
- for d in range(5)
721
- ]
722
- total_loss = (np.mean(audio_to_text_loss) + np.mean(text_to_audio_loss)) / 2
723
-
724
- metrics[f"cumulative_loss"] = total_loss.item()
725
-
726
- # text to audio: do 5 times
727
- pred_text = []
728
- for d in range(5):
729
- logit = logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :]
730
- ground_truth = torch.arange(len(logit)).view(-1, 1)
731
- ranking = torch.argsort(
732
- logit, descending=True
733
- ) # [num_samples, num_samples]
734
- preds = torch.where(ranking == ground_truth)[1]
735
- pred_text.append(preds.detach().cpu().numpy())
736
- pred_text_concat = np.concatenate(pred_text, axis=0) # [5*num_samples]
737
- metrics[f"text_to_audio_mean_rank"] = pred_text_concat.mean() + 1
738
- metrics[f"text_to_audio_median_rank"] = (
739
- np.floor(np.median(pred_text_concat)) + 1
740
- )
741
- for k in [1, 5, 10]:
742
- metrics[f"text_to_audio_R@{k}"] = np.mean(pred_text_concat < k)
743
- # map@10
744
- metrics[f"text_to_audio_mAP@10"] = np.mean(
745
- np.where(pred_text_concat < 10, 1 / (pred_text_concat + 1), 0.0)
746
- )
747
-
748
- # audio to text: take the best result
749
- # for audio to text map 10, sort and assign descending ground truth.
750
- # see https://github.com/XinhaoMei/audio-text_retrieval/blob/main/tools/utils.py#L103
751
- # map@10
752
- map_all = []
753
- pred_audio_all = []
754
- for d in range(num_samples):
755
- # logits_per_audio: [num_samples, num_samples*5]
756
- logit_single = logits_per_audio[d, :] # [5*num_samples]
757
- # Ground-truth index: [d*5, d*5+1, d*5+2, d*5+3, d*5+4]
758
- ranking = torch.argsort(
759
- logit_single, descending=True
760
- ) # [5*num_samples]
761
- # ranking: the index of first match, second match, ...
762
- ground_truth = torch.arange(d * 5, d * 5 + 5)[None]
763
- all_pred = torch.where(
764
- torch.stack([ranking] * 5) == ground_truth.view(-1, 1)
765
- )[1]
766
- min_pred = torch.min(all_pred)
767
- pred_audio_all.append(min_pred.detach().cpu().numpy())
768
- all_pred_filter = all_pred[all_pred < 10].detach().cpu().numpy()
769
- # /5 because we have 5 text, so it means for the text rank >=10 we count as 0.
770
- map_single = (
771
- np.sum(
772
- (np.arange(1, len(all_pred_filter) + 1) / (all_pred_filter + 1))
773
- )
774
- / 5
775
- )
776
- map_all.append(map_single)
777
- metrics[f"audio_to_text_mAP@10"] = np.mean(map_all)
778
- for k in [1, 5, 10]:
779
- metrics[f"audio_to_text_R@{k}"] = np.mean(np.array(pred_audio_all) < k)
780
-
781
- val_metrics_all[n] = {n + "/" + k: v for k, v in metrics.items()}
782
- return val_metrics_all
783
-
784
-
785
- def calculate_selection_performance_clotho_audiocaps(val_metrics_per_dataset):
786
- """
787
- Calculate performance for Clotho+AudioCaps for model selection.
788
- """
789
- selection_performance_all = []
790
- for n in val_metrics_per_dataset.keys():
791
- selection_performance = (
792
- val_metrics_per_dataset[n][f"{n}/audio_to_text_mAP@10"]
793
- + val_metrics_per_dataset[n][f"{n}/text_to_audio_mAP@10"]
794
- ) / 2
795
- selection_performance_all.append(selection_performance)
796
- return np.mean(selection_performance_all)
797
-
798
-
799
- def select_top_metric_clotho_audiocaps(metrics, val_metrics_per_dataset, args):
800
- # val_metrics_per_dataset: dict, key: dataset name, value: dict, key: metric name, value: metric value
801
- # metrics: dict, key: metric name, value: metric value
802
- # Hack: use args to save the top performance
803
- if not hasattr(args, "top_selection_performance"):
804
- selection_performance = calculate_selection_performance_clotho_audiocaps(
805
- val_metrics_per_dataset
806
- )
807
- # TODO: write the if and else together
808
- metric_update = {}
809
- for n in val_metrics_per_dataset.keys():
810
- for k in val_metrics_per_dataset[n].keys():
811
- metric_update[
812
- k.split("/")[0] + "-top" + "/" + k.split("/")[1]
813
- ] = val_metrics_per_dataset[n][k]
814
- metric_update["top_selection_performance"] = selection_performance
815
- metric_update["top-selection-epoch"] = metrics["epoch"]
816
- metrics.update(metric_update)
817
- args.top_metric = metric_update
818
- args.top_selection_performance = selection_performance
819
- else:
820
- selection_performance_new = calculate_selection_performance_clotho_audiocaps(
821
- val_metrics_per_dataset
822
- )
823
- selection_performance_old = args.top_selection_performance
824
- if selection_performance_new > selection_performance_old:
825
- metric_update = {}
826
- for n in val_metrics_per_dataset.keys():
827
- for k in val_metrics_per_dataset[n].keys():
828
- metric_update[
829
- k.split("/")[0] + "-top" + "/" + k.split("/")[1]
830
- ] = val_metrics_per_dataset[n][k]
831
- metric_update["top_selection_performance"] = selection_performance_new
832
- metric_update["top-selection-epoch"] = metrics["epoch"]
833
- metrics.update(metric_update)
834
- args.top_metric = metric_update
835
- args.top_selection_performance = selection_performance_new
836
- else:
837
- metrics.update(args.top_metric)
838
- return metrics
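
Note: the text-to-audio retrieval metrics computed in the deleted CLAP evaluation code above (mean rank, R@k, mAP@10) can be illustrated with a small standalone sketch. This is not part of the original file; the similarity matrix is random and only the 5-captions-per-audio convention is taken from the comments in the diff.

```python
import numpy as np
import torch

# Illustrative only: 4 audio clips, each paired with 5 captions (text-to-audio direction).
num_samples, caps = 4, 5
logits_per_text = torch.randn(num_samples * caps, num_samples)  # [5*num_samples, num_samples]

ranks = []
for d in range(caps):
    # Take the d-th caption of every audio and rank all audios for it.
    logit = logits_per_text.reshape(num_samples, caps, num_samples)[:, d, :]
    ground_truth = torch.arange(num_samples).view(-1, 1)
    ranking = torch.argsort(logit, descending=True)        # [num_samples, num_samples]
    ranks.append(torch.where(ranking == ground_truth)[1])  # 0-based rank of the true audio
pred = torch.cat(ranks).numpy()                            # [5 * num_samples]

metrics = {
    "mean_rank": pred.mean() + 1,
    **{f"R@{k}": np.mean(pred < k) for k in (1, 5, 10)},
    # mAP@10: reciprocal rank when the true audio appears in the top 10, otherwise 0.
    "mAP@10": np.mean(np.where(pred < 10, 1.0 / (pred + 1), 0.0)),
}
print(metrics)
```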
 
 
spaces/AIWaves/Debate/src/agents/Memory/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .base_Memory import Memory
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/__init__.py DELETED
File without changes
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/pendingMessage.ts DELETED
@@ -1,3 +0,0 @@
1
- import { writable } from "svelte/store";
2
-
3
- export const pendingMessage = writable<string>("");
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.js DELETED
@@ -1,34 +0,0 @@
1
- import Base from '../base/Base.js';
2
- import { Arc } from '../utils/Geoms.js'
3
- import Yoyo from '../utils/Yoyo.js';
4
-
5
- class Spinner extends Base {
6
- constructor(scene, config) {
7
- super(scene, config);
8
- this.type = 'rexSpinnerSpinner';
9
- }
10
-
11
- buildShapes() {
12
- this.addShape((new Arc()).setName('arc'));
13
- }
14
-
15
- updateShapes() {
16
- var centerX = this.centerX;
17
- var centerY = this.centerY;
18
- var radius = this.radius;
19
- var lineWidth = Math.ceil(radius / 10);
20
- var maxRadius = radius - lineWidth;
21
-
22
- var endAngle = this.value * 720;
23
- var arcAngle = Yoyo(this.value) * 180;
24
- var startAngle = endAngle - arcAngle;
25
- this.getShape('arc')
26
- .lineStyle(lineWidth, this.color, 1)
27
- .setRadius(maxRadius)
28
- .setCenterPosition(centerX, centerY)
29
- .setAngle(startAngle + 315, endAngle + 315);
30
-
31
- }
32
- }
33
-
34
- export default Spinner;
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyImage.js DELETED
@@ -1,21 +0,0 @@
1
- import MergeStyle from './MergeStyle.js';
2
- import SetTextureProperties from './SetTextureProperties.js';
3
-
4
- var CreateAnyImage = function (scene, data, view, styles, customBuilders, ImageClass) {
5
- data = MergeStyle(data, styles);
6
- var gameObject = new ImageClass(scene, 0, 0, data.key, data.frame);
7
-
8
- if (data.width !== undefined) {
9
- gameObject.setDisplayWidth(data.width);
10
- }
11
- if (data.height !== undefined) {
12
- gameObject.setDisplayHeight(data.height);
13
- }
14
-
15
- SetTextureProperties(gameObject, data);
16
-
17
- scene.add.existing(gameObject);
18
- return gameObject;
19
- }
20
-
21
- export default CreateAnyImage;
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Factory.d.ts DELETED
@@ -1,23 +0,0 @@
1
- import Sizer from './Sizer';
2
-
3
- export default function (
4
- config?: Sizer.IConfig
5
- ): Sizer;
6
-
7
- export default function (
8
- x: number, y: number,
9
- config?: Sizer.IConfig
10
- ): Sizer;
11
-
12
- export default function (
13
- x: number, y: number,
14
- width: number, height: number,
15
- config?: Sizer.IConfig
16
- ): Sizer;
17
-
18
- export default function (
19
- x: number, y: number,
20
- width: number, height: number,
21
- orientation?: Sizer.OrientationTypes,
22
- config?: Sizer.IConfig
23
- ): Sizer;
 
 
spaces/Aki004/herta-so-vits/modules/commons.py DELETED
@@ -1,188 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- def slice_pitch_segments(x, ids_str, segment_size=4):
8
- ret = torch.zeros_like(x[:, :segment_size])
9
- for i in range(x.size(0)):
10
- idx_str = ids_str[i]
11
- idx_end = idx_str + segment_size
12
- ret[i] = x[i, idx_str:idx_end]
13
- return ret
14
-
15
- def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
16
- b, d, t = x.size()
17
- if x_lengths is None:
18
- x_lengths = t
19
- ids_str_max = x_lengths - segment_size + 1
20
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
21
- ret = slice_segments(x, ids_str, segment_size)
22
- ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
23
- return ret, ret_pitch, ids_str
24
-
25
- def init_weights(m, mean=0.0, std=0.01):
26
- classname = m.__class__.__name__
27
- if classname.find("Conv") != -1:
28
- m.weight.data.normal_(mean, std)
29
-
30
-
31
- def get_padding(kernel_size, dilation=1):
32
- return int((kernel_size*dilation - dilation)/2)
33
-
34
-
35
- def convert_pad_shape(pad_shape):
36
- l = pad_shape[::-1]
37
- pad_shape = [item for sublist in l for item in sublist]
38
- return pad_shape
39
-
40
-
41
- def intersperse(lst, item):
42
- result = [item] * (len(lst) * 2 + 1)
43
- result[1::2] = lst
44
- return result
45
-
46
-
47
- def kl_divergence(m_p, logs_p, m_q, logs_q):
48
- """KL(P||Q)"""
49
- kl = (logs_q - logs_p) - 0.5
50
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
51
- return kl
52
-
53
-
54
- def rand_gumbel(shape):
55
- """Sample from the Gumbel distribution, protect from overflows."""
56
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
57
- return -torch.log(-torch.log(uniform_samples))
58
-
59
-
60
- def rand_gumbel_like(x):
61
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
62
- return g
63
-
64
-
65
- def slice_segments(x, ids_str, segment_size=4):
66
- ret = torch.zeros_like(x[:, :, :segment_size])
67
- for i in range(x.size(0)):
68
- idx_str = ids_str[i]
69
- idx_end = idx_str + segment_size
70
- ret[i] = x[i, :, idx_str:idx_end]
71
- return ret
72
-
73
-
74
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
75
- b, d, t = x.size()
76
- if x_lengths is None:
77
- x_lengths = t
78
- ids_str_max = x_lengths - segment_size + 1
79
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
80
- ret = slice_segments(x, ids_str, segment_size)
81
- return ret, ids_str
82
-
83
-
84
- def rand_spec_segments(x, x_lengths=None, segment_size=4):
85
- b, d, t = x.size()
86
- if x_lengths is None:
87
- x_lengths = t
88
- ids_str_max = x_lengths - segment_size
89
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
90
- ret = slice_segments(x, ids_str, segment_size)
91
- return ret, ids_str
92
-
93
-
94
- def get_timing_signal_1d(
95
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
96
- position = torch.arange(length, dtype=torch.float)
97
- num_timescales = channels // 2
98
- log_timescale_increment = (
99
- math.log(float(max_timescale) / float(min_timescale)) /
100
- (num_timescales - 1))
101
- inv_timescales = min_timescale * torch.exp(
102
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
103
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
104
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
105
- signal = F.pad(signal, [0, 0, 0, channels % 2])
106
- signal = signal.view(1, channels, length)
107
- return signal
108
-
109
-
110
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
111
- b, channels, length = x.size()
112
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
113
- return x + signal.to(dtype=x.dtype, device=x.device)
114
-
115
-
116
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
117
- b, channels, length = x.size()
118
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
119
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
120
-
121
-
122
- def subsequent_mask(length):
123
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
124
- return mask
125
-
126
-
127
- @torch.jit.script
128
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
129
- n_channels_int = n_channels[0]
130
- in_act = input_a + input_b
131
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
132
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
133
- acts = t_act * s_act
134
- return acts
135
-
136
-
137
- def convert_pad_shape(pad_shape):
138
- l = pad_shape[::-1]
139
- pad_shape = [item for sublist in l for item in sublist]
140
- return pad_shape
141
-
142
-
143
- def shift_1d(x):
144
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
145
- return x
146
-
147
-
148
- def sequence_mask(length, max_length=None):
149
- if max_length is None:
150
- max_length = length.max()
151
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
152
- return x.unsqueeze(0) < length.unsqueeze(1)
153
-
154
-
155
- def generate_path(duration, mask):
156
- """
157
- duration: [b, 1, t_x]
158
- mask: [b, 1, t_y, t_x]
159
- """
160
- device = duration.device
161
-
162
- b, _, t_y, t_x = mask.shape
163
- cum_duration = torch.cumsum(duration, -1)
164
-
165
- cum_duration_flat = cum_duration.view(b * t_x)
166
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
167
- path = path.view(b, t_x, t_y)
168
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
169
- path = path.unsqueeze(1).transpose(2,3) * mask
170
- return path
171
-
172
-
173
- def clip_grad_value_(parameters, clip_value, norm_type=2):
174
- if isinstance(parameters, torch.Tensor):
175
- parameters = [parameters]
176
- parameters = list(filter(lambda p: p.grad is not None, parameters))
177
- norm_type = float(norm_type)
178
- if clip_value is not None:
179
- clip_value = float(clip_value)
180
-
181
- total_norm = 0
182
- for p in parameters:
183
- param_norm = p.grad.data.norm(norm_type)
184
- total_norm += param_norm.item() ** norm_type
185
- if clip_value is not None:
186
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
187
- total_norm = total_norm ** (1. / norm_type)
188
- return total_norm
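
Note: as a quick, hedged illustration of two of the deleted helpers above, the sketch below inlines the one-line bodies of sequence_mask and get_padding rather than importing the module (the example values are made up).

```python
import torch

# sequence_mask: True inside each sequence, False in the padded tail.
lengths = torch.tensor([3, 5, 2])
positions = torch.arange(int(lengths.max()), dtype=lengths.dtype)
mask = positions.unsqueeze(0) < lengths.unsqueeze(1)
print(mask)
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True,  True],
#         [ True,  True, False, False, False]])

# get_padding: "same" padding for a dilated 1D convolution.
kernel_size, dilation = 5, 2
padding = int((kernel_size * dilation - dilation) / 2)
print(padding)  # 4
```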
 
 
spaces/Alpaca233/SadTalker/src/facerender/modules/generator.py DELETED
@@ -1,255 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
- from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock
5
- from src.facerender.modules.dense_motion import DenseMotionNetwork
6
-
7
-
8
- class OcclusionAwareGenerator(nn.Module):
9
- """
10
- Generator follows NVIDIA architecture.
11
- """
12
-
13
- def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
14
- num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
15
- super(OcclusionAwareGenerator, self).__init__()
16
-
17
- if dense_motion_params is not None:
18
- self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
19
- estimate_occlusion_map=estimate_occlusion_map,
20
- **dense_motion_params)
21
- else:
22
- self.dense_motion_network = None
23
-
24
- self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
25
-
26
- down_blocks = []
27
- for i in range(num_down_blocks):
28
- in_features = min(max_features, block_expansion * (2 ** i))
29
- out_features = min(max_features, block_expansion * (2 ** (i + 1)))
30
- down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
31
- self.down_blocks = nn.ModuleList(down_blocks)
32
-
33
- self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
34
-
35
- self.reshape_channel = reshape_channel
36
- self.reshape_depth = reshape_depth
37
-
38
- self.resblocks_3d = torch.nn.Sequential()
39
- for i in range(num_resblocks):
40
- self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
41
-
42
- out_features = block_expansion * (2 ** (num_down_blocks))
43
- self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
44
- self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
45
-
46
- self.resblocks_2d = torch.nn.Sequential()
47
- for i in range(num_resblocks):
48
- self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))
49
-
50
- up_blocks = []
51
- for i in range(num_down_blocks):
52
- in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))
53
- out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))
54
- up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
55
- self.up_blocks = nn.ModuleList(up_blocks)
56
-
57
- self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3))
58
- self.estimate_occlusion_map = estimate_occlusion_map
59
- self.image_channel = image_channel
60
-
61
- def deform_input(self, inp, deformation):
62
- _, d_old, h_old, w_old, _ = deformation.shape
63
- _, _, d, h, w = inp.shape
64
- if d_old != d or h_old != h or w_old != w:
65
- deformation = deformation.permute(0, 4, 1, 2, 3)
66
- deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')
67
- deformation = deformation.permute(0, 2, 3, 4, 1)
68
- return F.grid_sample(inp, deformation)
69
-
70
- def forward(self, source_image, kp_driving, kp_source):
71
- # Encoding (downsampling) part
72
- out = self.first(source_image)
73
- for i in range(len(self.down_blocks)):
74
- out = self.down_blocks[i](out)
75
- out = self.second(out)
76
- bs, c, h, w = out.shape
77
- # print(out.shape)
78
- feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w)
79
- feature_3d = self.resblocks_3d(feature_3d)
80
-
81
- # Transforming feature representation according to deformation and occlusion
82
- output_dict = {}
83
- if self.dense_motion_network is not None:
84
- dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
85
- kp_source=kp_source)
86
- output_dict['mask'] = dense_motion['mask']
87
-
88
- if 'occlusion_map' in dense_motion:
89
- occlusion_map = dense_motion['occlusion_map']
90
- output_dict['occlusion_map'] = occlusion_map
91
- else:
92
- occlusion_map = None
93
- deformation = dense_motion['deformation']
94
- out = self.deform_input(feature_3d, deformation)
95
-
96
- bs, c, d, h, w = out.shape
97
- out = out.view(bs, c*d, h, w)
98
- out = self.third(out)
99
- out = self.fourth(out)
100
-
101
- if occlusion_map is not None:
102
- if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
103
- occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')
104
- out = out * occlusion_map
105
-
106
- # output_dict["deformed"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image
107
-
108
- # Decoding part
109
- out = self.resblocks_2d(out)
110
- for i in range(len(self.up_blocks)):
111
- out = self.up_blocks[i](out)
112
- out = self.final(out)
113
- out = F.sigmoid(out)
114
-
115
- output_dict["prediction"] = out
116
-
117
- return output_dict
118
-
119
-
120
- class SPADEDecoder(nn.Module):
121
- def __init__(self):
122
- super().__init__()
123
- ic = 256
124
- oc = 64
125
- norm_G = 'spadespectralinstance'
126
- label_nc = 256
127
-
128
- self.fc = nn.Conv2d(ic, 2 * ic, 3, padding=1)
129
- self.G_middle_0 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
130
- self.G_middle_1 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
131
- self.G_middle_2 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
132
- self.G_middle_3 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
133
- self.G_middle_4 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
134
- self.G_middle_5 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc)
135
- self.up_0 = SPADEResnetBlock(2 * ic, ic, norm_G, label_nc)
136
- self.up_1 = SPADEResnetBlock(ic, oc, norm_G, label_nc)
137
- self.conv_img = nn.Conv2d(oc, 3, 3, padding=1)
138
- self.up = nn.Upsample(scale_factor=2)
139
-
140
- def forward(self, feature):
141
- seg = feature
142
- x = self.fc(feature)
143
- x = self.G_middle_0(x, seg)
144
- x = self.G_middle_1(x, seg)
145
- x = self.G_middle_2(x, seg)
146
- x = self.G_middle_3(x, seg)
147
- x = self.G_middle_4(x, seg)
148
- x = self.G_middle_5(x, seg)
149
- x = self.up(x)
150
- x = self.up_0(x, seg) # 256, 128, 128
151
- x = self.up(x)
152
- x = self.up_1(x, seg) # 64, 256, 256
153
-
154
- x = self.conv_img(F.leaky_relu(x, 2e-1))
155
- # x = torch.tanh(x)
156
- x = F.sigmoid(x)
157
-
158
- return x
159
-
160
-
161
- class OcclusionAwareSPADEGenerator(nn.Module):
162
-
163
- def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
164
- num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
165
- super(OcclusionAwareSPADEGenerator, self).__init__()
166
-
167
- if dense_motion_params is not None:
168
- self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
169
- estimate_occlusion_map=estimate_occlusion_map,
170
- **dense_motion_params)
171
- else:
172
- self.dense_motion_network = None
173
-
174
- self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))
175
-
176
- down_blocks = []
177
- for i in range(num_down_blocks):
178
- in_features = min(max_features, block_expansion * (2 ** i))
179
- out_features = min(max_features, block_expansion * (2 ** (i + 1)))
180
- down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
181
- self.down_blocks = nn.ModuleList(down_blocks)
182
-
183
- self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
184
-
185
- self.reshape_channel = reshape_channel
186
- self.reshape_depth = reshape_depth
187
-
188
- self.resblocks_3d = torch.nn.Sequential()
189
- for i in range(num_resblocks):
190
- self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
191
-
192
- out_features = block_expansion * (2 ** (num_down_blocks))
193
- self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
194
- self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
195
-
196
- self.estimate_occlusion_map = estimate_occlusion_map
197
- self.image_channel = image_channel
198
-
199
- self.decoder = SPADEDecoder()
200
-
201
- def deform_input(self, inp, deformation):
202
- _, d_old, h_old, w_old, _ = deformation.shape
203
- _, _, d, h, w = inp.shape
204
- if d_old != d or h_old != h or w_old != w:
205
- deformation = deformation.permute(0, 4, 1, 2, 3)
206
- deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')
207
- deformation = deformation.permute(0, 2, 3, 4, 1)
208
- return F.grid_sample(inp, deformation)
209
-
210
- def forward(self, source_image, kp_driving, kp_source):
211
- # Encoding (downsampling) part
212
- out = self.first(source_image)
213
- for i in range(len(self.down_blocks)):
214
- out = self.down_blocks[i](out)
215
- out = self.second(out)
216
- bs, c, h, w = out.shape
217
- # print(out.shape)
218
- feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w)
219
- feature_3d = self.resblocks_3d(feature_3d)
220
-
221
- # Transforming feature representation according to deformation and occlusion
222
- output_dict = {}
223
- if self.dense_motion_network is not None:
224
- dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
225
- kp_source=kp_source)
226
- output_dict['mask'] = dense_motion['mask']
227
-
228
- # import pdb; pdb.set_trace()
229
-
230
- if 'occlusion_map' in dense_motion:
231
- occlusion_map = dense_motion['occlusion_map']
232
- output_dict['occlusion_map'] = occlusion_map
233
- else:
234
- occlusion_map = None
235
- deformation = dense_motion['deformation']
236
- out = self.deform_input(feature_3d, deformation)
237
-
238
- bs, c, d, h, w = out.shape
239
- out = out.view(bs, c*d, h, w)
240
- out = self.third(out)
241
- out = self.fourth(out)
242
-
243
- # occlusion_map = torch.where(occlusion_map < 0.95, 0, occlusion_map)
244
-
245
- if occlusion_map is not None:
246
- if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
247
- occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')
248
- out = out * occlusion_map
249
-
250
- # Decoding part
251
- out = self.decoder(out)
252
-
253
- output_dict["prediction"] = out
254
-
255
- return output_dict
 
 
spaces/Altinas/vits-uma-genshin-honkais/models.py DELETED
@@ -1,534 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- import commons
7
- import modules
8
- import attentions
9
- import monotonic_align
10
-
11
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
- from commons import init_weights, get_padding
14
-
15
-
16
- class StochasticDurationPredictor(nn.Module):
17
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
18
- super().__init__()
19
- filter_channels = in_channels # it needs to be removed from future version.
20
- self.in_channels = in_channels
21
- self.filter_channels = filter_channels
22
- self.kernel_size = kernel_size
23
- self.p_dropout = p_dropout
24
- self.n_flows = n_flows
25
- self.gin_channels = gin_channels
26
-
27
- self.log_flow = modules.Log()
28
- self.flows = nn.ModuleList()
29
- self.flows.append(modules.ElementwiseAffine(2))
30
- for i in range(n_flows):
31
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
32
- self.flows.append(modules.Flip())
33
-
34
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
35
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
36
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
37
- self.post_flows = nn.ModuleList()
38
- self.post_flows.append(modules.ElementwiseAffine(2))
39
- for i in range(4):
40
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
41
- self.post_flows.append(modules.Flip())
42
-
43
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
44
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
45
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
46
- if gin_channels != 0:
47
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
48
-
49
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
50
- x = torch.detach(x)
51
- x = self.pre(x)
52
- if g is not None:
53
- g = torch.detach(g)
54
- x = x + self.cond(g)
55
- x = self.convs(x, x_mask)
56
- x = self.proj(x) * x_mask
57
-
58
- if not reverse:
59
- flows = self.flows
60
- assert w is not None
61
-
62
- logdet_tot_q = 0
63
- h_w = self.post_pre(w)
64
- h_w = self.post_convs(h_w, x_mask)
65
- h_w = self.post_proj(h_w) * x_mask
66
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
67
- z_q = e_q
68
- for flow in self.post_flows:
69
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
70
- logdet_tot_q += logdet_q
71
- z_u, z1 = torch.split(z_q, [1, 1], 1)
72
- u = torch.sigmoid(z_u) * x_mask
73
- z0 = (w - u) * x_mask
74
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
75
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
76
-
77
- logdet_tot = 0
78
- z0, logdet = self.log_flow(z0, x_mask)
79
- logdet_tot += logdet
80
- z = torch.cat([z0, z1], 1)
81
- for flow in flows:
82
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
83
- logdet_tot = logdet_tot + logdet
84
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
85
- return nll + logq # [b]
86
- else:
87
- flows = list(reversed(self.flows))
88
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
89
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
90
- for flow in flows:
91
- z = flow(z, x_mask, g=x, reverse=reverse)
92
- z0, z1 = torch.split(z, [1, 1], 1)
93
- logw = z0
94
- return logw
95
-
96
-
97
- class DurationPredictor(nn.Module):
98
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
99
- super().__init__()
100
-
101
- self.in_channels = in_channels
102
- self.filter_channels = filter_channels
103
- self.kernel_size = kernel_size
104
- self.p_dropout = p_dropout
105
- self.gin_channels = gin_channels
106
-
107
- self.drop = nn.Dropout(p_dropout)
108
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
109
- self.norm_1 = modules.LayerNorm(filter_channels)
110
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
111
- self.norm_2 = modules.LayerNorm(filter_channels)
112
- self.proj = nn.Conv1d(filter_channels, 1, 1)
113
-
114
- if gin_channels != 0:
115
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
116
-
117
- def forward(self, x, x_mask, g=None):
118
- x = torch.detach(x)
119
- if g is not None:
120
- g = torch.detach(g)
121
- x = x + self.cond(g)
122
- x = self.conv_1(x * x_mask)
123
- x = torch.relu(x)
124
- x = self.norm_1(x)
125
- x = self.drop(x)
126
- x = self.conv_2(x * x_mask)
127
- x = torch.relu(x)
128
- x = self.norm_2(x)
129
- x = self.drop(x)
130
- x = self.proj(x * x_mask)
131
- return x * x_mask
132
-
133
-
134
- class TextEncoder(nn.Module):
135
- def __init__(self,
136
- n_vocab,
137
- out_channels,
138
- hidden_channels,
139
- filter_channels,
140
- n_heads,
141
- n_layers,
142
- kernel_size,
143
- p_dropout):
144
- super().__init__()
145
- self.n_vocab = n_vocab
146
- self.out_channels = out_channels
147
- self.hidden_channels = hidden_channels
148
- self.filter_channels = filter_channels
149
- self.n_heads = n_heads
150
- self.n_layers = n_layers
151
- self.kernel_size = kernel_size
152
- self.p_dropout = p_dropout
153
-
154
- self.emb = nn.Embedding(n_vocab, hidden_channels)
155
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
156
-
157
- self.encoder = attentions.Encoder(
158
- hidden_channels,
159
- filter_channels,
160
- n_heads,
161
- n_layers,
162
- kernel_size,
163
- p_dropout)
164
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
165
-
166
- def forward(self, x, x_lengths):
167
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
168
- x = torch.transpose(x, 1, -1) # [b, h, t]
169
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
170
-
171
- x = self.encoder(x * x_mask, x_mask)
172
- stats = self.proj(x) * x_mask
173
-
174
- m, logs = torch.split(stats, self.out_channels, dim=1)
175
- return x, m, logs, x_mask
176
-
177
-
178
- class ResidualCouplingBlock(nn.Module):
179
- def __init__(self,
180
- channels,
181
- hidden_channels,
182
- kernel_size,
183
- dilation_rate,
184
- n_layers,
185
- n_flows=4,
186
- gin_channels=0):
187
- super().__init__()
188
- self.channels = channels
189
- self.hidden_channels = hidden_channels
190
- self.kernel_size = kernel_size
191
- self.dilation_rate = dilation_rate
192
- self.n_layers = n_layers
193
- self.n_flows = n_flows
194
- self.gin_channels = gin_channels
195
-
196
- self.flows = nn.ModuleList()
197
- for i in range(n_flows):
198
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
199
- self.flows.append(modules.Flip())
200
-
201
- def forward(self, x, x_mask, g=None, reverse=False):
202
- if not reverse:
203
- for flow in self.flows:
204
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
205
- else:
206
- for flow in reversed(self.flows):
207
- x = flow(x, x_mask, g=g, reverse=reverse)
208
- return x
209
-
210
-
211
- class PosteriorEncoder(nn.Module):
212
- def __init__(self,
213
- in_channels,
214
- out_channels,
215
- hidden_channels,
216
- kernel_size,
217
- dilation_rate,
218
- n_layers,
219
- gin_channels=0):
220
- super().__init__()
221
- self.in_channels = in_channels
222
- self.out_channels = out_channels
223
- self.hidden_channels = hidden_channels
224
- self.kernel_size = kernel_size
225
- self.dilation_rate = dilation_rate
226
- self.n_layers = n_layers
227
- self.gin_channels = gin_channels
228
-
229
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
230
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
231
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
232
-
233
- def forward(self, x, x_lengths, g=None):
234
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
235
- x = self.pre(x) * x_mask
236
- x = self.enc(x, x_mask, g=g)
237
- stats = self.proj(x) * x_mask
238
- m, logs = torch.split(stats, self.out_channels, dim=1)
239
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
240
- return z, m, logs, x_mask
241
-
242
-
243
- class Generator(torch.nn.Module):
244
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
245
- super(Generator, self).__init__()
246
- self.num_kernels = len(resblock_kernel_sizes)
247
- self.num_upsamples = len(upsample_rates)
248
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
249
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
250
-
251
- self.ups = nn.ModuleList()
252
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
253
- self.ups.append(weight_norm(
254
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
255
- k, u, padding=(k-u)//2)))
256
-
257
- self.resblocks = nn.ModuleList()
258
- for i in range(len(self.ups)):
259
- ch = upsample_initial_channel//(2**(i+1))
260
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
261
- self.resblocks.append(resblock(ch, k, d))
262
-
263
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
264
- self.ups.apply(init_weights)
265
-
266
- if gin_channels != 0:
267
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
268
-
269
- def forward(self, x, g=None):
270
- x = self.conv_pre(x)
271
- if g is not None:
272
- x = x + self.cond(g)
273
-
274
- for i in range(self.num_upsamples):
275
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
276
- x = self.ups[i](x)
277
- xs = None
278
- for j in range(self.num_kernels):
279
- if xs is None:
280
- xs = self.resblocks[i*self.num_kernels+j](x)
281
- else:
282
- xs += self.resblocks[i*self.num_kernels+j](x)
283
- x = xs / self.num_kernels
284
- x = F.leaky_relu(x)
285
- x = self.conv_post(x)
286
- x = torch.tanh(x)
287
-
288
- return x
289
-
290
- def remove_weight_norm(self):
291
- print('Removing weight norm...')
292
- for l in self.ups:
293
- remove_weight_norm(l)
294
- for l in self.resblocks:
295
- l.remove_weight_norm()
296
-
297
-
298
- class DiscriminatorP(torch.nn.Module):
299
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
300
- super(DiscriminatorP, self).__init__()
301
- self.period = period
302
- self.use_spectral_norm = use_spectral_norm
303
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
304
- self.convs = nn.ModuleList([
305
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
306
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
307
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
308
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
309
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
310
- ])
311
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
312
-
313
- def forward(self, x):
314
- fmap = []
315
-
316
- # 1d to 2d
317
- b, c, t = x.shape
318
- if t % self.period != 0: # pad first
319
- n_pad = self.period - (t % self.period)
320
- x = F.pad(x, (0, n_pad), "reflect")
321
- t = t + n_pad
322
- x = x.view(b, c, t // self.period, self.period)
323
-
324
- for l in self.convs:
325
- x = l(x)
326
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
327
- fmap.append(x)
328
- x = self.conv_post(x)
329
- fmap.append(x)
330
- x = torch.flatten(x, 1, -1)
331
-
332
- return x, fmap
333
-
334
-
335
- class DiscriminatorS(torch.nn.Module):
336
- def __init__(self, use_spectral_norm=False):
337
- super(DiscriminatorS, self).__init__()
338
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
339
- self.convs = nn.ModuleList([
340
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
341
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
342
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
343
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
344
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
345
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
346
- ])
347
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
348
-
349
- def forward(self, x):
350
- fmap = []
351
-
352
- for l in self.convs:
353
- x = l(x)
354
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
355
- fmap.append(x)
356
- x = self.conv_post(x)
357
- fmap.append(x)
358
- x = torch.flatten(x, 1, -1)
359
-
360
- return x, fmap
361
-
362
-
363
- class MultiPeriodDiscriminator(torch.nn.Module):
364
- def __init__(self, use_spectral_norm=False):
365
- super(MultiPeriodDiscriminator, self).__init__()
366
- periods = [2,3,5,7,11]
367
-
368
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
369
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
370
- self.discriminators = nn.ModuleList(discs)
371
-
372
- def forward(self, y, y_hat):
373
- y_d_rs = []
374
- y_d_gs = []
375
- fmap_rs = []
376
- fmap_gs = []
377
- for i, d in enumerate(self.discriminators):
378
- y_d_r, fmap_r = d(y)
379
- y_d_g, fmap_g = d(y_hat)
380
- y_d_rs.append(y_d_r)
381
- y_d_gs.append(y_d_g)
382
- fmap_rs.append(fmap_r)
383
- fmap_gs.append(fmap_g)
384
-
385
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
386
-
387
-
388
-
389
- class SynthesizerTrn(nn.Module):
390
- """
391
- Synthesizer for Training
392
- """
393
-
394
- def __init__(self,
395
- n_vocab,
396
- spec_channels,
397
- segment_size,
398
- inter_channels,
399
- hidden_channels,
400
- filter_channels,
401
- n_heads,
402
- n_layers,
403
- kernel_size,
404
- p_dropout,
405
- resblock,
406
- resblock_kernel_sizes,
407
- resblock_dilation_sizes,
408
- upsample_rates,
409
- upsample_initial_channel,
410
- upsample_kernel_sizes,
411
- n_speakers=0,
412
- gin_channels=0,
413
- use_sdp=True,
414
- **kwargs):
415
-
416
- super().__init__()
417
- self.n_vocab = n_vocab
418
- self.spec_channels = spec_channels
419
- self.inter_channels = inter_channels
420
- self.hidden_channels = hidden_channels
421
- self.filter_channels = filter_channels
422
- self.n_heads = n_heads
423
- self.n_layers = n_layers
424
- self.kernel_size = kernel_size
425
- self.p_dropout = p_dropout
426
- self.resblock = resblock
427
- self.resblock_kernel_sizes = resblock_kernel_sizes
428
- self.resblock_dilation_sizes = resblock_dilation_sizes
429
- self.upsample_rates = upsample_rates
430
- self.upsample_initial_channel = upsample_initial_channel
431
- self.upsample_kernel_sizes = upsample_kernel_sizes
432
- self.segment_size = segment_size
433
- self.n_speakers = n_speakers
434
- self.gin_channels = gin_channels
435
-
436
- self.use_sdp = use_sdp
437
-
438
- self.enc_p = TextEncoder(n_vocab,
439
- inter_channels,
440
- hidden_channels,
441
- filter_channels,
442
- n_heads,
443
- n_layers,
444
- kernel_size,
445
- p_dropout)
446
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
447
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
448
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
449
-
450
- if use_sdp:
451
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
452
- else:
453
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
454
-
455
- if n_speakers > 1:
456
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
457
-
458
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
459
-
460
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
461
- if self.n_speakers > 0:
462
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
463
- else:
464
- g = None
465
-
466
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
467
- z_p = self.flow(z, y_mask, g=g)
468
-
469
- with torch.no_grad():
470
- # negative cross-entropy
471
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
472
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
473
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
474
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
475
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
476
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
477
-
478
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
479
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
480
-
481
- w = attn.sum(2)
482
- if self.use_sdp:
483
- l_length = self.dp(x, x_mask, w, g=g)
484
- l_length = l_length / torch.sum(x_mask)
485
- else:
486
- logw_ = torch.log(w + 1e-6) * x_mask
487
- logw = self.dp(x, x_mask, g=g)
488
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
489
-
490
- # expand prior
491
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
492
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
493
-
494
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
495
- o = self.dec(z_slice, g=g)
496
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
497
-
498
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
499
- device = next(self.parameters()).device # 获取模型所在的设备
500
- x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
501
- if self.n_speakers > 0:
502
- g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1]
503
- else:
504
- g = None
505
-
506
- if self.use_sdp:
507
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
508
- else:
509
- logw = self.dp(x, x_mask, g=g)
510
- w = torch.exp(logw) * x_mask * length_scale
511
- w_ceil = torch.ceil(w)
512
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
513
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
514
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
515
- attn = commons.generate_path(w_ceil, attn_mask)
516
-
517
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
518
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
519
-
520
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
521
- z = self.flow(z_p, y_mask, g=g, reverse=True)
522
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
523
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
524
-
525
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
526
- assert self.n_speakers > 0, "n_speakers have to be larger than 0."
527
- g_src = self.emb_g(sid_src).unsqueeze(-1)
528
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
529
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
530
- z_p = self.flow(z, y_mask, g=g_src)
531
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
532
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
533
- return o_hat, y_mask, (z, z_p, z_hat)
534
-
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/adapt_a_model.md DELETED
@@ -1,54 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # 새로운 작업에 대한 모델을 적용하기
14
-
15
- 많은 diffusion 시스템은 같은 구성 요소들을 공유하므로 한 작업에 대해 사전학습된 모델을 완전히 다른 작업에 적용할 수 있습니다.
16
-
17
- 이 인페인팅을 위한 가이드는 사전학습된 [`UNet2DConditionModel`]의 아키텍처를 초기화하고 수정하여 사전학습된 text-to-image 모델을 어떻게 인페인팅에 적용하는지를 알려줄 것입니다.
18
-
19
- ## UNet2DConditionModel 파라미터 구성
20
-
21
- [`UNet2DConditionModel`]은 [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels)에서 4개의 채널을 기본적으로 허용합니다. 예를 들어, [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)와 같은 사전학습된 text-to-image 모델을 불러오고 `in_channels`의 수를 확인합니다:
22
-
23
- ```py
24
- from diffusers import StableDiffusionPipeline
25
-
26
- pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
27
- pipeline.unet.config["in_channels"]
28
- 4
29
- ```
30
-
31
- 인페인팅은 입력 샘플에 9개의 채널이 필요합니다. [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting)와 같은 사전학습된 인페인팅 모델에서 이 값을 확인할 수 있습니다:
32
-
33
- ```py
34
- from diffusers import StableDiffusionPipeline
35
-
36
- pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
37
- pipeline.unet.config["in_channels"]
38
- 9
39
- ```
40
-
41
- 인페인팅에 대한 text-to-image 모델을 적용하기 위해, `in_channels` 수를 4에서 9로 수정해야 할 것입니다.
42
-
43
- 사전학습된 text-to-image 모델의 가중치와 [`UNet2DConditionModel`]을 초기화하고 `in_channels`를 9로 수정해 주세요. `in_channels`의 수를 수정하면 크기가 달라지기 때문에 크기가 안 맞는 오류를 피하기 위해 `ignore_mismatched_sizes=True` 및 `low_cpu_mem_usage=False`를 설정해야 합니다.
44
-
45
- ```py
46
- from diffusers import UNet2DConditionModel
47
-
48
- model_id = "runwayml/stable-diffusion-v1-5"
49
- unet = UNet2DConditionModel.from_pretrained(
50
- model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True
51
- )
52
- ```
53
-
54
- Text-to-image 모델로부터 다른 구성 요소의 사전학습된 가중치는 체크포인트로부터 초기화되지만 `unet`의 입력 채널 가중치 (`conv_in.weight`)는 랜덤하게 초기화됩니다. 그렇지 않으면 모델이 노이즈를 리턴하기 때문에 인페인팅의 모델을 파인튜닝 할 때 중요합니다.
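
Note (not part of the original document): a small, hedged sanity check for the snippet above. After loading the UNet with in_channels=9, its input convolution should expect nine channels; with the default Stable Diffusion v1.5 configuration the weight shape would be expected to be [320, 9, 3, 3].

```python
# Assumes the `unet` created in the last snippet above.
print(unet.config["in_channels"])  # 9
print(unet.conv_in.weight.shape)   # expected: torch.Size([320, 9, 3, 3]) for the default SD v1.5 UNet
```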
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py DELETED
@@ -1,118 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ PyTorch - Flax general utilities."""
16
- import re
17
-
18
- import jax.numpy as jnp
19
- from flax.traverse_util import flatten_dict, unflatten_dict
20
- from jax.random import PRNGKey
21
-
22
- from ..utils import logging
23
-
24
-
25
- logger = logging.get_logger(__name__)
26
-
27
-
28
- def rename_key(key):
29
- regex = r"\w+[.]\d+"
30
- pats = re.findall(regex, key)
31
- for pat in pats:
32
- key = key.replace(pat, "_".join(pat.split(".")))
33
- return key
34
-
35
-
36
- #####################
37
- # PyTorch => Flax #
38
- #####################
39
-
40
-
41
- # Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69
42
- # and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py
43
- def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
44
- """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""
45
-
46
- # conv norm or layer norm
47
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
48
- if (
49
- any("norm" in str_ for str_ in pt_tuple_key)
50
- and (pt_tuple_key[-1] == "bias")
51
- and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
52
- and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
53
- ):
54
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
55
- return renamed_pt_tuple_key, pt_tensor
56
- elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
57
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
58
- return renamed_pt_tuple_key, pt_tensor
59
-
60
- # embedding
61
- if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
62
- pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
63
- return renamed_pt_tuple_key, pt_tensor
64
-
65
- # conv layer
66
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
67
- if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
68
- pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
69
- return renamed_pt_tuple_key, pt_tensor
70
-
71
- # linear layer
72
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
73
- if pt_tuple_key[-1] == "weight":
74
- pt_tensor = pt_tensor.T
75
- return renamed_pt_tuple_key, pt_tensor
76
-
77
- # old PyTorch layer norm weight
78
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
79
- if pt_tuple_key[-1] == "gamma":
80
- return renamed_pt_tuple_key, pt_tensor
81
-
82
- # old PyTorch layer norm bias
83
- renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
84
- if pt_tuple_key[-1] == "beta":
85
- return renamed_pt_tuple_key, pt_tensor
86
-
87
- return pt_tuple_key, pt_tensor
88
-
89
-
90
- def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
91
- # Step 1: Convert pytorch tensor to numpy
92
- pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
93
-
94
- # Step 2: Since the model is stateless, get random Flax params
95
- random_flax_params = flax_model.init_weights(PRNGKey(init_key))
96
-
97
- random_flax_state_dict = flatten_dict(random_flax_params)
98
- flax_state_dict = {}
99
-
100
- # Need to change some parameters name to match Flax names
101
- for pt_key, pt_tensor in pt_state_dict.items():
102
- renamed_pt_key = rename_key(pt_key)
103
- pt_tuple_key = tuple(renamed_pt_key.split("."))
104
-
105
- # Correctly rename weight parameters
106
- flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
107
-
108
- if flax_key in random_flax_state_dict:
109
- if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
110
- raise ValueError(
111
- f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
112
- f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
113
- )
114
-
115
- # also add unexpected weight so that warning is thrown
116
- flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
117
-
118
- return unflatten_dict(flax_state_dict)
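For reference, a minimal, self-contained sketch of the weight-layout convention the deleted helper above implements: a PyTorch Conv2d weight stored as (out, in, kH, kW) becomes a Flax kernel of shape (kH, kW, in, out), and a Linear weight is transposed. The parameter names below are made up for illustration and are not part of the original file.

    import numpy as np

    def pt_conv_to_flax_kernel(pt_weight: np.ndarray) -> np.ndarray:
        # PyTorch Conv2d: (out_channels, in_channels, kH, kW)
        # Flax nn.Conv:   (kH, kW, in_channels, out_channels)
        return pt_weight.transpose(2, 3, 1, 0)

    def pt_linear_to_flax_kernel(pt_weight: np.ndarray) -> np.ndarray:
        # PyTorch Linear stores (out_features, in_features); Flax nn.Dense expects the transpose.
        return pt_weight.T

    # Hypothetical parameter name, mirroring the "blocks.0" -> "blocks_0" renaming done by rename_key.
    pt_key = "down_blocks.0.resnets.0.conv1.weight"
    flax_key = tuple("down_blocks_0.resnets_0.conv1.kernel".split("."))

    conv_w = np.zeros((320, 4, 3, 3), dtype=np.float32)
    print(pt_key, "->", flax_key, pt_conv_to_flax_kernel(conv_w).shape)  # (3, 3, 4, 320)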
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py DELETED
@@ -1,738 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import numpy as np
20
- import PIL
21
- import torch
22
- from packaging import version
23
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
24
-
25
- from ...configuration_utils import FrozenDict
26
- from ...image_processor import VaeImageProcessor
27
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
- from ...models import AutoencoderKL, UNet2DConditionModel
29
- from ...schedulers import KarrasDiffusionSchedulers
30
- from ...utils import (
31
- PIL_INTERPOLATION,
32
- deprecate,
33
- is_accelerate_available,
34
- is_accelerate_version,
35
- logging,
36
- randn_tensor,
37
- )
38
- from ..pipeline_utils import DiffusionPipeline
39
- from . import StableDiffusionPipelineOutput
40
- from .safety_checker import StableDiffusionSafetyChecker
41
-
42
-
43
- logger = logging.get_logger(__name__)
44
-
45
-
46
- def preprocess_image(image, batch_size):
47
- w, h = image.size
48
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
49
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
50
- image = np.array(image).astype(np.float32) / 255.0
51
- image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
52
- image = torch.from_numpy(image)
53
- return 2.0 * image - 1.0
54
-
55
-
56
- def preprocess_mask(mask, batch_size, scale_factor=8):
57
- if not isinstance(mask, torch.FloatTensor):
58
- mask = mask.convert("L")
59
- w, h = mask.size
60
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
61
- mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
62
- mask = np.array(mask).astype(np.float32) / 255.0
63
- mask = np.tile(mask, (4, 1, 1))
64
- mask = np.vstack([mask[None]] * batch_size)
65
- mask = 1 - mask # repaint white, keep black
66
- mask = torch.from_numpy(mask)
67
- return mask
68
-
69
- else:
70
- valid_mask_channel_sizes = [1, 3]
71
- # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
72
- if mask.shape[3] in valid_mask_channel_sizes:
73
- mask = mask.permute(0, 3, 1, 2)
74
- elif mask.shape[1] not in valid_mask_channel_sizes:
75
- raise ValueError(
76
- f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
77
- f" but received mask of shape {tuple(mask.shape)}"
78
- )
79
- # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
80
- mask = mask.mean(dim=1, keepdim=True)
81
- h, w = mask.shape[-2:]
82
- h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
83
- mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
84
- return mask
85
-
86
-
87
- class StableDiffusionInpaintPipelineLegacy(
88
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
89
- ):
90
- r"""
91
- Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
92
-
93
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
94
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
95
-
96
- In addition the pipeline inherits the following loading methods:
97
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
98
- - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
99
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
100
-
101
- as well as the following saving methods:
102
- - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
103
-
104
- Args:
105
- vae ([`AutoencoderKL`]):
106
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
107
- text_encoder ([`CLIPTextModel`]):
108
- Frozen text-encoder. Stable Diffusion uses the text portion of
109
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
110
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
111
- tokenizer (`CLIPTokenizer`):
112
- Tokenizer of class
113
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
114
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
115
- scheduler ([`SchedulerMixin`]):
116
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
117
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
118
- safety_checker ([`StableDiffusionSafetyChecker`]):
119
- Classification module that estimates whether generated images could be considered offensive or harmful.
120
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
121
- feature_extractor ([`CLIPImageProcessor`]):
122
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
123
- """
124
- _optional_components = ["feature_extractor"]
125
-
126
- def __init__(
127
- self,
128
- vae: AutoencoderKL,
129
- text_encoder: CLIPTextModel,
130
- tokenizer: CLIPTokenizer,
131
- unet: UNet2DConditionModel,
132
- scheduler: KarrasDiffusionSchedulers,
133
- safety_checker: StableDiffusionSafetyChecker,
134
- feature_extractor: CLIPImageProcessor,
135
- requires_safety_checker: bool = True,
136
- ):
137
- super().__init__()
138
-
139
- deprecation_message = (
140
- f"The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality"
141
- "by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533"
142
- "for more information."
143
- )
144
- deprecate("legacy is outdated", "1.0.0", deprecation_message, standard_warn=False)
145
-
146
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
147
- deprecation_message = (
148
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
149
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
150
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
151
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
152
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
153
- " file"
154
- )
155
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
156
- new_config = dict(scheduler.config)
157
- new_config["steps_offset"] = 1
158
- scheduler._internal_dict = FrozenDict(new_config)
159
-
160
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
161
- deprecation_message = (
162
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
163
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
164
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
165
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
166
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
167
- )
168
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
169
- new_config = dict(scheduler.config)
170
- new_config["clip_sample"] = False
171
- scheduler._internal_dict = FrozenDict(new_config)
172
-
173
- if safety_checker is None and requires_safety_checker:
174
- logger.warning(
175
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
176
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
177
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
178
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
179
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
180
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
181
- )
182
-
183
- if safety_checker is not None and feature_extractor is None:
184
- raise ValueError(
185
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
186
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
187
- )
188
-
189
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
190
- version.parse(unet.config._diffusers_version).base_version
191
- ) < version.parse("0.9.0.dev0")
192
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
193
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
194
- deprecation_message = (
195
- "The configuration file of the unet has set the default `sample_size` to smaller than"
196
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
197
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
198
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
199
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
200
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
201
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
202
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
203
- " the `unet/config.json` file"
204
- )
205
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
206
- new_config = dict(unet.config)
207
- new_config["sample_size"] = 64
208
- unet._internal_dict = FrozenDict(new_config)
209
-
210
- self.register_modules(
211
- vae=vae,
212
- text_encoder=text_encoder,
213
- tokenizer=tokenizer,
214
- unet=unet,
215
- scheduler=scheduler,
216
- safety_checker=safety_checker,
217
- feature_extractor=feature_extractor,
218
- )
219
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
220
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
221
- self.register_to_config(requires_safety_checker=requires_safety_checker)
222
-
223
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
224
- def enable_model_cpu_offload(self, gpu_id=0):
225
- r"""
226
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
227
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
228
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
229
- iterative execution of the `unet`.
230
- """
231
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
232
- from accelerate import cpu_offload_with_hook
233
- else:
234
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
235
-
236
- device = torch.device(f"cuda:{gpu_id}")
237
-
238
- if self.device.type != "cpu":
239
- self.to("cpu", silence_dtype_warnings=True)
240
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
241
-
242
- hook = None
243
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
244
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
245
-
246
- if self.safety_checker is not None:
247
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
248
-
249
- # We'll offload the last model manually.
250
- self.final_offload_hook = hook
251
-
252
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
253
- def _encode_prompt(
254
- self,
255
- prompt,
256
- device,
257
- num_images_per_prompt,
258
- do_classifier_free_guidance,
259
- negative_prompt=None,
260
- prompt_embeds: Optional[torch.FloatTensor] = None,
261
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
262
- lora_scale: Optional[float] = None,
263
- ):
264
- r"""
265
- Encodes the prompt into text encoder hidden states.
266
-
267
- Args:
268
- prompt (`str` or `List[str]`, *optional*):
269
- prompt to be encoded
270
- device: (`torch.device`):
271
- torch device
272
- num_images_per_prompt (`int`):
273
- number of images that should be generated per prompt
274
- do_classifier_free_guidance (`bool`):
275
- whether to use classifier free guidance or not
276
- negative_prompt (`str` or `List[str]`, *optional*):
277
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
278
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
279
- less than `1`).
280
- prompt_embeds (`torch.FloatTensor`, *optional*):
281
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
282
- provided, text embeddings will be generated from `prompt` input argument.
283
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
284
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
285
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
286
- argument.
287
- lora_scale (`float`, *optional*):
288
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
289
- """
290
- # set lora scale so that monkey patched LoRA
291
- # function of text encoder can correctly access it
292
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
293
- self._lora_scale = lora_scale
294
-
295
- if prompt is not None and isinstance(prompt, str):
296
- batch_size = 1
297
- elif prompt is not None and isinstance(prompt, list):
298
- batch_size = len(prompt)
299
- else:
300
- batch_size = prompt_embeds.shape[0]
301
-
302
- if prompt_embeds is None:
303
- # textual inversion: procecss multi-vector tokens if necessary
304
- if isinstance(self, TextualInversionLoaderMixin):
305
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
306
-
307
- text_inputs = self.tokenizer(
308
- prompt,
309
- padding="max_length",
310
- max_length=self.tokenizer.model_max_length,
311
- truncation=True,
312
- return_tensors="pt",
313
- )
314
- text_input_ids = text_inputs.input_ids
315
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
316
-
317
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
318
- text_input_ids, untruncated_ids
319
- ):
320
- removed_text = self.tokenizer.batch_decode(
321
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
322
- )
323
- logger.warning(
324
- "The following part of your input was truncated because CLIP can only handle sequences up to"
325
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
326
- )
327
-
328
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
329
- attention_mask = text_inputs.attention_mask.to(device)
330
- else:
331
- attention_mask = None
332
-
333
- prompt_embeds = self.text_encoder(
334
- text_input_ids.to(device),
335
- attention_mask=attention_mask,
336
- )
337
- prompt_embeds = prompt_embeds[0]
338
-
339
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
340
-
341
- bs_embed, seq_len, _ = prompt_embeds.shape
342
- # duplicate text embeddings for each generation per prompt, using mps friendly method
343
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
344
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
345
-
346
- # get unconditional embeddings for classifier free guidance
347
- if do_classifier_free_guidance and negative_prompt_embeds is None:
348
- uncond_tokens: List[str]
349
- if negative_prompt is None:
350
- uncond_tokens = [""] * batch_size
351
- elif prompt is not None and type(prompt) is not type(negative_prompt):
352
- raise TypeError(
353
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
354
- f" {type(prompt)}."
355
- )
356
- elif isinstance(negative_prompt, str):
357
- uncond_tokens = [negative_prompt]
358
- elif batch_size != len(negative_prompt):
359
- raise ValueError(
360
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
361
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
362
- " the batch size of `prompt`."
363
- )
364
- else:
365
- uncond_tokens = negative_prompt
366
-
367
- # textual inversion: procecss multi-vector tokens if necessary
368
- if isinstance(self, TextualInversionLoaderMixin):
369
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
370
-
371
- max_length = prompt_embeds.shape[1]
372
- uncond_input = self.tokenizer(
373
- uncond_tokens,
374
- padding="max_length",
375
- max_length=max_length,
376
- truncation=True,
377
- return_tensors="pt",
378
- )
379
-
380
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
381
- attention_mask = uncond_input.attention_mask.to(device)
382
- else:
383
- attention_mask = None
384
-
385
- negative_prompt_embeds = self.text_encoder(
386
- uncond_input.input_ids.to(device),
387
- attention_mask=attention_mask,
388
- )
389
- negative_prompt_embeds = negative_prompt_embeds[0]
390
-
391
- if do_classifier_free_guidance:
392
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
393
- seq_len = negative_prompt_embeds.shape[1]
394
-
395
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
396
-
397
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
398
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
399
-
400
- # For classifier free guidance, we need to do two forward passes.
401
- # Here we concatenate the unconditional and text embeddings into a single batch
402
- # to avoid doing two forward passes
403
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
404
-
405
- return prompt_embeds
406
-
407
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
408
- def run_safety_checker(self, image, device, dtype):
409
- if self.safety_checker is None:
410
- has_nsfw_concept = None
411
- else:
412
- if torch.is_tensor(image):
413
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
414
- else:
415
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
416
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
417
- image, has_nsfw_concept = self.safety_checker(
418
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
419
- )
420
- return image, has_nsfw_concept
421
-
422
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
423
- def decode_latents(self, latents):
424
- warnings.warn(
425
- "The decode_latents method is deprecated and will be removed in a future version. Please"
426
- " use VaeImageProcessor instead",
427
- FutureWarning,
428
- )
429
- latents = 1 / self.vae.config.scaling_factor * latents
430
- image = self.vae.decode(latents, return_dict=False)[0]
431
- image = (image / 2 + 0.5).clamp(0, 1)
432
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
433
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
434
- return image
435
-
436
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
437
- def prepare_extra_step_kwargs(self, generator, eta):
438
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
439
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
440
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
441
- # and should be between [0, 1]
442
-
443
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
444
- extra_step_kwargs = {}
445
- if accepts_eta:
446
- extra_step_kwargs["eta"] = eta
447
-
448
- # check if the scheduler accepts generator
449
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
450
- if accepts_generator:
451
- extra_step_kwargs["generator"] = generator
452
- return extra_step_kwargs
453
-
454
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs
455
- def check_inputs(
456
- self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
457
- ):
458
- if strength < 0 or strength > 1:
459
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
460
-
461
- if (callback_steps is None) or (
462
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
463
- ):
464
- raise ValueError(
465
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
466
- f" {type(callback_steps)}."
467
- )
468
-
469
- if prompt is not None and prompt_embeds is not None:
470
- raise ValueError(
471
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
472
- " only forward one of the two."
473
- )
474
- elif prompt is None and prompt_embeds is None:
475
- raise ValueError(
476
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
477
- )
478
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
479
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
480
-
481
- if negative_prompt is not None and negative_prompt_embeds is not None:
482
- raise ValueError(
483
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
484
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
485
- )
486
-
487
- if prompt_embeds is not None and negative_prompt_embeds is not None:
488
- if prompt_embeds.shape != negative_prompt_embeds.shape:
489
- raise ValueError(
490
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
491
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
492
- f" {negative_prompt_embeds.shape}."
493
- )
494
-
495
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
496
- def get_timesteps(self, num_inference_steps, strength, device):
497
- # get the original timestep using init_timestep
498
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
499
-
500
- t_start = max(num_inference_steps - init_timestep, 0)
501
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
502
-
503
- return timesteps, num_inference_steps - t_start
504
-
505
- def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator):
506
- image = image.to(device=device, dtype=dtype)
507
- init_latent_dist = self.vae.encode(image).latent_dist
508
- init_latents = init_latent_dist.sample(generator=generator)
509
- init_latents = self.vae.config.scaling_factor * init_latents
510
-
511
- # Expand init_latents for batch_size and num_images_per_prompt
512
- init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
513
- init_latents_orig = init_latents
514
-
515
- # add noise to latents using the timesteps
516
- noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
517
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
518
- latents = init_latents
519
- return latents, init_latents_orig, noise
520
-
521
- @torch.no_grad()
522
- def __call__(
523
- self,
524
- prompt: Union[str, List[str]] = None,
525
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
526
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
527
- strength: float = 0.8,
528
- num_inference_steps: Optional[int] = 50,
529
- guidance_scale: Optional[float] = 7.5,
530
- negative_prompt: Optional[Union[str, List[str]]] = None,
531
- num_images_per_prompt: Optional[int] = 1,
532
- add_predicted_noise: Optional[bool] = False,
533
- eta: Optional[float] = 0.0,
534
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
535
- prompt_embeds: Optional[torch.FloatTensor] = None,
536
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
537
- output_type: Optional[str] = "pil",
538
- return_dict: bool = True,
539
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
540
- callback_steps: int = 1,
541
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
542
- ):
543
- r"""
544
- Function invoked when calling the pipeline for generation.
545
-
546
- Args:
547
- prompt (`str` or `List[str]`, *optional*):
548
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
549
- instead.
550
- image (`torch.FloatTensor` or `PIL.Image.Image`):
551
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
552
- process. This is the image whose masked region will be inpainted.
553
- mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
554
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
555
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
556
- PIL image, it will be converted to a single channel (luminance) before use. If mask is a tensor, the
557
- expected shape should be either `(B, H, W, C)` or `(B, C, H, W)`, where C is 1 or 3.
558
- strength (`float`, *optional*, defaults to 0.8):
559
- Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
560
- is 1, the denoising process will be run on the masked area for the full number of iterations specified
561
- in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to
562
- that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
563
- num_inference_steps (`int`, *optional*, defaults to 50):
564
- The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
565
- the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
566
- guidance_scale (`float`, *optional*, defaults to 7.5):
567
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
568
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
569
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
570
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
571
- usually at the expense of lower image quality.
572
- negative_prompt (`str` or `List[str]`, *optional*):
573
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
574
- `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
575
- is less than `1`).
576
- num_images_per_prompt (`int`, *optional*, defaults to 1):
577
- The number of images to generate per prompt.
578
- add_predicted_noise (`bool`, *optional*, defaults to True):
579
- Use predicted noise instead of random noise when constructing noisy versions of the original image in
580
- the reverse diffusion process
581
- eta (`float`, *optional*, defaults to 0.0):
582
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
583
- [`schedulers.DDIMScheduler`], will be ignored for others.
584
- generator (`torch.Generator`, *optional*):
585
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
586
- to make generation deterministic.
587
- prompt_embeds (`torch.FloatTensor`, *optional*):
588
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
589
- provided, text embeddings will be generated from `prompt` input argument.
590
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
591
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
592
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
593
- argument.
594
- output_type (`str`, *optional*, defaults to `"pil"`):
595
- The output format of the generate image. Choose between
596
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
597
- return_dict (`bool`, *optional*, defaults to `True`):
598
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
599
- plain tuple.
600
- callback (`Callable`, *optional*):
601
- A function that will be called every `callback_steps` steps during inference. The function will be
602
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
603
- callback_steps (`int`, *optional*, defaults to 1):
604
- The frequency at which the `callback` function will be called. If not specified, the callback will be
605
- called at every step.
606
- cross_attention_kwargs (`dict`, *optional*):
607
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
608
- `self.processor` in
609
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
610
-
611
- Returns:
612
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
613
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
614
- When returning a tuple, the first element is a list with the generated images, and the second element is a
615
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
616
- (nsfw) content, according to the `safety_checker`.
617
- """
618
- # 1. Check inputs
619
- self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
620
-
621
- # 2. Define call parameters
622
- if prompt is not None and isinstance(prompt, str):
623
- batch_size = 1
624
- elif prompt is not None and isinstance(prompt, list):
625
- batch_size = len(prompt)
626
- else:
627
- batch_size = prompt_embeds.shape[0]
628
-
629
- device = self._execution_device
630
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
631
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
632
- # corresponds to doing no classifier free guidance.
633
- do_classifier_free_guidance = guidance_scale > 1.0
634
-
635
- # 3. Encode input prompt
636
- text_encoder_lora_scale = (
637
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
638
- )
639
- prompt_embeds = self._encode_prompt(
640
- prompt,
641
- device,
642
- num_images_per_prompt,
643
- do_classifier_free_guidance,
644
- negative_prompt,
645
- prompt_embeds=prompt_embeds,
646
- negative_prompt_embeds=negative_prompt_embeds,
647
- lora_scale=text_encoder_lora_scale,
648
- )
649
-
650
- # 4. Preprocess image and mask
651
- if not isinstance(image, torch.FloatTensor):
652
- image = preprocess_image(image, batch_size)
653
-
654
- mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
655
-
656
- # 5. set timesteps
657
- self.scheduler.set_timesteps(num_inference_steps, device=device)
658
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
659
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
660
-
661
- # 6. Prepare latent variables
662
- # encode the init image into latents and scale the latents
663
- latents, init_latents_orig, noise = self.prepare_latents(
664
- image, latent_timestep, num_images_per_prompt, prompt_embeds.dtype, device, generator
665
- )
666
-
667
- # 7. Prepare mask latent
668
- mask = mask_image.to(device=device, dtype=latents.dtype)
669
- mask = torch.cat([mask] * num_images_per_prompt)
670
-
671
- # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
672
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
673
-
674
- # 9. Denoising loop
675
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
676
- with self.progress_bar(total=num_inference_steps) as progress_bar:
677
- for i, t in enumerate(timesteps):
678
- # expand the latents if we are doing classifier free guidance
679
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
680
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
681
-
682
- # predict the noise residual
683
- noise_pred = self.unet(
684
- latent_model_input,
685
- t,
686
- encoder_hidden_states=prompt_embeds,
687
- cross_attention_kwargs=cross_attention_kwargs,
688
- return_dict=False,
689
- )[0]
690
-
691
- # perform guidance
692
- if do_classifier_free_guidance:
693
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
694
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
695
-
696
- # compute the previous noisy sample x_t -> x_t-1
697
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
698
- # masking
699
- if add_predicted_noise:
700
- init_latents_proper = self.scheduler.add_noise(
701
- init_latents_orig, noise_pred_uncond, torch.tensor([t])
702
- )
703
- else:
704
- init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
705
-
706
- latents = (init_latents_proper * mask) + (latents * (1 - mask))
707
-
708
- # call the callback, if provided
709
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
710
- progress_bar.update()
711
- if callback is not None and i % callback_steps == 0:
712
- callback(i, t, latents)
713
-
714
- # use original latents corresponding to unmasked portions of the image
715
- latents = (init_latents_orig * mask) + (latents * (1 - mask))
716
-
717
- if not output_type == "latent":
718
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
719
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
720
- else:
721
- image = latents
722
- has_nsfw_concept = None
723
-
724
- if has_nsfw_concept is None:
725
- do_denormalize = [True] * image.shape[0]
726
- else:
727
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
728
-
729
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
730
-
731
- # Offload last model to CPU
732
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
733
- self.final_offload_hook.offload()
734
-
735
- if not return_dict:
736
- return (image, has_nsfw_concept)
737
-
738
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
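A hedged usage sketch for the deleted legacy pipeline above, which is deprecated in favor of StableDiffusionInpaintPipeline. It assumes a diffusers release that still ships this class; the checkpoint id and image paths are placeholders, not part of the original file.

    import torch
    from diffusers import StableDiffusionInpaintPipelineLegacy
    from PIL import Image

    pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")

    init_image = Image.open("dog.png").convert("RGB").resize((512, 512))
    mask_image = Image.open("dog_mask.png").convert("L").resize((512, 512))  # white = repaint

    result = pipe(
        prompt="a golden retriever sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        strength=0.75,               # fraction of the schedule run on the masked region
        num_inference_steps=50,
        guidance_scale=7.5,
    ).images[0]
    result.save("inpainted.png")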
 
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './dmnet_r50-d8_769x769_40k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py DELETED
@@ -1,35 +0,0 @@
1
- _base_ = './cityscapes.py'
2
- img_norm_cfg = dict(
3
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4
- crop_size = (769, 769)
5
- train_pipeline = [
6
- dict(type='LoadImageFromFile'),
7
- dict(type='LoadAnnotations'),
8
- dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
9
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10
- dict(type='RandomFlip', prob=0.5),
11
- dict(type='PhotoMetricDistortion'),
12
- dict(type='Normalize', **img_norm_cfg),
13
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
14
- dict(type='DefaultFormatBundle'),
15
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
16
- ]
17
- test_pipeline = [
18
- dict(type='LoadImageFromFile'),
19
- dict(
20
- type='MultiScaleFlipAug',
21
- img_scale=(2049, 1025),
22
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
23
- flip=False,
24
- transforms=[
25
- dict(type='Resize', keep_ratio=True),
26
- dict(type='RandomFlip'),
27
- dict(type='Normalize', **img_norm_cfg),
28
- dict(type='ImageToTensor', keys=['img']),
29
- dict(type='Collect', keys=['img']),
30
- ])
31
- ]
32
- data = dict(
33
- train=dict(pipeline=train_pipeline),
34
- val=dict(pipeline=test_pipeline),
35
- test=dict(pipeline=test_pipeline))
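A short sketch of how a dataset config like the one above is typically consumed in the MMSegmentation 0.x toolchain; the config path is a placeholder and the exact API (mmcv Config plus mmseg's build_dataset) is assumed from that era of the libraries.

    from mmcv import Config
    from mmseg.datasets import build_dataset

    # Hypothetical path to the deleted config shown above.
    cfg = Config.fromfile("configs/_base_/datasets/cityscapes_769x769.py")

    # _base_ inheritance is resolved by Config.fromfile, so cfg.data.train already
    # carries the 769x769 train_pipeline defined here on top of ./cityscapes.py.
    train_dataset = build_dataset(cfg.data.train)
    print(len(train_dataset), cfg.crop_size)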
 
spaces/AriaMei/TTSdemo/text/__init__.py DELETED
@@ -1,56 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
- from text import cleaners
3
- from text.symbols import symbols
4
-
5
-
6
- # Mappings from symbol to numeric ID and vice versa:
7
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
8
- _id_to_symbol = {i: s for i, s in enumerate(symbols)}
9
-
10
-
11
- def text_to_sequence(text, cleaner_names):
12
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
13
- Args:
14
- text: string to convert to a sequence
15
- cleaner_names: names of the cleaner functions to run the text through
16
- Returns:
17
- List of integers corresponding to the symbols in the text
18
- '''
19
- sequence = []
20
-
21
- clean_text = _clean_text(text, cleaner_names)
22
- for symbol in clean_text:
23
- if symbol not in _symbol_to_id.keys():
24
- continue
25
- symbol_id = _symbol_to_id[symbol]
26
- sequence += [symbol_id]
27
- return sequence
28
-
29
-
30
- def cleaned_text_to_sequence(cleaned_text):
31
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
32
- Args:
33
- text: string to convert to a sequence
34
- Returns:
35
- List of integers corresponding to the symbols in the text
36
- '''
37
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
38
- return sequence
39
-
40
-
41
- def sequence_to_text(sequence):
42
- '''Converts a sequence of IDs back to a string'''
43
- result = ''
44
- for symbol_id in sequence:
45
- s = _id_to_symbol[symbol_id]
46
- result += s
47
- return result
48
-
49
-
50
- def _clean_text(text, cleaner_names):
51
- for name in cleaner_names:
52
- cleaner = getattr(cleaners, name)
53
- if not cleaner:
54
- raise Exception('Unknown cleaner: %s' % name)
55
- text = cleaner(text)
56
- return text
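A self-contained toy version of the symbol/ID round trip implemented above, using a made-up symbol inventory instead of the project's text.symbols module.

    # Toy symbol inventory standing in for text.symbols.symbols.
    symbols = ["_", " ", "a", "b", "k", "t", "."]
    symbol_to_id = {s: i for i, s in enumerate(symbols)}
    id_to_symbol = {i: s for i, s in enumerate(symbols)}

    def to_sequence(text: str) -> list[int]:
        # Unknown symbols are skipped, as in text_to_sequence above.
        return [symbol_to_id[ch] for ch in text if ch in symbol_to_id]

    def to_text(sequence: list[int]) -> str:
        return "".join(id_to_symbol[i] for i in sequence)

    seq = to_sequence("tak ba.")
    assert to_text(seq) == "tak ba."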
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/sources.py DELETED
@@ -1,223 +0,0 @@
1
- import logging
2
- import mimetypes
3
- import os
4
- import pathlib
5
- from typing import Callable, Iterable, Optional, Tuple
6
-
7
- from pip._internal.models.candidate import InstallationCandidate
8
- from pip._internal.models.link import Link
9
- from pip._internal.utils.urls import path_to_url, url_to_path
10
- from pip._internal.vcs import is_url
11
-
12
- logger = logging.getLogger(__name__)
13
-
14
- FoundCandidates = Iterable[InstallationCandidate]
15
- FoundLinks = Iterable[Link]
16
- CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]]
17
- PageValidator = Callable[[Link], bool]
18
-
19
-
20
- class LinkSource:
21
- @property
22
- def link(self) -> Optional[Link]:
23
- """Returns the underlying link, if there's one."""
24
- raise NotImplementedError()
25
-
26
- def page_candidates(self) -> FoundCandidates:
27
- """Candidates found by parsing an archive listing HTML file."""
28
- raise NotImplementedError()
29
-
30
- def file_links(self) -> FoundLinks:
31
- """Links found by specifying archives directly."""
32
- raise NotImplementedError()
33
-
34
-
35
- def _is_html_file(file_url: str) -> bool:
36
- return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
37
-
38
-
39
- class _FlatDirectorySource(LinkSource):
40
- """Link source specified by ``--find-links=<path-to-dir>``.
41
-
42
- This looks at the content of the directory and returns:
43
-
44
- * ``page_candidates``: Links listed on each HTML file in the directory.
45
- * ``file_candidates``: Archives in the directory.
46
- """
47
-
48
- def __init__(
49
- self,
50
- candidates_from_page: CandidatesFromPage,
51
- path: str,
52
- ) -> None:
53
- self._candidates_from_page = candidates_from_page
54
- self._path = pathlib.Path(os.path.realpath(path))
55
-
56
- @property
57
- def link(self) -> Optional[Link]:
58
- return None
59
-
60
- def page_candidates(self) -> FoundCandidates:
61
- for path in self._path.iterdir():
62
- url = path_to_url(str(path))
63
- if not _is_html_file(url):
64
- continue
65
- yield from self._candidates_from_page(Link(url))
66
-
67
- def file_links(self) -> FoundLinks:
68
- for path in self._path.iterdir():
69
- url = path_to_url(str(path))
70
- if _is_html_file(url):
71
- continue
72
- yield Link(url)
73
-
74
-
75
- class _LocalFileSource(LinkSource):
76
- """``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.
77
-
78
- If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
79
- the option, it is converted to a URL first. This returns:
80
-
81
- * ``page_candidates``: Links listed on an HTML file.
82
- * ``file_candidates``: The non-HTML file.
83
- """
84
-
85
- def __init__(
86
- self,
87
- candidates_from_page: CandidatesFromPage,
88
- link: Link,
89
- ) -> None:
90
- self._candidates_from_page = candidates_from_page
91
- self._link = link
92
-
93
- @property
94
- def link(self) -> Optional[Link]:
95
- return self._link
96
-
97
- def page_candidates(self) -> FoundCandidates:
98
- if not _is_html_file(self._link.url):
99
- return
100
- yield from self._candidates_from_page(self._link)
101
-
102
- def file_links(self) -> FoundLinks:
103
- if _is_html_file(self._link.url):
104
- return
105
- yield self._link
106
-
107
-
108
- class _RemoteFileSource(LinkSource):
109
- """``--find-links=<url>`` or ``--[extra-]index-url=<url>``.
110
-
111
- This returns:
112
-
113
- * ``page_candidates``: Links listed on an HTML file.
114
- * ``file_candidates``: The non-HTML file.
115
- """
116
-
117
- def __init__(
118
- self,
119
- candidates_from_page: CandidatesFromPage,
120
- page_validator: PageValidator,
121
- link: Link,
122
- ) -> None:
123
- self._candidates_from_page = candidates_from_page
124
- self._page_validator = page_validator
125
- self._link = link
126
-
127
- @property
128
- def link(self) -> Optional[Link]:
129
- return self._link
130
-
131
- def page_candidates(self) -> FoundCandidates:
132
- if not self._page_validator(self._link):
133
- return
134
- yield from self._candidates_from_page(self._link)
135
-
136
- def file_links(self) -> FoundLinks:
137
- yield self._link
138
-
139
-
140
- class _IndexDirectorySource(LinkSource):
141
- """``--[extra-]index-url=<path-to-directory>``.
142
-
143
- This is treated like a remote URL; ``candidates_from_page`` contains logic
144
- for this by appending ``index.html`` to the link.
145
- """
146
-
147
- def __init__(
148
- self,
149
- candidates_from_page: CandidatesFromPage,
150
- link: Link,
151
- ) -> None:
152
- self._candidates_from_page = candidates_from_page
153
- self._link = link
154
-
155
- @property
156
- def link(self) -> Optional[Link]:
157
- return self._link
158
-
159
- def page_candidates(self) -> FoundCandidates:
160
- yield from self._candidates_from_page(self._link)
161
-
162
- def file_links(self) -> FoundLinks:
163
- return ()
164
-
165
-
166
- def build_source(
167
- location: str,
168
- *,
169
- candidates_from_page: CandidatesFromPage,
170
- page_validator: PageValidator,
171
- expand_dir: bool,
172
- cache_link_parsing: bool,
173
- ) -> Tuple[Optional[str], Optional[LinkSource]]:
174
- path: Optional[str] = None
175
- url: Optional[str] = None
176
- if os.path.exists(location): # Is a local path.
177
- url = path_to_url(location)
178
- path = location
179
- elif location.startswith("file:"): # A file: URL.
180
- url = location
181
- path = url_to_path(location)
182
- elif is_url(location):
183
- url = location
184
-
185
- if url is None:
186
- msg = (
187
- "Location '%s' is ignored: "
188
- "it is either a non-existing path or lacks a specific scheme."
189
- )
190
- logger.warning(msg, location)
191
- return (None, None)
192
-
193
- if path is None:
194
- source: LinkSource = _RemoteFileSource(
195
- candidates_from_page=candidates_from_page,
196
- page_validator=page_validator,
197
- link=Link(url, cache_link_parsing=cache_link_parsing),
198
- )
199
- return (url, source)
200
-
201
- if os.path.isdir(path):
202
- if expand_dir:
203
- source = _FlatDirectorySource(
204
- candidates_from_page=candidates_from_page,
205
- path=path,
206
- )
207
- else:
208
- source = _IndexDirectorySource(
209
- candidates_from_page=candidates_from_page,
210
- link=Link(url, cache_link_parsing=cache_link_parsing),
211
- )
212
- return (url, source)
213
- elif os.path.isfile(path):
214
- source = _LocalFileSource(
215
- candidates_from_page=candidates_from_page,
216
- link=Link(url, cache_link_parsing=cache_link_parsing),
217
- )
218
- return (url, source)
219
- logger.warning(
220
- "Location '%s' is ignored: it is neither a file nor a directory.",
221
- location,
222
- )
223
- return (url, None)
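The dispatch above boils down to: existing filesystem path -> directory or file source, file: URL -> local file source, other URL -> remote source, anything else ignored. A simplified, standalone sketch of that classification follows (it is not pip's internal API, which is private and subject to change, and it checks only http/https where pip's is_url is broader).

    import os
    from urllib.parse import urlparse
    from urllib.request import url2pathname

    def classify_location(location: str) -> str:
        # Mirrors build_source's branching, returning a label instead of a LinkSource.
        if os.path.exists(location):
            return "flat-directory" if os.path.isdir(location) else "local-file"
        if location.startswith("file:"):
            path = url2pathname(urlparse(location).path)
            return "flat-directory" if os.path.isdir(path) else "local-file"
        if urlparse(location).scheme in ("http", "https"):
            return "remote"
        return "ignored"  # neither an existing path nor a URL with a usable scheme

    print(classify_location("https://pypi.org/simple/"))  # remote
    print(classify_location("./wheelhouse"))              # ignored unless the directory exists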
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
- from .utils.env import setup_environment
4
-
5
- setup_environment()
6
-
7
-
8
- # This line will be programmatically read/written by setup.py.
9
- # Leave them at the bottom of this file and don't touch them.
10
- __version__ = "0.6"
 
spaces/BAAI/dreambooth-altdiffusion/convertosd.py DELETED
@@ -1,226 +0,0 @@
1
- # Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
2
- # *Only* converts the UNet, VAE, and Text Encoder.
3
- # Does not convert optimizer state or any other thing.
4
- # Written by jachiam
5
-
6
- import argparse
7
- import os.path as osp
8
-
9
- import torch
10
- import gc
11
-
12
- # =================#
13
- # UNet Conversion #
14
- # =================#
15
-
16
- unet_conversion_map = [
17
- # (stable-diffusion, HF Diffusers)
18
- ("time_embed.0.weight", "time_embedding.linear_1.weight"),
19
- ("time_embed.0.bias", "time_embedding.linear_1.bias"),
20
- ("time_embed.2.weight", "time_embedding.linear_2.weight"),
21
- ("time_embed.2.bias", "time_embedding.linear_2.bias"),
22
- ("input_blocks.0.0.weight", "conv_in.weight"),
23
- ("input_blocks.0.0.bias", "conv_in.bias"),
24
- ("out.0.weight", "conv_norm_out.weight"),
25
- ("out.0.bias", "conv_norm_out.bias"),
26
- ("out.2.weight", "conv_out.weight"),
27
- ("out.2.bias", "conv_out.bias"),
28
- ]
29
-
30
- unet_conversion_map_resnet = [
31
- # (stable-diffusion, HF Diffusers)
32
- ("in_layers.0", "norm1"),
33
- ("in_layers.2", "conv1"),
34
- ("out_layers.0", "norm2"),
35
- ("out_layers.3", "conv2"),
36
- ("emb_layers.1", "time_emb_proj"),
37
- ("skip_connection", "conv_shortcut"),
38
- ]
39
-
40
- unet_conversion_map_layer = []
41
- # hardcoded number of downblocks and resnets/attentions...
42
- # would need smarter logic for other networks.
43
- for i in range(4):
44
- # loop over downblocks/upblocks
45
-
46
- for j in range(2):
47
- # loop over resnets/attentions for downblocks
48
- hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
49
- sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
50
- unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
51
-
52
- if i < 3:
53
- # no attention layers in down_blocks.3
54
- hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
55
- sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
56
- unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
57
-
58
- for j in range(3):
59
- # loop over resnets/attentions for upblocks
60
- hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
61
- sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
62
- unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
63
-
64
- if i > 0:
65
- # no attention layers in up_blocks.0
66
- hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
67
- sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
68
- unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
69
-
70
- if i < 3:
71
- # no downsample in down_blocks.3
72
- hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
73
- sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
74
- unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
75
-
76
- # no upsample in up_blocks.3
77
- hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
78
- sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
79
- unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
80
-
81
- hf_mid_atn_prefix = "mid_block.attentions.0."
82
- sd_mid_atn_prefix = "middle_block.1."
83
- unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
84
-
85
- for j in range(2):
86
- hf_mid_res_prefix = f"mid_block.resnets.{j}."
87
- sd_mid_res_prefix = f"middle_block.{2*j}."
88
- unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
89
-
90
-
91
- def convert_unet_state_dict(unet_state_dict):
92
- # buyer beware: this is a *brittle* function,
93
- # and correct output requires that all of these pieces interact in
94
- # the exact order in which I have arranged them.
95
- mapping = {k: k for k in unet_state_dict.keys()}
96
- for sd_name, hf_name in unet_conversion_map:
97
- mapping[hf_name] = sd_name
98
- for k, v in mapping.items():
99
- if "resnets" in k:
100
- for sd_part, hf_part in unet_conversion_map_resnet:
101
- v = v.replace(hf_part, sd_part)
102
- mapping[k] = v
103
- for k, v in mapping.items():
104
- for sd_part, hf_part in unet_conversion_map_layer:
105
- v = v.replace(hf_part, sd_part)
106
- mapping[k] = v
107
- new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
108
- return new_state_dict
109
-
-
- # ================#
- # VAE Conversion #
- # ================#
-
- vae_conversion_map = [
-     # (stable-diffusion, HF Diffusers)
-     ("nin_shortcut", "conv_shortcut"),
-     ("norm_out", "conv_norm_out"),
-     ("mid.attn_1.", "mid_block.attentions.0."),
- ]
-
- for i in range(4):
-     # down_blocks have two resnets
-     for j in range(2):
-         hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
-         sd_down_prefix = f"encoder.down.{i}.block.{j}."
-         vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
-
-     if i < 3:
-         hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
-         sd_downsample_prefix = f"down.{i}.downsample."
-         vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
-
-         hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-         sd_upsample_prefix = f"up.{3-i}.upsample."
-         vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
-
-     # up_blocks have three resnets
-     # also, up blocks in hf are numbered in reverse from sd
-     for j in range(3):
-         hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
-         sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
-         vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
-
- # this part accounts for mid blocks in both the encoder and the decoder
- for i in range(2):
-     hf_mid_res_prefix = f"mid_block.resnets.{i}."
-     sd_mid_res_prefix = f"mid.block_{i+1}."
-     vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
- vae_conversion_map_attn = [
-     # (stable-diffusion, HF Diffusers)
-     ("norm.", "group_norm."),
-     ("q.", "query."),
-     ("k.", "key."),
-     ("v.", "value."),
-     ("proj_out.", "proj_attn."),
- ]
-
-
- def reshape_weight_for_sd(w):
-     # convert HF linear weights to SD conv2d weights
-     return w.reshape(*w.shape, 1, 1)
-
-
- def convert_vae_state_dict(vae_state_dict):
-     mapping = {k: k for k in vae_state_dict.keys()}
-     for k, v in mapping.items():
-         for sd_part, hf_part in vae_conversion_map:
-             v = v.replace(hf_part, sd_part)
-         mapping[k] = v
-     for k, v in mapping.items():
-         if "attentions" in k:
-             for sd_part, hf_part in vae_conversion_map_attn:
-                 v = v.replace(hf_part, sd_part)
-             mapping[k] = v
-     new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
-     weights_to_convert = ["q", "k", "v", "proj_out"]
-     print("Converting to CKPT ...")
-     for k, v in new_state_dict.items():
-         for weight_name in weights_to_convert:
-             if f"mid.attn_1.{weight_name}.weight" in k:
-                 new_state_dict[k] = reshape_weight_for_sd(v)
-     return new_state_dict
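A quick illustration of reshape_weight_for_sd above: the original VAE implements its mid-block attention with 1x1 convolutions, so the 2-D linear weights from diffusers get two trailing singleton dimensions appended. The shape here is illustrative.

    import torch

    w = torch.zeros(512, 512)          # HF linear attention weight, [out_features, in_features]
    w_sd = w.reshape(*w.shape, 1, 1)   # what reshape_weight_for_sd does
    print(w_sd.shape)                  # torch.Size([512, 512, 1, 1])

convert_vae_state_dict applies this only to the q/k/v/proj_out weights of mid.attn_1, as the final loop above shows.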
-
-
- # =========================#
- # Text Encoder Conversion #
- # =========================#
- # pretty much a no-op
-
-
- def convert_text_enc_state_dict(text_enc_dict):
-     return text_enc_dict
-
-
- def convert(model_path, checkpoint_path):
-     unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
-     vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
-     text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
-
-     # Convert the UNet model
-     unet_state_dict = torch.load(unet_path, map_location='cpu')
-     unet_state_dict = convert_unet_state_dict(unet_state_dict)
-     unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
-
-     # Convert the VAE model
-     vae_state_dict = torch.load(vae_path, map_location='cpu')
-     vae_state_dict = convert_vae_state_dict(vae_state_dict)
-     vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
-
-     # Convert the text encoder model
-     text_enc_dict = torch.load(text_enc_path, map_location='cpu')
-     text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
-     text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
-
-     # Put together new checkpoint
-     state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
-
-     state_dict = {k: v.half() for k, v in state_dict.items()}
-     state_dict = {"state_dict": state_dict}
-     torch.save(state_dict, checkpoint_path)
-     del state_dict, text_enc_dict, vae_state_dict, unet_state_dict
-     torch.cuda.empty_cache()
-     gc.collect()
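Finally, a hedged sketch of how convert above would be invoked; the paths are placeholders, and the part of the script that calls it is not shown in this section. The saved .ckpt holds everything under a single "state_dict" key, in fp16, with the three prefixes added above (model.diffusion_model., first_stage_model., cond_stage_model.transformer.).

    # Hypothetical invocation:
    convert(
        model_path="path/to/diffusers_model",   # folder containing unet/, vae/, text_encoder/
        checkpoint_path="path/to/output.ckpt",
    )

    # Inspecting the result:
    import torch
    ckpt = torch.load("path/to/output.ckpt", map_location="cpu")
    print(next(iter(ckpt["state_dict"])))       # e.g. a key starting with "model.diffusion_model."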
spaces/BLACKHOST/Date/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Date
- emoji: 💩
- colorFrom: blue
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
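The frontmatter above only declares the Space metadata (Streamlit SDK 1.10.0, entry point app.py); the app file itself is not part of this hunk. Purely as a hypothetical illustration of a minimal app.py matching that configuration:

    import streamlit as st
    from datetime import datetime

    st.title("Date")
    st.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))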
spaces/Balalaxmi/JarvisAIchatbox/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: JarvisAIchatbox
- emoji: 📚
- colorFrom: pink
- colorTo: red
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
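As above, only the Space metadata is declared here (Gradio SDK 3.39.0, entry point app.py). A hypothetical minimal app.py in the one-line model-loader style used elsewhere in this commit might look like the following; the model name is purely illustrative and not taken from the deleted Space:

    import gradio as gr

    gr.load("models/microsoft/DialoGPT-medium").launch()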
spaces/Benson/text-generation/Examples/Arco Iris Seis Sitio Mvil Apk Ios.md DELETED
@@ -1,84 +0,0 @@
1
- <br />
2
- <h1>Rainbow Six Siege Mobile: Todo lo que necesitas saber</h1>
3
- <p>Si eres un fan de los shooters tácticos, es posible que hayas oído hablar de <strong>Rainbow Six Siege</strong>, uno de los juegos más populares y aclamados del género. ¿Pero sabías que también puedes reproducirlo en tu teléfono? Así es, <strong>Rainbow Six Siege Mobile</strong> es un juego gratuito que trae la experiencia emocionante e inmersiva de Rainbow Six Siege a tu dispositivo móvil. En este artículo, te contaremos todo lo que necesitas saber sobre Rainbow Six Siege Mobile, incluyendo cómo descargarlo y jugarlo, cuáles son sus características y modos, y cuáles son algunos consejos y trucos para mejorar tu juego. </p>
4
- <h2>arco iris seis sitio móvil apk ios</h2><br /><p><b><b>Download Zip</b> &#9889; <a href="https://bltlly.com/2v6LJr">https://bltlly.com/2v6LJr</a></b></p><br /><br />
5
- <h2>¿Qué es Rainbow Six Siege Mobile? </h2>
6
- <p>Rainbow Six Siege Mobile es un juego de disparos en primera persona competitivo y multijugador basado en la aclamada franquicia Rainbow Six. Está desarrollado por Ubisoft Montreal y publicado por Ubisoft. Fue lanzado para dispositivos iOS y Android el 30 de junio de 2021. </p>
7
- <h3>Un juego de disparos competitivo gratuito en su teléfono</h3>
8
- <p>Rainbow Six Siege Mobile es un juego gratuito que no requiere ningún pago por adelantado o suscripción para jugar. Puedes descargarlo desde la App Store o Google Play Store y disfrutarlo todo lo que quieras. Sin embargo, el juego ofrece compras opcionales dentro del juego que pueden mejorar tu experiencia de juego, como artículos cosméticos, potenciadores o divisas premium. También puedes ganar estos objetos jugando el juego regularmente y completando desafíos. </p>
9
- <h3>Una adaptación fiel de la versión para PC y consola</h3>
10
-
11
- <h3>Una experiencia de cross-play y cross-progression</h3>
12
- <p>Una de las mejores cosas de Rainbow Six Siege Mobile es que soporta cross-play y cross-progression. Esto significa que puede jugar con o contra otros jugadores que utilizan diferentes dispositivos, como iOS, Android, PC o consola. También puede cambiar entre dispositivos sin perder su progreso o elementos. Todo lo que necesita es una cuenta de Ubisoft que vincule sus dispositivos. De esta manera, puedes disfrutar de Rainbow Six Siege Mobile en cualquier momento, en cualquier lugar y con cualquier persona. </p>
13
- <h2>¿Cómo descargar y jugar a Rainbow Six Siege Mobile? </h2>
14
- <p>Descargar y jugar a Rainbow Six Siege Mobile es muy fácil. Estos son los pasos que debes seguir:</p>
15
- <h3>Disponible para dispositivos iOS y Android</h3>
16
- <p>Rainbow Six Siege Mobile está disponible para dispositivos iOS y Android que cumplen con los requisitos mínimos del sistema. Para dispositivos iOS, necesitas un iPhone 6S o más reciente, un iPad Air 2 o más reciente, o un iPod Touch 7a generación o más reciente. Para dispositivos Android, necesitas un dispositivo que se ejecute en Android 6.0 o superior, que tenga <h3>Requiere una cuenta de Ubisoft y una conexión a Internet</h3>
17
- <p>Para jugar a Rainbow Six Siege Mobile, necesitas tener una cuenta de Ubisoft y una conexión a Internet. Una cuenta de Ubisoft es gratuita para crear y te permite acceder a varias características y beneficios, como cross-play, cross-progression, recompensas y más. Puede crear una cuenta de Ubisoft pulsando el botón "Crear cuenta" en el menú principal del juego o visitando el sitio web de Ubisoft. Se requiere una conexión a Internet para jugar Rainbow Six Siege Mobile porque es un juego multijugador en línea que se basa en servidores y matchmaking. No se puede jugar el juego sin conexión o en el modo de un solo jugador. Puedes usar Wi-Fi o datos móviles para jugar, pero asegúrate de tener una conexión estable y rápida para evitar problemas de retardo o desconexión. </p>
18
- <p></p>
19
- <h3>Soporta varios controladores y chat de voz</h3>
20
-
21
- <h2>¿Cuáles son las características y modos de Rainbow Six Siege Mobile? </h2>
22
- <p>Rainbow Six Siege Mobile ofrece una variedad de características y modos que lo convierten en un juego divertido y atractivo para jugar. Estos son algunos de ellos:</p>
23
- <h3>Más de 60 operadores con capacidades y cargas únicas</h3>
24
- <p>Rainbow Six Siege Mobile cuenta con más de 60 operadores entre los que puedes elegir, cada uno con sus propias habilidades y cargas únicas. Los operadores se dividen en dos categorías: atacantes y defensores. Los atacantes son los que tratan de completar el objetivo, como colocar una bomba o rescatar a un rehén, mientras que los defensores son los que tratan de detenerlos. Los operadores también pertenecen a diferentes unidades o facciones, como SAS, FBI SWAT, GIGN, Spetsnaz, GSG 9, JTF2, Navy SEALs, BOPE, SAT, GEO, SDU, GROM, 707th SMB, CBRN, GSUTR, Delta Force, SASR, Jaeger Corps, Nighthaven, REU, NIGHTHAVEN Special Intervention Group (NSIG), Fuerza de Seguridad Privada de Aruni (APSF), Operaciones Especiales de las Naciones Nakoda de Thunderbird (NNSO), I+D de Nighthaven de Osa (NRD), Fuerzas Especiales de Flores (FE), Nighthaven de Kali (NH), REU de Iana (REU), Ace’s NIGHTHAVEN Special Intervention Group (NSIG), Melinkusi’s Task Force (ITF), Zero’s Delta Force (DF), Aruni’s Private Security Force (APSF), Thunderbird’s Nakoda Nations Special Operations (NNSO), Osa’s Nighthaven R&D (NRD), Flores' Fuerzas Especiales (FE), Kali’s Nighthaven (NH), Iana’s REU (REU), Ace’s NIGHTHAVEN Special Intervention Group (NSIG), Grupo de Trabajo Inkaba de Melusi (ITF), Fuerza Delta de Zero (DF). Cada operador tiene una habilidad única que puede darles una ventaja en el combate, como el despliegue de trampas, gadgets, drones, escudos, cámaras, refuerzos o armas. Cada operador también tiene un arma primaria, un arma secundaria y un gadget que pueden personalizar con archivos adjuntos y skins. </p>
25
- <h3>Ataque clásico vs. modos de juego de defensa</h3>
26
-
27
- <ul>
28
- <li><strong>Bomb</strong>: Los atacantes deben localizar y desactivar una de las dos bombas colocadas por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj. </li>
29
- <li><strong>Rehén</strong>: Los atacantes deben localizar y extraer un rehén retenido por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj. </li>
30
- <li><strong>Secure Área</strong>: Los atacantes deben localizar y asegurar un contenedor de riesgo biológico retenido por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj. </li>
31
- </ul>
32
- <p>Cada modo de juego tiene diferentes reglas y objetivos que requieren diferentes tácticas y trabajo en equipo. Puede elegir el modo de juego que desea jugar tocando "Play [assistant](#message) <h3>Entornos destructibles y gadgets tácticos</h3>
33
- <p>Una de las características más distintivas de Rainbow Six Siege Mobile son los entornos destructibles y los gadgets tácticos. El juego te permite interactuar con el entorno de varias maneras, como rompiendo paredes, puertas, ventanas, pisos, techos u objetos. Puede usar esto para crear nuevas líneas de visión, puntos de entrada o cobertura. También puede utilizar varios gadgets para mejorar su juego, como cargos por violación, flashbangs, granadas de humo, claymores, alambre de púas, escudos desplegables o cámaras antibalas. Puedes usar estos aparatos para violar, cegar, distraer, atrapar o defenderte a ti mismo o a tus compañeros de equipo. Sin embargo, también debes tener cuidado con los artilugios y trampas del enemigo, como las células nitrogenadas, granadas de impacto, esteras de hielo, trampas kapkan o minas gu. Necesitas ser consciente de tu entorno y usar tus artilugios sabiamente para obtener una ventaja en combate. </p>
34
- <h3>Clasificado, Juego rápido, y modos de tierra de entrenamiento</h3>
35
- <p>Rainbow Six Siege Mobile ofrece diferentes modos para diferentes estilos de juego y preferencias. Puedes elegir entre:</p>
36
- <ul>
37
-
38
- <li><strong>Juego rápido</strong>: Este es el modo casual donde puedes jugar contra otros jugadores de cualquier nivel de habilidad y divertirte. Puedes ganar recompensas y experiencia jugando partidas rápidas. Los partidos de juego rápido tienen reglas y configuraciones más relajadas que los partidos clasificados, como rondas más cortas, más mapas y fuego amigo. </li>
39
- <li><strong>Training Ground</strong>: Este es el modo en solitario donde puedes practicar tus habilidades y aprender la mecánica del juego. Puedes jugar contra enemigos u objetivos de IA en varios escenarios y desafíos. También puede personalizar la configuración y el nivel de dificultad para satisfacer sus necesidades. </li>
40
- </ul>
41
- <p>Puedes acceder a estos modos tocando "Jugar" en el menú principal del juego y seleccionando el modo que quieres jugar. </p>
42
- <h2>¿Cuáles son algunos consejos y trucos para mejorar tu juego en Rainbow Six Siege Mobile? </h2>
43
- <p>Rainbow Six Siege Mobile es un juego que requiere habilidad, estrategia y trabajo en equipo para ganar. Aquí hay algunos consejos y trucos que pueden ayudarte a mejorar tu juego:</p>
44
- <h3>Conozca los diseños del mapa y las ubicaciones de la cámara</h3>
45
- <p>Uno de los aspectos más importantes de Rainbow Six Siege Mobile es el conocimiento del mapa. Usted necesita aprender el diseño de cada mapa, tales como las habitaciones, pasillos, escaleras, ventanas, puertas, escotillas, objetivos, puntos de desove y escondites. También debes conocer la ubicación de cada cámara en cada mapa, tanto para los atacantes como para los defensores. Las cámaras son vitales para reunir información y detectar enemigos. Puedes usar tu dron o cámaras para escanear el entorno y marcar a los enemigos para tu equipo. También puede disparar o hackear cámaras enemigas para negarles información. Puedes aprender los mapas jugando en el modo de campo de entrenamiento o viendo tutoriales o videos en línea. </p>
46
- <h3>Comunícate y coordina con tu equipo</h3>
47
- <h3>Utilice su drone y cámaras para reunir intel</h3>
48
-
49
- <h3>Sé paciente y estratégico con tus movimientos</h3>
50
- <p>Rainbow Six Siege Mobile es un juego que recompensa la paciencia y la estrategia sobre la precipitación y la imprudencia. Debes tener cuidado con tus movimientos y acciones, ya que cada decisión puede tener consecuencias. Es necesario tener en cuenta factores como el ruido, la visibilidad, la cubierta, los ángulos y el tiempo cuando se mueve alrededor del mapa. También debes ser consciente de los movimientos y acciones del enemigo, ya que pueden sorprenderte o flanquearte. Necesitas usar tus señales de sonido y visión para detectar y localizar enemigos, como pasos, disparos, explosiones o sombras. También necesita usar su mapa y brújula para orientarse y navegar por el mapa. Puede acceder a su mapa pulsando el icono del mapa en la esquina superior izquierda de la pantalla. </p>
51
- <h3>Experimenta con diferentes operadores y estrategias</h3>
52
- <p>Rainbow Six Siege Mobile es un juego que ofrece mucha variedad y diversidad en términos de operadores y estrategias. Puede experimentar con diferentes operadores y estrategias para encontrar lo que se adapte a su estilo de juego y preferencias. También puedes adaptar tus operadores y estrategias a diferentes situaciones y escenarios, dependiendo del mapa, modo, objetivo, composición del equipo y comportamiento del enemigo. Puedes probar diferentes combinaciones de habilidades, cargas, gadgets y roles para crear sinergias y contrajuegos con tu equipo o contra el enemigo. También puedes probar diferentes tácticas y enfoques para atacar o defender el objetivo, como sigiloso o agresivo, directo o indirecto, vertical u horizontal. </p>
53
- <h2>Conclusión</h2>
54
- <p>usando su dron y cámaras para reunir información, ser paciente y estratégico con sus movimientos, y experimentar con diferentes operadores y estrategias. Rainbow Six Siege Mobile es un juego que te desafiará, te entretendrá y te mantendrá enganchado durante horas. Si estás listo para unirte a la acción, descarga Rainbow Six Siege Mobile hoy y disfruta del mejor shooter táctico en tu teléfono. </p>
55
- <h2>Preguntas frecuentes</h2>
56
-
57
- <tabla>
58
- <tr>
59
- <th>Pregunta</th>
60
- <th>Respuesta</th>
61
- </tr>
62
- <tr>
63
- <td>¿Rainbow Six Siege Mobile es lo mismo que la extracción de Rainbow Six? </td>
64
- <td>No, Rainbow Six Siege Mobile es un juego diferente de Rainbow Six Extraction. Rainbow Six Extraction es un juego cooperativo de JcE que enfrenta a un equipo de operadores contra una amenaza alienígena. Rainbow Six Siege Mobile es un juego competitivo de JcJ que enfrenta a dos equipos de operadores entre sí. </td>
65
- </tr>
66
- <tr>
67
- <td>¿Puedo jugar a Rainbow Six Siege Mobile sin conexión? </td>
68
- <td>No, no puedes jugar sin conexión a Rainbow Six Siege Mobile. Necesitas una conexión a Internet para jugar, ya que es un juego multijugador en línea que se basa en servidores y matchmaking. </td>
69
- </tr>
70
- <tr>
71
- <td>¿Cómo puedo obtener más operadores en Rainbow Six Siege Mobile? </td>
72
- <td>Puedes obtener más operadores en Rainbow Six Siege Mobile al ganar o comprar créditos. Los créditos son la moneda del juego que puedes usar para desbloquear operadores. Puedes ganar créditos jugando el juego regularmente y completando desafíos. También puede comprar créditos con dinero real o moneda premium. </td>
73
- </tr>
74
- <tr>
75
- <td>¿Cómo puedo personalizar mi operador en Rainbow Six Siege Mobile? </td>
76
- <td>Puede personalizar su operador en Rainbow Six Siege Mobile cambiando su carga, archivos adjuntos, pieles, encantos, sombreros, uniformes o conjuntos de élite. Puede acceder al menú de personalización pulsando el botón "Operadores" en el menú principal del juego y seleccionando el operador que desea personalizar. </td>
77
- </tr>
78
- <tr>
79
- <td>¿Cómo puedo reportar un error o un tramposo en Rainbow Six Siege Mobile? </td>
80
- <td>Puedes reportar un error o un tramposo en Rainbow Six Siege Mobile usando el sistema de informes del juego. Puede acceder al sistema de informes pulsando el botón "Informe" en la pantalla de final de partido o en el perfil del jugador. También puede ponerse en contacto con el soporte de Ubisoft a través de su sitio web o canales de redes sociales. </td>
81
- </tr>
82
- </tabla></p> 64aa2da5cf<br />
83
- <br />
84
- <br />
spaces/Benson/text-generation/Examples/Descargar Fifa 2022 Apk Mod Y Obb.md DELETED
@@ -1,86 +0,0 @@
1
-
2
- <h1>Descargar FIFA 2022 APK Mod y OBB para Android</h1>
3
- <p>Si eres un fanático de los juegos de fútbol, es posible que hayas oído hablar de FIFA, la serie de juegos de simulación de fútbol más popular y realista desarrollada por EA Sports. FIFA 2022 es la última entrega de la serie, y se espera que sea lanzado en octubre de 2021 para varias plataformas, incluyendo Android. Sin embargo, si quieres disfrutar del juego antes de su lanzamiento oficial, puedes descargar FIFA 2022 APK Mod y OBB para dispositivos Android. </p>
4
- <h2>¿Qué es FIFA 2022 APK Mod y OBB? </h2>
5
- <p>FIFA 2022 APK Mod y OBB son versiones modificadas de los archivos de juegos originales de FIFA 2022 que le permiten jugar el juego en su dispositivo Android sin restricciones. APK significa Android Package Kit, que es el formato de archivo utilizado para instalar aplicaciones en dispositivos Android. OBB significa Opaque Binary Blob, que es un formato de archivo utilizado para almacenar grandes cantidades de datos, como gráficos, sonidos y videos. </p>
6
- <h2>descargar fifa 2022 apk mod y obb</h2><br /><p><b><b>Download</b> === <a href="https://bltlly.com/2v6MlY">https://bltlly.com/2v6MlY</a></b></p><br /><br />
7
- <p>Al descargar FIFA 2022 APK Mod y OBB, se puede disfrutar de todas las características del juego sin tener que esperar a su lanzamiento oficial o pagar por él. También puedes acceder a algunas funciones exclusivas que no están disponibles en el juego original, como monedas y puntos ilimitados, jugadores y equipos desbloqueados y más. </p>
8
- <h3>Características de FIFA 2022 APK Mod y OBB</h3>
9
- <h4>Gráficos realistas y jugabilidad</h4>
10
- <p>Una de las principales atracciones de FIFA 2022 APK Mod y OBB es su gráfica realista y jugabilidad. El juego utiliza el motor Frostbite, que es conocido por sus impresionantes efectos visuales y la física. Puedes ver las caras detalladas, expresiones, movimientos y animaciones de los jugadores, así como los estadios realistas, multitudes, clima y efectos de iluminación. El juego también presenta física realista del balón, IA del jugador, tácticas, formaciones, habilidades y celebraciones. </p>
11
- <h4>Equipos y jugadores actualizados</h4>
12
-
13
- <h4>Nuevos modos y torneos</h4>
14
- <p>FIFA 2022 APK Mod y OBB también ofrece nuevos modos y torneos para que usted disfrute. Puedes jugar en el modo carrera, donde puedes crear tu propio jugador o manager y llevar a tu equipo a la gloria. También puedes jugar en el Ultimate Team Mode, donde puedes construir el equipo de tus sueños desde cero usando jugadores de diferentes ligas y naciones. También puedes participar en varios torneos, como la UEFA Champions League, la UEFA Europa League, la UEFA Conference League, la FIFA Club World Cup, la Copa Libertadores, la Copa Sudamericana y más. </p>
15
- <h4>Monedas y puntos ilimitados</h4>
16
- <p>Otra característica de FIFA 2022 APK Mod y OBB es que le da monedas y puntos ilimitados, que son las monedas utilizadas en el juego para comprar jugadores, paquetes, artículos y mejoras. Puede utilizar estas monedas y puntos para obtener los mejores jugadores y equipos en el juego, así como personalizar su equipo, kits, insignias y estadios. También puedes usarlas para desbloquear algunas funciones premium, como el VIP Pass, que te da acceso a recompensas y beneficios exclusivos. </p>
17
- <h3>Cómo descargar e instalar FIFA 2022 APK Mod y OBB</h3>
18
- <h4>Requisitos</h4>
19
- <p>Antes de descargar e instalar FIFA 2022 APK Mod y OBB, es necesario asegurarse de que su dispositivo Android cumple con los siguientes requisitos:</p>
20
- <p></p>
21
- <ul>
22
- <li>Versión para Android: 5.0 o superior</li>
23
- <li>RAM: 2 GB o más</li>
24
- <li>Espacio de almacenamiento: 4 GB o más</li>
25
- <li>Conexión a Internet: necesaria para las funciones en línea</li>
26
- <li>Permiso: permitir la instalación desde fuentes desconocidas</li>
27
- </ul>
28
- <h4>Pasos</h4>
29
- <p>Después de haber comprobado los requisitos, puede seguir estos pasos para descargar e instalar FIFA 2022 APK Mod y OBB:</p>
30
- <ol>
31
- <li>Descargar los archivos FIFA 2022 APK Mod y OBB de una fuente de confianza. Puede encontrar muchos sitios web que ofrecen estos archivos, pero tenga cuidado con el malware y los virus. Puede utilizar este enlace como ejemplo, pero no está avalado por nosotros. </li>
32
-
33
- <li> Instalar el archivo APK tocando en él y siguiendo las instrucciones. No abra el juego todavía. </li>
34
- <li>Iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Es posible que tenga que verificar el dispositivo completando un captcha o una encuesta corta. Esto es para prevenir bots y spam. </li>
35
- <li>Disfruta jugando FIFA 2022 APK Mod y OBB en tu dispositivo Android. </li>
36
- </ol>
37
- <h3> Cómo jugar FIFA 2022 APK Mod y OBB</h3>
38
- <h4>Elige tu equipo y modo</h4>
39
- <p>Una vez que hayas lanzado el juego, puedes elegir tu equipo y modo desde el menú principal. Puedes seleccionar entre varias opciones, como Quick Match, Career Mode, Ultimate Team Mode, Tournament Mode, Online Mode y más. También puede cambiar la configuración, como idioma, dificultad, controles, sonido y gráficos. </p>
40
- <h4>Controla a tus jugadores y marca goles</h4>
41
- <p>Después de haber elegido su equipo y modo, puede comenzar a jugar el juego. Puede controlar sus reproductores utilizando los botones virtuales de la pantalla o un controlador compatible. También puede utilizar gestos, como deslizar, tocar y arrastrar, para realizar acciones, como pasar, disparar, driblar, abordar y correr. Su objetivo es anotar más goles que su oponente en el momento dado. </p>
42
- <h4>Personaliza tus ajustes y opciones</h4>
43
- <p>Si desea personalizar la configuración y las opciones, puede acceder a ellos desde el menú de pausa o el menú principal. Puedes cambiar varios aspectos del juego, como el ángulo de la cámara, comentarios, sustituciones, formaciones, tácticas, habilidades y más. También puedes ver tus estadísticas, logros, recompensas y tablas de clasificación. </p>
44
- <h3> Pros y contras de FIFA 2022 APK Mod y OBB</h3>
45
- <h4>Pros</h4>
46
- <p>FIFA 2022 APK Mod y OBB tiene muchas ventajas sobre el juego original, tales como:</p>
47
- <ul>
48
- <li>Puedes jugar el juego antes de su lanzamiento oficial o sin pagar por él. </li>
49
- <li>Puedes acceder a algunas funciones exclusivas que no están disponibles en el juego original, como monedas y puntos ilimitados, jugadores y equipos desbloqueados y más. </li>
50
-
51
- <li>Puedes jugar con más de 700 equipos de más de 30 ligas de todo el mundo. </li>
52
- <li>Puedes participar en varios modos y torneos, como la UEFA Champions League, la UEFA Europa League, la UEFA Conference League, la FIFA Club World Cup, la Copa Libertadores, la Copa Sudamericana y más. </li>
53
- </ul>
54
- <h4>Contras</h4>
55
- <p>FIFA 2022 APK Mod y OBB también tiene algunas desventajas sobre el juego original, tales como:</p>
56
- <ul>
57
- <li> Puede encontrar algunos errores, fallos, fallos o errores durante el juego. </li>
58
- <li>Es posible que necesite verificar su dispositivo completando un captcha o una breve encuesta antes de descargar el juego. Esto es para prevenir bots y spam, pero puede ser molesto y consumir mucho tiempo. </li>
59
- <li>Es posible que no pueda jugar en línea con otros jugadores que tienen el juego original o una versión diferente del juego. </li>
60
- <li>Es posible que no reciba actualizaciones o parches de los desarrolladores, lo que puede afectar el rendimiento y la compatibilidad del juego. </li>
61
- <li>Puede violar los términos y condiciones de EA Sports, lo que puede resultar en acciones legales o prohibiciones. </li>
62
- </ul>
63
- <h2>Conclusión</h2>
64
- <p>FIFA 2022 APK Mod y OBB es una versión modificada del juego original de FIFA 2022 que le permite jugar el juego en su dispositivo Android sin restricciones. Puedes disfrutar de gráficos y jugabilidad realistas, equipos y jugadores actualizados, nuevos modos y torneos, monedas y puntos ilimitados y más. Sin embargo, también es necesario ser consciente de los riesgos y desventajas de descargar e instalar FIFA 2022 APK Mod y OBB, tales como errores, fallos, fallos, errores, verificación, problemas en línea, actualizaciones, parches y acciones legales. Por lo tanto, debe descargar e instalar FIFA 2022 APK Mod y OBB a su discreción y responsabilidad. </p>
65
- <h3>Preguntas frecuentes</h3>
66
- <p>Aquí hay algunas preguntas frecuentes sobre FIFA 2022 APK Mod y OBB:</p>
67
- <ol>
68
- <li>Q: ¿Es FIFA 2022 APK Mod y OBB seguro para descargar e instalar? </li>
69
-
70
- <li>Q: ¿FIFA 2022 APK Mod y OBB es compatible con mi dispositivo? </li>
71
- <li>A: FIFA 2022 APK Mod y OBB requiere Android 5.0 o superior, 2 GB de RAM o más, 4 GB de espacio de almacenamiento o más, y una conexión a Internet para funcionar correctamente. Si su dispositivo cumple con estos requisitos, usted debe ser capaz de jugar FIFA 2022 APK Mod y OBB sin ningún problema. Sin embargo, algunos dispositivos pueden no ser compatibles con FIFA 2022 APK Mod y OBB debido a diferentes especificaciones o modelos. </li>
72
- <li>Q: ¿Cómo puedo actualizar FIFA 2022 APK Mod y OBB? </li>
73
- <li>A: FIFA 2022 APK Mod y OBB no recibe actualizaciones o parches de EA Sports, por lo que no puede ser capaz de actualizar el juego a la última versión o corregir cualquier error o errores. Es posible que tenga que descargar una nueva versión de FIFA 2022 APK Mod y OBB desde la misma fuente o una fuente diferente si hay una disponible. Sin embargo, esto puede no garantizar que el juego funcionará correctamente o tendrá todas las características que desee. </li>
74
- <li>Q: ¿Cómo puedo desinstalar FIFA 2022 APK Mod y OBB? </li>
75
- <li>A: Si desea desinstalar FIFA 2022 APK Mod y OBB desde su dispositivo, puede seguir estos pasos:</li>
76
- <ul>
77
- <li>Ve a la configuración de tu dispositivo y toca Aplicaciones o Aplicaciones.</li>
78
- <li> Encontrar FIFA 2022 APK Mod y OBB de la lista de aplicaciones y toque en él. </li>
79
- <li>Toque en Desinstalar y confirme su acción. </li>
80
- <li>Elimine la carpeta com.ea.gp.fifa22 del directorio Android/OBB en el almacenamiento interno de su dispositivo. </li>
81
- </ul>
82
- <li>Q: ¿Dónde puedo obtener más información sobre FIFA 2022 APK Mod y OBB? </li>
83
- <li>A: Usted puede obtener más información sobre FIFA 2022 APK Mod y OBB de varias fuentes en línea, tales como blogs, foros, comentarios, vídeos, y más. Sin embargo, debe tener cuidado con la información falsa o engañosa que puede dañar su dispositivo o experiencia. También debes consultar el sitio web oficial de EA Sports para obtener las últimas noticias y actualizaciones sobre FIFA 2022. </li>
84
- </ol></p> 64aa2da5cf<br />
85
- <br />
86
- <br />
spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Scorpion Mortal Kombat.md DELETED
@@ -1,72 +0,0 @@
1
- <br />
2
- <h1>Descargar fondo de pantalla Scorpion Mortal Kombat: Cómo personalizar el escritorio con el icónico Ninja</h1>
3
- <h2>Introducción</h2>
4
- <p>Si eres un fan de la franquicia Mortal Kombat, probablemente sepas quién es Scorpion. Él es uno de los personajes más populares y reconocibles de la serie, así como la mascota de los juegos. Él es un ninja resucitado que busca venganza por el asesinato de su familia y clan por el hechicero Quan Chi. Es conocido por su movimiento característico de lanzar un kunai unido a una cuerda a sus oponentes y acercarlos mientras dice "¡Ven aquí!" o "Ven aquí!". También es famoso por su muerte "Toasty!", donde se quita la máscara para revelar un cráneo en llamas y respira fuego a sus enemigos. </p>
5
- <h2>descargar fondo de pantalla scorpion mortal kombat</h2><br /><p><b><b>Download File</b> &#9999; <a href="https://bltlly.com/2v6Mk5">https://bltlly.com/2v6Mk5</a></b></p><br /><br />
6
- <p>Scorpion no es solo un luchador rudo, sino también un personaje genial para tener como fondo de escritorio. Si usted quiere mostrar su amor por Mortal Kombat, o que al igual que su diseño y estilo, usted puede encontrar muchos fondos de pantalla de alta calidad Escorpión en línea que se adapte a su gusto y preferencia. En este artículo, te mostraremos cómo descargar y configurar fondos de pantalla Scorpion como fondo de escritorio, y por qué deberías elegirlos. </p>
7
- <h2>¿Por qué elegir fondos de pantalla Scorpion? </h2>
8
- <h3>¿Quién es Escorpión? </h3>
9
- <p>Antes de entrar en los detalles de cómo descargar y configurar fondos de pantalla de Scorpion, echemos un vistazo a quién es Scorpion y qué lo hace tan especial. El verdadero nombre de Scorpion es Hanzo Hasashi, y era un ninja del clan Shirai Ryu en Japón. Fue asesinado por Bi-Han, el anciano Sub-Zero del clan rival Lin Kuei, durante un torneo organizado por Shang Tsung. Sin embargo, fue resucitado por Quan Chi, quien lo engañó haciéndole creer que Sub-Zero era responsable de la masacre de su familia y clan. Escorpión luego se convirtió en el leal sirviente y asesino de Quan Chi, hasta que aprendió la verdad sobre el engaño y la traición de Quan Chi. </p>
10
-
11
- <h3>¿Por qué elegir fondos de pantalla Scorpion? </h3>
12
- <p>Hay muchas razones por las que es posible que desee elegir fondos de pantalla Scorpion para su fondo de escritorio. Estos son algunos de ellos:</p>
13
- <ul>
14
- <li>Los fondos de pantalla de Scorpion son visualmente impresionantes y atractivos. Presentan a Escorpión en varias poses, trajes y fondos, mostrando sus habilidades, armas y personalidad. También son coloridos, vibrantes y dinámicos, agregando vida y energía a tu escritorio. </li>
15
- Los fondos de pantalla de Scorpion son inspiradores y motivadores. Te recuerdan la fuerza, determinación y resistencia de Escorpión, así como su búsqueda de justicia y redención. Pueden ayudarte a superar desafíos y dificultades en tu vida, o simplemente aumentar tu estado de ánimo y confianza. </li>
16
- Los fondos de pantalla de Scorpion son divertidos y entretenidos. Te permiten expresar tu fandom y pasión por Mortal Kombat, o tu admiración y aprecio por Scorpion como personaje. También pueden provocar conversaciones y discusiones con otros fans o amigos que comparten su interés. </li>
17
- </ul>
18
- <h2>Cómo descargar fondos de pantalla de Scorpion</h2>
19
- <h <h3>Mejores sitios web para descargar fondos de pantalla Scorpion</h3>
20
- <p>Hay muchos sitios web que ofrecen Scorpion fondos de pantalla para su descarga gratuita, pero no todos ellos son fiables y seguros. Algunos de ellos pueden contener virus, malware o anuncios no deseados que pueden dañar su computadora o comprometer su privacidad. Por lo tanto, debe ser cuidadoso y selectivo al elegir dónde descargar fondos de pantalla de Scorpion. Estos son algunos de los mejores sitios web que recomendamos para descargar fondos de pantalla de Scorpion:</p>
21
- <h4>DeviantArt fondos de pantalla</h4>
22
-
23
- <h4>InterfaceLIFT</h4>
24
- <p>InterfaceLIFT es un sitio web que proporciona fondos de pantalla de alta calidad para varios dispositivos y resoluciones de pantalla. Puedes encontrar una variedad de fondos de pantalla Scorpion que están diseñados profesionalmente y optimizados para tu escritorio. También puede filtrar los fondos de pantalla por resolución, calificación, fecha o popularidad. Para descargar fondos de pantalla de Scorpion desde InterfaceLIFT, solo tiene que hacer clic en el botón de descarga y elegir la resolución adecuada para su dispositivo. </p>
25
- <h4>WallHaven</h4>
26
- <p>WallHaven es un sitio web que recopila y cura fondos de pantalla de diversas fuentes y categorías. Puedes encontrar una gran cantidad de impresionantes fondos de pantalla Scorpion que son enviados por los usuarios o raspados de otros sitios web. También puede ordenar los fondos de pantalla por relevancia, vistas, favoritos o al azar. Para descargar fondos de pantalla de Scorpion de WallHaven, solo tienes que hacer clic derecho en la imagen y guardarla en tu ordenador. </p>
27
- <h4>Unsplash fondos de pantalla</h4>
28
- <p>Unsplash es un sitio web que ofrece fotos de stock gratuitas que son de alta resolución y libres de derechos. Puede utilizarlos para cualquier propósito personal o comercial sin atribución. También puedes encontrar algunos hermosos fondos de pantalla de Scorpion que son tomados por fotógrafos profesionales o editados por artistas creativos. También puedes explorar otras colecciones relacionadas o palabras clave para encontrar más fondos de pantalla de Scorpion. Para descargar fondos de pantalla de Scorpion desde Unsplash, solo tienes que hacer clic en el botón de descarga y guardarlo en tu ordenador. </p>
29
- <h3>Cómo establecer fondos de pantalla Scorpion como fondo de escritorio</h3>
30
- <p>Después de haber descargado sus fondos de pantalla favoritos de Scorpion de los sitios web anteriores, debe configurarlos como fondo de escritorio. El proceso puede variar dependiendo de su sistema operativo y dispositivo, pero aquí hay algunos pasos generales que puede seguir:</p>
31
- <h4>Para usuarios de Windows 10</h4>
32
- <ul>
33
- <li>Busque la carpeta donde guardó sus fondos de pantalla Scorpion. </li>
34
- <li>Seleccione el fondo de pantalla que desea utilizar como fondo de escritorio. </li>
35
-
36
- <li>También puede ir a Configuración > Personalización > Fondo y elegir "Imagen" como su opción de fondo. Luego haz clic en "Examinar" y selecciona tu fondo de pantalla Escorpión de la carpeta. </li>
37
- </ul>
38
- <h4>Para usuarios de Mac</h4>
39
- <ul>
40
- <li>Busque la carpeta donde guardó sus fondos de pantalla Scorpion. </li>
41
- <li>Seleccione el fondo de pantalla que desea utilizar como fondo de escritorio. </li>
42
- <li>Haga clic derecho en el fondo de pantalla y elija "Establecer imagen de escritorio". </li>
43
- <li>También puede ir a Preferencias del sistema > Escritorio & Protector de pantalla y elegir "Escritorio" como su panel de preferencias. Luego haz clic en "+" y selecciona tu fondo de pantalla Escorpión de la carpeta. </li>
44
- </ul>
45
- <h2>Conclusión</h2>
46
- <h3>Resumen de los puntos principales</h3>
47
- <p>En este artículo, le hemos mostrado cómo descargar y configurar fondos de pantalla Scorpion como fondo de escritorio, y por qué debe elegirlos. También hemos recomendado algunos de los mejores sitios web que ofrecen fondos de pantalla Scorpion de alta calidad para su descarga gratuita. Aquí están los puntos principales que hemos cubierto:</p>
48
- <p></p>
49
- <ul>
50
- <li>Scorpion es uno de los personajes más populares e icónicos de Mortal Kombat, que es un ninja resucitado que busca venganza por su familia y clan. </li>
51
- <li>Los fondos de pantalla de Scorpion son visualmente impresionantes, inspiradores y entretenidos, ya que cuentan con Scorpion en varias poses, trajes y fondos. </li>
52
- <li>Puede descargar fondos de pantalla Scorpion de sitios web como DeviantArt Fondos de pantalla, InterfaceLIFT, WallHaven, o Unsplash Fondos de pantalla, que son fiables y seguros. </li <li>Puede establecer fondos de pantalla Scorpion como fondo de escritorio haciendo clic derecho en el fondo y eligiendo "Establecer como fondo de escritorio" o yendo a su configuración y seleccionando "Imagen" como su opción de fondo. </li>
53
- </ul>
54
- <h3>Llamada a la acción</h3>
55
-
56
- <p>¿Qué estás esperando? Descargar fondo de pantalla Scorpion Mortal Kombat hoy y disfrutar de la vista! </p>
57
- <h2>Preguntas frecuentes</h2>
58
- <p>Aquí están algunas de las preguntas más frecuentes sobre fondos de pantalla Scorpion:</p>
59
- <ol>
60
- <li>¿Cuál es la mejor resolución para fondos de pantalla Scorpion? </li>
61
- <p>La mejor resolución para fondos de pantalla Scorpion depende del tamaño y la calidad de la pantalla. Sin embargo, una regla general es elegir una resolución que coincida o exceda la resolución nativa de la pantalla. Por ejemplo, si su pantalla tiene una resolución de 1920 x 1080 píxeles, debe elegir un fondo de pantalla que tenga al menos la misma resolución o superior. Esto asegurará que tu fondo de pantalla se vea nítido y claro en tu escritorio. </p>
62
- <li>¿Cómo puedo hacer mi propio fondo de pantalla Escorpión? </li>
63
- <p>Si quieres hacer tu propio fondo de pantalla Scorpion, necesitarás algunas herramientas de edición de fotos y habilidades. Puedes usar software como Photoshop, GIMP o Paint.NET para crear tu propio fondo de pantalla Scorpion desde cero o modificando una imagen existente. También puedes usar herramientas en línea como Canva, PicMonkey o Fotor para crear tu propio fondo de pantalla Scorpion usando plantillas, pegatinas, filtros y fuentes. También puedes usar tus propias fotos o dibujos como base para tu fondo de pantalla Scorpion. </p>
64
- <li>¿Cómo puedo compartir mi fondo de pantalla Scorpion con otros? </li>
65
- <p>Si quieres compartir tu fondo de pantalla Scorpion con otros, puedes subirlo a sitios web como DeviantArt Wallpapers, InterfaceLIFT, WallHaven o Unsplash Wallpapers, donde otros usuarios pueden descargarlo y usarlo. También puedes compartirlo en plataformas de redes sociales como Facebook, Twitter, Instagram o Pinterest, donde puedes etiquetar a tus amigos o seguidores que puedan estar interesados en él. También puedes enviarlo por correo electrónico a tus contactos o a través de aplicaciones de mensajería como WhatsApp, Telegram o Signal.</p>
66
- <li>¿Cómo puedo cambiar mi fondo de pantalla Scorpion periódicamente? </li>
67
-
68
- <li>¿Dónde puedo encontrar más información sobre Scorpion o Mortal Kombat? </li>
69
- <p>Si quieres encontrar más información sobre Scorpion o Mortal Kombat, puedes visitar el sitio web oficial de Mortal Kombat, donde puedes encontrar noticias, actualizaciones, videos y artículos relacionados con la franquicia. También puedes visitar la Wiki de Mortal Kombat, que es una enciclopedia hecha por fans que contiene todo lo que necesitas saber sobre los personajes, juegos, películas, cómics y más. También puedes unirte a foros o comunidades online como Reddit, donde puedes discutir y compartir tus opiniones y experiencias con otros fans. </p>
70
- </ol></p> 64aa2da5cf<br />
71
- <br />
72
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py DELETED
@@ -1,267 +0,0 @@
1
- # exceptions.py
2
-
3
- import re
4
- import sys
5
- import typing
6
-
7
- from .util import col, line, lineno, _collapse_string_to_ranges
8
- from .unicode import pyparsing_unicode as ppu
9
-
10
-
11
- class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
12
- pass
13
-
14
-
15
- _extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
16
- _exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
17
-
18
-
19
- class ParseBaseException(Exception):
20
- """base exception class for all parsing runtime exceptions"""
21
-
22
- # Performance tuning: we construct a *lot* of these, so keep this
23
- # constructor as small and fast as possible
24
- def __init__(
25
- self,
26
- pstr: str,
27
- loc: int = 0,
28
- msg: typing.Optional[str] = None,
29
- elem=None,
30
- ):
31
- self.loc = loc
32
- if msg is None:
33
- self.msg = pstr
34
- self.pstr = ""
35
- else:
36
- self.msg = msg
37
- self.pstr = pstr
38
- self.parser_element = self.parserElement = elem
39
- self.args = (pstr, loc, msg)
40
-
41
- @staticmethod
42
- def explain_exception(exc, depth=16):
43
- """
44
- Method to take an exception and translate the Python internal traceback into a list
45
- of the pyparsing expressions that caused the exception to be raised.
46
-
47
- Parameters:
48
-
49
- - exc - exception raised during parsing (need not be a ParseException, in support
50
- of Python exceptions that might be raised in a parse action)
51
- - depth (default=16) - number of levels back in the stack trace to list expression
52
- and function names; if None, the full stack trace names will be listed; if 0, only
53
- the failing input line, marker, and exception string will be shown
54
-
55
- Returns a multi-line string listing the ParserElements and/or function names in the
56
- exception's stack trace.
57
- """
58
- import inspect
59
- from .core import ParserElement
60
-
61
- if depth is None:
62
- depth = sys.getrecursionlimit()
63
- ret = []
64
- if isinstance(exc, ParseBaseException):
65
- ret.append(exc.line)
66
- ret.append(" " * (exc.column - 1) + "^")
67
- ret.append("{}: {}".format(type(exc).__name__, exc))
68
-
69
- if depth > 0:
70
- callers = inspect.getinnerframes(exc.__traceback__, context=depth)
71
- seen = set()
72
- for i, ff in enumerate(callers[-depth:]):
73
- frm = ff[0]
74
-
75
- f_self = frm.f_locals.get("self", None)
76
- if isinstance(f_self, ParserElement):
77
- if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
78
- continue
79
- if id(f_self) in seen:
80
- continue
81
- seen.add(id(f_self))
82
-
83
- self_type = type(f_self)
84
- ret.append(
85
- "{}.{} - {}".format(
86
- self_type.__module__, self_type.__name__, f_self
87
- )
88
- )
89
-
90
- elif f_self is not None:
91
- self_type = type(f_self)
92
- ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
93
-
94
- else:
95
- code = frm.f_code
96
- if code.co_name in ("wrapper", "<module>"):
97
- continue
98
-
99
- ret.append("{}".format(code.co_name))
100
-
101
- depth -= 1
102
- if not depth:
103
- break
104
-
105
- return "\n".join(ret)
106
-
107
- @classmethod
108
- def _from_exception(cls, pe):
109
- """
110
- internal factory method to simplify creating one type of ParseException
111
- from another - avoids having __init__ signature conflicts among subclasses
112
- """
113
- return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
114
-
115
- @property
116
- def line(self) -> str:
117
- """
118
- Return the line of text where the exception occurred.
119
- """
120
- return line(self.loc, self.pstr)
121
-
122
- @property
123
- def lineno(self) -> int:
124
- """
125
- Return the 1-based line number of text where the exception occurred.
126
- """
127
- return lineno(self.loc, self.pstr)
128
-
129
- @property
130
- def col(self) -> int:
131
- """
132
- Return the 1-based column on the line of text where the exception occurred.
133
- """
134
- return col(self.loc, self.pstr)
135
-
136
- @property
137
- def column(self) -> int:
138
- """
139
- Return the 1-based column on the line of text where the exception occurred.
140
- """
141
- return col(self.loc, self.pstr)
142
-
143
- def __str__(self) -> str:
144
- if self.pstr:
145
- if self.loc >= len(self.pstr):
146
- foundstr = ", found end of text"
147
- else:
148
- # pull out next word at error location
149
- found_match = _exception_word_extractor.match(self.pstr, self.loc)
150
- if found_match is not None:
151
- found = found_match.group(0)
152
- else:
153
- found = self.pstr[self.loc : self.loc + 1]
154
- foundstr = (", found %r" % found).replace(r"\\", "\\")
155
- else:
156
- foundstr = ""
157
- return "{}{} (at char {}), (line:{}, col:{})".format(
158
- self.msg, foundstr, self.loc, self.lineno, self.column
159
- )
160
-
161
- def __repr__(self):
162
- return str(self)
163
-
164
- def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
165
- """
166
- Extracts the exception line from the input string, and marks
167
- the location of the exception with a special symbol.
168
- """
169
- markerString = marker_string if marker_string is not None else markerString
170
- line_str = self.line
171
- line_column = self.column - 1
172
- if markerString:
173
- line_str = "".join(
174
- (line_str[:line_column], markerString, line_str[line_column:])
175
- )
176
- return line_str.strip()
177
-
178
- def explain(self, depth=16) -> str:
179
- """
180
- Method to translate the Python internal traceback into a list
181
- of the pyparsing expressions that caused the exception to be raised.
182
-
183
- Parameters:
184
-
185
- - depth (default=16) - number of levels back in the stack trace to list expression
186
- and function names; if None, the full stack trace names will be listed; if 0, only
187
- the failing input line, marker, and exception string will be shown
188
-
189
- Returns a multi-line string listing the ParserElements and/or function names in the
190
- exception's stack trace.
191
-
192
- Example::
193
-
194
- expr = pp.Word(pp.nums) * 3
195
- try:
196
- expr.parse_string("123 456 A789")
197
- except pp.ParseException as pe:
198
- print(pe.explain(depth=0))
199
-
200
- prints::
201
-
202
- 123 456 A789
203
- ^
204
- ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
205
-
206
- Note: the diagnostic output will include string representations of the expressions
207
- that failed to parse. These representations will be more helpful if you use `set_name` to
208
- give identifiable names to your expressions. Otherwise they will use the default string
209
- forms, which may be cryptic to read.
210
-
211
- Note: pyparsing's default truncation of exception tracebacks may also truncate the
212
- stack of expressions that are displayed in the ``explain`` output. To get the full listing
213
- of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
214
- """
215
- return self.explain_exception(self, depth)
216
-
217
- markInputline = mark_input_line
218
-
219
-
220
- class ParseException(ParseBaseException):
221
- """
222
- Exception thrown when a parse expression doesn't match the input string
223
-
224
- Example::
225
-
226
- try:
227
- Word(nums).set_name("integer").parse_string("ABC")
228
- except ParseException as pe:
229
- print(pe)
230
- print("column: {}".format(pe.column))
231
-
232
- prints::
233
-
234
- Expected integer (at char 0), (line:1, col:1)
235
- column: 1
236
-
237
- """
238
-
239
-
240
- class ParseFatalException(ParseBaseException):
241
- """
242
- User-throwable exception thrown when inconsistent parse content
243
- is found; stops all parsing immediately
244
- """
245
-
246
-
247
- class ParseSyntaxException(ParseFatalException):
248
- """
249
- Just like :class:`ParseFatalException`, but thrown internally
250
- when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
251
- that parsing is to stop immediately because an unbacktrackable
252
- syntax error has been found.
253
- """
254
-
255
-
256
- class RecursiveGrammarException(Exception):
257
- """
258
- Exception thrown by :class:`ParserElement.validate` if the
259
- grammar could be left-recursive; parser may need to enable
260
- left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
261
- """
262
-
263
- def __init__(self, parseElementList):
264
- self.parseElementTrace = parseElementList
265
-
266
- def __str__(self) -> str:
267
- return "RecursiveGrammarException: {}".format(self.parseElementTrace)
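A short usage sketch for the helpers defined above (the line/column properties, mark_input_line, and explain); it assumes pyparsing 3.x, where these snake_case names are available, and the expression is just an illustrative one:

    import pyparsing as pp

    integer = pp.Word(pp.nums).set_name("integer")
    try:
        integer.parse_string("price: 42")
    except pp.ParseException as pe:
        print(pe.line)               # price: 42
        print(pe.column)             # 1  (failure at the first character)
        print(pe.mark_input_line())  # >!<price: 42  (default ">!<" marker)
        print(pe.explain(depth=0))   # failing line, caret, and exception message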
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/columns.py DELETED
@@ -1,187 +0,0 @@
1
- from collections import defaultdict
2
- from itertools import chain
3
- from operator import itemgetter
4
- from typing import Dict, Iterable, List, Optional, Tuple
5
-
6
- from .align import Align, AlignMethod
7
- from .console import Console, ConsoleOptions, RenderableType, RenderResult
8
- from .constrain import Constrain
9
- from .measure import Measurement
10
- from .padding import Padding, PaddingDimensions
11
- from .table import Table
12
- from .text import TextType
13
- from .jupyter import JupyterMixin
14
-
15
-
16
- class Columns(JupyterMixin):
17
- """Display renderables in neat columns.
18
-
19
- Args:
20
- renderables (Iterable[RenderableType]): Any number of Rich renderables (including str).
21
- width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None.
22
- padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1).
23
- expand (bool, optional): Expand columns to full width. Defaults to False.
24
- equal (bool, optional): Arrange in to equal sized columns. Defaults to False.
25
- column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False.
26
- right_to_left (bool, optional): Start column from right hand side. Defaults to False.
27
- align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None.
28
- title (TextType, optional): Optional title for Columns.
29
- """
30
-
31
- def __init__(
32
- self,
33
- renderables: Optional[Iterable[RenderableType]] = None,
34
- padding: PaddingDimensions = (0, 1),
35
- *,
36
- width: Optional[int] = None,
37
- expand: bool = False,
38
- equal: bool = False,
39
- column_first: bool = False,
40
- right_to_left: bool = False,
41
- align: Optional[AlignMethod] = None,
42
- title: Optional[TextType] = None,
43
- ) -> None:
44
- self.renderables = list(renderables or [])
45
- self.width = width
46
- self.padding = padding
47
- self.expand = expand
48
- self.equal = equal
49
- self.column_first = column_first
50
- self.right_to_left = right_to_left
51
- self.align: Optional[AlignMethod] = align
52
-         self.title = title
- 
-     def add_renderable(self, renderable: RenderableType) -> None:
-         """Add a renderable to the columns.
- 
-         Args:
-             renderable (RenderableType): Any renderable object.
-         """
-         self.renderables.append(renderable)
- 
-     def __rich_console__(
-         self, console: Console, options: ConsoleOptions
-     ) -> RenderResult:
-         render_str = console.render_str
-         renderables = [
-             render_str(renderable) if isinstance(renderable, str) else renderable
-             for renderable in self.renderables
-         ]
-         if not renderables:
-             return
-         _top, right, _bottom, left = Padding.unpack(self.padding)
-         width_padding = max(left, right)
-         max_width = options.max_width
-         widths: Dict[int, int] = defaultdict(int)
-         column_count = len(renderables)
- 
-         get_measurement = Measurement.get
-         renderable_widths = [
-             get_measurement(console, options, renderable).maximum
-             for renderable in renderables
-         ]
-         if self.equal:
-             renderable_widths = [max(renderable_widths)] * len(renderable_widths)
- 
-         def iter_renderables(
-             column_count: int,
-         ) -> Iterable[Tuple[int, Optional[RenderableType]]]:
-             item_count = len(renderables)
-             if self.column_first:
-                 width_renderables = list(zip(renderable_widths, renderables))
- 
-                 column_lengths: List[int] = [item_count // column_count] * column_count
-                 for col_no in range(item_count % column_count):
-                     column_lengths[col_no] += 1
- 
-                 row_count = (item_count + column_count - 1) // column_count
-                 cells = [[-1] * column_count for _ in range(row_count)]
-                 row = col = 0
-                 for index in range(item_count):
-                     cells[row][col] = index
-                     column_lengths[col] -= 1
-                     if column_lengths[col]:
-                         row += 1
-                     else:
-                         col += 1
-                         row = 0
-                 for index in chain.from_iterable(cells):
-                     if index == -1:
-                         break
-                     yield width_renderables[index]
-             else:
-                 yield from zip(renderable_widths, renderables)
-             # Pad odd elements with spaces
-             if item_count % column_count:
-                 for _ in range(column_count - (item_count % column_count)):
-                     yield 0, None
- 
-         table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False)
-         table.expand = self.expand
-         table.title = self.title
- 
-         if self.width is not None:
-             column_count = (max_width) // (self.width + width_padding)
-             for _ in range(column_count):
-                 table.add_column(width=self.width)
-         else:
-             while column_count > 1:
-                 widths.clear()
-                 column_no = 0
-                 for renderable_width, _ in iter_renderables(column_count):
-                     widths[column_no] = max(widths[column_no], renderable_width)
-                     total_width = sum(widths.values()) + width_padding * (
-                         len(widths) - 1
-                     )
-                     if total_width > max_width:
-                         column_count = len(widths) - 1
-                         break
-                     else:
-                         column_no = (column_no + 1) % column_count
-                 else:
-                     break
- 
-         get_renderable = itemgetter(1)
-         _renderables = [
-             get_renderable(_renderable)
-             for _renderable in iter_renderables(column_count)
-         ]
-         if self.equal:
-             _renderables = [
-                 None
-                 if renderable is None
-                 else Constrain(renderable, renderable_widths[0])
-                 for renderable in _renderables
-             ]
-         if self.align:
-             align = self.align
-             _Align = Align
-             _renderables = [
-                 None if renderable is None else _Align(renderable, align)
-                 for renderable in _renderables
-             ]
- 
-         right_to_left = self.right_to_left
-         add_row = table.add_row
-         for start in range(0, len(_renderables), column_count):
-             row = _renderables[start : start + column_count]
-             if right_to_left:
-                 row = row[::-1]
-             add_row(*row)
-         yield table
- 
- 
- if __name__ == "__main__":  # pragma: no cover
-     import os
- 
-     console = Console()
- 
-     files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))]
-     columns = Columns(files, padding=(0, 1), expand=False, equal=False)
-     console.print(columns)
-     console.rule()
-     columns.column_first = True
-     console.print(columns)
-     columns.right_to_left = True
-     console.rule()
-     console.print(columns)
spaces/Boops88/gsdf-Counterfeit-V2.5/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
- 
- gr.Interface.load("models/gsdf/Counterfeit-V2.5").launch()
spaces/Brasd99/JustClothify/helpers/processor.py DELETED
@@ -1,174 +0,0 @@
1
- import io
2
- import cv2
3
- import imageio
4
- import numpy as np
5
- import torch
6
- from typing import Dict, List
7
- from fvcore.common.config import CfgNode
8
- from detectron2.config import get_cfg
9
- from detectron2.engine.defaults import DefaultPredictor
10
- from detectron2.structures.instances import Instances
11
- from densepose import add_densepose_config
12
- from densepose.vis.base import CompoundVisualizer
13
- from densepose.vis.densepose_outputs_vertex import get_texture_atlases
14
- from densepose.vis.densepose_results_textures import DensePoseResultsVisualizerWithTexture as dp_iuv_texture
15
- from densepose.vis.extractor import CompoundExtractor, create_extractor, DensePoseResultExtractor
16
-
17
- class TextureProcessor:
18
- def __init__(self, config: str, weights: str) -> None:
19
- self.config = self.get_config(config, weights)
20
- self.predictor = DefaultPredictor(self.config)
21
- self.extractor = DensePoseResultExtractor()
22
-
23
- def process_texture(self, image: np.ndarray) -> np.ndarray:
24
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
25
- output = self.execute(image)
26
- if 'pred_densepose' in output:
27
- texture = self.create_iuv(output, image)
28
- atlas_texture_bytes = io.BytesIO()
29
- imageio.imwrite(atlas_texture_bytes, texture, format='PNG')
30
- texture_atlas_array = np.frombuffer(atlas_texture_bytes.getvalue(), dtype=np.uint8)
31
- texture_atlas = cv2.imdecode(texture_atlas_array, cv2.IMREAD_COLOR)
32
- texture_atlas = cv2.cvtColor(texture_atlas, cv2.COLOR_BGR2RGB)
33
- return texture_atlas
34
- else:
35
- raise Exception('Clothes not found')
36
-
37
- def extract(self, person_img, model_img):
38
- texture_atlas = self.process_texture(model_img)
39
- return self.overlay_texture(texture_atlas, person_img)
40
-
41
- def overlay_texture(self, texture_atlas: np.ndarray, original_image: np.ndarray) -> np.ndarray:
42
- texture_atlas[:, :, :3] = texture_atlas[:, :, 2::-1]
43
- texture_atlases_dict = get_texture_atlases(None)
44
- vis = dp_iuv_texture(
45
- cfg=self.config,
46
- texture_atlas=texture_atlas,
47
- texture_atlases_dict=texture_atlases_dict
48
- )
49
-
50
- extractor = create_extractor(vis)
51
-
52
- visualizer = CompoundVisualizer([vis])
53
- extractor = CompoundExtractor([extractor])
54
-
55
- with torch.no_grad():
56
- outputs = self.predictor(original_image)['instances']
57
-
58
- image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
59
- image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
60
- data = extractor(outputs)
61
- image_vis = visualizer.visualize(image, data)
62
-
63
- return image_vis
64
-
65
- def parse_iuv(self, result: Dict) -> np.ndarray:
66
- i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
67
- uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
68
- iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
69
- iuv = np.transpose(iuv, (1, 2, 0))
70
- return iuv
71
-
72
- def parse_bbox(self, result: Dict) -> np.ndarray:
73
- return result['pred_boxes_XYXY'][0].cpu().numpy()
74
-
75
- def interpolate_tex(self, tex: np.ndarray) -> np.ndarray:
76
- valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
77
- radius_increase = 10
78
- kernel = np.ones((radius_increase, radius_increase), np.uint8)
79
- dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1)
80
-
81
- invalid_region = 1 - valid_mask
82
- actual_part_max = tex.max()
83
- actual_part_min = tex.min()
84
- actual_part_uint = np.array((tex - actual_part_min) / (actual_part_max - actual_part_min) * 255, dtype='uint8')
85
-
86
- actual_part_uint = cv2.inpaint(actual_part_uint.transpose((1, 2, 0)), invalid_region, 1, cv2.INPAINT_TELEA).transpose((2, 0, 1))
87
-
88
- actual_part = (actual_part_uint / 255.0) * (actual_part_max - actual_part_min) + actual_part_min
89
- actual_part = actual_part * dilated_mask
90
-
91
- return actual_part
92
-
93
- def concat_textures(self, array: List[np.ndarray]) -> np.ndarray:
94
- texture_rows = [np.concatenate(array[i:i+6], axis=1) for i in range(0, 24, 6)]
95
- texture = np.concatenate(texture_rows, axis=0)
96
- return texture
97
-
98
- def get_texture(
99
- self,
100
- im: np.ndarray,
101
- iuv: np.ndarray,
102
- bbox: List[int],
103
- tex_part_size: int = 200) -> np.ndarray:
104
-
105
- im = im.transpose(2, 1, 0) / 255
106
- image_w, image_h = im.shape[1], im.shape[2]
107
- bbox[2] = bbox[2] - bbox[0]
108
- bbox[3] = bbox[3] - bbox[1]
109
- x, y, w, h = [int(v) for v in bbox]
110
- bg = np.zeros((image_h, image_w, 3))
111
- bg[y:y + h, x:x + w, :] = iuv
112
- iuv = bg
113
- iuv = iuv.transpose((2, 1, 0))
114
- i, u, v = iuv[2], iuv[1], iuv[0]
115
-
116
- n_parts = 22
117
- texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size))
118
-
119
- for part_id in range(1, n_parts + 1):
120
- generated = np.zeros((3, tex_part_size, tex_part_size))
121
-
122
- x, y = u[i == part_id], v[i == part_id]
123
-
124
- tex_u_coo = (x * (tex_part_size - 1) / 255).astype(int)
125
- tex_v_coo = (y * (tex_part_size - 1) / 255).astype(int)
126
-
127
- tex_u_coo = np.clip(tex_u_coo, 0, tex_part_size - 1)
128
- tex_v_coo = np.clip(tex_v_coo, 0, tex_part_size - 1)
129
-
130
- for channel in range(3):
131
- generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id]
132
-
133
- if np.sum(generated) > 0:
134
- generated = self.interpolate_tex(generated)
135
-
136
- texture[part_id - 1] = generated[:, ::-1, :]
137
-
138
- tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3))
139
- for i in range(texture.shape[0]):
140
- tex_concat[i] = texture[i].transpose(2, 1, 0)
141
- tex = self.concat_textures(tex_concat)
142
-
143
- return tex
144
-
145
- def create_iuv(self, results: Dict, image: np.ndarray) -> np.ndarray:
146
- iuv = self.parse_iuv(results)
147
- bbox = self.parse_bbox(results)
148
- uv_texture = self.get_texture(image, iuv, bbox)
149
- uv_texture = uv_texture.transpose([1, 0, 2])
150
- return uv_texture
151
-
152
- def get_config(self, config_fpath: str, model_fpath: str) -> CfgNode:
153
- cfg = get_cfg()
154
- add_densepose_config(cfg)
155
- cfg.merge_from_file(config_fpath)
156
- cfg.MODEL.WEIGHTS = model_fpath
157
- cfg.MODEL.DEVICE = 'cpu'
158
- cfg.freeze()
159
- return cfg
160
-
161
- def execute(self, image: np.ndarray) -> Dict:
162
- with torch.no_grad():
163
- outputs = self.predictor(image)['instances']
164
- return self.execute_on_outputs(outputs)
165
-
166
- def execute_on_outputs(self, outputs: Instances) -> Dict:
167
- result = {}
168
- if outputs.has('scores'):
169
- result['scores'] = outputs.get('scores').cpu()
170
- if outputs.has('pred_boxes'):
171
- result['pred_boxes_XYXY'] = outputs.get('pred_boxes').tensor.cpu()
172
- if outputs.has('pred_densepose'):
173
- result['pred_densepose'] = self.extractor(outputs)[0]
174
- return result
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/replace.h DELETED
@@ -1,22 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
- 
- #pragma once
- 
- #include <thrust/detail/config.h>
- 
- // this system has no special version of this algorithm
- 
spaces/CVPR/WALT/mmdet/models/dense_heads/__init__.py DELETED
@@ -1,41 +0,0 @@
- from .anchor_free_head import AnchorFreeHead
- from .anchor_head import AnchorHead
- from .atss_head import ATSSHead
- from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
- from .centripetal_head import CentripetalHead
- from .corner_head import CornerHead
- from .embedding_rpn_head import EmbeddingRPNHead
- from .fcos_head import FCOSHead
- from .fovea_head import FoveaHead
- from .free_anchor_retina_head import FreeAnchorRetinaHead
- from .fsaf_head import FSAFHead
- from .ga_retina_head import GARetinaHead
- from .ga_rpn_head import GARPNHead
- from .gfl_head import GFLHead
- from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
- from .ld_head import LDHead
- from .nasfcos_head import NASFCOSHead
- from .paa_head import PAAHead
- from .pisa_retinanet_head import PISARetinaHead
- from .pisa_ssd_head import PISASSDHead
- from .reppoints_head import RepPointsHead
- from .retina_head import RetinaHead
- from .retina_sepbn_head import RetinaSepBNHead
- from .rpn_head import RPNHead
- from .sabl_retina_head import SABLRetinaHead
- from .ssd_head import SSDHead
- from .transformer_head import TransformerHead
- from .vfnet_head import VFNetHead
- from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
- from .yolo_head import YOLOV3Head
- 
- __all__ = [
-     'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
-     'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
-     'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
-     'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
-     'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
-     'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
-     'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead',
-     'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead'
- ]
spaces/CVPR/WALT/mmdet/models/necks/fpn.py DELETED
@@ -1,221 +0,0 @@
1
- import warnings
2
-
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from mmcv.cnn import ConvModule, xavier_init
6
- from mmcv.runner import auto_fp16
7
-
8
- from ..builder import NECKS
9
-
10
-
11
- @NECKS.register_module()
12
- class FPN(nn.Module):
13
- r"""Feature Pyramid Network.
14
-
15
- This is an implementation of paper `Feature Pyramid Networks for Object
16
- Detection <https://arxiv.org/abs/1612.03144>`_.
17
-
18
- Args:
19
- in_channels (List[int]): Number of input channels per scale.
20
- out_channels (int): Number of output channels (used at each scale)
21
- num_outs (int): Number of output scales.
22
- start_level (int): Index of the start input backbone level used to
23
- build the feature pyramid. Default: 0.
24
- end_level (int): Index of the end input backbone level (exclusive) to
25
- build the feature pyramid. Default: -1, which means the last level.
26
- add_extra_convs (bool | str): If bool, it decides whether to add conv
27
- layers on top of the original feature maps. Default to False.
28
- If True, its actual mode is specified by `extra_convs_on_inputs`.
29
- If str, it specifies the source feature map of the extra convs.
30
- Only the following options are allowed
31
-
32
- - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
33
- - 'on_lateral': Last feature map after lateral convs.
34
- - 'on_output': The last output feature map after fpn convs.
35
- extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
36
- on the original feature from the backbone. If True,
37
- it is equivalent to `add_extra_convs='on_input'`. If False, it is
38
- equivalent to set `add_extra_convs='on_output'`. Default to True.
39
- relu_before_extra_convs (bool): Whether to apply relu before the extra
40
- conv. Default: False.
41
- no_norm_on_lateral (bool): Whether to apply norm on lateral.
42
- Default: False.
43
- conv_cfg (dict): Config dict for convolution layer. Default: None.
44
- norm_cfg (dict): Config dict for normalization layer. Default: None.
45
- act_cfg (str): Config dict for activation layer in ConvModule.
46
- Default: None.
47
- upsample_cfg (dict): Config dict for interpolate layer.
48
- Default: `dict(mode='nearest')`
49
-
50
- Example:
51
- >>> import torch
52
- >>> in_channels = [2, 3, 5, 7]
53
- >>> scales = [340, 170, 84, 43]
54
- >>> inputs = [torch.rand(1, c, s, s)
55
- ... for c, s in zip(in_channels, scales)]
56
- >>> self = FPN(in_channels, 11, len(in_channels)).eval()
57
- >>> outputs = self.forward(inputs)
58
- >>> for i in range(len(outputs)):
59
- ... print(f'outputs[{i}].shape = {outputs[i].shape}')
60
- outputs[0].shape = torch.Size([1, 11, 340, 340])
61
- outputs[1].shape = torch.Size([1, 11, 170, 170])
62
- outputs[2].shape = torch.Size([1, 11, 84, 84])
63
- outputs[3].shape = torch.Size([1, 11, 43, 43])
64
- """
65
-
66
- def __init__(self,
67
- in_channels,
68
- out_channels,
69
- num_outs,
70
- start_level=0,
71
- end_level=-1,
72
- add_extra_convs=False,
73
- extra_convs_on_inputs=True,
74
- relu_before_extra_convs=False,
75
- no_norm_on_lateral=False,
76
- conv_cfg=None,
77
- norm_cfg=None,
78
- act_cfg=None,
79
- upsample_cfg=dict(mode='nearest')):
80
- super(FPN, self).__init__()
81
- assert isinstance(in_channels, list)
82
- self.in_channels = in_channels
83
- self.out_channels = out_channels
84
- self.num_ins = len(in_channels)
85
- self.num_outs = num_outs
86
- self.relu_before_extra_convs = relu_before_extra_convs
87
- self.no_norm_on_lateral = no_norm_on_lateral
88
- self.fp16_enabled = False
89
- self.upsample_cfg = upsample_cfg.copy()
90
-
91
- if end_level == -1:
92
- self.backbone_end_level = self.num_ins
93
- assert num_outs >= self.num_ins - start_level
94
- else:
95
- # if end_level < inputs, no extra level is allowed
96
- self.backbone_end_level = end_level
97
- assert end_level <= len(in_channels)
98
- assert num_outs == end_level - start_level
99
- self.start_level = start_level
100
- self.end_level = end_level
101
- self.add_extra_convs = add_extra_convs
102
- assert isinstance(add_extra_convs, (str, bool))
103
- if isinstance(add_extra_convs, str):
104
- # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
105
- assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
106
- elif add_extra_convs: # True
107
- if extra_convs_on_inputs:
108
- # TODO: deprecate `extra_convs_on_inputs`
109
- warnings.simplefilter('once')
110
- warnings.warn(
111
- '"extra_convs_on_inputs" will be deprecated in v2.9.0,'
112
- 'Please use "add_extra_convs"', DeprecationWarning)
113
- self.add_extra_convs = 'on_input'
114
- else:
115
- self.add_extra_convs = 'on_output'
116
-
117
- self.lateral_convs = nn.ModuleList()
118
- self.fpn_convs = nn.ModuleList()
119
-
120
- for i in range(self.start_level, self.backbone_end_level):
121
- l_conv = ConvModule(
122
- in_channels[i],
123
- out_channels,
124
- 1,
125
- conv_cfg=conv_cfg,
126
- norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
127
- act_cfg=act_cfg,
128
- inplace=False)
129
- fpn_conv = ConvModule(
130
- out_channels,
131
- out_channels,
132
- 3,
133
- padding=1,
134
- conv_cfg=conv_cfg,
135
- norm_cfg=norm_cfg,
136
- act_cfg=act_cfg,
137
- inplace=False)
138
-
139
- self.lateral_convs.append(l_conv)
140
- self.fpn_convs.append(fpn_conv)
141
-
142
- # add extra conv layers (e.g., RetinaNet)
143
- extra_levels = num_outs - self.backbone_end_level + self.start_level
144
- if self.add_extra_convs and extra_levels >= 1:
145
- for i in range(extra_levels):
146
- if i == 0 and self.add_extra_convs == 'on_input':
147
- in_channels = self.in_channels[self.backbone_end_level - 1]
148
- else:
149
- in_channels = out_channels
150
- extra_fpn_conv = ConvModule(
151
- in_channels,
152
- out_channels,
153
- 3,
154
- stride=2,
155
- padding=1,
156
- conv_cfg=conv_cfg,
157
- norm_cfg=norm_cfg,
158
- act_cfg=act_cfg,
159
- inplace=False)
160
- self.fpn_convs.append(extra_fpn_conv)
161
-
162
- # default init_weights for conv(msra) and norm in ConvModule
163
- def init_weights(self):
164
- """Initialize the weights of FPN module."""
165
- for m in self.modules():
166
- if isinstance(m, nn.Conv2d):
167
- xavier_init(m, distribution='uniform')
168
-
169
- @auto_fp16()
170
- def forward(self, inputs):
171
- """Forward function."""
172
- assert len(inputs) == len(self.in_channels)
173
-
174
- # build laterals
175
- laterals = [
176
- lateral_conv(inputs[i + self.start_level])
177
- for i, lateral_conv in enumerate(self.lateral_convs)
178
- ]
179
-
180
- # build top-down path
181
- used_backbone_levels = len(laterals)
182
- for i in range(used_backbone_levels - 1, 0, -1):
183
- # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
184
- # it cannot co-exist with `size` in `F.interpolate`.
185
- if 'scale_factor' in self.upsample_cfg:
186
- laterals[i - 1] += F.interpolate(laterals[i],
187
- **self.upsample_cfg)
188
- else:
189
- prev_shape = laterals[i - 1].shape[2:]
190
- laterals[i - 1] += F.interpolate(
191
- laterals[i], size=prev_shape, **self.upsample_cfg)
192
-
193
- # build outputs
194
- # part 1: from original levels
195
- outs = [
196
- self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
197
- ]
198
- # part 2: add extra levels
199
- if self.num_outs > len(outs):
200
- # use max pool to get more levels on top of outputs
201
- # (e.g., Faster R-CNN, Mask R-CNN)
202
- if not self.add_extra_convs:
203
- for i in range(self.num_outs - used_backbone_levels):
204
- outs.append(F.max_pool2d(outs[-1], 1, stride=2))
205
- # add conv layers on top of original feature maps (RetinaNet)
206
- else:
207
- if self.add_extra_convs == 'on_input':
208
- extra_source = inputs[self.backbone_end_level - 1]
209
- elif self.add_extra_convs == 'on_lateral':
210
- extra_source = laterals[-1]
211
- elif self.add_extra_convs == 'on_output':
212
- extra_source = outs[-1]
213
- else:
214
- raise NotImplementedError
215
- outs.append(self.fpn_convs[used_backbone_levels](extra_source))
216
- for i in range(used_backbone_levels + 1, self.num_outs):
217
- if self.relu_before_extra_convs:
218
- outs.append(self.fpn_convs[i](F.relu(outs[-1])))
219
- else:
220
- outs.append(self.fpn_convs[i](outs[-1]))
221
- return tuple(outs)
spaces/CVPR/lama-example/bin/sample_from_dataset.py DELETED
@@ -1,87 +0,0 @@
- #!/usr/bin/env python3
- 
- import os
- 
- import numpy as np
- import tqdm
- from skimage import io
- from skimage.segmentation import mark_boundaries
- 
- from saicinpainting.evaluation.data import InpaintingDataset
- from saicinpainting.evaluation.vis import save_item_for_vis
- 
- def save_mask_for_sidebyside(item, out_file):
-     mask = item['mask']# > 0.5
-     if mask.ndim == 3:
-         mask = mask[0]
-     mask = np.clip(mask * 255, 0, 255).astype('uint8')
-     io.imsave(out_file, mask)
- 
- def save_img_for_sidebyside(item, out_file):
-     img = np.transpose(item['image'], (1, 2, 0))
-     img = np.clip(img * 255, 0, 255).astype('uint8')
-     io.imsave(out_file, img)
- 
- def save_masked_img_for_sidebyside(item, out_file):
-     mask = item['mask']
-     img = item['image']
- 
-     img = (1-mask) * img + mask
-     img = np.transpose(img, (1, 2, 0))
- 
-     img = np.clip(img * 255, 0, 255).astype('uint8')
-     io.imsave(out_file, img)
- 
- def main(args):
-     dataset = InpaintingDataset(args.datadir, img_suffix='.png')
- 
-     area_bins = np.linspace(0, 1, args.area_bins + 1)
- 
-     heights = []
-     widths = []
-     image_areas = []
-     hole_areas = []
-     hole_area_percents = []
-     area_bins_count = np.zeros(args.area_bins)
-     area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)]
- 
-     bin2i = [[] for _ in range(args.area_bins)]
- 
-     for i, item in enumerate(tqdm.tqdm(dataset)):
-         h, w = item['image'].shape[1:]
-         heights.append(h)
-         widths.append(w)
-         full_area = h * w
-         image_areas.append(full_area)
-         hole_area = (item['mask'] == 1).sum()
-         hole_areas.append(hole_area)
-         hole_percent = hole_area / full_area
-         hole_area_percents.append(hole_percent)
-         bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1)
-         area_bins_count[bin_i] += 1
-         bin2i[bin_i].append(i)
- 
-     os.makedirs(args.outdir, exist_ok=True)
- 
-     for bin_i in range(args.area_bins):
-         bindir = os.path.join(args.outdir, area_bin_titles[bin_i])
-         os.makedirs(bindir, exist_ok=True)
-         bin_idx = bin2i[bin_i]
-         for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False):
-             item = dataset[sample_i]
-             path = os.path.join(bindir, dataset.img_filenames[sample_i].split('/')[-1])
-             save_masked_img_for_sidebyside(item, path)
- 
- 
- if __name__ == '__main__':
-     import argparse
- 
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('--datadir', type=str,
-                          help='Path to folder with images and masks (output of gen_mask_dataset.py)')
-     aparser.add_argument('--outdir', type=str, help='Where to put results')
-     aparser.add_argument('--samples-n', type=int, default=10,
-                          help='Number of sample images with masks to copy for visualization for each area bin')
-     aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have')
- 
-     main(aparser.parse_args())
spaces/CVPR/lama-example/saicinpainting/training/trainers/base.py DELETED
@@ -1,291 +0,0 @@
1
- import copy
2
- import logging
3
- from typing import Dict, Tuple
4
-
5
- import pandas as pd
6
- import pytorch_lightning as ptl
7
- import torch
8
- import torch.nn as nn
9
- import torch.nn.functional as F
10
- from torch.utils.data import DistributedSampler
11
-
12
- from saicinpainting.evaluation import make_evaluator
13
- from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
14
- from saicinpainting.training.losses.adversarial import make_discrim_loss
15
- from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
16
- from saicinpainting.training.modules import make_generator, make_discriminator
17
- from saicinpainting.training.visualizers import make_visualizer
18
- from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
19
- get_has_ddp_rank
20
-
21
- LOGGER = logging.getLogger(__name__)
22
-
23
-
24
- def make_optimizer(parameters, kind='adamw', **kwargs):
25
- if kind == 'adam':
26
- optimizer_class = torch.optim.Adam
27
- elif kind == 'adamw':
28
- optimizer_class = torch.optim.AdamW
29
- else:
30
- raise ValueError(f'Unknown optimizer kind {kind}')
31
- return optimizer_class(parameters, **kwargs)
32
-
33
-
34
- def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999):
35
- with torch.no_grad():
36
- res_params = dict(result.named_parameters())
37
- new_params = dict(new_iterate_model.named_parameters())
38
-
39
- for k in res_params.keys():
40
- res_params[k].data.mul_(decay).add_(new_params[k].data, alpha=1 - decay)
41
-
42
-
43
- def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'):
44
- batch_size, _, height, width = base_tensor.shape
45
- cur_height, cur_width = height, width
46
- result = []
47
- align_corners = False if scale_mode in ('bilinear', 'bicubic') else None
48
- for _ in range(scales):
49
- cur_sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device)
50
- cur_sample_scaled = F.interpolate(cur_sample, size=(height, width), mode=scale_mode, align_corners=align_corners)
51
- result.append(cur_sample_scaled)
52
- cur_height //= 2
53
- cur_width //= 2
54
- return torch.cat(result, dim=1)
55
-
56
-
57
- class BaseInpaintingTrainingModule(ptl.LightningModule):
58
- def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100,
59
- average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000,
60
- average_generator_period=10, store_discr_outputs_for_vis=False,
61
- **kwargs):
62
- super().__init__(*args, **kwargs)
63
- LOGGER.info('BaseInpaintingTrainingModule init called')
64
-
65
- self.config = config
66
-
67
- self.generator = make_generator(config, **self.config.generator)
68
- self.use_ddp = use_ddp
69
-
70
- if not get_has_ddp_rank():
71
- LOGGER.info(f'Generator\n{self.generator}')
72
-
73
- if not predict_only:
74
- self.save_hyperparameters(self.config)
75
- self.discriminator = make_discriminator(**self.config.discriminator)
76
- self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial)
77
- self.visualizer = make_visualizer(**self.config.visualizer)
78
- self.val_evaluator = make_evaluator(**self.config.evaluator)
79
- self.test_evaluator = make_evaluator(**self.config.evaluator)
80
-
81
- if not get_has_ddp_rank():
82
- LOGGER.info(f'Discriminator\n{self.discriminator}')
83
-
84
- extra_val = self.config.data.get('extra_val', ())
85
- if extra_val:
86
- self.extra_val_titles = list(extra_val)
87
- self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator)
88
- for k in extra_val})
89
- else:
90
- self.extra_evaluators = {}
91
-
92
- self.average_generator = average_generator
93
- self.generator_avg_beta = generator_avg_beta
94
- self.average_generator_start_step = average_generator_start_step
95
- self.average_generator_period = average_generator_period
96
- self.generator_average = None
97
- self.last_generator_averaging_step = -1
98
- self.store_discr_outputs_for_vis = store_discr_outputs_for_vis
99
-
100
- if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0:
101
- self.loss_l1 = nn.L1Loss(reduction='none')
102
-
103
- if self.config.losses.get("mse", {"weight": 0})['weight'] > 0:
104
- self.loss_mse = nn.MSELoss(reduction='none')
105
-
106
- if self.config.losses.perceptual.weight > 0:
107
- self.loss_pl = PerceptualLoss()
108
-
109
- if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0:
110
- self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl)
111
- else:
112
- self.loss_resnet_pl = None
113
-
114
- self.visualize_each_iters = visualize_each_iters
115
- LOGGER.info('BaseInpaintingTrainingModule init done')
116
-
117
- def configure_optimizers(self):
118
- discriminator_params = list(self.discriminator.parameters())
119
- return [
120
- dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)),
121
- dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)),
122
- ]
123
-
124
- def train_dataloader(self):
125
- kwargs = dict(self.config.data.train)
126
- if self.use_ddp:
127
- kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes,
128
- rank=self.trainer.global_rank,
129
- shuffle=True)
130
- dataloader = make_default_train_dataloader(**self.config.data.train)
131
- return dataloader
132
-
133
- def val_dataloader(self):
134
- res = [make_default_val_dataloader(**self.config.data.val)]
135
-
136
- if self.config.data.visual_test is not None:
137
- res = res + [make_default_val_dataloader(**self.config.data.visual_test)]
138
- else:
139
- res = res + res
140
-
141
- extra_val = self.config.data.get('extra_val', ())
142
- if extra_val:
143
- res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles]
144
-
145
- return res
146
-
147
- def training_step(self, batch, batch_idx, optimizer_idx=None):
148
- self._is_training_step = True
149
- return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx)
150
-
151
- def validation_step(self, batch, batch_idx, dataloader_idx):
152
- extra_val_key = None
153
- if dataloader_idx == 0:
154
- mode = 'val'
155
- elif dataloader_idx == 1:
156
- mode = 'test'
157
- else:
158
- mode = 'extra_val'
159
- extra_val_key = self.extra_val_titles[dataloader_idx - 2]
160
- self._is_training_step = False
161
- return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key)
162
-
163
- def training_step_end(self, batch_parts_outputs):
164
- if self.training and self.average_generator \
165
- and self.global_step >= self.average_generator_start_step \
166
- and self.global_step >= self.last_generator_averaging_step + self.average_generator_period:
167
- if self.generator_average is None:
168
- self.generator_average = copy.deepcopy(self.generator)
169
- else:
170
- update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta)
171
- self.last_generator_averaging_step = self.global_step
172
-
173
- full_loss = (batch_parts_outputs['loss'].mean()
174
- if torch.is_tensor(batch_parts_outputs['loss']) # loss is not tensor when no discriminator used
175
- else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True))
176
- log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()}
177
- self.log_dict(log_info, on_step=True, on_epoch=False)
178
- return full_loss
179
-
180
- def validation_epoch_end(self, outputs):
181
- outputs = [step_out for out_group in outputs for step_out in out_group]
182
- averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs)
183
- self.log_dict({k: v.mean() for k, v in averaged_logs.items()})
184
-
185
- pd.set_option('display.max_columns', 500)
186
- pd.set_option('display.width', 1000)
187
-
188
- # standard validation
189
- val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s]
190
- val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states)
191
- val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0)
192
- val_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
193
- LOGGER.info(f'Validation metrics after epoch #{self.current_epoch}, '
194
- f'total {self.global_step} iterations:\n{val_evaluator_res_df}')
195
-
196
- for k, v in flatten_dict(val_evaluator_res).items():
197
- self.log(f'val_{k}', v)
198
-
199
- # standard visual test
200
- test_evaluator_states = [s['test_evaluator_state'] for s in outputs
201
- if 'test_evaluator_state' in s]
202
- test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states)
203
- test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0)
204
- test_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
205
- LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, '
206
- f'total {self.global_step} iterations:\n{test_evaluator_res_df}')
207
-
208
- for k, v in flatten_dict(test_evaluator_res).items():
209
- self.log(f'test_{k}', v)
210
-
211
- # extra validations
212
- if self.extra_evaluators:
213
- for cur_eval_title, cur_evaluator in self.extra_evaluators.items():
214
- cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state'
215
- cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s]
216
- cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states)
217
- cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0)
218
- cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
219
- LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, '
220
- f'total {self.global_step} iterations:\n{cur_evaluator_res_df}')
221
- for k, v in flatten_dict(cur_evaluator_res).items():
222
- self.log(f'extra_val_{cur_eval_title}_{k}', v)
223
-
224
- def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None):
225
- if optimizer_idx == 0: # step for generator
226
- set_requires_grad(self.generator, True)
227
- set_requires_grad(self.discriminator, False)
228
- elif optimizer_idx == 1: # step for discriminator
229
- set_requires_grad(self.generator, False)
230
- set_requires_grad(self.discriminator, True)
231
-
232
- batch = self(batch)
233
-
234
- total_loss = 0
235
- metrics = {}
236
-
237
- if optimizer_idx is None or optimizer_idx == 0: # step for generator
238
- total_loss, metrics = self.generator_loss(batch)
239
-
240
- elif optimizer_idx is None or optimizer_idx == 1: # step for discriminator
241
- if self.config.losses.adversarial.weight > 0:
242
- total_loss, metrics = self.discriminator_loss(batch)
243
-
244
- if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'):
245
- if self.config.losses.adversarial.weight > 0:
246
- if self.store_discr_outputs_for_vis:
247
- with torch.no_grad():
248
- self.store_discr_outputs(batch)
249
- vis_suffix = f'_{mode}'
250
- if mode == 'extra_val':
251
- vis_suffix += f'_{extra_val_key}'
252
- self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix)
253
-
254
- metrics_prefix = f'{mode}_'
255
- if mode == 'extra_val':
256
- metrics_prefix += f'{extra_val_key}_'
257
- result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix))
258
- if mode == 'val':
259
- result['val_evaluator_state'] = self.val_evaluator.process_batch(batch)
260
- elif mode == 'test':
261
- result['test_evaluator_state'] = self.test_evaluator.process_batch(batch)
262
- elif mode == 'extra_val':
263
- result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch)
264
-
265
- return result
266
-
267
- def get_current_generator(self, no_average=False):
268
- if not no_average and not self.training and self.average_generator and self.generator_average is not None:
269
- return self.generator_average
270
- return self.generator
271
-
272
- def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
273
- """Pass data through generator and obtain at leas 'predicted_image' and 'inpainted' keys"""
274
- raise NotImplementedError()
275
-
276
- def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
277
- raise NotImplementedError()
278
-
279
- def discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
280
- raise NotImplementedError()
281
-
282
- def store_discr_outputs(self, batch):
283
- out_size = batch['image'].shape[2:]
284
- discr_real_out, _ = self.discriminator(batch['image'])
285
- discr_fake_out, _ = self.discriminator(batch['predicted_image'])
286
- batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest')
287
- batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest')
288
- batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake']
289
-
290
- def get_ddp_rank(self):
291
- return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/onnx.py DELETED
@@ -1,144 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import torch
8
- import torch.nn as nn
9
- from torch.nn import functional as F
10
-
11
- from typing import Tuple
12
-
13
- from ..modeling import Sam
14
- from .amg import calculate_stability_score
15
-
16
-
17
- class SamOnnxModel(nn.Module):
18
- """
19
- This model should not be called directly, but is used in ONNX export.
20
- It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
21
- with some functions modified to enable model tracing. Also supports extra
22
- options controlling what information. See the ONNX export script for details.
23
- """
24
-
25
- def __init__(
26
- self,
27
- model: Sam,
28
- return_single_mask: bool,
29
- use_stability_score: bool = False,
30
- return_extra_metrics: bool = False,
31
- ) -> None:
32
- super().__init__()
33
- self.mask_decoder = model.mask_decoder
34
- self.model = model
35
- self.img_size = model.image_encoder.img_size
36
- self.return_single_mask = return_single_mask
37
- self.use_stability_score = use_stability_score
38
- self.stability_score_offset = 1.0
39
- self.return_extra_metrics = return_extra_metrics
40
-
41
- @staticmethod
42
- def resize_longest_image_size(
43
- input_image_size: torch.Tensor, longest_side: int
44
- ) -> torch.Tensor:
45
- input_image_size = input_image_size.to(torch.float32)
46
- scale = longest_side / torch.max(input_image_size)
47
- transformed_size = scale * input_image_size
48
- transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
49
- return transformed_size
50
-
51
- def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
52
- point_coords = point_coords + 0.5
53
- point_coords = point_coords / self.img_size
54
- point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
55
- point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
56
-
57
- point_embedding = point_embedding * (point_labels != -1)
58
- point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
59
- point_labels == -1
60
- )
61
-
62
- for i in range(self.model.prompt_encoder.num_point_embeddings):
63
- point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
64
- i
65
- ].weight * (point_labels == i)
66
-
67
- return point_embedding
68
-
69
- def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
70
- mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
71
- mask_embedding = mask_embedding + (
72
- 1 - has_mask_input
73
- ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
74
- return mask_embedding
75
-
76
- def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
77
- masks = F.interpolate(
78
- masks,
79
- size=(self.img_size, self.img_size),
80
- mode="bilinear",
81
- align_corners=False,
82
- )
83
-
84
- prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
85
- masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
86
-
87
- orig_im_size = orig_im_size.to(torch.int64)
88
- h, w = orig_im_size[0], orig_im_size[1]
89
- masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
90
- return masks
91
-
92
- def select_masks(
93
- self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
94
- ) -> Tuple[torch.Tensor, torch.Tensor]:
95
- # Determine if we should return the multiclick mask or not from the number of points.
96
- # The reweighting is used to avoid control flow.
97
- score_reweight = torch.tensor(
98
- [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
99
- ).to(iou_preds.device)
100
- score = iou_preds + (num_points - 2.5) * score_reweight
101
- best_idx = torch.argmax(score, dim=1)
102
- masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
103
- iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
104
-
105
- return masks, iou_preds
106
-
107
- @torch.no_grad()
108
- def forward(
109
- self,
110
- image_embeddings: torch.Tensor,
111
- point_coords: torch.Tensor,
112
- point_labels: torch.Tensor,
113
- mask_input: torch.Tensor,
114
- has_mask_input: torch.Tensor,
115
- orig_im_size: torch.Tensor,
116
- ):
117
- sparse_embedding = self._embed_points(point_coords, point_labels)
118
- dense_embedding = self._embed_masks(mask_input, has_mask_input)
119
-
120
- masks, scores = self.model.mask_decoder.predict_masks(
121
- image_embeddings=image_embeddings,
122
- image_pe=self.model.prompt_encoder.get_dense_pe(),
123
- sparse_prompt_embeddings=sparse_embedding,
124
- dense_prompt_embeddings=dense_embedding,
125
- )
126
-
127
- if self.use_stability_score:
128
- scores = calculate_stability_score(
129
- masks, self.model.mask_threshold, self.stability_score_offset
130
- )
131
-
132
- if self.return_single_mask:
133
- masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
134
-
135
- upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
136
-
137
- if self.return_extra_metrics:
138
- stability_scores = calculate_stability_score(
139
- upscaled_masks, self.model.mask_threshold, self.stability_score_offset
140
- )
141
- areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
142
- return upscaled_masks, scores, stability_scores, areas, masks
143
-
144
- return upscaled_masks, scores, masks
spaces/Chukwuka/Dog_Breed_ImageWoof/README.md DELETED
@@ -1,400 +0,0 @@
- ---
- title: Dog Breed ImageWoof
- emoji: ⚡
- colorFrom: blue
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.17.0
- app_file: app.py
- pinned: false
- license: mit
- ---
- # ImageWoof Classification
- ![](https://miro.medium.com/max/2240/1*e84otk0ul3xS_65l94qWeA.png)
- 
- <a href="https://github.com/Sylvesterchuks/dogbreed_app">Click to visit the Github Repo</a>
- ## Problem Statement And Description
- ImageWoof is a subset of 10 harder-to-classify classes from ImageNet, all dog breeds: Australian terrier, Border terrier, Samoyed, beagle, Shih-Tzu, English foxhound, Rhodesian ridgeback, dingo, golden retriever, Old English sheepdog.
- An EfficientNet-B2 feature-extractor model was built to classify these breeds (a training-setup sketch follows this README's diff). The layer summary below was produced with `summary(eff_b2, (3, 224, 224), device='cpu')`:
- <pre>
21
- ----------------------------------------------------------------
22
- Layer (type) Output Shape Param #
23
- ================================================================
24
- Conv2d-1 [-1, 32, 112, 112] 864
25
- BatchNorm2d-2 [-1, 32, 112, 112] 64
26
- SiLU-3 [-1, 32, 112, 112] 0
27
- Conv2d-4 [-1, 32, 112, 112] 288
28
- BatchNorm2d-5 [-1, 32, 112, 112] 64
29
- SiLU-6 [-1, 32, 112, 112] 0
30
- AdaptiveAvgPool2d-7 [-1, 32, 1, 1] 0
31
- Conv2d-8 [-1, 8, 1, 1] 264
32
- SiLU-9 [-1, 8, 1, 1] 0
33
- Conv2d-10 [-1, 32, 1, 1] 288
34
- Sigmoid-11 [-1, 32, 1, 1] 0
35
- SqueezeExcitation-12 [-1, 32, 112, 112] 0
36
- Conv2d-13 [-1, 16, 112, 112] 512
37
- BatchNorm2d-14 [-1, 16, 112, 112] 32
38
- MBConv-15 [-1, 16, 112, 112] 0
39
- Conv2d-16 [-1, 16, 112, 112] 144
40
- BatchNorm2d-17 [-1, 16, 112, 112] 32
41
- SiLU-18 [-1, 16, 112, 112] 0
42
- AdaptiveAvgPool2d-19 [-1, 16, 1, 1] 0
43
- Conv2d-20 [-1, 4, 1, 1] 68
44
- SiLU-21 [-1, 4, 1, 1] 0
45
- Conv2d-22 [-1, 16, 1, 1] 80
46
- Sigmoid-23 [-1, 16, 1, 1] 0
47
- SqueezeExcitation-24 [-1, 16, 112, 112] 0
48
- Conv2d-25 [-1, 16, 112, 112] 256
49
- BatchNorm2d-26 [-1, 16, 112, 112] 32
50
- StochasticDepth-27 [-1, 16, 112, 112] 0
51
- MBConv-28 [-1, 16, 112, 112] 0
52
- Conv2d-29 [-1, 96, 112, 112] 1,536
53
- BatchNorm2d-30 [-1, 96, 112, 112] 192
54
- SiLU-31 [-1, 96, 112, 112] 0
55
- Conv2d-32 [-1, 96, 56, 56] 864
56
- BatchNorm2d-33 [-1, 96, 56, 56] 192
57
- SiLU-34 [-1, 96, 56, 56] 0
58
- AdaptiveAvgPool2d-35 [-1, 96, 1, 1] 0
59
- Conv2d-36 [-1, 4, 1, 1] 388
60
- SiLU-37 [-1, 4, 1, 1] 0
61
- Conv2d-38 [-1, 96, 1, 1] 480
62
- Sigmoid-39 [-1, 96, 1, 1] 0
63
- SqueezeExcitation-40 [-1, 96, 56, 56] 0
64
- Conv2d-41 [-1, 24, 56, 56] 2,304
65
- BatchNorm2d-42 [-1, 24, 56, 56] 48
66
- MBConv-43 [-1, 24, 56, 56] 0
67
- Conv2d-44 [-1, 144, 56, 56] 3,456
68
- BatchNorm2d-45 [-1, 144, 56, 56] 288
69
- SiLU-46 [-1, 144, 56, 56] 0
70
- Conv2d-47 [-1, 144, 56, 56] 1,296
71
- BatchNorm2d-48 [-1, 144, 56, 56] 288
72
- SiLU-49 [-1, 144, 56, 56] 0
73
- AdaptiveAvgPool2d-50 [-1, 144, 1, 1] 0
74
- Conv2d-51 [-1, 6, 1, 1] 870
75
- SiLU-52 [-1, 6, 1, 1] 0
76
- Conv2d-53 [-1, 144, 1, 1] 1,008
77
- Sigmoid-54 [-1, 144, 1, 1] 0
78
- SqueezeExcitation-55 [-1, 144, 56, 56] 0
79
- Conv2d-56 [-1, 24, 56, 56] 3,456
80
- BatchNorm2d-57 [-1, 24, 56, 56] 48
81
- StochasticDepth-58 [-1, 24, 56, 56] 0
82
- MBConv-59 [-1, 24, 56, 56] 0
83
- Conv2d-60 [-1, 144, 56, 56] 3,456
84
- BatchNorm2d-61 [-1, 144, 56, 56] 288
85
- SiLU-62 [-1, 144, 56, 56] 0
86
- Conv2d-63 [-1, 144, 56, 56] 1,296
87
- BatchNorm2d-64 [-1, 144, 56, 56] 288
88
- SiLU-65 [-1, 144, 56, 56] 0
89
- AdaptiveAvgPool2d-66 [-1, 144, 1, 1] 0
90
- Conv2d-67 [-1, 6, 1, 1] 870
91
- SiLU-68 [-1, 6, 1, 1] 0
92
- Conv2d-69 [-1, 144, 1, 1] 1,008
93
- Sigmoid-70 [-1, 144, 1, 1] 0
94
- SqueezeExcitation-71 [-1, 144, 56, 56] 0
95
- Conv2d-72 [-1, 24, 56, 56] 3,456
96
- BatchNorm2d-73 [-1, 24, 56, 56] 48
97
- StochasticDepth-74 [-1, 24, 56, 56] 0
98
- MBConv-75 [-1, 24, 56, 56] 0
99
- Conv2d-76 [-1, 144, 56, 56] 3,456
100
- BatchNorm2d-77 [-1, 144, 56, 56] 288
101
- SiLU-78 [-1, 144, 56, 56] 0
102
- Conv2d-79 [-1, 144, 28, 28] 3,600
103
- BatchNorm2d-80 [-1, 144, 28, 28] 288
104
- SiLU-81 [-1, 144, 28, 28] 0
105
- AdaptiveAvgPool2d-82 [-1, 144, 1, 1] 0
106
- Conv2d-83 [-1, 6, 1, 1] 870
107
- SiLU-84 [-1, 6, 1, 1] 0
108
- Conv2d-85 [-1, 144, 1, 1] 1,008
109
- Sigmoid-86 [-1, 144, 1, 1] 0
110
- SqueezeExcitation-87 [-1, 144, 28, 28] 0
111
- Conv2d-88 [-1, 48, 28, 28] 6,912
112
- BatchNorm2d-89 [-1, 48, 28, 28] 96
113
- MBConv-90 [-1, 48, 28, 28] 0
114
- Conv2d-91 [-1, 288, 28, 28] 13,824
115
- BatchNorm2d-92 [-1, 288, 28, 28] 576
116
- SiLU-93 [-1, 288, 28, 28] 0
117
- Conv2d-94 [-1, 288, 28, 28] 7,200
118
- BatchNorm2d-95 [-1, 288, 28, 28] 576
119
- SiLU-96 [-1, 288, 28, 28] 0
120
- AdaptiveAvgPool2d-97 [-1, 288, 1, 1] 0
121
- Conv2d-98 [-1, 12, 1, 1] 3,468
122
- SiLU-99 [-1, 12, 1, 1] 0
123
- Conv2d-100 [-1, 288, 1, 1] 3,744
124
- Sigmoid-101 [-1, 288, 1, 1] 0
125
- SqueezeExcitation-102 [-1, 288, 28, 28] 0
126
- Conv2d-103 [-1, 48, 28, 28] 13,824
127
- BatchNorm2d-104 [-1, 48, 28, 28] 96
128
- StochasticDepth-105 [-1, 48, 28, 28] 0
129
- MBConv-106 [-1, 48, 28, 28] 0
130
- Conv2d-107 [-1, 288, 28, 28] 13,824
131
- BatchNorm2d-108 [-1, 288, 28, 28] 576
132
- SiLU-109 [-1, 288, 28, 28] 0
133
- Conv2d-110 [-1, 288, 28, 28] 7,200
134
- BatchNorm2d-111 [-1, 288, 28, 28] 576
135
- SiLU-112 [-1, 288, 28, 28] 0
136
- AdaptiveAvgPool2d-113 [-1, 288, 1, 1] 0
137
- Conv2d-114 [-1, 12, 1, 1] 3,468
138
- SiLU-115 [-1, 12, 1, 1] 0
139
- Conv2d-116 [-1, 288, 1, 1] 3,744
140
- Sigmoid-117 [-1, 288, 1, 1] 0
141
- SqueezeExcitation-118 [-1, 288, 28, 28] 0
142
- Conv2d-119 [-1, 48, 28, 28] 13,824
143
- BatchNorm2d-120 [-1, 48, 28, 28] 96
144
- StochasticDepth-121 [-1, 48, 28, 28] 0
145
- MBConv-122 [-1, 48, 28, 28] 0
146
- Conv2d-123 [-1, 288, 28, 28] 13,824
147
- BatchNorm2d-124 [-1, 288, 28, 28] 576
148
- SiLU-125 [-1, 288, 28, 28] 0
149
- Conv2d-126 [-1, 288, 14, 14] 2,592
150
- BatchNorm2d-127 [-1, 288, 14, 14] 576
151
- SiLU-128 [-1, 288, 14, 14] 0
152
- AdaptiveAvgPool2d-129 [-1, 288, 1, 1] 0
153
- Conv2d-130 [-1, 12, 1, 1] 3,468
154
- SiLU-131 [-1, 12, 1, 1] 0
155
- Conv2d-132 [-1, 288, 1, 1] 3,744
156
- Sigmoid-133 [-1, 288, 1, 1] 0
157
- SqueezeExcitation-134 [-1, 288, 14, 14] 0
158
- Conv2d-135 [-1, 88, 14, 14] 25,344
159
- BatchNorm2d-136 [-1, 88, 14, 14] 176
160
- MBConv-137 [-1, 88, 14, 14] 0
161
- Conv2d-138 [-1, 528, 14, 14] 46,464
162
- BatchNorm2d-139 [-1, 528, 14, 14] 1,056
163
- SiLU-140 [-1, 528, 14, 14] 0
164
- Conv2d-141 [-1, 528, 14, 14] 4,752
165
- BatchNorm2d-142 [-1, 528, 14, 14] 1,056
166
- SiLU-143 [-1, 528, 14, 14] 0
167
- AdaptiveAvgPool2d-144 [-1, 528, 1, 1] 0
168
- Conv2d-145 [-1, 22, 1, 1] 11,638
169
- SiLU-146 [-1, 22, 1, 1] 0
170
- Conv2d-147 [-1, 528, 1, 1] 12,144
171
- Sigmoid-148 [-1, 528, 1, 1] 0
172
- SqueezeExcitation-149 [-1, 528, 14, 14] 0
173
- Conv2d-150 [-1, 88, 14, 14] 46,464
174
- BatchNorm2d-151 [-1, 88, 14, 14] 176
175
- StochasticDepth-152 [-1, 88, 14, 14] 0
176
- MBConv-153 [-1, 88, 14, 14] 0
177
- Conv2d-154 [-1, 528, 14, 14] 46,464
178
- BatchNorm2d-155 [-1, 528, 14, 14] 1,056
179
- SiLU-156 [-1, 528, 14, 14] 0
180
- Conv2d-157 [-1, 528, 14, 14] 4,752
181
- BatchNorm2d-158 [-1, 528, 14, 14] 1,056
182
- SiLU-159 [-1, 528, 14, 14] 0
183
- AdaptiveAvgPool2d-160 [-1, 528, 1, 1] 0
184
- Conv2d-161 [-1, 22, 1, 1] 11,638
185
- SiLU-162 [-1, 22, 1, 1] 0
186
- Conv2d-163 [-1, 528, 1, 1] 12,144
187
- Sigmoid-164 [-1, 528, 1, 1] 0
188
- SqueezeExcitation-165 [-1, 528, 14, 14] 0
189
- Conv2d-166 [-1, 88, 14, 14] 46,464
190
- BatchNorm2d-167 [-1, 88, 14, 14] 176
191
- StochasticDepth-168 [-1, 88, 14, 14] 0
192
- MBConv-169 [-1, 88, 14, 14] 0
193
- Conv2d-170 [-1, 528, 14, 14] 46,464
194
- BatchNorm2d-171 [-1, 528, 14, 14] 1,056
195
- SiLU-172 [-1, 528, 14, 14] 0
196
- Conv2d-173 [-1, 528, 14, 14] 4,752
197
- BatchNorm2d-174 [-1, 528, 14, 14] 1,056
198
- SiLU-175 [-1, 528, 14, 14] 0
199
- AdaptiveAvgPool2d-176 [-1, 528, 1, 1] 0
200
- Conv2d-177 [-1, 22, 1, 1] 11,638
201
- SiLU-178 [-1, 22, 1, 1] 0
202
- Conv2d-179 [-1, 528, 1, 1] 12,144
203
- Sigmoid-180 [-1, 528, 1, 1] 0
204
- SqueezeExcitation-181 [-1, 528, 14, 14] 0
205
- Conv2d-182 [-1, 88, 14, 14] 46,464
206
- BatchNorm2d-183 [-1, 88, 14, 14] 176
207
- StochasticDepth-184 [-1, 88, 14, 14] 0
208
- MBConv-185 [-1, 88, 14, 14] 0
209
- Conv2d-186 [-1, 528, 14, 14] 46,464
210
- BatchNorm2d-187 [-1, 528, 14, 14] 1,056
211
- SiLU-188 [-1, 528, 14, 14] 0
212
- Conv2d-189 [-1, 528, 14, 14] 13,200
213
- BatchNorm2d-190 [-1, 528, 14, 14] 1,056
214
- SiLU-191 [-1, 528, 14, 14] 0
215
- AdaptiveAvgPool2d-192 [-1, 528, 1, 1] 0
216
- Conv2d-193 [-1, 22, 1, 1] 11,638
217
- SiLU-194 [-1, 22, 1, 1] 0
218
- Conv2d-195 [-1, 528, 1, 1] 12,144
219
- Sigmoid-196 [-1, 528, 1, 1] 0
220
- SqueezeExcitation-197 [-1, 528, 14, 14] 0
221
- Conv2d-198 [-1, 120, 14, 14] 63,360
222
- BatchNorm2d-199 [-1, 120, 14, 14] 240
223
- MBConv-200 [-1, 120, 14, 14] 0
224
- Conv2d-201 [-1, 720, 14, 14] 86,400
225
- BatchNorm2d-202 [-1, 720, 14, 14] 1,440
226
- SiLU-203 [-1, 720, 14, 14] 0
227
- Conv2d-204 [-1, 720, 14, 14] 18,000
228
- BatchNorm2d-205 [-1, 720, 14, 14] 1,440
229
- SiLU-206 [-1, 720, 14, 14] 0
230
- AdaptiveAvgPool2d-207 [-1, 720, 1, 1] 0
231
- Conv2d-208 [-1, 30, 1, 1] 21,630
232
- SiLU-209 [-1, 30, 1, 1] 0
233
- Conv2d-210 [-1, 720, 1, 1] 22,320
234
- Sigmoid-211 [-1, 720, 1, 1] 0
235
- SqueezeExcitation-212 [-1, 720, 14, 14] 0
236
- Conv2d-213 [-1, 120, 14, 14] 86,400
237
- BatchNorm2d-214 [-1, 120, 14, 14] 240
238
- StochasticDepth-215 [-1, 120, 14, 14] 0
239
- MBConv-216 [-1, 120, 14, 14] 0
240
- Conv2d-217 [-1, 720, 14, 14] 86,400
241
- BatchNorm2d-218 [-1, 720, 14, 14] 1,440
242
- SiLU-219 [-1, 720, 14, 14] 0
243
- Conv2d-220 [-1, 720, 14, 14] 18,000
244
- BatchNorm2d-221 [-1, 720, 14, 14] 1,440
245
- SiLU-222 [-1, 720, 14, 14] 0
246
- AdaptiveAvgPool2d-223 [-1, 720, 1, 1] 0
- Conv2d-224 [-1, 30, 1, 1] 21,630
- SiLU-225 [-1, 30, 1, 1] 0
- Conv2d-226 [-1, 720, 1, 1] 22,320
- Sigmoid-227 [-1, 720, 1, 1] 0
- SqueezeExcitation-228 [-1, 720, 14, 14] 0
- Conv2d-229 [-1, 120, 14, 14] 86,400
- BatchNorm2d-230 [-1, 120, 14, 14] 240
- StochasticDepth-231 [-1, 120, 14, 14] 0
- MBConv-232 [-1, 120, 14, 14] 0
- Conv2d-233 [-1, 720, 14, 14] 86,400
- BatchNorm2d-234 [-1, 720, 14, 14] 1,440
- SiLU-235 [-1, 720, 14, 14] 0
- Conv2d-236 [-1, 720, 14, 14] 18,000
- BatchNorm2d-237 [-1, 720, 14, 14] 1,440
- SiLU-238 [-1, 720, 14, 14] 0
- AdaptiveAvgPool2d-239 [-1, 720, 1, 1] 0
- Conv2d-240 [-1, 30, 1, 1] 21,630
- SiLU-241 [-1, 30, 1, 1] 0
- Conv2d-242 [-1, 720, 1, 1] 22,320
- Sigmoid-243 [-1, 720, 1, 1] 0
- SqueezeExcitation-244 [-1, 720, 14, 14] 0
- Conv2d-245 [-1, 120, 14, 14] 86,400
- BatchNorm2d-246 [-1, 120, 14, 14] 240
- StochasticDepth-247 [-1, 120, 14, 14] 0
- MBConv-248 [-1, 120, 14, 14] 0
- Conv2d-249 [-1, 720, 14, 14] 86,400
- BatchNorm2d-250 [-1, 720, 14, 14] 1,440
- SiLU-251 [-1, 720, 14, 14] 0
- Conv2d-252 [-1, 720, 7, 7] 18,000
- BatchNorm2d-253 [-1, 720, 7, 7] 1,440
- SiLU-254 [-1, 720, 7, 7] 0
- AdaptiveAvgPool2d-255 [-1, 720, 1, 1] 0
- Conv2d-256 [-1, 30, 1, 1] 21,630
- SiLU-257 [-1, 30, 1, 1] 0
- Conv2d-258 [-1, 720, 1, 1] 22,320
- Sigmoid-259 [-1, 720, 1, 1] 0
- SqueezeExcitation-260 [-1, 720, 7, 7] 0
- Conv2d-261 [-1, 208, 7, 7] 149,760
- BatchNorm2d-262 [-1, 208, 7, 7] 416
- MBConv-263 [-1, 208, 7, 7] 0
- Conv2d-264 [-1, 1248, 7, 7] 259,584
- BatchNorm2d-265 [-1, 1248, 7, 7] 2,496
- SiLU-266 [-1, 1248, 7, 7] 0
- Conv2d-267 [-1, 1248, 7, 7] 31,200
- BatchNorm2d-268 [-1, 1248, 7, 7] 2,496
- SiLU-269 [-1, 1248, 7, 7] 0
- AdaptiveAvgPool2d-270 [-1, 1248, 1, 1] 0
- Conv2d-271 [-1, 52, 1, 1] 64,948
- SiLU-272 [-1, 52, 1, 1] 0
- Conv2d-273 [-1, 1248, 1, 1] 66,144
- Sigmoid-274 [-1, 1248, 1, 1] 0
- SqueezeExcitation-275 [-1, 1248, 7, 7] 0
- Conv2d-276 [-1, 208, 7, 7] 259,584
- BatchNorm2d-277 [-1, 208, 7, 7] 416
- StochasticDepth-278 [-1, 208, 7, 7] 0
- MBConv-279 [-1, 208, 7, 7] 0
- Conv2d-280 [-1, 1248, 7, 7] 259,584
- BatchNorm2d-281 [-1, 1248, 7, 7] 2,496
- SiLU-282 [-1, 1248, 7, 7] 0
- Conv2d-283 [-1, 1248, 7, 7] 31,200
- BatchNorm2d-284 [-1, 1248, 7, 7] 2,496
- SiLU-285 [-1, 1248, 7, 7] 0
- AdaptiveAvgPool2d-286 [-1, 1248, 1, 1] 0
- Conv2d-287 [-1, 52, 1, 1] 64,948
- SiLU-288 [-1, 52, 1, 1] 0
- Conv2d-289 [-1, 1248, 1, 1] 66,144
- Sigmoid-290 [-1, 1248, 1, 1] 0
- SqueezeExcitation-291 [-1, 1248, 7, 7] 0
- Conv2d-292 [-1, 208, 7, 7] 259,584
- BatchNorm2d-293 [-1, 208, 7, 7] 416
- StochasticDepth-294 [-1, 208, 7, 7] 0
- MBConv-295 [-1, 208, 7, 7] 0
- Conv2d-296 [-1, 1248, 7, 7] 259,584
- BatchNorm2d-297 [-1, 1248, 7, 7] 2,496
- SiLU-298 [-1, 1248, 7, 7] 0
- Conv2d-299 [-1, 1248, 7, 7] 31,200
- BatchNorm2d-300 [-1, 1248, 7, 7] 2,496
- SiLU-301 [-1, 1248, 7, 7] 0
- AdaptiveAvgPool2d-302 [-1, 1248, 1, 1] 0
- Conv2d-303 [-1, 52, 1, 1] 64,948
- SiLU-304 [-1, 52, 1, 1] 0
- Conv2d-305 [-1, 1248, 1, 1] 66,144
- Sigmoid-306 [-1, 1248, 1, 1] 0
- SqueezeExcitation-307 [-1, 1248, 7, 7] 0
- Conv2d-308 [-1, 208, 7, 7] 259,584
- BatchNorm2d-309 [-1, 208, 7, 7] 416
- StochasticDepth-310 [-1, 208, 7, 7] 0
- MBConv-311 [-1, 208, 7, 7] 0
- Conv2d-312 [-1, 1248, 7, 7] 259,584
- BatchNorm2d-313 [-1, 1248, 7, 7] 2,496
- SiLU-314 [-1, 1248, 7, 7] 0
- Conv2d-315 [-1, 1248, 7, 7] 31,200
- BatchNorm2d-316 [-1, 1248, 7, 7] 2,496
- SiLU-317 [-1, 1248, 7, 7] 0
- AdaptiveAvgPool2d-318 [-1, 1248, 1, 1] 0
- Conv2d-319 [-1, 52, 1, 1] 64,948
- SiLU-320 [-1, 52, 1, 1] 0
- Conv2d-321 [-1, 1248, 1, 1] 66,144
- Sigmoid-322 [-1, 1248, 1, 1] 0
- SqueezeExcitation-323 [-1, 1248, 7, 7] 0
- Conv2d-324 [-1, 208, 7, 7] 259,584
- BatchNorm2d-325 [-1, 208, 7, 7] 416
- StochasticDepth-326 [-1, 208, 7, 7] 0
- MBConv-327 [-1, 208, 7, 7] 0
- Conv2d-328 [-1, 1248, 7, 7] 259,584
- BatchNorm2d-329 [-1, 1248, 7, 7] 2,496
- SiLU-330 [-1, 1248, 7, 7] 0
- Conv2d-331 [-1, 1248, 7, 7] 11,232
- BatchNorm2d-332 [-1, 1248, 7, 7] 2,496
- SiLU-333 [-1, 1248, 7, 7] 0
- AdaptiveAvgPool2d-334 [-1, 1248, 1, 1] 0
- Conv2d-335 [-1, 52, 1, 1] 64,948
- SiLU-336 [-1, 52, 1, 1] 0
- Conv2d-337 [-1, 1248, 1, 1] 66,144
- Sigmoid-338 [-1, 1248, 1, 1] 0
- SqueezeExcitation-339 [-1, 1248, 7, 7] 0
- Conv2d-340 [-1, 352, 7, 7] 439,296
- BatchNorm2d-341 [-1, 352, 7, 7] 704
- MBConv-342 [-1, 352, 7, 7] 0
- Conv2d-343 [-1, 2112, 7, 7] 743,424
- BatchNorm2d-344 [-1, 2112, 7, 7] 4,224
- SiLU-345 [-1, 2112, 7, 7] 0
- Conv2d-346 [-1, 2112, 7, 7] 19,008
- BatchNorm2d-347 [-1, 2112, 7, 7] 4,224
- SiLU-348 [-1, 2112, 7, 7] 0
- AdaptiveAvgPool2d-349 [-1, 2112, 1, 1] 0
- Conv2d-350 [-1, 88, 1, 1] 185,944
- SiLU-351 [-1, 88, 1, 1] 0
- Conv2d-352 [-1, 2112, 1, 1] 187,968
- Sigmoid-353 [-1, 2112, 1, 1] 0
- SqueezeExcitation-354 [-1, 2112, 7, 7] 0
- Conv2d-355 [-1, 352, 7, 7] 743,424
- BatchNorm2d-356 [-1, 352, 7, 7] 704
- StochasticDepth-357 [-1, 352, 7, 7] 0
- MBConv-358 [-1, 352, 7, 7] 0
- Conv2d-359 [-1, 1408, 7, 7] 495,616
- BatchNorm2d-360 [-1, 1408, 7, 7] 2,816
- SiLU-361 [-1, 1408, 7, 7] 0
- AdaptiveAvgPool2d-362 [-1, 1408, 1, 1] 0
- Dropout-363 [-1, 1408] 0
- Linear-364 [-1, 10] 14,090
- EfficientNet-365 [-1, 10] 0
- ================================================================
- Total params: 7,715,084
- Trainable params: 14,090
- Non-trainable params: 7,700,994
- ----------------------------------------------------------------
- Input size (MB): 0.57
- Forward/backward pass size (MB): 257.42
- Params size (MB): 29.43
- Estimated Total Size (MB): 287.43
- ----------------------------------------------------------------
- </pre>
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
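The layer classes in the summary above (MBConv, SqueezeExcitation, StochasticDepth), the 1408-dimensional pooled feature and the 14,090 trainable parameters are consistent with a torchvision EfficientNet-B2 whose pretrained backbone is frozen and whose 1000-way classifier is replaced by a 10-way linear head (1408 * 10 + 10 = 14,090). A minimal sketch, assuming that setup rather than the Space's actual code, which would print a table of this shape:

import torch.nn as nn
from torchvision import models      # torchvision >= 0.13 for the *_Weights enums
from torchsummary import summary    # prints per-layer tables in the format shown above

# Load an ImageNet-pretrained EfficientNet-B2 and freeze every pretrained weight.
model = models.efficientnet_b2(weights=models.EfficientNet_B2_Weights.DEFAULT)
for p in model.parameters():
    p.requires_grad = False

# New 10-way head; only its 1408 * 10 + 10 = 14,090 parameters stay trainable,
# matching the "Trainable params" line in the summary.
model.classifier[1] = nn.Linear(model.classifier[1].in_features, 10)

summary(model, (3, 224, 224), device="cpu")  # adjust/drop device= depending on your torchsummary version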
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/example/主动复读.js DELETED
@@ -1,37 +0,0 @@
- import plugin from '../../lib/plugins/plugin.js'
-
- export class example2 extends plugin {
-   constructor () {
-     super({
-       name: '复读',
-       dsc: '复读用户发送的内容,然后撤回',
-       /** https://oicqjs.github.io/oicq/#events */
-       event: 'message',
-       priority: 5000,
-       rule: [
-         {
-           /** Command regex to match */
-           reg: '^#复读$',
-           /** Handler method to execute */
-           fnc: 'repeat'
-         }
-       ]
-     })
-   }
-
-   /** Repeat */
-   async repeat () {
-     /** Set the context; the next message received will be handled by doRep */
-     this.setContext('doRep')
-     /** Reply */
-     await this.reply('请发送要复读的内容', false, { at: true })
-   }
-
-   /** Receive the content */
-   doRep () {
-     /** Repeat the content */
-     this.reply(this.e.message, false, { recallMsg: 5 })
-     /** End the context */
-     this.finish('doRep')
-   }
- }
 
spaces/ClassCat/wide-resnet-cifar10-classification/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Wide Resnet Cifar10 Classification
- emoji: 📈
- colorFrom: blue
- colorTo: purple
- sdk: gradio
- sdk_version: 3.16.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/sigmoid_focal_loss.py DELETED
@@ -1,76 +0,0 @@
- import torch
- from torch import nn
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
-
- from maskrcnn_benchmark import _C
-
- # TODO: Use JIT to replace CUDA implementation in the future.
- class _SigmoidFocalLoss(Function):
-     @staticmethod
-     def forward(ctx, logits, targets, gamma, alpha):
-         ctx.save_for_backward(logits, targets)
-         num_classes = logits.shape[1]
-         ctx.num_classes = num_classes
-         ctx.gamma = gamma
-         ctx.alpha = alpha
-
-         losses = _C.sigmoid_focalloss_forward(
-             logits, targets, num_classes, gamma, alpha
-         )
-         return losses
-
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, d_loss):
-         logits, targets = ctx.saved_tensors
-         num_classes = ctx.num_classes
-         gamma = ctx.gamma
-         alpha = ctx.alpha
-         d_loss = d_loss.contiguous()
-         d_logits = _C.sigmoid_focalloss_backward(
-             logits, targets, d_loss, num_classes, gamma, alpha
-         )
-         return d_logits, None, None, None, None
-
-
- sigmoid_focal_loss_cuda = _SigmoidFocalLoss.apply
-
-
- def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
-     num_classes = logits.shape[1]
-     gamma = gamma[0]
-     alpha = alpha[0]
-     dtype = targets.dtype
-     device = targets.device
-     class_range = torch.arange(1, num_classes+1, dtype=dtype, device=device).unsqueeze(0)
-
-     t = targets.unsqueeze(1)
-     p = torch.sigmoid(logits)
-     term1 = (1 - p) ** gamma * torch.log(p)
-     term2 = p ** gamma * torch.log(1 - p)
-     return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha)
-
-
- class SigmoidFocalLoss(nn.Module):
-     def __init__(self, gamma, alpha):
-         super(SigmoidFocalLoss, self).__init__()
-         self.gamma = gamma
-         self.alpha = alpha
-
-     def forward(self, logits, targets):
-         device = logits.device
-         if logits.is_cuda:
-             loss_func = sigmoid_focal_loss_cuda
-         else:
-             loss_func = sigmoid_focal_loss_cpu
-
-         loss = loss_func(logits, targets, self.gamma, self.alpha)
-         return loss.sum()
-
-     def __repr__(self):
-         tmpstr = self.__class__.__name__ + "("
-         tmpstr += "gamma=" + str(self.gamma)
-         tmpstr += ", alpha=" + str(self.alpha)
-         tmpstr += ")"
-         return tmpstr
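The CPU branch above spells out the focal-loss formula directly. The following standalone sketch reproduces the same math without the compiled `_C` extension; the target convention (0 = background, 1..C = index of the positive class, negative targets ignored) is taken from the code above, while gamma=2.0 and alpha=0.25 are just the usual RetinaNet-style defaults rather than values read from this repository's configs:

import torch

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # Same math as sigmoid_focal_loss_cpu: per-class sigmoid focal loss with
    # integer targets (0 = background, 1..C = positive class, < 0 = ignore).
    num_classes = logits.shape[1]
    class_range = torch.arange(1, num_classes + 1, device=targets.device).unsqueeze(0)
    t = targets.unsqueeze(1)
    p = torch.sigmoid(logits)
    pos = (t == class_range).float()               # one-hot positive mask
    neg = ((t != class_range) & (t >= 0)).float()  # negatives, ignoring t < 0
    loss = (-pos * (1 - p) ** gamma * torch.log(p) * alpha
            - neg * p ** gamma * torch.log(1 - p) * (1 - alpha))
    return loss.sum()

logits = torch.randn(8, 80)            # e.g. 8 anchors, 80 classes
targets = torch.randint(0, 81, (8,))   # 0 = background, 1..80 = class labels
print(sigmoid_focal_loss(logits, targets))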
 
spaces/DataDreamweavers/LegaWeaver/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: LegaWeaver
- emoji: 🌍
- colorFrom: blue
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.25.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/linear_separability.py DELETED
@@ -1,177 +0,0 @@
- # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- #
- # This work is licensed under the Creative Commons Attribution-NonCommercial
- # 4.0 International License. To view a copy of this license, visit
- # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
- # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
-
- """Linear Separability (LS)."""
-
- from collections import defaultdict
- import numpy as np
- import sklearn.svm
- import tensorflow as tf
- import dnnlib.tflib as tflib
-
- from metrics import metric_base
- from training import misc
-
- #----------------------------------------------------------------------------
-
- classifier_urls = [
-     'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl
-     'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl
-     'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl
-     'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl
-     'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl
-     'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl
-     'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl
-     'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl
-     'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl
-     'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl
-     'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl
-     'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl
-     'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl
-     'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl
-     'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl
-     'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl
-     'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl
-     'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl
-     'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl
-     'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl
-     'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl
-     'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl
-     'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl
-     'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl
-     'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl
-     'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl
-     'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl
-     'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl
-     'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl
-     'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl
-     'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl
-     'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl
-     'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl
-     'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl
-     'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl
-     'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl
-     'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl
-     'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl
-     'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl
-     'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl
- ]
-
- #----------------------------------------------------------------------------
-
- def prob_normalize(p):
-     p = np.asarray(p).astype(np.float32)
-     assert len(p.shape) == 2
-     return p / np.sum(p)
-
- def mutual_information(p):
-     p = prob_normalize(p)
-     px = np.sum(p, axis=1)
-     py = np.sum(p, axis=0)
-     result = 0.0
-     for x in range(p.shape[0]):
-         p_x = px[x]
-         for y in range(p.shape[1]):
-             p_xy = p[x][y]
-             p_y = py[y]
-             if p_xy > 0.0:
-                 result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output
-     return result
-
- def entropy(p):
-     p = prob_normalize(p)
-     result = 0.0
-     for x in range(p.shape[0]):
-         for y in range(p.shape[1]):
-             p_xy = p[x][y]
-             if p_xy > 0.0:
-                 result -= p_xy * np.log2(p_xy)
-     return result
-
- def conditional_entropy(p):
-     # H(Y|X) where X corresponds to axis 0, Y to axis 1
-     # i.e., How many bits of additional information are needed to where we are on axis 1 if we know where we are on axis 0?
-     p = prob_normalize(p)
-     y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y)
-     return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up.
-
- #----------------------------------------------------------------------------
-
- class LS(metric_base.MetricBase):
-     def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs):
-         assert num_keep <= num_samples
-         super().__init__(**kwargs)
-         self.num_samples = num_samples
-         self.num_keep = num_keep
-         self.attrib_indices = attrib_indices
-         self.minibatch_per_gpu = minibatch_per_gpu
-
-     def _evaluate(self, Gs, num_gpus):
-         minibatch_size = num_gpus * self.minibatch_per_gpu
-
-         # Construct TensorFlow graph for each GPU.
-         result_expr = []
-         for gpu_idx in range(num_gpus):
-             with tf.device('/gpu:%d' % gpu_idx):
-                 Gs_clone = Gs.clone()
-
-                 # Generate images.
-                 latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
-                 dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True)
-                 images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True)
-
-                 # Downsample to 256x256. The attribute classifiers were built for 256x256.
-                 if images.shape[2] > 256:
-                     factor = images.shape[2] // 256
-                     images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
-                     images = tf.reduce_mean(images, axis=[3, 5])
-
-                 # Run classifier for each attribute.
-                 result_dict = dict(latents=latents, dlatents=dlatents[:,-1])
-                 for attrib_idx in self.attrib_indices:
-                     classifier = misc.load_pkl(classifier_urls[attrib_idx])
-                     logits = classifier.get_output_for(images, None)
-                     predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1))
-                     result_dict[attrib_idx] = predictions
-                 result_expr.append(result_dict)
-
-         # Sampling loop.
-         results = []
-         for _ in range(0, self.num_samples, minibatch_size):
-             results += tflib.run(result_expr)
-         results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()}
-
-         # Calculate conditional entropy for each attribute.
-         conditional_entropies = defaultdict(list)
-         for attrib_idx in self.attrib_indices:
-             # Prune the least confident samples.
-             pruned_indices = list(range(self.num_samples))
-             pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
-             pruned_indices = pruned_indices[:self.num_keep]
-
-             # Fit SVM to the remaining samples.
-             svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1)
-             for space in ['latents', 'dlatents']:
-                 svm_inputs = results[space][pruned_indices]
-                 try:
-                     svm = sklearn.svm.LinearSVC()
-                     svm.fit(svm_inputs, svm_targets)
-                     svm.score(svm_inputs, svm_targets)
-                     svm_outputs = svm.predict(svm_inputs)
-                 except:
-                     svm_outputs = svm_targets # assume perfect prediction
-
-                 # Calculate conditional entropy.
-                 p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)]
-                 conditional_entropies[space].append(conditional_entropy(p))
-
-         # Calculate separability scores.
-         scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()}
-         self._report_result(scores['latents'], suffix='_z')
-         self._report_result(scores['dlatents'], suffix='_w')
-
- #----------------------------------------------------------------------------
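For reference, the score this metric reports is 2 raised to the sum, over attributes, of H(Y|X): the conditional entropy in bits of the attribute classifier's label Y given the linear SVM's prediction X, computed from a 2x2 joint table exactly as in conditional_entropy() above. A small standalone numpy illustration with made-up counts (not part of the original file):

import numpy as np

def conditional_entropy(p):
    # Same bookkeeping as above: H(Y|X) = H(Y) - I(X; Y), clipped at 0.
    p = np.asarray(p, dtype=np.float32)
    p = p / p.sum()
    px = p.sum(axis=1, keepdims=True)   # marginal over X (SVM prediction, rows)
    py = p.sum(axis=0, keepdims=True)   # marginal over Y (classifier label, columns)
    nz = p > 0
    mi = np.sum(p[nz] * np.log2(p[nz] / (px * py)[nz]))
    hy = -np.sum(py[py > 0] * np.log2(py[py > 0]))
    return max(0.0, float(hy - mi))

# Hypothetical agreement table between the SVM (rows) and the attribute classifier (columns).
joint = [[49, 1],
         [2, 48]]
h = conditional_entropy(joint)
print(h)       # roughly 0.19 bits: the SVM predicts the classifier's label almost perfectly
print(2 ** h)  # how this single attribute would enter the exponentiated LS score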