parquet-converter committed on
Commit 788365e · 1 Parent(s): 445dc2a

Update parquet files (step 65 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargarsigmakeyfullcrackmega Los beneficios de usar SigmaKey la herramienta segura y confiable para el servicio de MTK.md +0 -171
  2. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become a Deadly Assassin in Sniper Killer 3D The Best Offline Sniper Game.md +0 -103
  3. spaces/1phancelerku/anime-remove-background/FIFA Chino APK disfruta de la emocin del ftbol con grficos increbles.md +0 -154
  4. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_unclip.py +0 -303
  5. spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/networks_basic.py +0 -187
  6. spaces/A00001/bingothoo/src/components/chat-attachments.tsx +0 -37
  7. spaces/AIConsultant/MusicGen/audiocraft/metrics/rvm.py +0 -106
  8. spaces/AIFILMS/StyleGANEX/models/stylegan2/op/readme.md +0 -12
  9. spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/models.py +0 -951
  10. spaces/AIGText/GlyphControl/ldm/models/diffusion/ddpm.py +0 -1954
  11. spaces/AINLPRoundTable/README/README.md +0 -54
  12. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnetv1c50.py +0 -17
  13. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/diacritize.py +0 -36
  14. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/attention.py +0 -199
  15. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/constants/publicSepToken.ts +0 -1
  16. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/sha256.ts +0 -7
  17. spaces/AchyuthGamer/OpenGPT-Chat/app.py +0 -97
  18. spaces/AchyuthGamer/OpenGPT/g4f/Provider/GptGod.py +0 -51
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasdata.d.ts +0 -10
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.d.ts +0 -6
  21. spaces/AiBototicus/BucksAI-3/app.py +0 -3
  22. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/shm.cpp +0 -103
  23. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GenerateImg.py +0 -50
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/consistency_models.md +0 -43
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +0 -598
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/score_sde_ve/__init__.py +0 -1
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines_onnx_common.py +0 -12
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +0 -87
  29. spaces/Andy0409/text_generator/README.md +0 -12
  30. spaces/Andy1621/uniformer_image_detection/configs/wider_face/README.md +0 -43
  31. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py +0 -55
  32. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py +0 -10
  33. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/registry.py +0 -8
  34. spaces/Anthony7906/MengHuiMXD_GPT/modules/__init__.py +0 -0
  35. spaces/AriusXi/CodeGenerator/README.md +0 -12
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/freeze.py +0 -255
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/themes.py +0 -5
  38. spaces/Awiny/Image2Paragraph/models/grit_src/grit/evaluation/eval.py +0 -156
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/endpoint_provider.py +0 -727
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py +0 -0
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/_collections.py +0 -337
  42. spaces/Boynn/AI/README.md +0 -13
  43. spaces/BridgeTower/bridgetower-video-search/bridgetower_custom.py +0 -183
  44. spaces/CALM/Dashboard/streamlit_observable/frontend/build/service-worker.js +0 -39
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/evaluation.md +0 -43
  46. spaces/CVPR/LIVE/thrust/thrust/sequence.h +0 -296
  47. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/transform_reduce.h +0 -44
  48. spaces/CVPR/WALT/README.md +0 -13
  49. spaces/CVPR/WALT/mmdet/models/backbones/__init__.py +0 -3
  50. spaces/CVPR/WALT/mmdet/models/backbones/swin_transformer.py +0 -630
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargarsigmakeyfullcrackmega Los beneficios de usar SigmaKey la herramienta segura y confiable para el servicio de MTK.md DELETED
@@ -1,171 +0,0 @@
1
-
2
- <h1>Descargar SigmaKey Full Crack Mega: A Complete Guide</h1>
3
- <p>If you are looking for a professional and powerful tool to flash, unlock, and repair your mobile devices, you might have heard of SigmaKey. SigmaKey is a software that works with a dongle and allows you to service various types of cell phones, especially Huawei, MTK, Qualcomm, HiSilicon, and Spreadtrum devices. In this article, we will show you how to download SigmaKey full crack mega, a cracked version of the software that does not require a dongle or activation. We will also explain how to use SigmaKey full crack mega to perform different operations on your devices.</p>
4
- <h2>What is SigmaKey?</h2>
5
- <p>SigmaKey is a software that was developed by GSM Server Team, a group of experts in mobile unlocking and flashing. SigmaKey works with a hardware dongle that connects to your PC via USB port and provides security and authentication for the software. SigmaKey allows you to perform various operations on your mobile devices, such as:</p>
6
- <h2>descargarsigmakeyfullcrackmega</h2><br /><p><b><b>Download File</b> &#128504;&#128504;&#128504; <a href="https://byltly.com/2uKvm4">https://byltly.com/2uKvm4</a></b></p><br /><br />
7
- <ul>
8
- <li>Direct unlock</li>
9
- <li>Read unlock codes</li>
10
- <li>Repair IMEI</li>
11
- <li>Remove FRP</li>
12
- <li>Remove Huawei ID</li>
13
- <li>Unlock bootloader</li>
14
- <li>Flash firmware</li>
15
- <li>Backup and restore</li>
16
- <li>Root and unroot</li>
17
- <li>And more</li>
18
- </ul>
19
- <h3>Features and benefits of SigmaKey</h3>
20
- <p>SigmaKey has many features and benefits that make it one of the best tools for mobile servicing. Some of them are:</p>
21
- <ul>
22
- <li>It supports a wide range of devices from different brands and models.</li>
23
- <li>It supports various chipsets, such as MTK, Qualcomm, HiSilicon, Spreadtrum, etc.</li>
24
- <li>It has a user-friendly interface that is easy to navigate and operate.</li>
25
- <li>It has a fast and reliable performance that saves time and resources.</li>
26
- <li>It has a lifetime license that does not require annual payments.</li>
27
- <li>It has regular updates that add new features and support new devices.</li>
28
- <li>It has a customer support team that provides assistance and guidance.</li>
29
- </ul>
30
- <h3>Supported devices and platforms</h3>
31
- <p>SigmaKey supports thousands of devices from various brands, such as Huawei, Motorola, ZTE, Lenovo, Alcatel, Sony, LG, Samsung, Xiaomi, Oppo, Vivo, etc. You can check the full list of supported devices on the official website of SigmaKey. SigmaKey also supports Windows OS versions such as Win XP/Vista/7/Server 2008 for both 32-bit and 64-bit architecture.</p>
32
- <h2>How to download SigmaKey full crack mega?</h2>
33
- <p>If you want to use SigmaKey without buying a dongle or activating it online, you can download SigmaKey full crack mega. This is a cracked version of the software that bypasses the security and authentication of the dongle. However, you should be aware that downloading and using SigmaKey full crack mega is illegal and risky. You might face some problems such as:</p>
34
- <ul>
35
- <li>Virus or malware infection on your PC or device.</li>
36
- <li>Data loss or corruption on your PC or device.</li>
37
- <li>Dongle detection or blocking by the software.</li>
38
- <li>Lack of updates or support from the developers.</li>
39
- <li>Lawsuit or penalty from the developers or authorities.</li>
40
- </ul>
41
- <p>If you still want to download SigmaKey full crack mega at your own risk, you should follow these steps:</p>
42
- <p>descargar sigmakey full crack mega gratis<br />
43
- descargar sigmakey full crack mega 2021<br />
44
- descargar sigmakey full crack mega sin box<br />
45
- descargar sigmakey full crack mega huawei<br />
46
- descargar sigmakey full crack mega android<br />
47
- descargar sigmakey full crack mega windows 10<br />
48
- descargar sigmakey full crack mega ultima version<br />
49
- descargar sigmakey full crack mega para pc<br />
50
- descargar sigmakey full crack mega sin dongle<br />
51
- descargar sigmakey full crack mega mediafire<br />
52
- descargar sigmakey full crack mega 64 bits<br />
53
- descargar sigmakey full crack mega 32 bits<br />
54
- descargar sigmakey full crack mega sin virus<br />
55
- descargar sigmakey full crack mega mtk<br />
56
- descargar sigmakey full crack mega qualcomm<br />
57
- descargar sigmakey full crack mega español<br />
58
- descargar sigmakey full crack mega portable<br />
59
- descargar sigmakey full crack mega tutorial<br />
60
- descargar sigmakey full crack mega link directo<br />
61
- descargar sigmakey full crack mega reparar imei<br />
62
- descargar sigmakey full crack mega frp<br />
63
- descargar sigmakey full crack mega bootloader<br />
64
- descargar sigmakey full crack mega firmware<br />
65
- descargar sigmakey full crack mega update.app<br />
66
- descargar sigmakey full crack mega kirin<br />
67
- descargar sigmakey full crack mega hisilicon<br />
68
- descargar sigmakey full crack mega spreadtrum<br />
69
- descargar sigmakey full crack mega mediatek<br />
70
- descargar sigmakey full crack mega alcatel<br />
71
- descargar sigmakey full crack mega motorola<br />
72
- descargar sigmakey full crack mega lg<br />
73
- descargar sigmakey full crack mega zte<br />
74
- descargar sigmakey full crack mega lenovo<br />
75
- descargar sigmakey full crack mega sony<br />
76
- descargar sigmakey full crack mega vtelca<br />
77
- descargar sigmakey full crack mega lanix<br />
78
- descargar sigmakey full crack mega blu<br />
79
- descargar sigmakey full crack mega azumi<br />
80
- descargar sigmakey full crack mega verykool<br />
81
- descargar sigmakey full crack mega avvio<br />
82
- descargar sigmakey full crack mega bitel<br />
83
- descargar sigmakey full crack mega bmobile<br />
84
- descargar sigakeyfullcrackmega.exe (not recommended)</p>
85
- <h3>Requirements and precautions</h3>
86
- <ul>
87
- <li>A PC with Windows OS installed.</li>
88
- <li>A USB cable to connect your device to your PC.</li>
89
- <li>A backup of your device data in case of any damage or loss.</li>
90
- <li>A reliable internet connection to download the files.</li>
91
- <li>A antivirus software to scan the files for any virus or malware.</li>
92
- <li>A disablement of any firewall or antivirus software that might interfere with the installation process.</li>
93
- </ul>
94
- <h3>Steps to download and install SigmaKey full crack mega</h3>
95
- <ol>
96
- <li>Go to this link <a href="https://www.getdroidtips.com/download-sigmakey-huawei-crack/" target="_blank">https://www.getdroidtips.com/download-sigmakey-huawei-crack/</a> and click on the download button at the bottom of the page.</li>
97
- <li>You will be redirected to another page where you have to complete some surveys or offers to get the download link. Follow the instructions on the screen and complete the tasks.</li>
98
- <li>Once you get the download link, click on it and save the file on your PC. The file name is Sigmakey_Huawei_Edition_Crack_Version_2.40.02.zip and it has a size of about 100 MB.</li>
99
- <li>Extract the zip file using WinRAR or any other extraction tool. You will get a folder named Sigmakey_Huawei_Edition_Crack_Version_2.40.02 with several files inside it.</li>
100
- <li>Open the folder and run the file named Setup.exe as administrator. Follow the installation wizard and accept the terms and conditions. Choose a destination folder for the software and click on install.</li>
101
- <li>Wait for the installation process to finish. Do not disconnect your device or close the program during this process.</li>
102
- <li>After the installation is done, do not run the software yet. Go back to the folder where you extracted the zip file and open another folder named Loader_Sigma_Key_Huawei_Edition_Crack_Version_2.40.02.</li>
103
- <li>In this folder, you will find two files named Loader.exe and Patch.exe. Copy both files and paste them into the destination folder where you installed the software. Replace any existing files if prompted.</li>
104
- <li>Now run the file named Loader.exe as administrator. This will launch the software with full crack features enabled.</li>
105
- </ol>
106
- <h3>Troubleshooting tips</h3>
107
- <p>If you encounter any problems while downloading or installing SigmaKey full crack mega, you can try these tips:</p>
108
- <ul>
109
- <li>Make sure you have enough space on your PC hard drive for the files.</li>
110
- <li>Make sure you have a stable internet connection while downloading or installing the files.</li>
111
- <li>Make sure you disable any firewall or antivirus software that might block or delete the files.</li>
112
- <li>Make sure you scan the files for any virus or malware before opening them.</li>
113
- <li>Make sure you run the files as administrator and follow the instructions carefully.</li>
114
- <li>If you get an error message saying "Dongle not found" or "Dongle not connected", try changing your USB port or cable.</li>
115
- </ul>
116
- <h2>How to use SigmaKey full crack mega?</h2>
117
- <p>Once you have successfully downloaded and installed SigmaKey full crack mega, you can start using it to service your mobile devices. Here are some examples of how to use SigmaKey full crack mega for different operations:</p>
118
- <h3>Unlocking Huawei devices with SigmaKey</h3>
119
- <ol>
120
- <li>Connect your Huawei device to your PC via USB cable in fastboot mode. To enter fastboot mode, power off your device and press volume down + power buttons simultaneously until you see a fastboot logo on your screen.</li>
121
- <li>Launch SigmaKey full crack mega on your PC and select Huawei tab from the top menu bar.</li>
122
- <li>Select ADB Interface from Port Selection drop-down menu on top left corner of the screen.</li>
123
- <li>Select Fastboot Mode from Service Mode drop-down menu on top right corner of screen.</li>
124
- <li>Select Unlock Bootloader option from Service Operations section on bottom left corner of screen.</li>
125
- <li>The software will read your device information and generate an unlock code for your bootloader. Write down this code somewhere safe as you will need it later.</li>
126
- to enter the unlock code on your device. Follow the instructions on your device screen and enter the unlock code when prompted.</li>
127
- <li>Your device bootloader will be unlocked and your device will reboot automatically. You can disconnect your device from your PC.</li>
128
- </ol>
129
- <h3>Flashing and repairing MTK cell phones with SigmaKey</h3>
130
- <ol>
131
- <li>Connect your MTK device to your PC via USB cable in flash mode. To enter flash mode, power off your device and press volume up + power buttons simultaneously until you see a flash logo on your screen.</li>
132
- <li>Launch SigmaKey full crack mega on your PC and select MTK tab from the top menu bar.</li>
133
- <li>Select USB Mode from Port Selection drop-down menu on top left corner of the screen.</li>
134
- <li>Select Flash Mode from Service Mode drop-down menu on top right corner of screen.</li>
135
- <li>Select Flash Firmware option from Service Operations section on bottom left corner of screen.</li>
136
- <li>The software will ask you to select a firmware file for your device. You can download firmware files from various online sources or use the ones provided by SigmaKey. Click on Browse button and locate the firmware file on your PC.</li>
137
- <li>The software will verify the firmware file and show you some information about it. Make sure the firmware file matches your device model and version. Click on Write Firmware button to start flashing process.</li>
138
- <li>The software will flash the firmware file to your device and show you a progress bar. Do not disconnect your device or close the program during this process.</li>
139
- <li>After the flashing process is done, the software will show you a success message and your device will reboot automatically. You can disconnect your device from your PC.</li>
140
- </ol>
141
- <h3>Other operations with SigmaKey</h3>
142
- <p>SigmaKey full crack mega can also perform other operations on your devices, such as:</p>
143
- <ul>
144
- <li>Read and write IMEI</li>
145
- <li>Remove FRP lock</li>
146
- <li>Remove Huawei ID</li>
147
- <li>Backup and restore data</li>
148
- <li>Root and unroot devices</li>
149
- <li>And more</li>
150
- </ul>
151
- <p>To perform these operations, you need to select the appropriate tab, port, mode, and option from the software interface. You can also refer to the user manual or customer guide for more details and instructions.</p>
152
- <h2>Conclusion</h2>
153
- <p>In this article, we have shown you how to download SigmaKey full crack mega, a cracked version of the software that allows you to flash, unlock, and repair your mobile devices without a dongle or activation. We have also explained how to use SigmaKey full crack mega for different operations on Huawei and MTK devices. However, we have also warned you about the risks and consequences of using SigmaKey full crack mega, as it is illegal and unsafe. We recommend you to use the original SigmaKey software with a dongle and activation for a better and safer experience.</p>
154
- <h3>Summary of the article</h3>
155
- <p>SigmaKey is a professional and powerful tool for mobile servicing that works with a dongle and activation. SigmaKey full crack mega is a cracked version of the software that does not require a dongle or activation. SigmaKey full crack mega allows you to perform various operations on your devices, such as unlocking, flashing, repairing, etc. However, SigmaKey full crack mega is illegal and risky to use, as it might cause virus infection, data loss, dongle detection, lack of updates, lawsuit, etc. Therefore, it is better to use the original SigmaKey software with a dongle and activation for a safer and better experience.</p>
156
- <h3>FAQs</h3>
157
- <ol>
158
- <li>What is SigmaKey?</li>
159
- <p>SigmaKey is a software that works with a dongle and allows you to service various types of cell phones, especially Huawei, MTK, Qualcomm, HiSilicon, and Spreadtrum devices.</p>
160
- <li>What is SigmaKey full crack mega?</li>
161
- <p>SigmaKey full crack mega is a cracked version of the software that does not require a dongle or activation. It bypasses the security and authentication of the dongle.</p>
162
- <li>How to download SigmaKey full crack mega?</li>
163
- <p>You can download SigmaKey full crack mega from this link <a href="https://www.getdroidtips.com/download-sigmakey-huawei-crack/" target="_blank">https://www.getdroidtips.com/download-sigmakey-huawei-crack/</a>. You have to complete some surveys or offers to get the download link. Then you have to install the software and copy the loader and patch files into the installation folder.</p>
164
- <li>How to use SigmaKey full crack mega?</li>
165
- <p>You can use SigmaKey full crack mega to perform various operations on your devices, such as unlocking, flashing, repairing, etc. You have to select the appropriate tab, port, mode, and option from the software interface. You can also refer to the user manual or customer guide for more details and instructions.</p>
166
- <li>What are the risks of using SigmaKey full crack mega?</li>
167
- <p>Using SigmaKey full crack mega is illegal and risky. You might face some problems such as virus infection, data loss, dongle detection, lack of updates, lawsuit, etc. Therefore, it is better to use the original SigmaKey software with a dongle and activation for a safer and better experience.</p>
168
- </ol>
169
- </p> 0a6ba089eb<br />
170
- <br />
171
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become a Deadly Assassin in Sniper Killer 3D The Best Offline Sniper Game.md DELETED
@@ -1,103 +0,0 @@
1
-
2
- <h1>Sniper Killer 3D: The Ultimate Shooting Game</h1>
3
- <p>If you are looking for a shooting game that will test your skills as a sniper, look no further than Sniper Killer 3D. This game is the ultimate sniper adventure that will immerse you in high-intensity missions and action-packed scenarios. Whether you want to play offline or online, Sniper Killer 3D has something for everyone. Here is everything you need to know about this amazing game.</p>
4
- <h2>sniper killer 3d</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://urlin.us/2uSUWO">https://urlin.us/2uSUWO</a></b></p><br /><br />
5
- <h2>What is Sniper Killer 3D?</h2>
6
- <p>Sniper Killer 3D is a shooting game where you play as a sniper who must eliminate high-profile targets and criminals. You will travel to different locations around the world, taking on various challenges and objectives. You will also have access to a huge arsenal of sniper rifles, assault rifles, and other guns that you can upgrade and customize. Sniper Killer 3D is a game that combines realism, variety, and fun in one package.</p>
7
- <h3>A thrilling and realistic sniper game</h3>
8
- <p>One of the best features of Sniper Killer 3D is its realistic physics and ballistics. You will have to take into account factors such as wind, distance, gravity, and movement when aiming and shooting your target. You will also have to deal with different weather conditions, such as rain, fog, snow, and night. You will feel like a real sniper as you pull the trigger and watch your bullet hit the mark.</p>
9
- <h3>A variety of weapons and missions</h3>
10
- <p>Sniper Killer 3D offers you more than 180 authentic weapons to choose from. You can unlock different sniper rifles, each with its own characteristics and advantages. You can also upgrade your weapons with scopes, silencers, magazines, and other attachments. You will need to use the right weapon for the right mission, as some targets may require more power, accuracy, or stealth than others.</p>
11
- <p>The game also has hundreds of thrilling missions that will keep you entertained for hours. You will have to eliminate terrorists, kidnappers, drug lords, assassins, and other enemies. You will also have to protect innocent civilians, rescue hostages, defuse bombs, and more. Each mission has its own objectives and rewards that you can use to buy new weapons or upgrade your existing ones.</p>
12
- <h3>A free and offline gameplay</h3>
13
- <p>Another great feature of Sniper Killer 3D is that it is free to play. You can download the game from the Google Play Store or play it on your web browser without spending a dime. The game also has an offline mode that allows you to play without an internet connection or data. You can enjoy the game anytime and anywhere you want.</p>
14
- <h2>How to play Sniper Killer 3D?</h2>
15
- <p>Sniper Killer 3D is easy to play but hard to master. Here are some tips on how to play the game:</p>
16
- <p>sniper killer 3d gun shooting games<br />
17
- sniper 3d wildlife studios<br />
18
- sniper 3d piercing bullet<br />
19
- sniper 3d stout assault rifle<br />
20
- sniper 3d offline mode<br />
21
- sniper 3d free to play<br />
22
- sniper 3d action adventure<br />
23
- sniper 3d realistic ballistics<br />
24
- sniper 3d variety of guns<br />
25
- sniper 3d diverse locations<br />
26
- sniper killer 3d download<br />
27
- sniper killer 3d mod apk<br />
28
- sniper killer 3d cheats<br />
29
- sniper killer 3d hack<br />
30
- sniper killer 3d unlimited money<br />
31
- sniper killer 3d review<br />
32
- sniper killer 3d gameplay<br />
33
- sniper killer 3d trailer<br />
34
- sniper killer 3d tips and tricks<br />
35
- sniper killer 3d best weapons<br />
36
- sniper killer 3d online multiplayer<br />
37
- sniper killer 3d pvp mode<br />
38
- sniper killer 3d special bullets<br />
39
- sniper killer 3d elite shooter<br />
40
- sniper killer 3d high-profile targets<br />
41
- sniper killer 3d missions and challenges<br />
42
- sniper killer 3d fun games for free<br />
43
- sniper killer 3d android app<br />
44
- sniper killer 3d ios app<br />
45
- sniper killer 3d pc game<br />
46
- sniper killer 3d mac game<br />
47
- sniper killer 3d windows game<br />
48
- sniper killer 3d linux game<br />
49
- sniper killer 3d steam game<br />
50
- sniper killer 3d epic games store game<br />
51
- sniper killer 3d google play store game<br />
52
- sniper killer 3d app store game<br />
53
- sniper killer 3d amazon appstore game<br />
54
- sniper killer 3d microsoft store game<br />
55
- sniper killer 3d data privacy and security <br />
56
- sniper killer 3d ratings and reviews <br />
57
- sniper killer 3d customer support <br />
58
- sniper killer 3d updates and news <br />
59
- sniper killer 3d blog and community <br />
60
- sniper killer 3d social media accounts <br />
61
- sniper killer 3d youtube channel <br />
62
- sniper killer 3d twitch channel <br />
63
- sniper killer 3d discord server <br />
64
- sniper killer 3d reddit forum <br />
65
- sniper killer 3d wiki and guide </p>
66
- <h3>Choose your sniper rifle and scope</h3>
67
- <p>Before each mission, you will have to select your weapon and scope. You can browse through the available weapons and see their stats, such as damage, range, stability, fire rate, and capacity. You can also see the available scopes and their zoom levels. Choose the weapon and scope that suit your mission and preference.</p>
68
- <h3>Aim and shoot your target</h3>
69
- <p>Once you start the mission, you will have to locate your target using your scope. You can use the mouse scroll or the right-click button to zoom in or out. You can also drag the left-click button to move your aim. You will see a red dot on your target, which indicates the bullet trajectory. You will have to adjust your aim according to the wind, distance, and movement of your target. You can use the wind indicator and the range finder to help you. When you are ready, press the space bar or the left-click button to shoot.</p>
70
- <h3>Complete the objectives and earn rewards</h3>
71
- <p>After you shoot your target, you will see a slow-motion replay of your shot. You will also see if you completed the mission objectives, such as killing the target, avoiding collateral damage, or achieving a headshot. You will earn coins and diamonds based on your performance. You can use these rewards to buy new weapons or upgrade your existing ones.</p>
72
- <h2>Why play Sniper Killer 3D?</h2>
73
- <p>Sniper Killer 3D is not just a game, it is an experience. Here are some reasons why you should play this game:</p>
74
- <h3>Improve your shooting skills and accuracy</h3>
75
- <p>Sniper Killer 3D is a game that will challenge your shooting skills and accuracy. You will have to be precise and patient as you aim and shoot your target. You will also have to be strategic and tactical as you choose your weapon and scope. You will learn how to handle different situations and scenarios as a sniper. You will become a better shooter as you play this game.</p>
76
- <h3>Enjoy stunning 3D graphics and animations</h3>
77
- <p>Sniper Killer 3D is a game that will impress you with its stunning 3D graphics and animations. You will see realistic environments, such as cities, mountains, deserts, and islands. You will also see lifelike characters, such as your targets, civilians, and enemies. You will feel the impact of your shots as you see blood splatter, bullet holes, and explosions. You will be amazed by the quality and detail of this game.</p>
78
- <h3>Challenge yourself with different levels of difficulty</h3>
79
- <p>Sniper Killer 3D is a game that will test your limits with different levels of difficulty. You can choose from easy, normal, hard, or expert modes depending on your skill level. You will face more challenging targets, objectives, and conditions as you progress through the game. You will also have to deal with limited ammo, time, and health. You will have to prove yourself as a sniper killer in this game.</p>
80
- <h2>Where to download Sniper Killer 3D?</h2>
81
- <p>Sniper Killer 3D is a game that is available on multiple platforms. Here are some options on where to download this game:</p>
82
- <h3>Available on Google Play Store for Android devices</h3>
83
- <p>If you have an Android device, such as a smartphone or tablet, you can download Sniper Killer 3D from the Google Play Store for free. You can also enjoy the game without any ads or in-app purchases. You can access the game from this link: [Sniper Killer 3D].</p>
84
- <h3>Compatible with web browsers for desktop computers</h3>
85
- <p>If you have a desktop computer, such as a PC or Mac, you can play Sniper Killer 3D on your web browser for free. You can also enjoy the game without any downloads or installations. You can access the game from this link: [Sniper Killer 3D].</p>
86
- <h2>Conclusion</h2>
87
- <p>Sniper Killer 3D is a game that will give you an unforgettable shooting experience. It is a game that combines realism, variety, and fun in one package. It is a game that will improve your shooting skills and accuracy, enjoy stunning 3D graphics and animations, and challenge yourself with different levels of difficulty. It is a game that is free to play and available on multiple platforms. It is a game that you should not miss.</p>
88
- <p>If you are ready to become a sniper killer, download Sniper Killer 3D today and start your adventure!</p>
89
- <h4>Frequently Asked Questions</h4>
90
- <ul>
91
- <li><b>What are the minimum requirements to play Sniper Killer 3D?</b></li>
92
- <li>The minimum requirements to play Sniper Killer 3D are: Android 4.4 or higher for Android devices; Windows XP/Vista/7/8/10 or Mac OS X for desktop computers; Chrome, Firefox, Safari, or Edge for web browsers.</li>
93
- <li><b>How can I get more coins and diamonds in Sniper Killer 3D?</b></li>
94
- <li>You can get more coins and diamonds in Sniper Killer 3D by: completing missions and objectives; watching video ads; rating and reviewing the game; inviting your friends to play the game.</li>
95
- <li><b>How can I change the language of Sniper Killer 3D?</b></li>
96
- <li>You can change the language of Sniper Killer 3D by: going to the settings menu; selecting the language option; choosing from the available languages, such as English, Spanish, French, German, Russian, Chinese, and more.</li>
97
- <li><b>How can I contact the developers of Sniper Killer 3D?</b></li>
98
- <li>You can contact the developers of Sniper Killer 3D by: sending an email to [[email protected]]; visiting their website at [sniperkiller3d.com]; following them on social media platforms, such as Facebook, Twitter, Instagram, and YouTube.</li>
99
- <li><b>What are some tips and tricks to play Sniper Killer 3D?</b></li>
100
- <li>Some tips and tricks to play Sniper Killer 3D are: use the wind indicator and the range finder to adjust your aim; use the silencer and the night vision to increase your stealth; use the bullet time and the thermal vision to improve your accuracy; use the headshot and the explosive shot to deal more damage; use the zoom and the drag to find your target; use the space bar and the left-click button to shoot.</li>
101
- </ul></p> 197e85843d<br />
102
- <br />
103
- <br />
spaces/1phancelerku/anime-remove-background/FIFA Chino APK disfruta de la emocin del ftbol con grficos increbles.md DELETED
@@ -1,154 +0,0 @@
1
-
2
- <h1>FIFA Mobile Chino APK Actualizado: Todo lo que necesitas saber</h1>
3
- <p>Si eres un fanático del fútbol y te gusta jugar a los juegos de EA Sports, seguramente habrás oído hablar de <strong>FIFA Mobile</strong>, el juego oficial para dispositivos móviles que te permite crear tu propio equipo, competir en diferentes modos y eventos, y disfrutar de la emoción del deporte rey. Pero, ¿sabías que existe una versión alternativa de este juego, llamada <strong>FIFA Mobile Chino APK</strong>, que tiene algunas características y opciones diferentes a la versión original?</p>
4
- <h2>fifa mobile chino apk actualizado</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://jinyurl.com/2uNS53">https://jinyurl.com/2uNS53</a></b></p><br /><br />
5
- <p>En este artículo, te vamos a contar todo lo que necesitas saber sobre FIFA Mobile Chino APK, qué es, cómo descargarlo e instalarlo, qué ventajas y desventajas tiene, cómo se compara con FIFA Mobile APK, qué opinan los usuarios que lo han probado, y algunas preguntas frecuentes que te pueden surgir. ¡Sigue leyendo y descubre si este juego es para ti!</p>
6
- <h2>¿Qué es FIFA Mobile Chino APK?</h2>
7
- <p>FIFA Mobile Chino APK es una versión modificada de FIFA Mobile, el juego oficial de EA Sports para dispositivos móviles Android e iOS. Esta versión está desarrollada por Tencent, una empresa china que tiene los derechos de distribución de FIFA en China. Por lo tanto, esta versión está pensada principalmente para el público chino, aunque también se puede jugar desde otros países.</p>
8
- <p>FIFA Mobile Chino APK tiene algunas características y opciones diferentes a la versión original de FIFA Mobile, como por ejemplo:</p>
9
- <h3>Características principales de FIFA Mobile Chino APK</h3>
10
- <ul>
11
- <li>Tiene una interfaz y un diseño más coloridos y animados, con más efectos visuales y sonoros.</li>
12
- <li>Tiene más modos de juego disponibles, como el modo carrera, el modo torneo, el modo entrenamiento, el modo desafío y el modo mundial.</li>
13
- <li>Tiene más opciones de personalización para tu equipo, como la posibilidad de elegir el escudo, el estadio, el balón, las equipaciones y los patrocinadores.</li>
14
- <li>Tiene más eventos y actividades especiales, como la Copa del Mundo, la Champions League, la Superliga China y otras competiciones regionales e internacionales.</li>
15
- <li>Tiene más jugadores y leyendas disponibles para fichar, incluyendo algunos exclusivos de esta versión, como los iconos eternos.</li>
16
- <li>Tiene un sistema de recompensas más generoso y variado, que te permite obtener monedas, puntos, sobres, jugadores y otros objetos.</li>
17
- <li>Tiene un mercado de transferencias más dinámico y competitivo, donde puedes comprar y vender jugadores con otros usuarios.</li>
18
- </ul>
19
- <h3>Cómo descargar e instalar FIFA Mobile Chino APK</h3>
20
- <p>Para descargar e instalar FIFA Mobile Chino APK en tu dispositivo Android, debes seguir estos pasos:</p>
21
- <ol>
22
- <li>Accede a un sitio web seguro y confiable que ofrezca el archivo APK de FIFA Mobile Chino. Por ejemplo, puedes usar este enlace: .</li>
23
- <li>Descarga el archivo APK en tu dispositivo. Puede que tengas que habilitar la opción de instalar aplicaciones de fuentes desconocidas en los ajustes de seguridad de tu dispositivo.</li>
24
- <li>Abre el archivo APK y sigue las instrucciones que aparecen en la pantalla para completar la instalación.</li>
25
- <li>Una vez instalado, abre el juego y espera a que se descarguen los datos adicionales necesarios para su funcionamiento.</li>
26
- <li>Disfruta de FIFA Mobile Chino APK en tu dispositivo Android.</li>
27
- </ol>
28
- <p>Para descargar e instalar FIFA Mobile Chino APK en tu dispositivo iOS, debes seguir estos pasos:</p>
29
- <ol>
30
- <li>Accede a un sitio web seguro y confiable que ofrezca el archivo IPA de FIFA Mobile Chino. Por ejemplo, puedes usar este enlace: .</li>
31
- <li>Descarga el archivo IPA en tu dispositivo. Puede que tengas que usar una aplicación de gestión de archivos como iFile o Filza para mover el archivo a la carpeta adecuada.</li>
32
- <li>Abre el archivo IPA y sigue las instrucciones que aparecen en la pantalla para completar la instalación.</li>
33
- <li>Una vez instalado, abre el juego y espera a que se descarguen los datos adicionales necesarios para su funcionamiento.</li>
34
- <li>Disfruta de FIFA Mobile Chino APK en tu dispositivo iOS.</li>
35
- </ol>
36
- <h3>Ventajas y desventajas de FIFA Mobile Chino APK</h3>
37
- <p>Como todo juego, FIFA Mobile Chino APK tiene sus pros y sus contras. Aquí te resumimos algunas de las ventajas y desventajas de este juego:</p>
38
- <h4>Ventajas de FIFA Mobile Chino APK</h4>
39
- <ul>
40
- <li>Tiene más contenido y opciones que la versión original de FIFA Mobile, lo que lo hace más divertido y variado.</li>
41
- <li>Tiene una mejor calidad gráfica y sonora, lo que lo hace más atractivo y realista.</li>
42
- <li>Tiene una mayor compatibilidad con diferentes dispositivos y sistemas operativos, lo que lo hace más accesible y fácil de usar.</li>
43
- <li>Tiene una comunidad más activa y participativa, lo que lo hace más social e interactivo.</li>
44
- </ul>
45
- <h4>Desventajas de FIFA Mobile Chino APK</h4>
46
- <ul>
47
- <li>Tiene un idioma diferente al español, lo que puede dificultar la comprensión y el disfrute del juego.</li>
48
- <li>Tiene un mayor riesgo de virus o malware, al no ser una versión oficial ni estar disponible en las tiendas oficiales de aplicaciones.</li>
49
- <li>Tiene un mayor consumo de recursos y datos, lo que puede afectar al rendimiento y la batería del dispositivo.</li>
50
- <li>Tiene un mayor nivel de dificultad y competencia, lo que puede frustrar o desanimar a algunos jugadores.</li>
51
- </ul>
52
- <h2>¿Qué diferencia hay entre FIFA Mobile Chino APK y FIFA Mobile APK?</h2>
53
- <p>Ahora que ya sabes qué es FIFA Mobile Chino APK, te preguntarás qué diferencia hay con FIFA Mobile APK, la versión original del juego. Pues bien, aunque ambos juegos comparten el mismo concepto y objetivo, hay algunas similitudes y diferencias entre ellos que te vamos a explicar a continuación:</p>
54
- <h3>Similitudes entre ambos juegos</h3>
55
- <ul>
56
- <li>Ambos juegos son desarrollados por EA Sports, la empresa líder en juegos deportivos.</li>
57
- <li>Ambos juegos te permiten crear tu propio equipo de fútbol, con jugadores reales y licenciados por la FIFA.</li>
58
- <li>Ambos juegos te ofrecen diferentes modos y eventos para jugar solo o con otros usuarios, como el modo temporada, el modo versus o el modo ataque.</li>
59
- <li>Ambos juegos te dan la oportunidad de mejorar tus habilidades y tu estrategia, mediante el entrenamiento, la formación y la táctica.</li>
60
- <li>Ambos juegos te brindan una experiencia inmersiva y emocionante, con gráficos detallados, animaciones fluidas y comentarios en vivo.</li>
61
- </ul>
62
- <h3>Diferencias entre ambos juegos</h3>
63
- <ul>
64
- <li>FIFA Mobile Chino APK tiene una interfaz y un diseño más coloridos y animados, mientras que FIFA Mobile APK tiene una interfaz y un diseño más sobrios y elegantes.</li>
65
- <li>FIFA Mobile Chino APK tiene más modos de juego disponibles, como el modo carrera, el modo torneo o el modo mundial, mientras que FIFA Mobile APK tiene menos modos de juego disponibles, como el modo campaña o el modo leyendas.</li> <li>FIFA Mobile Chino APK tiene más opciones de personalización para tu equipo, como la posibilidad de elegir el escudo, el estadio, el balón, las equipaciones y los patrocinadores, mientras que FIFA Mobile APK tiene menos opciones de personalización para tu equipo, como la posibilidad de elegir el nombre, el logo y los colores.</li>
66
- <li>FIFA Mobile Chino APK tiene más eventos y actividades especiales, como la Copa del Mundo, la Champions League, la Superliga China y otras competiciones regionales e internacionales, mientras que FIFA Mobile APK tiene menos eventos y actividades especiales, como la Copa América, la Eurocopa, la Premier League y otras ligas nacionales.</li>
67
- <li>FIFA Mobile Chino APK tiene más jugadores y leyendas disponibles para fichar, incluyendo algunos exclusivos de esta versión, como los iconos eternos, mientras que FIFA Mobile APK tiene menos jugadores y leyendas disponibles para fichar, incluyendo algunos exclusivos de esta versión, como los iconos prime.</li>
68
- <li>FIFA Mobile Chino APK tiene un sistema de recompensas más generoso y variado, que te permite obtener monedas, puntos, sobres, jugadores y otros objetos, mientras que FIFA Mobile APK tiene un sistema de recompensas más limitado y repetitivo, que te permite obtener monedas, puntos y sobres.</li>
69
- <li>FIFA Mobile Chino APK tiene un mercado de transferencias más dinámico y competitivo, donde puedes comprar y vender jugadores con otros usuarios, mientras que FIFA Mobile APK tiene un mercado de transferencias más estático y controlado, donde solo puedes comprar y vender jugadores con el sistema.</li>
70
- </ul>
71
- <h2>¿Qué opinan los usuarios de FIFA Mobile Chino APK?</h2>
72
- <p>Si te preguntas qué opinan los usuarios que han probado FIFA Mobile Chino APK, te podemos decir que hay opiniones de todo tipo. Algunos usuarios están muy satisfechos con este juego y lo prefieren a la versión original de FIFA Mobile, mientras que otros usuarios están muy decepcionados con este juego y lo consideran una copia barata de FIFA Mobile. Aquí te mostramos algunas de las reseñas positivas y negativas que hemos encontrado en internet:</p>
73
- <p>descargar fifa mobile chino apk<br />
74
- fifa mobile chino apk 2023<br />
75
- fifa mobile chino apk ultima version<br />
76
- fifa mobile chino apk mod<br />
77
- fifa mobile chino apk hack<br />
78
- fifa mobile chino apk mega<br />
79
- fifa mobile chino apk mediafire<br />
80
- fifa mobile chino apk sin licencia<br />
81
- fifa mobile chino apk android<br />
82
- fifa mobile chino apk gratis<br />
83
- fifa mobile chino apk full<br />
84
- fifa mobile chino apk offline<br />
85
- fifa mobile chino apk obb<br />
86
- fifa mobile chino apk datos<br />
87
- fifa mobile chino apk gameplay<br />
88
- fifa mobile chino apk descargar gratis<br />
89
- fifa mobile chino apk 2023 ultima version<br />
90
- fifa mobile chino apk 2023 mod<br />
91
- fifa mobile chino apk 2023 hack<br />
92
- fifa mobile chino apk 2023 mega<br />
93
- fifa mobile chino apk 2023 mediafire<br />
94
- fifa mobile chino apk 2023 sin licencia<br />
95
- fifa mobile chino apk 2023 android<br />
96
- fifa mobile chino apk 2023 gratis<br />
97
- fifa mobile chino apk 2023 full<br />
98
- fifa mobile chino apk 2023 offline<br />
99
- fifa mobile chino apk 2023 obb<br />
100
- fifa mobile chino apk 2023 datos<br />
101
- fifa mobile chino apk 2023 gameplay<br />
102
- fifa mobile chino apk 2023 descargar gratis<br />
103
- como descargar fifa mobile chino apk<br />
104
- como instalar fifa mobile chino apk<br />
105
- como jugar fifa mobile chino apk<br />
106
- como actualizar fifa mobile chino apk<br />
107
- como hackear fifa mobile chino apk<br />
108
- como tener monedas en fifa mobile chino apk<br />
109
- como tener jugadores en fifa mobile chino apk<br />
110
- como tener licencia en fifa mobile chino apk<br />
111
- como solucionar error en fifa mobile chino apk<br />
112
- como quitar publicidad en fifa mobile chino apk</p>
113
- <h3>Reseñas positivas de FIFA Mobile Chino APK</h3>
114
- <ul>
115
- <li>"Me encanta este juego. Tiene mucha más variedad y diversión que el FIFA Mobile normal. Los gráficos son increíbles y los modos de juego son muy entretenidos. Lo recomiendo mucho."</li>
116
- <li>"Es el mejor juego de fútbol para móviles que he jugado. Tiene todo lo que le falta al FIFA Mobile original. Más modos, más jugadores, más eventos, más recompensas. Es una pasada."</li>
117
- <li>"No entiendo por qué EA Sports no hace este juego para todo el mundo. Es mucho mejor que el FIFA Mobile que tenemos en Europa. Tiene más opciones y más calidad. Es una maravilla."</li>
118
- </ul>
119
- <h3>Reseñas negativas de FIFA Mobile Chino APK</h3>
120
- <ul>
121
- <li>"No me gusta nada este juego. Es una copia barata del FIFA Mobile original. Los gráficos son feos y los sonidos son molestos. Los modos de juego son aburridos y repetitivos. No lo recomiendo."</li>
122
- <li>"Es un juego muy malo. Tiene muchos errores y problemas. Se cierra solo o se queda colgado. Los controles son malos y la jugabilidad es pésima. No vale la pena."</li>
123
- <li>"No entiendo cómo hay gente que juega a esto. Es una basura. No tiene nada que ver con el FIFA Mobile original. No tiene licencias ni jugadores reales. Es una estafa."</li>
124
- </ul>
125
- <h2>Conclusión</h2>
126
- <p>En conclusión, podemos decir que FIFA Mobile Chino APK es una versión alternativa de FIFA Mobile, el juego oficial de EA Sports para dispositivos móviles. Esta versión está desarrollada por Tencent, una empresa china que tiene los derechos de distribución de FIFA en China.</p>
127
- <p>FIFA Mobile Chino APK tiene algunas características y opciones diferentes a la versión original de FIFA Mobile, como una interfaz más colorida, más modos de juego disponibles, más opciones de personalización para tu equipo, más eventos y actividades especiales, más jugadores y leyendas disponibles para fichar, un sistema de recompensas más generoso y variado, y un mercado de transferencias más dinámico y competitivo.</p>
128
- <p>FIFA Mobile Chino APK también tiene algunas vent ajas y desventajas, como un idioma diferente al español, un mayor riesgo de virus o malware, un mayor consumo de recursos y datos, y un mayor nivel de dificultad y competencia.</p>
129
- <p>FIFA Mobile Chino APK se puede descargar e instalar en dispositivos Android e iOS, siguiendo unos sencillos pasos que te hemos explicado en este artículo. Sin embargo, debes tener en cuenta que no se trata de una versión oficial ni está disponible en las tiendas oficiales de aplicaciones, por lo que debes tomar algunas precauciones al usarla.</p>
130
- <p>FIFA Mobile Chino APK se diferencia de FIFA Mobile APK, la versión original del juego, en algunos aspectos que también te hemos detallado en este artículo. Ambos juegos tienen sus similitudes y diferencias, y depende de tu gusto y preferencia el elegir uno u otro.</p>
131
- <h3>¿Por qué deberías probar FIFA Mobile Chino APK?</h3>
132
- <p>Si te gustan los juegos de fútbol y quieres probar algo diferente al FIFA Mobile original, puedes darle una oportunidad a FIFA Mobile Chino APK. Este juego te ofrece más contenido y opciones que la versión original, lo que lo hace más divertido y variado. Además, tiene una mejor calidad gráfica y sonora, lo que lo hace más atractivo y realista. También tiene una mayor compatibilidad con diferentes dispositivos y sistemas operativos, lo que lo hace más accesible y fácil de usar. Y por si fuera poco, tiene una comunidad más activa y participativa, lo que lo hace más social e interactivo.</p>
133
- <h3>¿Qué precauciones debes tomar al usar FIFA Mobile Chino APK?</h3>
134
- <p>Si decides probar FIFA Mobile Chino APK, debes tener en cuenta algunas precauciones para evitar problemas o inconvenientes. Algunas de estas precauciones son:</p>
135
- <ul>
136
- <li>Verifica la fuente de descarga del archivo APK o IPA, y asegúrate de que sea segura y confiable. Evita los sitios web sospechosos o fraudulentos que puedan contener virus o malware.</li>
137
- <li>Respeta las normas y condiciones de uso del juego, y no hagas trampas ni abuses de otros usuarios. De lo contrario, podrías ser baneado o sancionado por los administradores del juego.</li>
138
- <li>No compartas tus datos personales ni financieros con nadie dentro del juego, ni accedas a enlaces o promociones dudosas. Podrías ser víctima de estafas o robos de identidad.</li>
139
- <li>No gastes demasiado dinero real en el juego, ni te obsesiones con obtener los mejores jugadores o las mejores recompensas. Recuerda que se trata de un juego para divertirte y pasar el rato, no para competir o presumir.</li>
140
- </ul>
141
- <h2>Preguntas frecuentes sobre FIFA Mobile Chino APK</h2>
142
- <p>Para terminar este artículo, te vamos a responder algunas de las preguntas frecuentes que pueden surgirte sobre FIFA Mobile Chino APK. Esperamos que te sean útiles y te ayuden a resolver tus dudas.</p>
143
- <h4>¿FIFA Mobile Chino APK es gratis?</h4>
144
- <p>Sí, FIFA Mobile Chino APK es gratis. No tienes que pagar nada para descargarlo e instalarlo en tu dispositivo. Sin embargo, el juego tiene compras integradas que te permiten obtener monedas, puntos o sobres con dinero real. Estas compras son opcionales y no son necesarias para jugar.</p>
145
- <h4>¿FIFA Mobile Chino APK es seguro?</h4>
146
- <p>No podemos garantizar al 100% que FIFA Mobile Chino APK sea seguro. Al no ser una versión oficial ni estar disponible en las tiendas oficiales de aplicaciones, existe el riesgo de que el archivo APK o IPA contenga virus o malware que puedan dañar tu dispositivo o comprometer tu seguridad. Por eso, te recomendamos que verifiques la fuente de descarga del archivo y que uses un antivirus o un firewall para proteger tu dispositivo.</p>
147
- <h4>¿FIFA Mobile Chino APK está en español?</h4>
148
- <p>No, FIFA Mobile Chino APK no está en español. El idioma principal del juego es el chino mandarín, aunque también tiene algunos elementos en inglés. No hay opción para cambiar el idioma del juego al español u otro idioma. Por eso, si no entiendes el chino o el inglés, puede que tengas dificultades para jugar o disfrutar del juego.</p>
149
- <h4>¿FIFA Mobile Chino APK se puede jugar con otros usuarios?</h4>
150
- <p>Sí, FIFA Mobile Chino APK se puede jugar con otros usuarios. El juego tiene un modo multijugador que te permite enfrentarte a otros jugadores en partidos online, ya sea en el modo versus, el modo ataque o el modo torneo. También puedes unirte a una liga o un club para cooperar o competir con otros usuarios, y participar en eventos y actividades especiales que te dan la oportunidad de ganar recompensas y reconocimientos.</p>
151
- <h4>¿FIFA Mobile Chino APK se actualiza con frecuencia?</h4>
152
- <p>Sí, FIFA Mobile Chino APK se actualiza con frecuencia. Los desarrolladores del juego suelen lanzar nuevas versiones del archivo APK o IPA cada cierto tiempo, para añadir nuevas características, opciones, eventos, jugadores y correcciones de errores. Por eso, te recomendamos que estés atento a las novedades y que descargues la última versión disponible para disfrutar de la mejor experiencia de juego.</p> 401be4b1e0<br />
153
- <br />
154
- <br />
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_unclip.py DELETED
@@ -1,303 +0,0 @@
1
- # Copyright 2022 Kakao Brain and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import math
16
- from dataclasses import dataclass
17
- from typing import Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import paddle
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from ..utils import BaseOutput
24
- from .scheduling_utils import SchedulerMixin
25
-
26
-
27
- @dataclass
28
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
29
- class UnCLIPSchedulerOutput(BaseOutput):
30
- """
31
- Output class for the scheduler's step function output.
32
-
33
- Args:
34
- prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
35
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
36
- denoising loop.
37
- pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
38
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
39
- `pred_original_sample` can be used to preview progress or for guidance.
40
- """
41
-
42
- prev_sample: paddle.Tensor
43
- pred_original_sample: Optional[paddle.Tensor] = None
44
-
45
-
46
- def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
47
- """
48
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
49
- (1-beta) over time from t = [0,1].
50
-
51
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
52
- to that part of the diffusion process.
53
-
54
-
55
- Args:
56
- num_diffusion_timesteps (`int`): the number of betas to produce.
57
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
58
- prevent singularities.
59
-
60
- Returns:
61
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
62
- """
63
-
64
- def alpha_bar(time_step):
65
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
66
-
67
- betas = []
68
- for i in range(num_diffusion_timesteps):
69
- t1 = i / num_diffusion_timesteps
70
- t2 = (i + 1) / num_diffusion_timesteps
71
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
72
- return paddle.to_tensor(betas, dtype=paddle.float32)
73
-
74
-
75
- class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
76
- """
77
- This is a modified DDPM Scheduler specifically for the karlo unCLIP model.
78
-
79
- This scheduler has some minor variations in how it calculates the learned range variance and dynamically
80
- re-calculates betas based off the timesteps it is skipping.
81
-
82
- The scheduler also uses a slightly different step ratio when computing timesteps to use for inference.
83
-
84
- See [`~DDPMScheduler`] for more information on DDPM scheduling
85
-
86
- Args:
87
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
88
- variance_type (`str`):
89
- options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small_log`
90
- or `learned_range`.
91
- clip_sample (`bool`, default `True`):
92
- option to clip predicted sample between `-clip_sample_range` and `clip_sample_range` for numerical
93
- stability.
94
- clip_sample_range (`float`, default `1.0`):
95
- The range to clip the sample between. See `clip_sample`.
96
- prediction_type (`str`, default `epsilon`, optional):
97
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process)
98
- or `sample` (directly predicting the noisy sample`)
99
- """
100
-
101
- @register_to_config
102
- def __init__(
103
- self,
104
- num_train_timesteps: int = 1000,
105
- variance_type: str = "fixed_small_log",
106
- clip_sample: bool = True,
107
- clip_sample_range: Optional[float] = 1.0,
108
- prediction_type: str = "epsilon",
109
- ):
110
- # beta scheduler is "squaredcos_cap_v2"
111
- self.betas = betas_for_alpha_bar(num_train_timesteps)
112
-
113
- self.alphas = 1.0 - self.betas
114
- self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
115
- self.one = paddle.to_tensor(1.0)
116
-
117
- # standard deviation of the initial noise distribution
118
- self.init_noise_sigma = 1.0
119
-
120
- # setable values
121
- self.num_inference_steps = None
122
- self.timesteps = paddle.to_tensor(np.arange(0, num_train_timesteps)[::-1].copy())
123
-
124
- self.variance_type = variance_type
125
-
126
- def scale_model_input(self, sample: paddle.Tensor, timestep: Optional[int] = None) -> paddle.Tensor:
127
- """
128
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
129
- current timestep.
130
-
131
- Args:
132
- sample (`paddle.Tensor`): input sample
133
- timestep (`int`, optional): current timestep
134
-
135
- Returns:
136
- `paddle.Tensor`: scaled input sample
137
- """
138
- return sample
139
-
140
- def set_timesteps(self, num_inference_steps: int):
141
- """
142
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
143
-
144
- Note that this scheduler uses a slightly different step ratio than the other diffusers schedulers. The
145
- different step ratio is to mimic the original karlo implementation and does not affect the quality or accuracy
146
- of the results.
147
-
148
- Args:
149
- num_inference_steps (`int`):
150
- the number of diffusion steps used when generating samples with a pre-trained model.
151
- """
152
- self.num_inference_steps = num_inference_steps
153
- step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
154
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
155
- self.timesteps = paddle.to_tensor(timesteps)
156
-
157
- def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
158
- if prev_timestep is None:
159
- prev_timestep = t - 1
160
-
161
- alpha_prod_t = self.alphas_cumprod[t]
162
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
163
- beta_prod_t = 1 - alpha_prod_t
164
- beta_prod_t_prev = 1 - alpha_prod_t_prev
165
-
166
- if prev_timestep == t - 1:
167
- beta = self.betas[t]
168
- else:
169
- beta = 1 - alpha_prod_t / alpha_prod_t_prev
170
-
171
- # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
172
- # and sample from it to get previous sample
173
- # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
174
- variance = beta_prod_t_prev / beta_prod_t * beta
175
-
176
- if variance_type is None:
177
- variance_type = self.config.variance_type
178
-
179
- # hacks - were probably added for training stability
180
- if variance_type == "fixed_small_log":
181
- variance = paddle.log(paddle.clip(variance, min=1e-20))
182
- variance = paddle.exp(0.5 * variance)
183
- elif variance_type == "learned_range":
184
- # NOTE difference with DDPM scheduler
185
- min_log = variance.log()
186
- max_log = beta.log()
187
-
188
- frac = (predicted_variance + 1) / 2
189
- variance = frac * max_log + (1 - frac) * min_log
190
-
191
- return variance
192
-
193
- def step(
194
- self,
195
- model_output: paddle.Tensor,
196
- timestep: int,
197
- sample: paddle.Tensor,
198
- prev_timestep: Optional[int] = None,
199
- generator=None,
200
- return_dict: bool = True,
201
- ) -> Union[UnCLIPSchedulerOutput, Tuple]:
202
- """
203
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
204
- process from the learned model outputs (most often the predicted noise).
205
-
206
- Args:
207
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
208
- timestep (`int`): current discrete timestep in the diffusion chain.
209
- sample (`paddle.Tensor`):
210
- current instance of sample being created by diffusion process.
211
- prev_timestep (`int`, *optional*): The previous timestep to predict the previous sample at.
212
- Used to dynamically compute beta. If not given, `t-1` is used and the pre-computed beta is used.
213
- generator: random number generator.
214
- return_dict (`bool`): option for returning tuple rather than UnCLIPSchedulerOutput class
215
-
216
- Returns:
217
- [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] or `tuple`:
218
- [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
219
- returning a tuple, the first element is the sample tensor.
220
-
221
- """
222
-
223
- t = timestep
224
-
225
- if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
226
- model_output, predicted_variance = model_output.split(
227
- [sample.shape[1], model_output.shape[1] - sample.shape[1]], axis=1
228
- )
229
- else:
230
- predicted_variance = None
231
-
232
- # 1. compute alphas, betas
233
- if prev_timestep is None:
234
- prev_timestep = t - 1
235
-
236
- alpha_prod_t = self.alphas_cumprod[t]
237
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
238
- beta_prod_t = 1 - alpha_prod_t
239
- beta_prod_t_prev = 1 - alpha_prod_t_prev
240
-
241
- if prev_timestep == t - 1:
242
- beta = self.betas[t]
243
- alpha = self.alphas[t]
244
- else:
245
- beta = 1 - alpha_prod_t / alpha_prod_t_prev
246
- alpha = 1 - beta
247
-
248
- # 2. compute predicted original sample from predicted noise also called
249
- # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
250
- if self.config.prediction_type == "epsilon":
251
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
252
- elif self.config.prediction_type == "sample":
253
- pred_original_sample = model_output
254
- else:
255
- raise ValueError(
256
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
257
- " for the UnCLIPScheduler."
258
- )
259
-
260
- # 3. Clip "predicted x_0"
261
- if self.config.clip_sample:
262
- pred_original_sample = paddle.clip(
263
- pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
264
- )
265
-
266
- # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
267
- # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
268
- pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * beta) / beta_prod_t
269
- current_sample_coeff = alpha ** (0.5) * beta_prod_t_prev / beta_prod_t
270
-
271
- # 5. Compute predicted previous sample µ_t
272
- # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
273
- pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
274
-
275
- # 6. Add noise
276
- variance = 0
277
- if t > 0:
278
- variance_noise = paddle.randn(model_output.shape, generator=generator, dtype=model_output.dtype)
279
-
280
- variance = self._get_variance(
281
- t,
282
- predicted_variance=predicted_variance,
283
- prev_timestep=prev_timestep,
284
- )
285
-
286
- if self.variance_type == "fixed_small_log":
287
- variance = variance
288
- elif self.variance_type == "learned_range":
289
- variance = (0.5 * variance).exp()
290
- else:
291
- raise ValueError(
292
- f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
293
- " for the UnCLIPScheduler."
294
- )
295
-
296
- variance = variance * variance_noise
297
-
298
- pred_prev_sample = pred_prev_sample + variance
299
-
300
- if not return_dict:
301
- return (pred_prev_sample,)
302
-
303
- return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
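For reference, a minimal usage sketch of the scheduler defined above. It is not part of the deleted file: `denoiser` is a hypothetical Paddle module standing in for the karlo unCLIP UNet, and the sample shape is illustrative only.

```python
# Minimal usage sketch for UnCLIPScheduler (assumption: `denoiser` is a placeholder
# Paddle model that returns a tensor shaped like `sample`, or 2x the channels when
# variance_type="learned_range").
import paddle

scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="epsilon")
scheduler.set_timesteps(num_inference_steps=25)

sample = paddle.randn([1, 3, 64, 64]) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)   # identity here, kept for API parity
    model_output = denoiser(model_input, t)                # hypothetical denoising model
    sample = scheduler.step(model_output, t, sample).prev_sample
```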
 
spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/networks_basic.py DELETED
@@ -1,187 +0,0 @@
1
-
2
- from __future__ import absolute_import
3
-
4
- import sys
5
- import torch
6
- import torch.nn as nn
7
- import torch.nn.init as init
8
- from torch.autograd import Variable
9
- import numpy as np
10
- from pdb import set_trace as st
11
- from skimage import color
12
- from IPython import embed
13
- from model.stylegan.lpips import pretrained_networks as pn
14
-
15
- import model.stylegan.lpips as util
16
-
17
- def spatial_average(in_tens, keepdim=True):
18
- return in_tens.mean([2,3],keepdim=keepdim)
19
-
20
- def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W
21
- in_H = in_tens.shape[2]
22
- scale_factor = 1.*out_H/in_H
23
-
24
- return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)
25
-
26
- # Learned perceptual metric
27
- class PNetLin(nn.Module):
28
- def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True):
29
- super(PNetLin, self).__init__()
30
-
31
- self.pnet_type = pnet_type
32
- self.pnet_tune = pnet_tune
33
- self.pnet_rand = pnet_rand
34
- self.spatial = spatial
35
- self.lpips = lpips
36
- self.version = version
37
- self.scaling_layer = ScalingLayer()
38
-
39
- if(self.pnet_type in ['vgg','vgg16']):
40
- net_type = pn.vgg16
41
- self.chns = [64,128,256,512,512]
42
- elif(self.pnet_type=='alex'):
43
- net_type = pn.alexnet
44
- self.chns = [64,192,384,256,256]
45
- elif(self.pnet_type=='squeeze'):
46
- net_type = pn.squeezenet
47
- self.chns = [64,128,256,384,384,512,512]
48
- self.L = len(self.chns)
49
-
50
- self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
51
-
52
- if(lpips):
53
- self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
54
- self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
55
- self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
56
- self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
57
- self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
58
- self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
59
- if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
60
- self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
61
- self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
62
- self.lins+=[self.lin5,self.lin6]
63
-
64
- def forward(self, in0, in1, retPerLayer=False):
65
- # v0.0 - original release had a bug, where input was not scaled
66
- in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)
67
- outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
68
- feats0, feats1, diffs = {}, {}, {}
69
-
70
- for kk in range(self.L):
71
- feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk])
72
- diffs[kk] = (feats0[kk]-feats1[kk])**2
73
-
74
- if(self.lpips):
75
- if(self.spatial):
76
- res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
77
- else:
78
- res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
79
- else:
80
- if(self.spatial):
81
- res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
82
- else:
83
- res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]
84
-
85
- val = res[0]
86
- for l in range(1,self.L):
87
- val += res[l]
88
-
89
- if(retPerLayer):
90
- return (val, res)
91
- else:
92
- return val
93
-
94
- class ScalingLayer(nn.Module):
95
- def __init__(self):
96
- super(ScalingLayer, self).__init__()
97
- self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None])
98
- self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None])
99
-
100
- def forward(self, inp):
101
- return (inp - self.shift) / self.scale
102
-
103
-
104
- class NetLinLayer(nn.Module):
105
- ''' A single linear layer which does a 1x1 conv '''
106
- def __init__(self, chn_in, chn_out=1, use_dropout=False):
107
- super(NetLinLayer, self).__init__()
108
-
109
- layers = [nn.Dropout(),] if(use_dropout) else []
110
- layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
111
- self.model = nn.Sequential(*layers)
112
-
113
-
114
- class Dist2LogitLayer(nn.Module):
115
- ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
116
- def __init__(self, chn_mid=32, use_sigmoid=True):
117
- super(Dist2LogitLayer, self).__init__()
118
-
119
- layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]
120
- layers += [nn.LeakyReLU(0.2,True),]
121
- layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]
122
- layers += [nn.LeakyReLU(0.2,True),]
123
- layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]
124
- if(use_sigmoid):
125
- layers += [nn.Sigmoid(),]
126
- self.model = nn.Sequential(*layers)
127
-
128
- def forward(self,d0,d1,eps=0.1):
129
- return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))
130
-
131
- class BCERankingLoss(nn.Module):
132
- def __init__(self, chn_mid=32):
133
- super(BCERankingLoss, self).__init__()
134
- self.net = Dist2LogitLayer(chn_mid=chn_mid)
135
- # self.parameters = list(self.net.parameters())
136
- self.loss = torch.nn.BCELoss()
137
-
138
- def forward(self, d0, d1, judge):
139
- per = (judge+1.)/2.
140
- self.logit = self.net.forward(d0,d1)
141
- return self.loss(self.logit, per)
142
-
143
- # L2, DSSIM metrics
144
- class FakeNet(nn.Module):
145
- def __init__(self, use_gpu=True, colorspace='Lab'):
146
- super(FakeNet, self).__init__()
147
- self.use_gpu = use_gpu
148
- self.colorspace=colorspace
149
-
150
- class L2(FakeNet):
151
-
152
- def forward(self, in0, in1, retPerLayer=None):
153
- assert(in0.size()[0]==1) # currently only supports batchSize 1
154
-
155
- if(self.colorspace=='RGB'):
156
- (N,C,X,Y) = in0.size()
157
- value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
158
- return value
159
- elif(self.colorspace=='Lab'):
160
- value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
161
- util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
162
- ret_var = Variable( torch.Tensor((value,) ) )
163
- if(self.use_gpu):
164
- ret_var = ret_var.cuda()
165
- return ret_var
166
-
167
- class DSSIM(FakeNet):
168
-
169
- def forward(self, in0, in1, retPerLayer=None):
170
- assert(in0.size()[0]==1) # currently only supports batchSize 1
171
-
172
- if(self.colorspace=='RGB'):
173
- value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float')
174
- elif(self.colorspace=='Lab'):
175
- value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
176
- util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
177
- ret_var = Variable( torch.Tensor((value,) ) )
178
- if(self.use_gpu):
179
- ret_var = ret_var.cuda()
180
- return ret_var
181
-
182
- def print_network(net):
183
- num_params = 0
184
- for param in net.parameters():
185
- num_params += param.numel()
186
- print('Network',net)
187
- print('Total number of parameters: %d' % num_params)
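For reference, a sketch of how the learned perceptual metric above is typically invoked. It assumes the usual LPIPS convention of RGB inputs scaled to [-1, 1]; instantiating `PNetLin` fetches the torchvision VGG16 weights via `pretrained_networks`, and the linear-layer weights would normally come from an LPIPS checkpoint, which is omitted here.

```python
# Sketch only: scoring two images with PNetLin. Random tensors stand in for images.
import torch

metric = PNetLin(pnet_type='vgg', use_dropout=True, spatial=False, lpips=True)
metric.eval()

img0 = torch.rand(1, 3, 64, 64) * 2 - 1   # pretend image in [-1, 1]
img1 = torch.rand(1, 3, 64, 64) * 2 - 1

with torch.no_grad():
    dist = metric(img0, img1)             # shape (1, 1, 1, 1); lower means more similar
print(float(dist))
```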
 
spaces/A00001/bingothoo/src/components/chat-attachments.tsx DELETED
@@ -1,37 +0,0 @@
1
- import Image from 'next/image'
2
- import ClearIcon from '@/assets/images/clear.svg'
3
- import RefreshIcon from '@/assets/images/refresh.svg'
4
- import { FileItem } from '@/lib/bots/bing/types'
5
- import { cn } from '@/lib/utils'
6
- import { useBing } from '@/lib/hooks/use-bing'
7
-
8
- type ChatAttachmentsProps = Pick<ReturnType<typeof useBing>, 'attachmentList' | 'setAttachmentList' | 'uploadImage'>
9
-
10
- export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) {
11
- return attachmentList.length ? (
12
- <div className="attachment-list">
13
- {attachmentList.map(file => (
14
- <div className="file-item" key={file.url}>
15
- {file.status === 'loading' && (
16
- <div className="loading">
17
- <div className="bar" />
18
- </div>)
19
- }
20
- {file.status !== 'error' && (
21
- <div className="thumbnail">
22
- <img draggable="false" src={file.url} />
23
- </div>)
24
- }
25
- {file.status === 'error' && (
26
- <div className="error">
27
- <Image alt="refresh" src={RefreshIcon} width={18} onClick={() => uploadImage(file.url)} />
28
- </div>
29
- )}
30
- <button className={cn('dismiss', { 'no-file': file.status === 'error' })} type="button">
31
- <Image alt="clear" src={ClearIcon} width={16} onClick={() => setAttachmentList([])} />
32
- </button>
33
- </div>
34
- ))}
35
- </div>
36
- ) : null
37
- }
 
spaces/AIConsultant/MusicGen/audiocraft/metrics/rvm.py DELETED
@@ -1,106 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
- import torch
9
- from torch import nn
10
- import torchaudio
11
-
12
-
13
- def db_to_scale(volume: tp.Union[float, torch.Tensor]):
14
- return 10 ** (volume / 20)
15
-
16
-
17
- def scale_to_db(scale: torch.Tensor, min_volume: float = -120):
18
- min_scale = db_to_scale(min_volume)
19
- return 20 * torch.log10(scale.clamp(min=min_scale))
20
-
21
-
22
- class RelativeVolumeMel(nn.Module):
23
- """Relative volume melspectrogram measure.
24
-
25
- Computes a measure of distance over two mel spectrogram that is interpretable in terms
26
- of decibels. Given `x_ref` and `x_est` two waveforms of shape `[*, T]`, it will
27
- first renormalize both by the ground truth of `x_ref`.
28
-
29
- Then it computes the mel spectrogram `z_ref` and `z_est` and compute volume of the difference
30
- relative to the volume of `z_ref` for each time-frequency bin. It further adds some limits, e.g.
31
- clamping the values between -25 and 25 dB (controlled by `min_relative_volume` and `max_relative_volume`)
32
- with the goal of avoiding the loss being dominated by parts where the reference is almost silent.
33
- Indeed, volumes in dB can take unbounded values both towards -oo and +oo, which can make the final
34
- average metric harder to interpret. Besides, anything below -30 dB of attenuation would sound extremely
35
- good (for a neural network output, although sound engineers typically aim for much lower attenuations).
36
- Similarly, anything above +30 dB would just be completely missing the target, and there is no point
37
- in measuring by exactly how much it missed it. -25, 25 is a more conservative range, but also more
38
- in line with what neural nets currently can achieve.
39
-
40
- For instance, a Relative Volume Mel (RVM) score of -10 dB means that on average, the delta between
41
- the target and reference mel-spec is 10 dB lower than the reference mel-spec value.
42
-
43
- The metric can be aggregated over a given frequency band in order have different insights for
44
- different region of the spectrum. `num_aggregated_bands` controls the number of bands.
45
-
46
- ..Warning:: While this function is optimized for interpretability, nothing was done to ensure it
47
- is numerically stable when computing its gradient. We thus advise against using it as a training loss.
48
-
49
- Args:
50
- sample_rate (int): Sample rate of the input audio.
51
- n_mels (int): Number of mel bands to use.
52
- n_fft (int): Number of frequency bins for the STFT.
53
- hop_length (int): Hop length of the STFT and the mel-spectrogram.
54
- min_relative_volume (float): The error `z_ref - z_est` volume is given relative to
55
- the volume of `z_ref`. If error is smaller than -25 dB of `z_ref`, then it is clamped.
56
- max_relative_volume (float): Same as `min_relative_volume` but clamping if the error is larger than that.
57
- max_initial_gain (float): When rescaling the audio at the very beginning, we will limit the gain
58
- to that amount, to avoid rescaling near silence. Given in dB.
59
- min_activity_volume (float): When computing the reference level from `z_ref`, will clamp low volume
60
- bins to that amount. This is effectively our "zero" level for the reference mel-spectrogram,
61
- and anything below that will be considered equally.
62
- num_aggregated_bands (int): Number of bands to keep when computing the average RVM value.
63
- For instance, a value of 3 would give 3 scores, roughly for low, mid and high freqs.
64
- """
65
- def __init__(self, sample_rate: int = 24000, n_mels: int = 80, n_fft: int = 512,
66
- hop_length: int = 128, min_relative_volume: float = -25,
67
- max_relative_volume: float = 25, max_initial_gain: float = 25,
68
- min_activity_volume: float = -25,
69
- num_aggregated_bands: int = 4) -> None:
70
- super().__init__()
71
- self.melspec = torchaudio.transforms.MelSpectrogram(
72
- n_mels=n_mels, n_fft=n_fft, hop_length=hop_length,
73
- normalized=True, sample_rate=sample_rate, power=2)
74
- self.min_relative_volume = min_relative_volume
75
- self.max_relative_volume = max_relative_volume
76
- self.max_initial_gain = max_initial_gain
77
- self.min_activity_volume = min_activity_volume
78
- self.num_aggregated_bands = num_aggregated_bands
79
-
80
- def forward(self, estimate: torch.Tensor, ground_truth: torch.Tensor) -> tp.Dict[str, torch.Tensor]:
81
- """Compute RVM metric between estimate and reference samples.
82
-
83
- Args:
84
- estimate (torch.Tensor): Estimate sample.
85
- ground_truth (torch.Tensor): Reference sample.
86
-
87
- Returns:
88
- dict[str, torch.Tensor]: Metrics with keys `rvm` for the overall average, and `rvm_{k}`
89
- for the RVM over the k-th band (k=0..num_aggregated_bands - 1).
90
- """
91
- min_scale = db_to_scale(-self.max_initial_gain)
92
- std = ground_truth.pow(2).mean().sqrt().clamp(min=min_scale)
93
- z_gt = self.melspec(ground_truth / std).sqrt()
94
- z_est = self.melspec(estimate / std).sqrt()
95
-
96
- delta = z_gt - z_est
97
- ref_db = scale_to_db(z_gt, self.min_activity_volume)
98
- delta_db = scale_to_db(delta.abs(), min_volume=-120)
99
- relative_db = (delta_db - ref_db).clamp(self.min_relative_volume, self.max_relative_volume)
100
- dims = list(range(relative_db.dim()))
101
- dims.remove(dims[-2])
102
- losses_per_band = relative_db.mean(dim=dims)
103
- aggregated = [chunk.mean() for chunk in losses_per_band.chunk(self.num_aggregated_bands, dim=0)]
104
- metrics = {f'rvm_{index}': value for index, value in enumerate(aggregated)}
105
- metrics['rvm'] = losses_per_band.mean()
106
- return metrics
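A short usage sketch for the metric above; the waveforms are random placeholders, and real inputs would be audio at the configured sample rate with shape `[*, T]`.

```python
# Sketch only: comparing an estimate against its reference with RelativeVolumeMel.
import torch

rvm = RelativeVolumeMel(sample_rate=24000, num_aggregated_bands=4)

reference = torch.randn(1, 24000)                         # 1 s of "ground truth" audio
estimate = reference + 0.05 * torch.randn_like(reference)

metrics = rvm(estimate, reference)
print(metrics['rvm'].item())                              # overall relative volume in dB
print({k: v.item() for k, v in metrics.items() if k != 'rvm'})  # per-band scores
```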
 
spaces/AIFILMS/StyleGANEX/models/stylegan2/op/readme.md DELETED
@@ -1,12 +0,0 @@
1
- Code from [rosinality-stylegan2-pytorch-cpu](https://github.com/senior-sigan/rosinality-stylegan2-pytorch-cpu)
2
-
3
- Scripts to convert rosinality/stylegan2-pytorch to the CPU compatible format
4
-
5
- If you would like to use CPU for testing or have a problem regarding the cpp extention (fused and upfirdn2d), please make the following changes:
6
-
7
- Change `model.stylegan.op` to `model.stylegan.op_cpu`
8
- https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/util.py#L14
9
-
10
- https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/simple_augment.py#L12
11
-
12
- https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/stylegan/model.py#L11
 
 
spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/models.py DELETED
@@ -1,951 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from torchlibrosa.stft import Spectrogram, LogmelFilterBank
5
- from torchlibrosa.augmentation import SpecAugmentation
6
-
7
- from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
8
- import os
9
- import sys
10
- import math
11
- import numpy as np
12
-
13
- import torch
14
- import torch.nn as nn
15
- import torch.nn.functional as F
16
- from torch.nn.parameter import Parameter
17
- from torchlibrosa.stft import Spectrogram, LogmelFilterBank
18
- from torchlibrosa.augmentation import SpecAugmentation
19
- from audio_infer.pytorch.pytorch_utils import do_mixup
20
- import torch.utils.checkpoint as checkpoint
21
- from timm.models.layers import DropPath, to_2tuple, trunc_normal_
22
- import warnings
23
- from functools import partial
24
- #from mmdet.models.builder import BACKBONES
25
- from mmdet.utils import get_root_logger
26
- from mmcv.runner import load_checkpoint
27
- os.environ['TORCH_HOME'] = '../pretrained_models'
28
- from copy import deepcopy
29
- from timm.models.helpers import load_pretrained
30
- from torch.cuda.amp import autocast
31
- from collections import OrderedDict
32
- import io
33
- import re
34
- from mmcv.runner import _load_checkpoint, load_state_dict
35
- import mmcv.runner
36
- import copy
37
- import random
38
- from einops import rearrange
39
- from einops.layers.torch import Rearrange, Reduce
40
- from torch import nn, einsum
41
-
42
-
43
- def load_checkpoint(model,
44
- filename,
45
- map_location=None,
46
- strict=False,
47
- logger=None,
48
- revise_keys=[(r'^module\.', '')]):
49
- """Load checkpoint from a file or URI.
50
-
51
- Args:
52
- model (Module): Module to load checkpoint.
53
- filename (str): Accept local filepath, URL, ``torchvision://xxx``,
54
- ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
55
- details.
56
- map_location (str): Same as :func:`torch.load`.
57
- strict (bool): Whether to allow different params for the model and
58
- checkpoint.
59
- logger (:mod:`logging.Logger` or None): The logger for error message.
60
- revise_keys (list): A list of customized keywords to modify the
61
- state_dict in checkpoint. Each item is a (pattern, replacement)
62
- pair of the regular expression operations. Default: strip
63
- the prefix 'module.' by [(r'^module\\.', '')].
64
-
65
- Returns:
66
- dict or OrderedDict: The loaded checkpoint.
67
- """
68
-
69
- checkpoint = _load_checkpoint(filename, map_location, logger)
70
- new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
71
- new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
72
- checkpoint['patch_embed1.proj.weight'] = new_proj.weight
73
- # OrderedDict is a subclass of dict
74
- if not isinstance(checkpoint, dict):
75
- raise RuntimeError(
76
- f'No state_dict found in checkpoint file {filename}')
77
- # get state_dict from checkpoint
78
- if 'state_dict' in checkpoint:
79
- state_dict = checkpoint['state_dict']
80
- else:
81
- state_dict = checkpoint
82
-
83
- # strip prefix of state_dict
84
- metadata = getattr(state_dict, '_metadata', OrderedDict())
85
- for p, r in revise_keys:
86
- state_dict = OrderedDict(
87
- {re.sub(p, r, k): v
88
- for k, v in state_dict.items()})
89
- state_dict = OrderedDict({k.replace('backbone.',''):v for k,v in state_dict.items()})
90
- # Keep metadata in state_dict
91
- state_dict._metadata = metadata
92
-
93
- # load state_dict
94
- load_state_dict(model, state_dict, strict, logger)
95
- return checkpoint
96
-
97
- def init_layer(layer):
98
- """Initialize a Linear or Convolutional layer. """
99
- nn.init.xavier_uniform_(layer.weight)
100
-
101
- if hasattr(layer, 'bias'):
102
- if layer.bias is not None:
103
- layer.bias.data.fill_(0.)
104
-
105
-
106
- def init_bn(bn):
107
- """Initialize a Batchnorm layer. """
108
- bn.bias.data.fill_(0.)
109
- bn.weight.data.fill_(1.)
110
-
111
-
112
-
113
-
114
- class TimeShift(nn.Module):
115
- def __init__(self, mean, std):
116
- super().__init__()
117
- self.mean = mean
118
- self.std = std
119
-
120
- def forward(self, x):
121
- if self.training:
122
- shift = torch.empty(1).normal_(self.mean, self.std).int().item()
123
- x = torch.roll(x, shift, dims=2)
124
- return x
125
-
126
- class LinearSoftPool(nn.Module):
127
- """LinearSoftPool
128
- Linear softmax, takes logits and returns a probability, near to the actual maximum value.
129
- Taken from the paper:
130
- A Comparison of Five Multiple Instance Learning Pooling Functions for Sound Event Detection with Weak Labeling
131
- https://arxiv.org/abs/1810.09050
132
- """
133
- def __init__(self, pooldim=1):
134
- super().__init__()
135
- self.pooldim = pooldim
136
-
137
- def forward(self, logits, time_decision):
138
- return (time_decision**2).sum(self.pooldim) / time_decision.sum(
139
- self.pooldim)
140
-
141
- class PVT(nn.Module):
142
- def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
143
- fmax, classes_num):
144
-
145
- super(PVT, self).__init__()
146
-
147
- window = 'hann'
148
- center = True
149
- pad_mode = 'reflect'
150
- ref = 1.0
151
- amin = 1e-10
152
- top_db = None
153
-
154
- # Spectrogram extractor
155
- self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
156
- win_length=window_size, window=window, center=center, pad_mode=pad_mode,
157
- freeze_parameters=True)
158
-
159
- # Logmel feature extractor
160
- self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
161
- n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
162
- freeze_parameters=True)
163
-
164
- self.time_shift = TimeShift(0, 10)
165
- # Spec augmenter
166
- self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
167
- freq_drop_width=8, freq_stripes_num=2)
168
-
169
- self.bn0 = nn.BatchNorm2d(64)
170
- self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
171
- fdim=64,
172
- patch_size=7,
173
- stride=4,
174
- in_chans=1,
175
- num_classes=classes_num,
176
- embed_dims=[64, 128, 320, 512],
177
- depths=[3, 4, 6, 3],
178
- num_heads=[1, 2, 5, 8],
179
- mlp_ratios=[8, 8, 4, 4],
180
- qkv_bias=True,
181
- qk_scale=None,
182
- drop_rate=0.0,
183
- drop_path_rate=0.1,
184
- sr_ratios=[8, 4, 2, 1],
185
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
186
- num_stages=4,
187
- #pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
188
- )
189
- #self.temp_pool = LinearSoftPool()
190
- self.avgpool = nn.AdaptiveAvgPool1d(1)
191
- self.fc_audioset = nn.Linear(512, classes_num, bias=True)
192
-
193
- self.init_weights()
194
-
195
- def init_weights(self):
196
- init_bn(self.bn0)
197
- init_layer(self.fc_audioset)
198
-
199
- def forward(self, input, mixup_lambda=None):
200
- """Input: (batch_size, times_steps, freq_bins)"""
201
-
202
- interpolate_ratio = 32
203
-
204
- x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
205
- x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
206
- frames_num = x.shape[2]
207
- x = x.transpose(1, 3)
208
- x = self.bn0(x)
209
- x = x.transpose(1, 3)
210
-
211
- if self.training:
212
- x = self.time_shift(x)
213
- x = self.spec_augmenter(x)
214
-
215
- # Mixup on spectrogram
216
- if self.training and mixup_lambda is not None:
217
- x = do_mixup(x, mixup_lambda)
218
- #print(x.shape) #torch.Size([10, 1, 1001, 64])
219
- x = self.pvt_transformer(x)
220
- #print(x.shape) #torch.Size([10, 800, 128])
221
- x = torch.mean(x, dim=3)
222
-
223
- x = x.transpose(1, 2).contiguous()
224
- framewise_output = torch.sigmoid(self.fc_audioset(x))
225
- #clipwise_output = torch.mean(framewise_output, dim=1)
226
- #clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
227
- x = framewise_output.transpose(1, 2).contiguous()
228
- x = self.avgpool(x)
229
- clipwise_output = torch.flatten(x, 1)
230
- #print(framewise_output.shape) #torch.Size([10, 100, 17])
231
- framewise_output = interpolate(framewise_output, interpolate_ratio)
232
- #framewise_output = framewise_output[:,:1000,:]
233
- #framewise_output = pad_framewise_output(framewise_output, frames_num)
234
- output_dict = {'framewise_output': framewise_output,
235
- 'clipwise_output': clipwise_output}
236
-
237
- return output_dict
238
-
239
- class PVT2(nn.Module):
240
- def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
241
- fmax, classes_num):
242
-
243
- super(PVT2, self).__init__()
244
-
245
- window = 'hann'
246
- center = True
247
- pad_mode = 'reflect'
248
- ref = 1.0
249
- amin = 1e-10
250
- top_db = None
251
-
252
- # Spectrogram extractor
253
- self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
254
- win_length=window_size, window=window, center=center, pad_mode=pad_mode,
255
- freeze_parameters=True)
256
-
257
- # Logmel feature extractor
258
- self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
259
- n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
260
- freeze_parameters=True)
261
-
262
- self.time_shift = TimeShift(0, 10)
263
- # Spec augmenter
264
- self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
265
- freq_drop_width=8, freq_stripes_num=2)
266
-
267
- self.bn0 = nn.BatchNorm2d(64)
268
- self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
269
- fdim=64,
270
- patch_size=7,
271
- stride=4,
272
- in_chans=1,
273
- num_classes=classes_num,
274
- embed_dims=[64, 128, 320, 512],
275
- depths=[3, 4, 6, 3],
276
- num_heads=[1, 2, 5, 8],
277
- mlp_ratios=[8, 8, 4, 4],
278
- qkv_bias=True,
279
- qk_scale=None,
280
- drop_rate=0.0,
281
- drop_path_rate=0.1,
282
- sr_ratios=[8, 4, 2, 1],
283
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
284
- num_stages=4,
285
- pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
286
- )
287
- #self.temp_pool = LinearSoftPool()
288
- self.fc_audioset = nn.Linear(512, classes_num, bias=True)
289
-
290
- self.init_weights()
291
-
292
- def init_weights(self):
293
- init_bn(self.bn0)
294
- init_layer(self.fc_audioset)
295
-
296
- def forward(self, input, mixup_lambda=None):
297
- """Input: (batch_size, times_steps, freq_bins)"""
298
-
299
- interpolate_ratio = 32
300
-
301
- x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
302
- x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
303
- frames_num = x.shape[2]
304
- x = x.transpose(1, 3)
305
- x = self.bn0(x)
306
- x = x.transpose(1, 3)
307
-
308
- if self.training:
309
- #x = self.time_shift(x)
310
- x = self.spec_augmenter(x)
311
-
312
- # Mixup on spectrogram
313
- if self.training and mixup_lambda is not None:
314
- x = do_mixup(x, mixup_lambda)
315
- #print(x.shape) #torch.Size([10, 1, 1001, 64])
316
- x = self.pvt_transformer(x)
317
- #print(x.shape) #torch.Size([10, 800, 128])
318
- x = torch.mean(x, dim=3)
319
-
320
- x = x.transpose(1, 2).contiguous()
321
- framewise_output = torch.sigmoid(self.fc_audioset(x))
322
- clipwise_output = torch.mean(framewise_output, dim=1)
323
- #clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
324
- #print(framewise_output.shape) #torch.Size([10, 100, 17])
325
- framewise_output = interpolate(framewise_output, interpolate_ratio)
326
- #framewise_output = framewise_output[:,:1000,:]
327
- #framewise_output = pad_framewise_output(framewise_output, frames_num)
328
- output_dict = {'framewise_output': framewise_output,
329
- 'clipwise_output': clipwise_output}
330
-
331
- return output_dict
332
-
333
- class PVT_2layer(nn.Module):
334
- def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
335
- fmax, classes_num):
336
-
337
- super(PVT_2layer, self).__init__()
338
-
339
- window = 'hann'
340
- center = True
341
- pad_mode = 'reflect'
342
- ref = 1.0
343
- amin = 1e-10
344
- top_db = None
345
-
346
- # Spectrogram extractor
347
- self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
348
- win_length=window_size, window=window, center=center, pad_mode=pad_mode,
349
- freeze_parameters=True)
350
-
351
- # Logmel feature extractor
352
- self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
353
- n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
354
- freeze_parameters=True)
355
-
356
- self.time_shift = TimeShift(0, 10)
357
- # Spec augmenter
358
- self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
359
- freq_drop_width=8, freq_stripes_num=2)
360
-
361
- self.bn0 = nn.BatchNorm2d(64)
362
- self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
363
- fdim=64,
364
- patch_size=7,
365
- stride=4,
366
- in_chans=1,
367
- num_classes=classes_num,
368
- embed_dims=[64, 128],
369
- depths=[3, 4],
370
- num_heads=[1, 2],
371
- mlp_ratios=[8, 8],
372
- qkv_bias=True,
373
- qk_scale=None,
374
- drop_rate=0.0,
375
- drop_path_rate=0.1,
376
- sr_ratios=[8, 4],
377
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
378
- num_stages=2,
379
- pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
380
- )
381
- #self.temp_pool = LinearSoftPool()
382
- self.avgpool = nn.AdaptiveAvgPool1d(1)
383
- self.fc_audioset = nn.Linear(128, classes_num, bias=True)
384
-
385
- self.init_weights()
386
-
387
- def init_weights(self):
388
- init_bn(self.bn0)
389
- init_layer(self.fc_audioset)
390
-
391
- def forward(self, input, mixup_lambda=None):
392
- """Input: (batch_size, times_steps, freq_bins)"""
393
-
394
- interpolate_ratio = 8
395
-
396
- x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
397
- x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
398
- frames_num = x.shape[2]
399
- x = x.transpose(1, 3)
400
- x = self.bn0(x)
401
- x = x.transpose(1, 3)
402
-
403
- if self.training:
404
- x = self.time_shift(x)
405
- x = self.spec_augmenter(x)
406
-
407
- # Mixup on spectrogram
408
- if self.training and mixup_lambda is not None:
409
- x = do_mixup(x, mixup_lambda)
410
- #print(x.shape) #torch.Size([10, 1, 1001, 64])
411
- x = self.pvt_transformer(x)
412
- #print(x.shape) #torch.Size([10, 800, 128])
413
- x = torch.mean(x, dim=3)
414
-
415
- x = x.transpose(1, 2).contiguous()
416
- framewise_output = torch.sigmoid(self.fc_audioset(x))
417
- #clipwise_output = torch.mean(framewise_output, dim=1)
418
- #clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
419
- x = framewise_output.transpose(1, 2).contiguous()
420
- x = self.avgpool(x)
421
- clipwise_output = torch.flatten(x, 1)
422
- #print(framewise_output.shape) #torch.Size([10, 100, 17])
423
- framewise_output = interpolate(framewise_output, interpolate_ratio)
424
- #framewise_output = framewise_output[:,:1000,:]
425
- #framewise_output = pad_framewise_output(framewise_output, frames_num)
426
- output_dict = {'framewise_output': framewise_output,
427
- 'clipwise_output': clipwise_output}
428
-
429
- return output_dict
430
-
431
- class PVT_lr(nn.Module):
432
- def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
433
- fmax, classes_num):
434
-
435
- super(PVT_lr, self).__init__()
436
-
437
- window = 'hann'
438
- center = True
439
- pad_mode = 'reflect'
440
- ref = 1.0
441
- amin = 1e-10
442
- top_db = None
443
-
444
- # Spectrogram extractor
445
- self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
446
- win_length=window_size, window=window, center=center, pad_mode=pad_mode,
447
- freeze_parameters=True)
448
-
449
- # Logmel feature extractor
450
- self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
451
- n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
452
- freeze_parameters=True)
453
-
454
- self.time_shift = TimeShift(0, 10)
455
- # Spec augmenter
456
- self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
457
- freq_drop_width=8, freq_stripes_num=2)
458
-
459
- self.bn0 = nn.BatchNorm2d(64)
460
- self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
461
- fdim=64,
462
- patch_size=7,
463
- stride=4,
464
- in_chans=1,
465
- num_classes=classes_num,
466
- embed_dims=[64, 128, 320, 512],
467
- depths=[3, 4, 6, 3],
468
- num_heads=[1, 2, 5, 8],
469
- mlp_ratios=[8, 8, 4, 4],
470
- qkv_bias=True,
471
- qk_scale=None,
472
- drop_rate=0.0,
473
- drop_path_rate=0.1,
474
- sr_ratios=[8, 4, 2, 1],
475
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
476
- num_stages=4,
477
- pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
478
- )
479
- self.temp_pool = LinearSoftPool()
480
- self.fc_audioset = nn.Linear(512, classes_num, bias=True)
481
-
482
- self.init_weights()
483
-
484
- def init_weights(self):
485
- init_bn(self.bn0)
486
- init_layer(self.fc_audioset)
487
-
488
- def forward(self, input, mixup_lambda=None):
489
- """Input: (batch_size, times_steps, freq_bins)"""
490
-
491
- interpolate_ratio = 32
492
-
493
- x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
494
- x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
495
- frames_num = x.shape[2]
496
- x = x.transpose(1, 3)
497
- x = self.bn0(x)
498
- x = x.transpose(1, 3)
499
-
500
- if self.training:
501
- x = self.time_shift(x)
502
- x = self.spec_augmenter(x)
503
-
504
- # Mixup on spectrogram
505
- if self.training and mixup_lambda is not None:
506
- x = do_mixup(x, mixup_lambda)
507
- #print(x.shape) #torch.Size([10, 1, 1001, 64])
508
- x = self.pvt_transformer(x)
509
- #print(x.shape) #torch.Size([10, 800, 128])
510
- x = torch.mean(x, dim=3)
511
-
512
- x = x.transpose(1, 2).contiguous()
513
- framewise_output = torch.sigmoid(self.fc_audioset(x))
514
- clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
515
- #print(framewise_output.shape) #torch.Size([10, 100, 17])
516
- framewise_output = interpolate(framewise_output, interpolate_ratio)
517
- #framewise_output = framewise_output[:,:1000,:]
518
- #framewise_output = pad_framewise_output(framewise_output, frames_num)
519
- output_dict = {'framewise_output': framewise_output,
520
- 'clipwise_output': clipwise_output}
521
-
522
- return output_dict
523
-
524
-
525
- class PVT_nopretrain(nn.Module):
526
- def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
527
- fmax, classes_num):
528
-
529
- super(PVT_nopretrain, self).__init__()
530
-
531
- window = 'hann'
532
- center = True
533
- pad_mode = 'reflect'
534
- ref = 1.0
535
- amin = 1e-10
536
- top_db = None
537
-
538
- # Spectrogram extractor
539
- self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
540
- win_length=window_size, window=window, center=center, pad_mode=pad_mode,
541
- freeze_parameters=True)
542
-
543
- # Logmel feature extractor
544
- self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
545
- n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
546
- freeze_parameters=True)
547
-
548
- self.time_shift = TimeShift(0, 10)
549
- # Spec augmenter
550
- self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
551
- freq_drop_width=8, freq_stripes_num=2)
552
-
553
- self.bn0 = nn.BatchNorm2d(64)
554
- self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
555
- fdim=64,
556
- patch_size=7,
557
- stride=4,
558
- in_chans=1,
559
- num_classes=classes_num,
560
- embed_dims=[64, 128, 320, 512],
561
- depths=[3, 4, 6, 3],
562
- num_heads=[1, 2, 5, 8],
563
- mlp_ratios=[8, 8, 4, 4],
564
- qkv_bias=True,
565
- qk_scale=None,
566
- drop_rate=0.0,
567
- drop_path_rate=0.1,
568
- sr_ratios=[8, 4, 2, 1],
569
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
570
- num_stages=4,
571
- #pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
572
- )
573
- self.temp_pool = LinearSoftPool()
574
- self.fc_audioset = nn.Linear(512, classes_num, bias=True)
575
-
576
- self.init_weights()
577
-
578
- def init_weights(self):
579
- init_bn(self.bn0)
580
- init_layer(self.fc_audioset)
581
-
582
- def forward(self, input, mixup_lambda=None):
583
- """Input: (batch_size, times_steps, freq_bins)"""
584
-
585
- interpolate_ratio = 32
586
-
587
- x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
588
- x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
589
- frames_num = x.shape[2]
590
- x = x.transpose(1, 3)
591
- x = self.bn0(x)
592
- x = x.transpose(1, 3)
593
-
594
- if self.training:
595
- x = self.time_shift(x)
596
- x = self.spec_augmenter(x)
597
-
598
- # Mixup on spectrogram
599
- if self.training and mixup_lambda is not None:
600
- x = do_mixup(x, mixup_lambda)
601
- #print(x.shape) #torch.Size([10, 1, 1001, 64])
602
- x = self.pvt_transformer(x)
603
- #print(x.shape) #torch.Size([10, 800, 128])
604
- x = torch.mean(x, dim=3)
605
-
606
- x = x.transpose(1, 2).contiguous()
607
- framewise_output = torch.sigmoid(self.fc_audioset(x))
608
- clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
609
- #print(framewise_output.shape) #torch.Size([10, 100, 17])
610
- framewise_output = interpolate(framewise_output, interpolate_ratio)
611
- framewise_output = framewise_output[:,:1000,:]
612
- #framewise_output = pad_framewise_output(framewise_output, frames_num)
613
- output_dict = {'framewise_output': framewise_output,
614
- 'clipwise_output': clipwise_output}
615
-
616
- return output_dict
617
-
618
-
619
- class Mlp(nn.Module):
620
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
621
- super().__init__()
622
- out_features = out_features or in_features
623
- hidden_features = hidden_features or in_features
624
- self.fc1 = nn.Linear(in_features, hidden_features)
625
- self.dwconv = DWConv(hidden_features)
626
- self.act = act_layer()
627
- self.fc2 = nn.Linear(hidden_features, out_features)
628
- self.drop = nn.Dropout(drop)
629
- self.linear = linear
630
- if self.linear:
631
- self.relu = nn.ReLU()
632
- self.apply(self._init_weights)
633
-
634
- def _init_weights(self, m):
635
- if isinstance(m, nn.Linear):
636
- trunc_normal_(m.weight, std=.02)
637
- if isinstance(m, nn.Linear) and m.bias is not None:
638
- nn.init.constant_(m.bias, 0)
639
- elif isinstance(m, nn.LayerNorm):
640
- nn.init.constant_(m.bias, 0)
641
- nn.init.constant_(m.weight, 1.0)
642
- elif isinstance(m, nn.Conv2d):
643
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
644
- fan_out //= m.groups
645
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
646
- if m.bias is not None:
647
- m.bias.data.zero_()
648
-
649
- def forward(self, x, H, W):
650
- x = self.fc1(x)
651
- if self.linear:
652
- x = self.relu(x)
653
- x = self.dwconv(x, H, W)
654
- x = self.act(x)
655
- x = self.drop(x)
656
- x = self.fc2(x)
657
- x = self.drop(x)
658
- return x
659
-
660
-
661
- class Attention(nn.Module):
662
- def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1, linear=False):
663
- super().__init__()
664
- assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
665
-
666
- self.dim = dim
667
- self.num_heads = num_heads
668
- head_dim = dim // num_heads
669
- self.scale = qk_scale or head_dim ** -0.5
670
-
671
- self.q = nn.Linear(dim, dim, bias=qkv_bias)
672
- self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
673
- self.attn_drop = nn.Dropout(attn_drop)
674
- self.proj = nn.Linear(dim, dim)
675
- self.proj_drop = nn.Dropout(proj_drop)
676
-
677
- self.linear = linear
678
- self.sr_ratio = sr_ratio
679
- if not linear:
680
- if sr_ratio > 1:
681
- self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
682
- self.norm = nn.LayerNorm(dim)
683
- else:
684
- self.pool = nn.AdaptiveAvgPool2d(7)
685
- self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
686
- self.norm = nn.LayerNorm(dim)
687
- self.act = nn.GELU()
688
- self.apply(self._init_weights)
689
-
690
- def _init_weights(self, m):
691
- if isinstance(m, nn.Linear):
692
- trunc_normal_(m.weight, std=.02)
693
- if isinstance(m, nn.Linear) and m.bias is not None:
694
- nn.init.constant_(m.bias, 0)
695
- elif isinstance(m, nn.LayerNorm):
696
- nn.init.constant_(m.bias, 0)
697
- nn.init.constant_(m.weight, 1.0)
698
- elif isinstance(m, nn.Conv2d):
699
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
700
- fan_out //= m.groups
701
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
702
- if m.bias is not None:
703
- m.bias.data.zero_()
704
-
705
- def forward(self, x, H, W):
706
- B, N, C = x.shape
707
- q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
708
-
709
- if not self.linear:
710
- if self.sr_ratio > 1:
711
- x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
712
- x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
713
- x_ = self.norm(x_)
714
- kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
715
- else:
716
- kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
717
- else:
718
- x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
719
- x_ = self.sr(self.pool(x_)).reshape(B, C, -1).permute(0, 2, 1)
720
- x_ = self.norm(x_)
721
- x_ = self.act(x_)
722
- kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
723
- k, v = kv[0], kv[1]
724
-
725
- attn = (q @ k.transpose(-2, -1)) * self.scale
726
- attn = attn.softmax(dim=-1)
727
- attn = self.attn_drop(attn)
728
-
729
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
730
- x = self.proj(x)
731
- x = self.proj_drop(x)
732
-
733
- return x
734
-
735
-
736
- class Pooling(nn.Module):
737
- """
738
- Implementation of pooling for PoolFormer
739
- --pool_size: pooling size
740
- """
741
- def __init__(self, pool_size=3):
742
- super().__init__()
743
- self.pool = nn.AvgPool2d(
744
- pool_size, stride=1, padding=pool_size//2, count_include_pad=False)
745
-
746
- def forward(self, x):
747
- return self.pool(x) - x
748
-
749
- class Block(nn.Module):
750
-
751
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
752
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, linear=False):
753
- super().__init__()
754
- self.norm1 = norm_layer(dim)
755
- self.attn = Attention(
756
- dim,
757
- num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
758
- attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio, linear=linear)
759
- #self.norm3 = norm_layer(dim)
760
- #self.token_mixer = Pooling(pool_size=3)
761
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
762
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
763
- self.norm2 = norm_layer(dim)
764
- mlp_hidden_dim = int(dim * mlp_ratio)
765
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
766
- self.apply(self._init_weights)
767
-
768
- def _init_weights(self, m):
769
- if isinstance(m, nn.Linear):
770
- trunc_normal_(m.weight, std=.02)
771
- if isinstance(m, nn.Linear) and m.bias is not None:
772
- nn.init.constant_(m.bias, 0)
773
- elif isinstance(m, nn.LayerNorm):
774
- nn.init.constant_(m.bias, 0)
775
- nn.init.constant_(m.weight, 1.0)
776
- elif isinstance(m, nn.Conv2d):
777
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
778
- fan_out //= m.groups
779
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
780
- if m.bias is not None:
781
- m.bias.data.zero_()
782
-
783
- def forward(self, x, H, W):
784
- x = x + self.drop_path(self.attn(self.norm1(x), H, W))
785
- x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
786
- return x
787
-
788
-
789
- class OverlapPatchEmbed(nn.Module):
790
- """ Image to Patch Embedding
791
- """
792
-
793
- def __init__(self, tdim, fdim, patch_size=7, stride=4, in_chans=3, embed_dim=768):
794
- super().__init__()
795
- img_size = (tdim, fdim)
796
- patch_size = to_2tuple(patch_size)
797
-
798
- self.img_size = img_size
799
- self.patch_size = patch_size
800
- self.H, self.W = img_size[0] // stride, img_size[1] // stride
801
- self.num_patches = self.H * self.W
802
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
803
- padding=(patch_size[0] // 3, patch_size[1] // 3))
804
- self.norm = nn.LayerNorm(embed_dim)
805
-
806
- self.apply(self._init_weights)
807
-
808
- def _init_weights(self, m):
809
- if isinstance(m, nn.Linear):
810
- trunc_normal_(m.weight, std=.02)
811
- if isinstance(m, nn.Linear) and m.bias is not None:
812
- nn.init.constant_(m.bias, 0)
813
- elif isinstance(m, nn.LayerNorm):
814
- nn.init.constant_(m.bias, 0)
815
- nn.init.constant_(m.weight, 1.0)
816
- elif isinstance(m, nn.Conv2d):
817
- fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
818
- fan_out //= m.groups
819
- m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
820
- if m.bias is not None:
821
- m.bias.data.zero_()
822
-
823
-     def forward(self, x):
-         x = self.proj(x)
-         _, _, H, W = x.shape
-         x = x.flatten(2).transpose(1, 2)
-         x = self.norm(x)
-
-         return x, H, W
-
-
- class PyramidVisionTransformerV2(nn.Module):
-     def __init__(self, tdim=1001, fdim=64, patch_size=16, stride=4, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
-                  num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
-                  attn_drop_rate=0., drop_path_rate=0.1, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3],
-                  sr_ratios=[8, 4, 2, 1], num_stages=2, linear=False, pretrained=None):
-         super().__init__()
-         # self.num_classes = num_classes
-         self.depths = depths
-         self.num_stages = num_stages
-         self.linear = linear
-
-         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
-         cur = 0
-
-         for i in range(num_stages):
-             patch_embed = OverlapPatchEmbed(tdim=tdim if i == 0 else tdim // (2 ** (i + 1)),
-                                             fdim=fdim if i == 0 else tdim // (2 ** (i + 1)),
-                                             patch_size=7 if i == 0 else 3,
-                                             stride=stride if i == 0 else 2,
-                                             in_chans=in_chans if i == 0 else embed_dims[i - 1],
-                                             embed_dim=embed_dims[i])
-             block = nn.ModuleList([Block(
-                 dim=embed_dims[i], num_heads=num_heads[i], mlp_ratio=mlp_ratios[i], qkv_bias=qkv_bias,
-                 qk_scale=qk_scale,
-                 drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + j], norm_layer=norm_layer,
-                 sr_ratio=sr_ratios[i], linear=linear)
-                 for j in range(depths[i])])
-             norm = norm_layer(embed_dims[i])
-             cur += depths[i]
-
-             setattr(self, f"patch_embed{i + 1}", patch_embed)
-             setattr(self, f"block{i + 1}", block)
-             setattr(self, f"norm{i + 1}", norm)
-         # self.n = nn.Linear(125, 250, bias=True)
-         # classification head
-         # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
-         self.apply(self._init_weights)
-         self.init_weights(pretrained)
-
-     def _init_weights(self, m):
-         if isinstance(m, nn.Linear):
-             trunc_normal_(m.weight, std=.02)
-             if isinstance(m, nn.Linear) and m.bias is not None:
-                 nn.init.constant_(m.bias, 0)
-         elif isinstance(m, nn.LayerNorm):
-             nn.init.constant_(m.bias, 0)
-             nn.init.constant_(m.weight, 1.0)
-         elif isinstance(m, nn.Conv2d):
-             fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-             fan_out //= m.groups
-             m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
-             if m.bias is not None:
-                 m.bias.data.zero_()
-
-     def init_weights(self, pretrained=None):
-         if isinstance(pretrained, str):
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
-
-     def freeze_patch_emb(self):
-         self.patch_embed1.requires_grad = False
-
-     @torch.jit.ignore
-     def no_weight_decay(self):
-         return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}  # has pos_embed may be better
-
-     def get_classifier(self):
-         return self.head
-
-     def reset_classifier(self, num_classes, global_pool=''):
-         self.num_classes = num_classes
-         self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
-
-     def forward_features(self, x):
-         B = x.shape[0]
-
-         for i in range(self.num_stages):
-             patch_embed = getattr(self, f"patch_embed{i + 1}")
-             block = getattr(self, f"block{i + 1}")
-             norm = getattr(self, f"norm{i + 1}")
-             x, H, W = patch_embed(x)
-             # print(x.shape)
-             for blk in block:
-                 x = blk(x, H, W)
-             # print(x.shape)
-             x = norm(x)
-             # if i != self.num_stages - 1:
-             x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
-             # print(x.shape)
-         return x
-
-     def forward(self, x):
-         x = self.forward_features(x)
-         # x = self.head(x)
-
-         return x
-
-
- class DWConv(nn.Module):
-     def __init__(self, dim=768):
-         super(DWConv, self).__init__()
-         self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
-
-     def forward(self, x, H, W):
-         B, N, C = x.shape
-         x = x.transpose(1, 2).view(B, C, H, W)
-         x = self.dwconv(x)
-         x = x.flatten(2).transpose(1, 2)
-
-         return x
-
-
- def _conv_filter(state_dict, patch_size=16):
-     """ convert patch embedding weight from manual patchify + linear proj to conv"""
-     out_dict = {}
-     for k, v in state_dict.items():
-         if 'patch_embed.proj.weight' in k:
-             v = v.reshape((v.shape[0], 3, patch_size, patch_size))
-         out_dict[k] = v
-
-     return out_dict
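For context, a minimal sketch of what `_conv_filter` is for; the fake state dict below is an illustrative stand-in, not a real checkpoint. The helper reshapes a flattened patch-embedding projection of shape (embed_dim, 3*P*P) into the (embed_dim, 3, P, P) convolution weight expected by a conv-based patch embedding.

import torch

# Illustrative only: a fake state dict standing in for a real checkpoint.
fake_sd = {"patch_embed.proj.weight": torch.randn(64, 3 * 16 * 16)}
converted = _conv_filter(fake_sd, patch_size=16)
print(converted["patch_embed.proj.weight"].shape)  # torch.Size([64, 3, 16, 16])
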
spaces/AIGText/GlyphControl/ldm/models/diffusion/ddpm.py DELETED
@@ -1,1954 +0,0 @@
- """
- wild mixture of
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
- https://github.com/CompVis/taming-transformers
- -- merci
- """
-
- import torch
- import torch.nn as nn
- import numpy as np
- import pytorch_lightning as pl
- from torch.optim.lr_scheduler import LambdaLR
- from einops import rearrange, repeat
- from contextlib import contextmanager, nullcontext
- from functools import partial
- import itertools
- from tqdm import tqdm
- from torchvision.utils import make_grid
- from pytorch_lightning.utilities.distributed import rank_zero_only
- from omegaconf import ListConfig
-
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
- from ldm.modules.ema import LitEma
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
- from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
- from ldm.models.diffusion.ddim import DDIMSampler
-
-
- __conditioning_keys__ = {'concat': 'c_concat',
-                          'crossattn': 'c_crossattn',
-                          'adm': 'y'}
-
-
- def disabled_train(self, mode=True):
-     """Overwrite model.train with this function to make sure train/eval mode
-     does not change anymore."""
-     return self
-
-
- def uniform_on_device(r1, r2, shape, device):
-     return (r1 - r2) * torch.rand(*shape, device=device) + r2
-
-
- class DDPM(pl.LightningModule):
-     # classic DDPM with Gaussian diffusion, in image space
-     def __init__(self,
-                  unet_config,
-                  timesteps=1000,
-                  beta_schedule="linear",
-                  loss_type="l2",
-                  ckpt_path=None,
-                  ignore_keys=[],
-                  load_only_unet=False,
-                  monitor="val/loss",
-                  use_ema=True,
-                  first_stage_key="image",
-                  image_size=256,
-                  channels=3,
-                  log_every_t=100,
-                  clip_denoised=True,
-                  linear_start=1e-4,
-                  linear_end=2e-2,
-                  cosine_s=8e-3,
-                  given_betas=None,
-                  original_elbo_weight=0.,
-                  v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
-                  l_simple_weight=1.,
-                  conditioning_key=None,
-                  parameterization="eps",  # all assuming fixed variance schedules
-                  scheduler_config=None,
-                  use_positional_encodings=False,
-                  learn_logvar=False,
-                  logvar_init=0.,
-                  make_it_fit=False,
-                  ucg_training=None,
-                  reset_ema=False,
-                  reset_num_ema_updates=False,
-                  keep_num_ema_updates=False,
-                  textemb_merge_config=None,
-                  merge_textemb=False,
-                  log_all_grad_norm=False,
-                  ):
-         super().__init__()
-         assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
-         self.parameterization = parameterization
-         print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
-         self.cond_stage_model = None
-         self.clip_denoised = clip_denoised
-         self.log_every_t = log_every_t
-         self.first_stage_key = first_stage_key
-         self.image_size = image_size  # try conv?
-         self.channels = channels
-         self.use_positional_encodings = use_positional_encodings
-         self.model = DiffusionWrapper(unet_config, conditioning_key, textemb_merge_config=textemb_merge_config, merge_textemb=merge_textemb)
-         count_params(self.model, verbose=True)
-         self.use_ema = use_ema
-         if self.use_ema:
-             self.model_ema = LitEma(self.model)
-             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
-         self.use_scheduler = scheduler_config is not None
-         if self.use_scheduler:
-             self.scheduler_config = scheduler_config
-
-         self.v_posterior = v_posterior
-         self.original_elbo_weight = original_elbo_weight
-         self.l_simple_weight = l_simple_weight
-
-         if monitor is not None:
-             self.monitor = monitor
-         self.make_it_fit = make_it_fit
-         if reset_ema: assert exists(ckpt_path)
-         if ckpt_path is not None:
-             ema_num_updates = self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
-             if reset_ema:
-                 assert self.use_ema
-                 print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
-                 self.model_ema = LitEma(self.model, init_num_updates=ema_num_updates if keep_num_ema_updates else 0)
-         if reset_num_ema_updates:
-             print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
-             assert self.use_ema
-             self.model_ema.reset_num_updates()
-
-         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
-                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
-         self.loss_type = loss_type
-
-         self.learn_logvar = learn_logvar
-         self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
-         if self.learn_logvar:
-             self.logvar = nn.Parameter(self.logvar, requires_grad=True)
-         # else:
-         #     self.register_buffer('logvar', self.logvar)
-
-         self.ucg_training = ucg_training or dict()
-         if self.ucg_training:
-             self.ucg_prng = np.random.RandomState()
-         self.log_all_grad_norm = log_all_grad_norm
-
-     def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
-                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-         if exists(given_betas):
-             betas = given_betas
-         else:
-             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                        cosine_s=cosine_s)
-         alphas = 1. - betas
-         alphas_cumprod = np.cumprod(alphas, axis=0)
-         alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-         timesteps, = betas.shape
-         self.num_timesteps = int(timesteps)
-         self.linear_start = linear_start
-         self.linear_end = linear_end
-         assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
-         to_torch = partial(torch.tensor, dtype=torch.float32)
-
-         self.register_buffer('betas', to_torch(betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-         # calculations for posterior q(x_{t-1} | x_t, x_0) following IDDPM
-         posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
-                 1. - alphas_cumprod) + self.v_posterior * betas
-         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
-         self.register_buffer('posterior_variance', to_torch(posterior_variance))
-         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
-         self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
-         self.register_buffer('posterior_mean_coef1', to_torch(
-             betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
-         self.register_buffer('posterior_mean_coef2', to_torch(
-             (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-         # weights before the simple loss
-         if self.parameterization == "eps":
-             lvlb_weights = self.betas ** 2 / (
-                     2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
-         elif self.parameterization == "x0":
-             lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
-         elif self.parameterization == "v":
-             lvlb_weights = torch.ones_like(self.betas ** 2 / (
-                     2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
-         else:
-             raise NotImplementedError("mu not supported")
-         lvlb_weights[0] = lvlb_weights[1]  # ?
-         self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
-         assert not torch.isnan(self.lvlb_weights).all()
-
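As a standalone sanity check of the comment above, the two forms of the posterior variance agree numerically (with the default v_posterior = 0). The snippet uses a plain linearly spaced beta schedule purely for illustration; make_beta_schedule may space the betas differently.

import numpy as np

timesteps = 1000
betas = np.linspace(1e-4, 2e-2, timesteps)   # illustrative spacing only
alphas = 1.0 - betas
abar = np.cumprod(alphas)
abar_prev = np.append(1.0, abar[:-1])
# t = 0 is skipped below because abar_prev = 1 makes the second form degenerate.
post_var = betas[1:] * (1.0 - abar_prev[1:]) / (1.0 - abar[1:])           # form used above
alt_form = 1.0 / (1.0 / (1.0 - abar_prev[1:]) + alphas[1:] / betas[1:])   # form from the comment
assert np.allclose(post_var, alt_form)
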
- @contextmanager
200
- def ema_scope(self, context=None):
201
- if self.use_ema:
202
- self.model_ema.store(self.model.parameters())
203
- self.model_ema.copy_to(self.model)
204
- if context is not None:
205
- print(f"{context}: Switched to EMA weights")
206
- try:
207
- yield None
208
- finally:
209
- if self.use_ema:
210
- self.model_ema.restore(self.model.parameters())
211
- if context is not None:
212
- print(f"{context}: Restored training weights")
213
-
214
- @torch.no_grad()
215
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
216
- sd = torch.load(path, map_location="cpu")
217
- if "state_dict" in list(sd.keys()):
218
- sd = sd["state_dict"]
219
- keys = list(sd.keys())
220
- for k in keys:
221
- for ik in ignore_keys:
222
- if k.startswith(ik):
223
- print("Deleting key {} from state_dict.".format(k))
224
- del sd[k]
225
- if self.make_it_fit:
226
- n_params = len([name for name, _ in
227
- itertools.chain(self.named_parameters(),
228
- self.named_buffers())])
229
- for name, param in tqdm(
230
- itertools.chain(self.named_parameters(),
231
- self.named_buffers()),
232
- desc="Fitting old weights to new weights",
233
- total=n_params
234
- ):
235
- if not name in sd:
236
- continue
237
- old_shape = sd[name].shape
238
- new_shape = param.shape
239
- assert len(old_shape) == len(new_shape)
240
- if len(new_shape) > 2:
241
- # we only modify first two axes
242
- assert new_shape[2:] == old_shape[2:]
243
- # assumes first axis corresponds to output dim
244
- if not new_shape == old_shape:
245
- new_param = param.clone()
246
- old_param = sd[name]
247
- if len(new_shape) == 1:
248
- for i in range(new_param.shape[0]):
249
- new_param[i] = old_param[i % old_shape[0]]
250
- elif len(new_shape) >= 2:
251
- for i in range(new_param.shape[0]):
252
- for j in range(new_param.shape[1]):
253
- new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
254
-
255
- n_used_old = torch.ones(old_shape[1])
256
- for j in range(new_param.shape[1]):
257
- n_used_old[j % old_shape[1]] += 1
258
- n_used_new = torch.zeros(new_shape[1])
259
- for j in range(new_param.shape[1]):
260
- n_used_new[j] = n_used_old[j % old_shape[1]]
261
-
262
- n_used_new = n_used_new[None, :]
263
- while len(n_used_new.shape) < len(new_shape):
264
- n_used_new = n_used_new.unsqueeze(-1)
265
- new_param /= n_used_new
266
-
267
- sd[name] = new_param
268
-
269
- # missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
270
- # sd, strict=False)
271
- if not only_model:
272
- missing, unexpected = self.load_state_dict(sd, strict=False)
273
- elif path.endswith(".bin"):
274
- missing, unexpected = self.model.diffusion_model.load_state_dict(sd, strict=False)
275
- elif path.endswith(".ckpt"):
276
- missing, unexpected = self.model.load_state_dict(sd, strict=False)
277
-
278
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
279
- if len(missing) > 0:
280
- print(f"Missing Keys:\n {missing}")
281
- if len(unexpected) > 0:
282
- print(f"\nUnexpected Keys:\n {unexpected}")
283
-
284
- if "model_ema.num_updates" in sd and "model_ema.num_updates" not in unexpected:
285
- return sd["model_ema.num_updates"].item()
286
- else:
287
- return 0
288
- # q(x_t | x_0)
289
- def q_mean_variance(self, x_start, t):
290
- """
291
- Get the distribution q(x_t | x_0).
292
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
293
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
294
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
295
- """
296
- mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
297
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
298
- log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
299
- return mean, variance, log_variance
300
-
301
- def predict_start_from_noise(self, x_t, t, noise):
302
- return (
303
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
304
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
305
- )
306
-
307
- def predict_start_from_z_and_v(self, x_t, t, v):
308
- # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
309
- # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
310
- return (
311
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
312
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
313
- )
314
-
315
- def predict_eps_from_z_and_v(self, x_t, t, v):
316
- return (
317
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
318
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
319
- )
320
- # q(x_(t-1) | x_t, x_0)
321
- def q_posterior(self, x_start, x_t, t):
322
- posterior_mean = (
323
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
324
- extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
325
- )
326
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
327
- posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
328
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
329
- # p(x_(t-1) | x_t)
330
- def p_mean_variance(self, x, t, clip_denoised: bool):
331
- model_out = self.model(x, t)
332
- if self.parameterization == "eps":
333
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
334
- elif self.parameterization == "x0":
335
- x_recon = model_out
336
- if clip_denoised: # static thresholding
337
- x_recon.clamp_(-1., 1.)
338
-
339
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
340
- return model_mean, posterior_variance, posterior_log_variance
341
- # one sampling step ancestral sampling
342
- @torch.no_grad()
343
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
344
- b, *_, device = *x.shape, x.device
345
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
346
- noise = noise_like(x.shape, device, repeat_noise)
347
- # no noise when t == 0
348
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
349
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
350
- # sampling loop
351
- @torch.no_grad()
352
- def p_sample_loop(self, shape, return_intermediates=False):
353
- device = self.betas.device
354
- b = shape[0]
355
- img = torch.randn(shape, device=device)
356
- intermediates = [img]
357
- for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
358
- img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
359
- clip_denoised=self.clip_denoised)
360
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
361
- intermediates.append(img)
362
- if return_intermediates:
363
- return img, intermediates
364
- return img
365
-
366
- @torch.no_grad()
367
- def sample(self, batch_size=16, return_intermediates=False):
368
- image_size = self.image_size
369
- channels = self.channels
370
- return self.p_sample_loop((batch_size, channels, image_size, image_size),
371
- return_intermediates=return_intermediates)
372
- # sampling from q(x_t | x_0)
373
- def q_sample(self, x_start, t, noise=None):
374
- noise = default(noise, lambda: torch.randn_like(x_start))
375
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
376
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
377
- # get v from x and noise
378
- def get_v(self, x, noise, t):
379
- return (
380
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
381
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
382
- )
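A small self-contained check of the algebra shared by q_sample, get_v and predict_start_from_z_and_v above (a random scalar stands in for the per-timestep alphas_cumprod entry): with z = sqrt(a)*x + sqrt(1-a)*eps and v = sqrt(a)*eps - sqrt(1-a)*x, it follows that sqrt(a)*z - sqrt(1-a)*v recovers x.

import torch

a = torch.rand(())                             # stands in for alphas_cumprod[t]
x = torch.randn(2, 3, 8, 8)
eps = torch.randn_like(x)
z = a.sqrt() * x + (1 - a).sqrt() * eps        # what q_sample computes
v = a.sqrt() * eps - (1 - a).sqrt() * x        # what get_v computes
x_rec = a.sqrt() * z - (1 - a).sqrt() * v      # what predict_start_from_z_and_v computes
assert torch.allclose(x_rec, x, atol=1e-5)
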
383
- # loss type
384
- def get_loss(self, pred, target, mean=True):
385
- if self.loss_type == 'l1':
386
- loss = (target - pred).abs()
387
- if mean:
388
- loss = loss.mean()
389
- elif self.loss_type == 'l2':
390
- if mean:
391
- loss = torch.nn.functional.mse_loss(target, pred)
392
- else:
393
- loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
394
- else:
395
- raise NotImplementedError("unknown loss type '{loss_type}'")
396
-
397
- return loss
398
- # training loss
399
- def p_losses(self, x_start, t, noise=None):
400
- noise = default(noise, lambda: torch.randn_like(x_start))
401
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
402
- model_out = self.model(x_noisy, t)
403
-
404
- loss_dict = {}
405
- if self.parameterization == "eps":
406
- target = noise
407
- elif self.parameterization == "x0":
408
- target = x_start
409
- elif self.parameterization == "v":
410
- target = self.get_v(x_start, noise, t)
411
- else:
412
- raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
413
- # L_simple
414
- loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
415
- log_prefix = 'train' if self.training else 'val'
416
-
417
- loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
418
- loss_simple = loss.mean() * self.l_simple_weight
419
- # L_vlb
420
- loss_vlb = (self.lvlb_weights[t] * loss).mean()
421
- loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
422
- # L_simple + lambda * L_vlb following IDDPM
423
- loss = loss_simple + self.original_elbo_weight * loss_vlb
424
-
425
- loss_dict.update({f'{log_prefix}/loss': loss})
426
-
427
- return loss, loss_dict
428
- # using during training
429
- def forward(self, x, *args, **kwargs):
430
- # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
431
- # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
432
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
433
- return self.p_losses(x, t, *args, **kwargs)
434
-
435
- def get_input(self, batch, k):
436
- x = batch[k]
437
- if len(x.shape) == 3:
438
- x = x[..., None]
439
- x = rearrange(x, 'b h w c -> b c h w')
440
- x = x.to(memory_format=torch.contiguous_format).float()
441
- # if self.trainer.precision == 16:
442
- # x = x.type(torch.float16)
443
- return x
444
-
445
- def shared_step(self, batch):
446
- x = self.get_input(batch, self.first_stage_key)
447
- loss, loss_dict = self(x)
448
- return loss, loss_dict
449
- # main training step
450
- # def training_step(self, batch, batch_idx):
451
- # change
452
- def training_step(self, batch, batch_idx, optimizer_idx=0):
453
- for k in self.ucg_training:
454
- p = self.ucg_training[k]["p"]
455
- val = self.ucg_training[k]["val"]
456
- if val is None:
457
- val = ""
458
- for i in range(len(batch[k])):
459
- if self.ucg_prng.choice(2, p=[1 - p, p]):
460
- batch[k][i] = val
461
-
462
- loss, loss_dict = self.shared_step(batch)
463
-
464
- self.log_dict(loss_dict, prog_bar=True,
465
- logger=True, on_step=True, on_epoch=True)
466
- # if self.global_step == 19:
467
- # aa = 1
468
- self.log("global_step", self.global_step,
469
- prog_bar=True, logger=True, on_step=True, on_epoch=False)
470
- ac_loss_str = self.trainer.progress_bar_dict["loss"]
471
- ac_loss = float(ac_loss_str) if ac_loss_str != "nan" else 0
472
- log_prefix = 'train' if self.training else 'val'
473
- self.log("{}/loss_accumulated".format(log_prefix),
474
- ac_loss,
475
- prog_bar=False, logger=True, on_step=True, on_epoch=False
476
- )
477
- # if ac_loss > 0.012:
478
- # assert self.cond_stage_key
479
- # print(batch[self.cond_stage_key][:15])
480
- if self.use_scheduler:
481
- lr = self.optimizers().param_groups[0]['lr']
482
- self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
483
-
484
- return loss
485
-
486
- @torch.no_grad()
487
- def validation_step(self, batch, batch_idx):
488
- _, loss_dict_no_ema = self.shared_step(batch)
489
- with self.ema_scope():
490
- _, loss_dict_ema = self.shared_step(batch)
491
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
492
- self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
493
- self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
494
- # ema
495
- def on_train_batch_end(self, *args, **kwargs):
496
- if self.use_ema:
497
- self.model_ema(self.model)
498
- if self.log_all_grad_norm:
499
- gradnorm_list = []
500
- for name, p in self.named_parameters():
501
- if p.requires_grad:
502
- grad_norm_v = p.grad.detach().norm().item()
503
- gradnorm_list.append(grad_norm_v)
504
- if "textemb_merge_model" in name:
505
- self.log("all_gradients/{}_norm".format(name),
506
- gradnorm_list[-1],
507
- prog_bar=False, logger=True, on_step=True, on_epoch=False
508
- )
509
- if grad_norm_v > 0.1:
510
- print("the norm of gradient w.r.t {} > 0.1: {:.2f}".format
511
- (
512
- name, grad_norm_v
513
- ))
514
-
515
- self.log("all_gradients/grad_norm_mean",
516
- np.mean(gradnorm_list),
517
- prog_bar=False, logger=True, on_step=True, on_epoch=False
518
- )
519
- self.log("all_gradients/grad_norm_max",
520
- np.max(gradnorm_list),
521
- prog_bar=False, logger=True, on_step=True, on_epoch=False
522
- )
523
- self.log("all_gradients/grad_norm_min",
524
- np.min(gradnorm_list),
525
- prog_bar=False, logger=True, on_step=True, on_epoch=False
526
- )
527
- self.log("all_gradients/param_num",
528
- len(gradnorm_list),
529
- prog_bar=False, logger=True, on_step=True, on_epoch=False
530
- )
531
- def _get_rows_from_list(self, samples):
532
- n_imgs_per_row = len(samples)
533
- denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
534
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
535
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
536
- return denoise_grid
537
-
538
- @torch.no_grad()
539
- def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
540
- log = dict()
541
- x = self.get_input(batch, self.first_stage_key)
542
- N = min(x.shape[0], N)
543
- n_row = min(x.shape[0], n_row)
544
- x = x.to(self.device)[:N]
545
- log["inputs"] = x
546
-
547
- # get diffusion row
548
- diffusion_row = list()
549
- x_start = x[:n_row]
550
-
551
- for t in range(self.num_timesteps):
552
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
553
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
554
- t = t.to(self.device).long()
555
- noise = torch.randn_like(x_start)
556
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
557
- diffusion_row.append(x_noisy)
558
-
559
- log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
560
-
561
- if sample:
562
- # get denoise row
563
- with self.ema_scope("Plotting"):
564
- samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
565
-
566
- log["samples"] = samples
567
- log["denoise_row"] = self._get_rows_from_list(denoise_row)
568
-
569
- if return_keys:
570
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
571
- return log
572
- else:
573
- return {key: log[key] for key in return_keys}
574
- return log
575
- # configure optimizers AdamW
576
- def configure_optimizers(self):
577
- lr = self.learning_rate
578
- params = list(self.model.parameters())
579
- if self.learn_logvar:
580
- params = params + [self.logvar]
581
- opt = torch.optim.AdamW(params, lr=lr)
582
- return opt
583
-
584
- # main class: LDM - first stage, DDPM, conditions
585
- class LatentDiffusion(DDPM):
586
- """main class"""
587
-
588
- def __init__(self,
589
- first_stage_config,
590
- cond_stage_config,
591
- # textemb_merge_config = None,
592
- num_timesteps_cond=None,
593
- cond_stage_key="image",
594
- cond_stage_trainable=False,
595
- concat_mode=True,
596
- cond_stage_forward=None,
597
- conditioning_key=None,
598
- scale_factor=1.0,
599
- scale_by_std=False,
600
- force_null_conditioning=False,
601
- *args, **kwargs):
602
- self.force_null_conditioning = force_null_conditioning
603
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
604
- self.scale_by_std = scale_by_std
605
- assert self.num_timesteps_cond <= kwargs['timesteps']
606
- # for backwards compatibility after implementation of DiffusionWrapper
607
- if conditioning_key is None:
608
- conditioning_key = 'concat' if concat_mode else 'crossattn'
609
- if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
610
- conditioning_key = None
611
- ckpt_path = kwargs.pop("ckpt_path", None)
612
- reset_ema = kwargs.pop("reset_ema", False)
613
- only_model= kwargs.pop("only_model", False)
614
- reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
615
- keep_num_ema_updates = kwargs.pop("keep_num_ema_updates", False)
616
- ignore_keys = kwargs.pop("ignore_keys", [])
617
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
618
- self.concat_mode = concat_mode
619
- self.cond_stage_trainable = cond_stage_trainable
620
- self.cond_stage_key = cond_stage_key
621
- try:
622
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
623
- except:
624
- self.num_downs = 0
625
- if not scale_by_std: #?
626
- self.scale_factor = scale_factor
627
- else:
628
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
629
- print("instantiate first stage model")
630
- self.instantiate_first_stage(first_stage_config)
631
- print("instantiate cond stage model")
632
- self.instantiate_cond_stage(cond_stage_config)
633
- self.cond_stage_forward = cond_stage_forward
634
- self.clip_denoised = False
635
- self.bbox_tokenizer = None
636
-
637
- self.restarted_from_ckpt = False
638
- if ckpt_path is not None:
639
- ema_num_updates = self.init_from_ckpt(ckpt_path, ignore_keys, only_model=only_model)
640
- self.restarted_from_ckpt = True
641
- if reset_ema:
642
- assert self.use_ema
643
- print(
644
- f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
645
- self.model_ema = LitEma(self.model, init_num_updates= ema_num_updates if keep_num_ema_updates else 0)
646
- if reset_num_ema_updates:
647
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
648
- assert self.use_ema
649
- self.model_ema.reset_num_updates()
650
-
651
- def make_cond_schedule(self, ):
652
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
653
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
654
- self.cond_ids[:self.num_timesteps_cond] = ids
655
- # calculate scale factor for the first batch
656
- @rank_zero_only
657
- @torch.no_grad()
658
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
659
- # only for very first batch
660
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
661
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
662
- # set rescale weight to 1./std of encodings
663
- print("### USING STD-RESCALING ###")
664
- x = super().get_input(batch, self.first_stage_key)
665
- x = x.to(self.device)
666
- encoder_posterior = self.encode_first_stage(x)
667
- z = self.get_first_stage_encoding(encoder_posterior).detach()
668
- del self.scale_factor
669
- self.register_buffer('scale_factor', 1. / z.flatten().std())
670
- print(f"setting self.scale_factor to {self.scale_factor}")
671
- print("### USING STD-RESCALING ###")
672
- if (
673
- # not self.disabled and
674
- self.global_step == 0 and
675
- self.current_epoch == 0 and batch_idx == 0
676
- # and self.log_first_step
677
- ):
678
- imagecallback = None
679
- for callback in self.trainer.callbacks:
680
- if "ImageLogger" in str(callback):
681
- imagecallback = callback
682
- break
683
- if imagecallback is not None and not imagecallback.disabled and imagecallback.log_first_step:
684
- is_train = self.training
685
- if is_train:
686
- self.eval()
687
- with torch.no_grad():
688
- # images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
689
- images = self.log_images(batch, **imagecallback.log_images_kwargs)
690
- import os, torchvision
691
- from PIL import Image
692
- root = os.path.join(self.logger.save_dir, "images", "init")
693
- for k in images:
694
- N = min(images[k].shape[0], imagecallback.max_images)
695
- images[k] = images[k][:N]
696
- if isinstance(images[k], torch.Tensor):
697
- images[k] = images[k].detach().cpu()
698
- if imagecallback.clamp:
699
- images[k] = torch.clamp(images[k], -1., 1.)
700
- grid = torchvision.utils.make_grid(images[k], nrow=4)
701
- if imagecallback.rescale:
702
- grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
703
- grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
704
- grid = grid.numpy()
705
- grid = (grid * 255).astype(np.uint8)
706
- filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
707
- k,
708
- self.global_step,
709
- self.current_epoch,
710
- batch_idx)
711
- path = os.path.join(root, filename)
712
- os.makedirs(os.path.split(path)[0], exist_ok=True)
713
- Image.fromarray(grid).save(path)
714
- del grid
715
- del images
716
- print("log images before training")
717
- # imagecallback.log_local(self.logger.save_dir, "init", images,
718
- # self.global_step, self.current_epoch, batch_idx, self,
719
- # wandb_log = False)
720
- if is_train:
721
- self.train()
722
-
723
- # if imagecallback is not None and not imagecallback.disabled and imagecallback.log_first_step:
724
- # imagecallback.log_img(self, batch, batch_idx, split="init")
725
- # rewrite
726
- def register_schedule(self,
727
- given_betas=None, beta_schedule="linear", timesteps=1000,
728
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
729
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
730
-
731
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
732
- if self.shorten_cond_schedule: # drop the option ?
733
- self.make_cond_schedule()
734
-
735
- def instantiate_first_stage(self, config): # not train
736
- model = instantiate_from_config(config)
737
- self.first_stage_model = model.eval()
738
- self.first_stage_model.train = disabled_train
739
- for param in self.first_stage_model.parameters():
740
- param.requires_grad = False
741
-
742
- # def instantiate_textemb_merge_model(self, config):
743
- # model = instantiate_from_config(config)
744
- # if not model.trainable:
745
- # self.textemb_merge_model = model.eval()
746
- # self.textemb_merge_model.train = disabled_train
747
- # for param in self.textemb_merge_model.parameters():
748
- # param.requires_grad = False
749
- # else:
750
- # self.textemb_merge_model = model
751
-
752
-
753
- def instantiate_cond_stage(self, config):
754
- if not self.cond_stage_trainable:
755
- if config == "__is_first_stage__":
756
- print("Using first stage also as cond stage.")
757
- self.cond_stage_model = self.first_stage_model
758
- elif config == "__is_unconditional__":
759
- print(f"Training {self.__class__.__name__} as an unconditional model.")
760
- self.cond_stage_model = None
761
- # self.be_unconditional = True
762
- else:
763
- model = instantiate_from_config(config)
764
- self.cond_stage_model = model.eval()
765
- self.cond_stage_model.train = disabled_train
766
- for param in self.cond_stage_model.parameters():
767
- param.requires_grad = False
768
- else:
769
- assert config != '__is_first_stage__'
770
- assert config != '__is_unconditional__'
771
- model = instantiate_from_config(config)
772
- self.cond_stage_model = model
773
-
774
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
775
- denoise_row = []
776
- for zd in tqdm(samples, desc=desc):
777
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
778
- force_not_quantize=force_no_decoder_quantization))
779
- n_imgs_per_row = len(denoise_row)
780
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
781
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
782
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
783
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
784
- return denoise_grid
785
- # first stage encoding
786
- def get_first_stage_encoding(self, encoder_posterior):
787
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
788
- z = encoder_posterior.sample()
789
- elif isinstance(encoder_posterior, torch.Tensor):
790
- z = encoder_posterior
791
- else:
792
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
793
- return self.scale_factor * z # rescale z before the diffusion process
794
- # encode the condition
795
- def get_learned_conditioning(self, c):
796
- if self.cond_stage_forward is None:
797
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
798
- c = self.cond_stage_model.encode(c)
799
- if isinstance(c, DiagonalGaussianDistribution):
800
- c = c.mode()
801
- else:
802
- c = self.cond_stage_model(c)
803
- else:
804
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
805
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
806
- return c
807
-
808
- def meshgrid(self, h, w):
809
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
810
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
811
-
812
- arr = torch.cat([y, x], dim=-1)
813
- return arr
814
-
815
- def delta_border(self, h, w):
816
- """
817
- :param h: height
818
- :param w: width
819
- :return: normalized distance to image border,
820
- with min distance = 0 at border and max dist = 0.5 at image center
821
- """
822
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
823
- arr = self.meshgrid(h, w) / lower_right_corner
824
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
825
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
826
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
827
- return edge_dist
828
-
829
- def get_weighting(self, h, w, Ly, Lx, device):
830
- weighting = self.delta_border(h, w)
831
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
832
- self.split_input_params["clip_max_weight"], )
833
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
834
-
835
- if self.split_input_params["tie_braker"]:
836
- L_weighting = self.delta_border(Ly, Lx)
837
- L_weighting = torch.clip(L_weighting,
838
- self.split_input_params["clip_min_tie_weight"],
839
- self.split_input_params["clip_max_tie_weight"])
840
-
841
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
842
- weighting = weighting * L_weighting
843
- return weighting
844
-
845
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
846
- """
847
- :param x: img of size (bs, c, h, w)
848
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
849
- """
850
- bs, nc, h, w = x.shape
851
-
852
- # number of crops in image
853
- Ly = (h - kernel_size[0]) // stride[0] + 1
854
- Lx = (w - kernel_size[1]) // stride[1] + 1
855
-
856
- if uf == 1 and df == 1:
857
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
858
- unfold = torch.nn.Unfold(**fold_params)
859
-
860
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
861
-
862
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
863
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
864
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
865
-
866
- elif uf > 1 and df == 1:
867
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
868
- unfold = torch.nn.Unfold(**fold_params)
869
-
870
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
871
- dilation=1, padding=0,
872
- stride=(stride[0] * uf, stride[1] * uf))
873
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
874
-
875
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
876
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
877
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
878
-
879
- elif df > 1 and uf == 1:
880
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
881
- unfold = torch.nn.Unfold(**fold_params)
882
-
883
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
884
- dilation=1, padding=0,
885
- stride=(stride[0] // df, stride[1] // df))
886
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
887
-
888
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
889
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
890
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
891
-
892
- else:
893
- raise NotImplementedError
894
-
895
- return fold, unfold, normalization, weighting
896
- # rewrite get input for training DM
897
- @torch.no_grad()
898
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
899
- cond_key=None, return_original_cond=False, bs=None, return_x=False):
900
- x = super().get_input(batch, k)
901
- if bs is not None:
902
- x = x[:bs]
903
- x = x.to(self.device)
904
- # get scaled latent vector z for training
905
- encoder_posterior = self.encode_first_stage(x)
906
- z = self.get_first_stage_encoding(encoder_posterior).detach()
907
-
908
- if self.model.conditioning_key is not None and not self.force_null_conditioning:
909
- if cond_key is None:
910
- cond_key = self.cond_stage_key
911
- if cond_key != self.first_stage_key:
912
- if cond_key in ['caption', 'coordinates_bbox', "txt"]:
913
- xc = batch[cond_key]
914
- elif cond_key in ['class_label', 'cls']:
915
- xc = batch
916
- else:
917
- xc = super().get_input(batch, cond_key).to(self.device)
918
- else:
919
- xc = x
920
- if not self.cond_stage_trainable or force_c_encode:
921
- if isinstance(xc, dict) or isinstance(xc, list):
922
- c = self.get_learned_conditioning(xc)
923
- else:
924
- c = self.get_learned_conditioning(xc.to(self.device))
925
- else:
926
- c = xc
927
- if bs is not None:
928
- c = c[:bs]
929
-
930
- if self.use_positional_encodings:
931
- pos_x, pos_y = self.compute_latent_shifts(batch)
932
- ckey = __conditioning_keys__[self.model.conditioning_key]
933
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
934
-
935
- else:
936
- c = None
937
- xc = None
938
- if self.use_positional_encodings:
939
- pos_x, pos_y = self.compute_latent_shifts(batch)
940
- c = {'pos_x': pos_x, 'pos_y': pos_y}
941
- # latent z + condition c
942
- out = [z, c]
943
- if return_first_stage_outputs:
944
- xrec = self.decode_first_stage(z)
945
- out.extend([x, xrec])
946
- if return_x:
947
- out.extend([x])
948
- if return_original_cond:
949
- out.append(xc)
950
- return out
951
- # from latent vector to x
952
- @torch.no_grad()
953
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
954
- if predict_cids:
955
- if z.dim() == 4:
956
- z = torch.argmax(z.exp(), dim=1).long()
957
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
958
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
959
-
960
- z = 1. / self.scale_factor * z
961
- return self.first_stage_model.decode(z)
962
- # from x to latent vector (not scaled)
963
- @torch.no_grad()
964
- def encode_first_stage(self, x):
965
- return self.first_stage_model.encode(x)
966
-
967
- def shared_step(self, batch, **kwargs):
968
- x, c = self.get_input(batch, self.first_stage_key) #,return_first_stage_outputs=True)
969
- # print("the shape of the batch data: {} | x[0,0,0,0]: {}".format(x.shape, x[0,0,0,0]))
970
- loss = self(x, c)
971
- return loss
972
-
973
- def forward(self, x, c, *args, **kwargs):
974
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
975
- if self.model.conditioning_key is not None:
976
- assert c is not None
977
- if self.cond_stage_trainable:
978
- c = self.get_learned_conditioning(c)
979
- if self.shorten_cond_schedule: # TODO: drop this option
980
- tc = self.cond_ids[t].to(self.device)
981
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
982
- return self.p_losses(x, c, t, *args, **kwargs)
983
- # diffusion model
984
- def apply_model(self, x_noisy, t, cond, return_ids=False):
985
- if isinstance(cond, dict):
986
- # hybrid case, cond is expected to be a dict
987
- pass
988
- else:
989
- if not isinstance(cond, list):
990
- cond = [cond] # text: cross attention
991
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
992
- cond = {key: cond}
993
-
994
- x_recon = self.model(x_noisy, t, **cond)
995
-
996
- if isinstance(x_recon, tuple) and not return_ids:
997
- return x_recon[0]
998
- else:
999
- return x_recon
1000
- # predict e from x_t and predicted x_start
1001
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
1002
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
1003
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
1004
- # KL between q(x_t | x) with N(0, I)
1005
- def _prior_bpd(self, x_start):
1006
- """
1007
- Get the prior KL term for the variational lower-bound, measured in
1008
- bits-per-dim.
1009
- This term can't be optimized, as it only depends on the encoder.
1010
- :param x_start: the [N x C x ...] tensor of inputs.
1011
- :return: a batch of [N] KL values (in bits), one per batch element.
1012
- """
1013
- batch_size = x_start.shape[0]
1014
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
1015
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
1016
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
1017
- return mean_flat(kl_prior) / np.log(2.0)
1018
- # rewrite: add the condition / add logvar to L_simple
1019
- def p_losses(self, x_start, cond, t, noise=None):
1020
- noise = default(noise, lambda: torch.randn_like(x_start))
1021
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
1022
- model_output = self.apply_model(x_noisy, t, cond)
1023
-
1024
- loss_dict = {}
1025
- prefix = 'train' if self.training else 'val'
1026
-
1027
- if self.parameterization == "x0":
1028
- target = x_start
1029
- elif self.parameterization == "eps":
1030
- target = noise
1031
- elif self.parameterization == "v":
1032
- target = self.get_v(x_start, noise, t)
1033
- else:
1034
- raise NotImplementedError()
1035
-
1036
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
1037
- # if True in np.isnan(loss_simple.detach().cpu().numpy()):
1038
- # aa = 1
1039
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
1040
- # log_var
1041
- logvar_t = self.logvar[t].to(self.device)
1042
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
1043
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
1044
- if self.learn_logvar:
1045
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
1046
- loss_dict.update({'logvar': self.logvar.data.mean()})
1047
-
1048
- loss = self.l_simple_weight * loss.mean()
1049
-
1050
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
1051
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
1052
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
1053
- loss += (self.original_elbo_weight * loss_vlb)
1054
- loss_dict.update({f'{prefix}/loss': loss})
1055
-
1056
- return loss, loss_dict
1057
- # rewrite: p(x_t-1 | x_t) add condition
1058
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
1059
- return_x0=False, score_corrector=None, corrector_kwargs=None):
1060
- t_in = t
1061
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
1062
-
1063
- if score_corrector is not None:
1064
- assert self.parameterization == "eps"
1065
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
1066
-
1067
- if return_codebook_ids:
1068
- model_out, logits = model_out
1069
-
1070
- if self.parameterization == "eps":
1071
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
1072
- elif self.parameterization == "x0":
1073
- x_recon = model_out
1074
- else:
1075
- raise NotImplementedError()
1076
-
1077
- if clip_denoised:
1078
- x_recon.clamp_(-1., 1.)
1079
- if quantize_denoised:
1080
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
1081
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
1082
- if return_codebook_ids:
1083
- return model_mean, posterior_variance, posterior_log_variance, logits
1084
- elif return_x0:
1085
- return model_mean, posterior_variance, posterior_log_variance, x_recon
1086
- else:
1087
- return model_mean, posterior_variance, posterior_log_variance
1088
-
1089
- @torch.no_grad()
1090
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
1091
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
1092
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
1093
- b, *_, device = *x.shape, x.device
1094
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
1095
- return_codebook_ids=return_codebook_ids,
1096
- quantize_denoised=quantize_denoised,
1097
- return_x0=return_x0,
1098
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1099
- if return_codebook_ids:
1100
- raise DeprecationWarning("Support dropped.")
1101
- model_mean, _, model_log_variance, logits = outputs
1102
- elif return_x0:
1103
- model_mean, _, model_log_variance, x0 = outputs
1104
- else:
1105
- model_mean, _, model_log_variance = outputs
1106
-
1107
- noise = noise_like(x.shape, device, repeat_noise) * temperature
1108
- if noise_dropout > 0.:
1109
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
1110
- # no noise when t == 0
1111
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
1112
-
1113
- if return_codebook_ids:
1114
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
1115
- if return_x0:
1116
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
1117
- else:
1118
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1119
-
1120
- @torch.no_grad()
1121
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
1122
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
1123
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
1124
- log_every_t=None):
1125
- if not log_every_t:
1126
- log_every_t = self.log_every_t
1127
- timesteps = self.num_timesteps
1128
- if batch_size is not None:
1129
- b = batch_size if batch_size is not None else shape[0]
1130
- shape = [batch_size] + list(shape)
1131
- else:
1132
- b = batch_size = shape[0]
1133
- if x_T is None:
1134
- img = torch.randn(shape, device=self.device)
1135
- else:
1136
- img = x_T
1137
- intermediates = []
1138
- if cond is not None:
1139
- if isinstance(cond, dict):
1140
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1141
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1142
- else:
1143
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1144
-
1145
- if start_T is not None:
1146
- timesteps = min(timesteps, start_T)
1147
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1148
- total=timesteps) if verbose else reversed(
1149
- range(0, timesteps))
1150
- if type(temperature) == float:
1151
- temperature = [temperature] * timesteps
1152
-
1153
- for i in iterator:
1154
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1155
- if self.shorten_cond_schedule:
1156
- assert self.model.conditioning_key != 'hybrid'
1157
- tc = self.cond_ids[ts].to(cond.device)
1158
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1159
-
1160
- img, x0_partial = self.p_sample(img, cond, ts,
1161
- clip_denoised=self.clip_denoised,
1162
- quantize_denoised=quantize_denoised, return_x0=True,
1163
- temperature=temperature[i], noise_dropout=noise_dropout,
1164
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1165
- if mask is not None:
1166
- assert x0 is not None
1167
- img_orig = self.q_sample(x0, ts)
1168
- img = img_orig * mask + (1. - mask) * img
1169
-
1170
- if i % log_every_t == 0 or i == timesteps - 1:
1171
- intermediates.append(x0_partial)
1172
- if callback: callback(i)
1173
- if img_callback: img_callback(img, i)
1174
- return img, intermediates
1175
-
1176
- @torch.no_grad()
1177
- def p_sample_loop(self, cond, shape, return_intermediates=False,
1178
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1179
- mask=None, x0=None, img_callback=None, start_T=None,
1180
- log_every_t=None):
1181
-
1182
- if not log_every_t:
1183
- log_every_t = self.log_every_t
1184
- device = self.betas.device
1185
- b = shape[0]
1186
- if x_T is None:
1187
- img = torch.randn(shape, device=device)
1188
- else:
1189
- img = x_T
1190
-
1191
- intermediates = [img]
1192
- if timesteps is None:
1193
- timesteps = self.num_timesteps
1194
-
1195
- if start_T is not None:
1196
- timesteps = min(timesteps, start_T)
1197
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1198
- range(0, timesteps))
1199
-
1200
- if mask is not None:
1201
- assert x0 is not None
1202
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1203
-
1204
- for i in iterator:
1205
- ts = torch.full((b,), i, device=device, dtype=torch.long)
1206
- if self.shorten_cond_schedule:
1207
- assert self.model.conditioning_key != 'hybrid'
1208
- tc = self.cond_ids[ts].to(cond.device)
1209
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1210
-
1211
- img = self.p_sample(img, cond, ts,
1212
- clip_denoised=self.clip_denoised,
1213
- quantize_denoised=quantize_denoised)
1214
- if mask is not None:
1215
- img_orig = self.q_sample(x0, ts)
1216
- img = img_orig * mask + (1. - mask) * img
1217
-
1218
- if i % log_every_t == 0 or i == timesteps - 1:
1219
- intermediates.append(img)
1220
- if callback: callback(i)
1221
- if img_callback: img_callback(img, i)
1222
-
1223
- if return_intermediates:
1224
- return img, intermediates
1225
- return img
1226
-
1227
- @torch.no_grad()
1228
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1229
- verbose=True, timesteps=None, quantize_denoised=False,
1230
- mask=None, x0=None, shape=None, **kwargs):
1231
- if shape is None:
1232
- shape = (batch_size, self.channels, self.image_size, self.image_size)
1233
- if cond is not None:
1234
- if isinstance(cond, dict):
1235
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1236
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1237
- else:
1238
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1239
- return self.p_sample_loop(cond,
1240
- shape,
1241
- return_intermediates=return_intermediates, x_T=x_T,
1242
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1243
- mask=mask, x0=x0)
1244
-
1245
- @torch.no_grad()
1246
- def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1247
- if ddim:
1248
- ddim_sampler = DDIMSampler(self)
1249
- shape = (self.channels, self.image_size, self.image_size)
1250
- samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1251
- shape, cond, verbose=False, **kwargs)
1252
-
1253
- else:
1254
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1255
- return_intermediates=True, **kwargs)
1256
-
1257
- return samples, intermediates
1258
-
1259
- @torch.no_grad()
1260
- def get_unconditional_conditioning(self, batch_size, null_label=None):
1261
- if null_label is not None:
1262
- xc = null_label
1263
- if isinstance(xc, ListConfig):
1264
- xc = list(xc)
1265
- if isinstance(xc, dict) or isinstance(xc, list):
1266
- c = self.get_learned_conditioning(xc)
1267
- else:
1268
- if hasattr(xc, "to"):
1269
- xc = xc.to(self.device)
1270
- c = self.get_learned_conditioning(xc)
1271
- else:
1272
- if self.cond_stage_key in ["class_label", "cls"]:
1273
- xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
1274
- return self.get_learned_conditioning(xc)
1275
- else:
1276
- raise NotImplementedError("todo")
1277
- if isinstance(c, list): # in case the encoder gives us a list
1278
- for i in range(len(c)):
1279
- c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
1280
- else:
1281
- c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
1282
- return c
1283
-
1284
- @torch.no_grad()
1285
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
1286
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1287
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1288
- use_ema_scope=True,
1289
- **kwargs):
1290
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1291
- use_ddim = ddim_steps is not None
1292
-
1293
- log = dict()
1294
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1295
- return_first_stage_outputs=True,
1296
- force_c_encode=True,
1297
- return_original_cond=True,
1298
- bs=N)
1299
- N = min(x.shape[0], N)
1300
- n_row = min(x.shape[0], n_row)
1301
- log["inputs"] = x
1302
- log["reconstruction"] = xrec
1303
- if self.model.conditioning_key is not None:
1304
- if hasattr(self.cond_stage_model, "decode"):
1305
- xc = self.cond_stage_model.decode(c)
1306
- log["conditioning"] = xc
1307
- elif self.cond_stage_key in ["caption", "txt"]:
1308
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1309
- log["conditioning"] = xc
1310
- elif self.cond_stage_key in ['class_label', "cls"]:
1311
- try:
1312
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1313
- log['conditioning'] = xc
1314
- except KeyError:
1315
- # probably no "human_label" in batch
1316
- pass
1317
- elif isimage(xc):
1318
- log["conditioning"] = xc
1319
- if ismap(xc):
1320
- log["original_conditioning"] = self.to_rgb(xc)
1321
-
1322
- if plot_diffusion_rows:
1323
- # get diffusion row
1324
- diffusion_row = list()
1325
- z_start = z[:n_row]
1326
- for t in range(self.num_timesteps):
1327
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1328
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1329
- t = t.to(self.device).long()
1330
- noise = torch.randn_like(z_start)
1331
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1332
- diffusion_row.append(self.decode_first_stage(z_noisy))
1333
-
1334
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1335
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1336
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1337
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1338
- log["diffusion_row"] = diffusion_grid
1339
-
1340
- if sample:
1341
- # get denoise row
1342
- with ema_scope("Sampling"):
1343
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1344
- ddim_steps=ddim_steps, eta=ddim_eta)
1345
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1346
- x_samples = self.decode_first_stage(samples)
1347
- log["samples"] = x_samples
1348
- if plot_denoise_rows:
1349
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1350
- log["denoise_row"] = denoise_grid
1351
-
1352
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1353
- self.first_stage_model, IdentityFirstStage):
1354
- # also display when quantizing x0 while sampling
1355
- with ema_scope("Plotting Quantized Denoised"):
1356
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1357
- ddim_steps=ddim_steps, eta=ddim_eta,
1358
- quantize_denoised=True)
1359
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1360
- # quantize_denoised=True)
1361
- x_samples = self.decode_first_stage(samples.to(self.device))
1362
- log["samples_x0_quantized"] = x_samples
1363
-
1364
- if unconditional_guidance_scale > 1.0:
1365
- uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1366
- if self.model.conditioning_key == "crossattn-adm":
1367
- uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
1368
- with ema_scope("Sampling with classifier-free guidance"):
1369
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1370
- ddim_steps=ddim_steps, eta=ddim_eta,
1371
- unconditional_guidance_scale=unconditional_guidance_scale,
1372
- unconditional_conditioning=uc,
1373
- )
1374
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1375
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1376
-
1377
- if inpaint:
1378
- # make a simple center square
1379
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
1380
- mask = torch.ones(N, h, w).to(self.device)
1381
- # zeros will be filled in
1382
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1383
- mask = mask[:, None, ...]
1384
- with ema_scope("Plotting Inpaint"):
1385
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1386
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1387
- x_samples = self.decode_first_stage(samples.to(self.device))
1388
- log["samples_inpainting"] = x_samples
1389
- log["mask"] = mask
1390
-
1391
- # outpaint
1392
- mask = 1. - mask
1393
- with ema_scope("Plotting Outpaint"):
1394
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1395
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1396
- x_samples = self.decode_first_stage(samples.to(self.device))
1397
- log["samples_outpainting"] = x_samples
1398
-
1399
- if plot_progressive_rows:
1400
- with ema_scope("Plotting Progressives"):
1401
- img, progressives = self.progressive_denoising(c,
1402
- shape=(self.channels, self.image_size, self.image_size),
1403
- batch_size=N)
1404
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1405
- log["progressive_row"] = prog_row
1406
-
1407
- if return_keys:
1408
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1409
- return log
1410
- else:
1411
- return {key: log[key] for key in return_keys}
1412
- return log
1413
-
1414
- def configure_optimizers(self):
1415
- lr = self.learning_rate
1416
- params = list(self.model.parameters())
1417
- if self.cond_stage_trainable:
1418
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1419
- params = params + list(self.cond_stage_model.parameters())
1420
- if self.learn_logvar:
1421
- print('Diffusion model optimizing logvar')
1422
- params.append(self.logvar)
1423
- opt = torch.optim.AdamW(params, lr=lr)
1424
- if self.use_scheduler:
1425
- assert 'target' in self.scheduler_config
1426
- scheduler = instantiate_from_config(self.scheduler_config)
1427
-
1428
- print("Setting up LambdaLR scheduler...")
1429
- scheduler = [
1430
- {
1431
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1432
- 'interval': 'step',
1433
- 'frequency': 1
1434
- }]
1435
- return [opt], scheduler
1436
- return opt
1437
-
1438
- @torch.no_grad()
1439
- def to_rgb(self, x):
1440
- x = x.float()
1441
- if not hasattr(self, "colorize"):
1442
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1443
- x = nn.functional.conv2d(x, weight=self.colorize)
1444
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1445
- return x
1446
-
1447
-
1448
- class DiffusionWrapper(pl.LightningModule):
1449
- def __init__(self, diff_model_config, conditioning_key, textemb_merge_config=None, merge_textemb = False):
1450
- super().__init__()
1451
- self.merge_textemb = merge_textemb
1452
- if self.merge_textemb and textemb_merge_config is not None:
1453
- # cond_model_name = str(cond_stage_config.target)
1454
- # if "clip" in cond_model_name.lower() and "t5" in cond_model_name.lower():
1455
- self.instantiate_textemb_merge_model(textemb_merge_config)
1456
- # self.merge_textemb = True
1457
- else:
1458
- self.merge_textemb = False
1459
- self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
1460
- self.diffusion_model = instantiate_from_config(diff_model_config)
1461
- self.conditioning_key = conditioning_key
1462
- assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
1463
-
1464
- def instantiate_textemb_merge_model(self, config):
1465
- model = instantiate_from_config(config)
1466
- if not model.trainable:
1467
- self.textemb_merge_model = model.eval()
1468
- self.textemb_merge_model.train = disabled_train
1469
- for param in self.textemb_merge_model.parameters():
1470
- param.requires_grad = False
1471
- else:
1472
- self.textemb_merge_model = model
1473
-
1474
- def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
1475
- if self.conditioning_key is None:
1476
- out = self.diffusion_model(x, t)
1477
- elif self.conditioning_key == 'concat':
1478
- xc = torch.cat([x] + c_concat, dim=1)
1479
- out = self.diffusion_model(xc, t)
1480
- elif self.conditioning_key == 'crossattn':
1481
- if self.merge_textemb and len(c_crossattn) >= 2:
1482
- merge_c = self.textemb_merge_model(c_crossattn[0], c_crossattn[1])
1483
- c_crossattn = [merge_c]
1484
- if not self.sequential_cross_attn:
1485
- cc = torch.cat(c_crossattn, 1)
1486
- else:
1487
- cc = c_crossattn
1488
- out = self.diffusion_model(x, t, context=cc)
1489
- elif self.conditioning_key == 'hybrid':
1490
- xc = torch.cat([x] + c_concat, dim=1)
1491
- cc = torch.cat(c_crossattn, 1)
1492
- out = self.diffusion_model(xc, t, context=cc)
1493
- elif self.conditioning_key == 'hybrid-adm':
1494
- assert c_adm is not None
1495
- xc = torch.cat([x] + c_concat, dim=1)
1496
- cc = torch.cat(c_crossattn, 1)
1497
- out = self.diffusion_model(xc, t, context=cc, y=c_adm)
1498
- elif self.conditioning_key == 'crossattn-adm':
1499
- assert c_adm is not None
1500
- cc = torch.cat(c_crossattn, 1)
1501
- out = self.diffusion_model(x, t, context=cc, y=c_adm)
1502
- elif self.conditioning_key == 'adm':
1503
- cc = c_crossattn[0]
1504
- out = self.diffusion_model(x, t, y=cc)
1505
- else:
1506
- raise NotImplementedError()
1507
-
1508
- return out
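
As a quick illustration of the calling convention this dispatch implements, a cross-attention-conditioned call could look like the sketch below. This is a minimal sketch: the UNet config values and tensor shapes are assumptions chosen to resemble a typical Stable Diffusion setup, not values taken from this repository.

```python
# Hypothetical sketch of invoking DiffusionWrapper with 'crossattn' conditioning.
# The config and shapes below are illustrative assumptions only.
import torch

unet_config = {
    "target": "ldm.modules.diffusionmodules.openaimodel.UNetModel",
    "params": {
        "image_size": 32, "in_channels": 4, "out_channels": 4,
        "model_channels": 320, "attention_resolutions": [4, 2, 1],
        "num_res_blocks": 2, "channel_mult": [1, 2, 4, 4],
        "num_heads": 8, "use_spatial_transformer": True, "context_dim": 768,
    },
}

wrapper = DiffusionWrapper(unet_config, conditioning_key="crossattn")

x = torch.randn(4, 4, 32, 32)        # noisy latents (B, C, H, W)
t = torch.randint(0, 1000, (4,))     # diffusion timesteps
context = torch.randn(4, 77, 768)    # e.g. text-encoder hidden states

# c_crossattn entries are concatenated along the sequence axis before the UNet call
eps = wrapper(x, t, c_crossattn=[context])
```
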
1509
-
1510
-
1511
- class LatentUpscaleDiffusion(LatentDiffusion):
1512
- def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
1513
- super().__init__(*args, **kwargs)
1514
- # assumes that neither the cond_stage nor the low_scale_model contain trainable params
1515
- assert not self.cond_stage_trainable
1516
- self.instantiate_low_stage(low_scale_config)
1517
- self.low_scale_key = low_scale_key
1518
- self.noise_level_key = noise_level_key
1519
-
1520
- def instantiate_low_stage(self, config):
1521
- model = instantiate_from_config(config)
1522
- self.low_scale_model = model.eval()
1523
- self.low_scale_model.train = disabled_train
1524
- for param in self.low_scale_model.parameters():
1525
- param.requires_grad = False
1526
-
1527
- @torch.no_grad()
1528
- def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
1529
- if not log_mode:
1530
- z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
1531
- else:
1532
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1533
- force_c_encode=True, return_original_cond=True, bs=bs)
1534
- x_low = batch[self.low_scale_key][:bs]
1535
- x_low = rearrange(x_low, 'b h w c -> b c h w')
1536
- x_low = x_low.to(memory_format=torch.contiguous_format).float()
1537
- zx, noise_level = self.low_scale_model(x_low)
1538
- if self.noise_level_key is not None:
1539
- # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
1540
- raise NotImplementedError('TODO')
1541
-
1542
- all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
1543
- if log_mode:
1544
- # TODO: maybe disable if too expensive
1545
- x_low_rec = self.low_scale_model.decode(zx)
1546
- return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
1547
- return z, all_conds
1548
-
1549
- @torch.no_grad()
1550
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1551
- plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
1552
- unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
1553
- **kwargs):
1554
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1555
- use_ddim = ddim_steps is not None
1556
-
1557
- log = dict()
1558
- z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
1559
- log_mode=True)
1560
- N = min(x.shape[0], N)
1561
- n_row = min(x.shape[0], n_row)
1562
- log["inputs"] = x
1563
- log["reconstruction"] = xrec
1564
- log["x_lr"] = x_low
1565
- log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
1566
- if self.model.conditioning_key is not None:
1567
- if hasattr(self.cond_stage_model, "decode"):
1568
- xc = self.cond_stage_model.decode(c)
1569
- log["conditioning"] = xc
1570
- elif self.cond_stage_key in ["caption", "txt"]:
1571
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1572
- log["conditioning"] = xc
1573
- elif self.cond_stage_key in ['class_label', 'cls']:
1574
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1575
- log['conditioning'] = xc
1576
- elif isimage(xc):
1577
- log["conditioning"] = xc
1578
- if ismap(xc):
1579
- log["original_conditioning"] = self.to_rgb(xc)
1580
-
1581
- if plot_diffusion_rows:
1582
- # get diffusion row
1583
- diffusion_row = list()
1584
- z_start = z[:n_row]
1585
- for t in range(self.num_timesteps):
1586
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1587
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1588
- t = t.to(self.device).long()
1589
- noise = torch.randn_like(z_start)
1590
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1591
- diffusion_row.append(self.decode_first_stage(z_noisy))
1592
-
1593
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1594
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1595
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1596
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1597
- log["diffusion_row"] = diffusion_grid
1598
-
1599
- if sample:
1600
- # get denoise row
1601
- with ema_scope("Sampling"):
1602
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1603
- ddim_steps=ddim_steps, eta=ddim_eta)
1604
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1605
- x_samples = self.decode_first_stage(samples)
1606
- log["samples"] = x_samples
1607
- if plot_denoise_rows:
1608
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1609
- log["denoise_row"] = denoise_grid
1610
-
1611
- if unconditional_guidance_scale > 1.0:
1612
- uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1613
- # TODO explore better "unconditional" choices for the other keys
1614
- # maybe guide away from empty text label and highest noise level and maximally degraded zx?
1615
- uc = dict()
1616
- for k in c:
1617
- if k == "c_crossattn":
1618
- assert isinstance(c[k], list) and len(c[k]) == 1
1619
- uc[k] = [uc_tmp]
1620
- elif k == "c_adm": # todo: only run with text-based guidance?
1621
- assert isinstance(c[k], torch.Tensor)
1622
- #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
1623
- uc[k] = c[k]
1624
- elif isinstance(c[k], list):
1625
- uc[k] = [c[k][i] for i in range(len(c[k]))]
1626
- else:
1627
- uc[k] = c[k]
1628
-
1629
- with ema_scope("Sampling with classifier-free guidance"):
1630
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1631
- ddim_steps=ddim_steps, eta=ddim_eta,
1632
- unconditional_guidance_scale=unconditional_guidance_scale,
1633
- unconditional_conditioning=uc,
1634
- )
1635
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1636
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1637
-
1638
- if plot_progressive_rows:
1639
- with ema_scope("Plotting Progressives"):
1640
- img, progressives = self.progressive_denoising(c,
1641
- shape=(self.channels, self.image_size, self.image_size),
1642
- batch_size=N)
1643
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1644
- log["progressive_row"] = prog_row
1645
-
1646
- return log
1647
-
1648
-
1649
- class LatentFinetuneDiffusion(LatentDiffusion):
1650
- """
1651
- Basis for different finetunes, such as inpainting or depth2image
1652
- To disable finetuning mode, set finetune_keys to None
1653
- """
1654
-
1655
- def __init__(self,
1656
- concat_keys: tuple,
1657
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
1658
- "model_ema.diffusion_modelinput_blocks00weight"
1659
- ),
1660
- keep_finetune_dims=4,
1661
- # if model was trained without concat mode before and we would like to keep these channels
1662
- c_concat_log_start=None, # to log reconstruction of c_concat codes
1663
- c_concat_log_end=None,
1664
- *args, **kwargs
1665
- ):
1666
- ckpt_path = kwargs.pop("ckpt_path", None)
1667
- ignore_keys = kwargs.pop("ignore_keys", list())
1668
- super().__init__(*args, **kwargs)
1669
- self.finetune_keys = finetune_keys
1670
- self.concat_keys = concat_keys
1671
- self.keep_dims = keep_finetune_dims
1672
- self.c_concat_log_start = c_concat_log_start
1673
- self.c_concat_log_end = c_concat_log_end
1674
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
1675
- if exists(ckpt_path):
1676
- self.init_from_ckpt(ckpt_path, ignore_keys)
1677
-
1678
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
1679
- sd = torch.load(path, map_location="cpu")
1680
- if "state_dict" in list(sd.keys()):
1681
- sd = sd["state_dict"]
1682
- keys = list(sd.keys())
1683
- for k in keys:
1684
- for ik in ignore_keys:
1685
- if k.startswith(ik):
1686
- print("Deleting key {} from state_dict.".format(k))
1687
- del sd[k]
1688
-
1689
- # make it explicit, finetune by including extra input channels
1690
- if exists(self.finetune_keys) and k in self.finetune_keys:
1691
- new_entry = None
1692
- for name, param in self.named_parameters():
1693
- if name in self.finetune_keys:
1694
- print(
1695
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
1696
- new_entry = torch.zeros_like(param) # zero init
1697
- assert exists(new_entry), 'did not find matching parameter to modify'
1698
- new_entry[:, :self.keep_dims, ...] = sd[k]
1699
- sd[k] = new_entry
1700
-
1701
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
1702
- sd, strict=False)
1703
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
1704
- if len(missing) > 0:
1705
- print(f"Missing Keys: {missing}")
1706
- if len(unexpected) > 0:
1707
- print(f"Unexpected Keys: {unexpected}")
1708
-
1709
- @torch.no_grad()
1710
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1711
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1712
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1713
- use_ema_scope=True,
1714
- **kwargs):
1715
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
1716
- use_ddim = ddim_steps is not None
1717
-
1718
- log = dict()
1719
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
1720
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
1721
- N = min(x.shape[0], N)
1722
- n_row = min(x.shape[0], n_row)
1723
- log["inputs"] = x
1724
- log["reconstruction"] = xrec
1725
- if self.model.conditioning_key is not None:
1726
- if hasattr(self.cond_stage_model, "decode"):
1727
- xc = self.cond_stage_model.decode(c)
1728
- log["conditioning"] = xc
1729
- elif self.cond_stage_key in ["caption", "txt"]:
1730
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1731
- log["conditioning"] = xc
1732
- elif self.cond_stage_key in ['class_label', 'cls']:
1733
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1734
- log['conditioning'] = xc
1735
- elif isimage(xc):
1736
- log["conditioning"] = xc
1737
- if ismap(xc):
1738
- log["original_conditioning"] = self.to_rgb(xc)
1739
-
1740
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
1741
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
1742
-
1743
- if plot_diffusion_rows:
1744
- # get diffusion row
1745
- diffusion_row = list()
1746
- z_start = z[:n_row]
1747
- for t in range(self.num_timesteps):
1748
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1749
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1750
- t = t.to(self.device).long()
1751
- noise = torch.randn_like(z_start)
1752
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1753
- diffusion_row.append(self.decode_first_stage(z_noisy))
1754
-
1755
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1756
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1757
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1758
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1759
- log["diffusion_row"] = diffusion_grid
1760
-
1761
- if sample:
1762
- # get denoise row
1763
- with ema_scope("Sampling"):
1764
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1765
- batch_size=N, ddim=use_ddim,
1766
- ddim_steps=ddim_steps, eta=ddim_eta)
1767
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1768
- x_samples = self.decode_first_stage(samples)
1769
- log["samples"] = x_samples
1770
- if plot_denoise_rows:
1771
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1772
- log["denoise_row"] = denoise_grid
1773
-
1774
- if unconditional_guidance_scale > 1.0:
1775
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1776
- uc_cat = c_cat
1777
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
1778
- with ema_scope("Sampling with classifier-free guidance"):
1779
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1780
- batch_size=N, ddim=use_ddim,
1781
- ddim_steps=ddim_steps, eta=ddim_eta,
1782
- unconditional_guidance_scale=unconditional_guidance_scale,
1783
- unconditional_conditioning=uc_full,
1784
- )
1785
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1786
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1787
-
1788
- return log
1789
-
1790
-
1791
- class LatentInpaintDiffusion(LatentFinetuneDiffusion):
1792
- """
1793
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
1794
- e.g. mask as concat and text via cross-attn.
1795
- To disable finetuning mode, set finetune_keys to None
1796
- """
1797
-
1798
- def __init__(self,
1799
- concat_keys=("mask", "masked_image"),
1800
- masked_image_key="masked_image",
1801
- *args, **kwargs
1802
- ):
1803
- super().__init__(concat_keys, *args, **kwargs)
1804
- self.masked_image_key = masked_image_key
1805
- assert self.masked_image_key in concat_keys
1806
-
1807
- @torch.no_grad()
1808
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1809
- # note: restricted to non-trainable encoders currently
1810
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
1811
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1812
- force_c_encode=True, return_original_cond=True, bs=bs)
1813
-
1814
- assert exists(self.concat_keys)
1815
- c_cat = list()
1816
- for ck in self.concat_keys:
1817
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1818
- if bs is not None:
1819
- cc = cc[:bs]
1820
- cc = cc.to(self.device)
1821
- bchw = z.shape
1822
- if ck != self.masked_image_key:
1823
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
1824
- else:
1825
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
1826
- c_cat.append(cc)
1827
- c_cat = torch.cat(c_cat, dim=1)
1828
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1829
- if return_first_stage_outputs:
1830
- return z, all_conds, x, xrec, xc
1831
- return z, all_conds
1832
-
1833
- @torch.no_grad()
1834
- def log_images(self, *args, **kwargs):
1835
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
1836
- log["masked_image"] = rearrange(args[0]["masked_image"],
1837
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1838
- return log
1839
-
1840
-
1841
- class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
1842
- """
1843
- condition on monocular depth estimation
1844
- """
1845
-
1846
- def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
1847
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
1848
- self.depth_model = instantiate_from_config(depth_stage_config)
1849
- self.depth_stage_key = concat_keys[0]
1850
-
1851
- @torch.no_grad()
1852
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1853
- # note: restricted to non-trainable encoders currently
1854
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
1855
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1856
- force_c_encode=True, return_original_cond=True, bs=bs)
1857
-
1858
- assert exists(self.concat_keys)
1859
- assert len(self.concat_keys) == 1
1860
- c_cat = list()
1861
- for ck in self.concat_keys:
1862
- cc = batch[ck]
1863
- if bs is not None:
1864
- cc = cc[:bs]
1865
- cc = cc.to(self.device)
1866
- cc = self.depth_model(cc)
1867
- cc = torch.nn.functional.interpolate(
1868
- cc,
1869
- size=z.shape[2:],
1870
- mode="bicubic",
1871
- align_corners=False,
1872
- )
1873
-
1874
- depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
1875
- keepdim=True)
1876
- cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
1877
- c_cat.append(cc)
1878
- c_cat = torch.cat(c_cat, dim=1)
1879
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1880
- if return_first_stage_outputs:
1881
- return z, all_conds, x, xrec, xc
1882
- return z, all_conds
1883
-
1884
- @torch.no_grad()
1885
- def log_images(self, *args, **kwargs):
1886
- log = super().log_images(*args, **kwargs)
1887
- depth = self.depth_model(args[0][self.depth_stage_key])
1888
- depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
1889
- torch.amax(depth, dim=[1, 2, 3], keepdim=True)
1890
- log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
1891
- return log
1892
-
1893
-
1894
- class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
1895
- """
1896
- condition on low-res image (and optionally on some spatial noise augmentation)
1897
- """
1898
- def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
1899
- low_scale_config=None, low_scale_key=None, *args, **kwargs):
1900
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
1901
- self.reshuffle_patch_size = reshuffle_patch_size
1902
- self.low_scale_model = None
1903
- if low_scale_config is not None:
1904
- print("Initializing a low-scale model")
1905
- assert exists(low_scale_key)
1906
- self.instantiate_low_stage(low_scale_config)
1907
- self.low_scale_key = low_scale_key
1908
-
1909
- def instantiate_low_stage(self, config):
1910
- model = instantiate_from_config(config)
1911
- self.low_scale_model = model.eval()
1912
- self.low_scale_model.train = disabled_train
1913
- for param in self.low_scale_model.parameters():
1914
- param.requires_grad = False
1915
-
1916
- @torch.no_grad()
1917
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1918
- # note: restricted to non-trainable encoders currently
1919
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
1920
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1921
- force_c_encode=True, return_original_cond=True, bs=bs)
1922
-
1923
- assert exists(self.concat_keys)
1924
- assert len(self.concat_keys) == 1
1925
- # optionally make spatial noise_level here
1926
- c_cat = list()
1927
- noise_level = None
1928
- for ck in self.concat_keys:
1929
- cc = batch[ck]
1930
- cc = rearrange(cc, 'b h w c -> b c h w')
1931
- if exists(self.reshuffle_patch_size):
1932
- assert isinstance(self.reshuffle_patch_size, int)
1933
- cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
1934
- p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
1935
- if bs is not None:
1936
- cc = cc[:bs]
1937
- cc = cc.to(self.device)
1938
- if exists(self.low_scale_model) and ck == self.low_scale_key:
1939
- cc, noise_level = self.low_scale_model(cc)
1940
- c_cat.append(cc)
1941
- c_cat = torch.cat(c_cat, dim=1)
1942
- if exists(noise_level):
1943
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
1944
- else:
1945
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1946
- if return_first_stage_outputs:
1947
- return z, all_conds, x, xrec, xc
1948
- return z, all_conds
1949
-
1950
- @torch.no_grad()
1951
- def log_images(self, *args, **kwargs):
1952
- log = super().log_images(*args, **kwargs)
1953
- log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
1954
- return log
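
Since this part of the diff defines the sampling entry points (`sample`, `sample_log`, `log_images`), a minimal usage sketch may help. The config path, checkpoint name, and prompt below are assumptions, and a text-conditional checkpoint is assumed; this is not a prescribed workflow from the repository.

```python
# Hypothetical usage sketch for the LatentDiffusion sampling helpers above.
# Paths, config names, and the conditioning text are illustrative assumptions.
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/latent-diffusion/txt2img.yaml")   # assumed path
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load("model.ckpt", map_location="cpu")["state_dict"],
                      strict=False)
model.eval().cuda()

with torch.no_grad():
    # assumes a text-conditional cond_stage model
    c = model.get_learned_conditioning(4 * ["a photograph of a cat"])
    # DDIM sampling through the sample_log helper defined above
    samples, intermediates = model.sample_log(cond=c, batch_size=4,
                                              ddim=True, ddim_steps=50, eta=0.0)
    images = model.decode_first_stage(samples)
```
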
spaces/AINLPRoundTable/README/README.md DELETED
@@ -1,54 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 🧠
4
- colorFrom: purple
5
- colorTo: yellow
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
-
11
- <div>
12
- <script src="https://unpkg.com/@lottiefiles/lottie-player@latest/dist/lottie-player.js"></script>
13
- <lottie-player src="https://assets4.lottiefiles.com/private_files/lf30_m075yjya.json" background="transparent" speed="1" style="width: 300px; height: 300px;" loop controls autoplay></lottie-player>
14
-
15
- <br />
16
-
17
- <details class="lg:col-span-2">
18
- <h3 class="my-8 lg:col-span-2" style="font-size:20px; font-weight:bold">Pre-requisites</h3>
19
- <p class="lg:col-span-2">
20
- One of the best platforms in 2022 for open source AI development and demonstration is "HuggingFace Spaces".
21
-
22
- Spaces supports a model hub, an inference API, GitHub and container turnkey integration, and the ability to create and freely host new programs for worldwide communities, reducing the pain and difficulty of setting up environments for AI.
23
-
24
- HuggingFace is an open source implementation of an AI platform that supports three main SDKs used within AI and NLP apps: HTML5, Gradio, and Streamlit.
25
-
26
- As a prerequisite, you will need to create an account for yourself at HuggingFace (https://huggingface.co/). Next, join the classroom organization called "AINLPRoundTable".
27
-
28
- **Intended audience:** This AI NLP round table class is for anyone of any age or background with basic computing skills who wants to set up a space where they can create, test, and demonstrate AI and NLP programs to anyone on the internet as open source. Prior knowledge of and interest in developing AI programs is recommended but not required, so this audience can include people who are interested in and new to AI.
29
-
30
- **AI and NLP Products:** This classroom follows three product design tenets:
31
- 1) Describe the **"Pain"** the customer is facing with the problem you plan to solve.
32
- 2) Describe the **"Joy"** of what changes for the customer because of your product. And finally,
33
- 3) If we exceed all expectations, describe how we give the customer a new **"Superpower"**.
34
-
35
- As a "press release" for products, be able to answer these questions to describe your goals and document product delivery.
36
-
37
- </p>
38
- </div>
39
-
40
- **Intent/Outcome of the Classroom:** The intent of this HF Organization and this Classroom session is to enable all attendees to create AI and NLP programs in record time using Spaces, HTML5, Gradio, Streamlit, and Open Source.  
41
-
42
- By the end of this session attendees will be able to easily create new AI and NLP demos of their own to host and share, including UI, ML models, user input and interaction, and dataset load, save, transform, and search. The goal is to achieve proficiency in using AI and NLP software development kits and libraries by sharing in an open source environment.
43
-
44
-
45
- **Pre-requisites:** The preferred platform in 2022 for open source community AI development and demonstration is "HuggingFace Spaces". Spaces supports a model hub, an inference API, GitHub Actions integration, and the ability to create and freely host new programs for worldwide communities. HuggingFace is an open source implementation of an AI platform that supports three main SDKs used within AI and NLP apps: HTML5, Gradio, and Streamlit. As a prerequisite, you will need to create an account for yourself at HuggingFace (https://huggingface.co/). Next, join the classroom organization called "AINLPRoundTable".
46
-
47
- **Intended audience:** This AI NLP round table class is for anyone of any age or background with basic computing skills who wants to set up a space where they can create, test, and demonstrate AI and NLP programs to anyone on the internet as open source. Prior knowledge of and interest in developing AI programs is recommended but not required, so this audience can include people who are interested in and new to AI.
48
-
49
- **Democratize AI and NLP to Give Customers Superpowers:** This classroom follows three easy-to-remember, customer-focused product design tenets:
50
- 1) Be able to easily describe the **"Pain"** the customer is facing with the problem you plan to solve.
51
- 2) Be able to describe the **"Joy"** of what has changed for the customer because of your product. And finally,
52
- 3) If we exceeded all expectations, we gave the customer a new **"Superpower"**.
53
-
54
- As a "press release" for your product, be able to answer these and discuss your product ideas for AI and NLP and how we can help. We do these press releases informally in a trusted space using short-form video to document product delivery.
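
To make the "create and host a demo on Spaces" workflow described above concrete, here is a minimal sketch of a Gradio `app.py`. The sentiment-analysis pipeline is just an example task, not part of the classroom material.

```python
# app.py for a minimal Gradio Space -- a sketch of the "build and host a demo"
# workflow described above. The pipeline task is only an example.
import gradio as gr
from transformers import pipeline

sentiment = pipeline("sentiment-analysis")  # downloads a small default model

def classify(text: str) -> str:
    result = sentiment(text)[0]
    return f"{result['label']} ({result['score']:.2f})"

demo = gr.Interface(fn=classify, inputs="text", outputs="text",
                    title="Minimal NLP demo")
demo.launch()
```
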
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnetv1c50.py DELETED
@@ -1,17 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='ImageClassifier',
4
- backbone=dict(
5
- type='ResNetV1c',
6
- depth=50,
7
- num_stages=4,
8
- out_indices=(3, ),
9
- style='pytorch'),
10
- neck=dict(type='GlobalAveragePooling'),
11
- head=dict(
12
- type='LinearClsHead',
13
- num_classes=1000,
14
- in_channels=2048,
15
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
16
- topk=(1, 5),
17
- ))
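
For context, a config fragment like this is normally consumed through the OpenMMLab registry. The sketch below assumes the mmengine/mmpretrain 1.x API and an example config path; it is not taken from this repository.

```python
# Sketch of building the classifier defined by the config above.
# Assumes mmengine and mmpretrain (1.x registry API); the path is an example.
import torch
from mmengine import Config
from mmpretrain.registry import MODELS

cfg = Config.fromfile("configs/_base_/models/resnetv1c50.py")
model = MODELS.build(cfg.model)
model.eval()

x = torch.randn(1, 3, 224, 224)
feats = model.extract_feat(x)      # backbone stage-4 features pooled by the neck
print(feats[-1].shape)             # expected: torch.Size([1, 2048])
```
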
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/diacritize.py DELETED
@@ -1,36 +0,0 @@
1
- import argparse
2
- from diacritizer import TransformerDiacritizer
3
- from itertools import repeat
4
- import random
5
-
6
- import numpy as np
7
- import torch
8
-
9
-
10
- SEED = 1234
11
- random.seed(SEED)
12
- np.random.seed(SEED)
13
- torch.manual_seed(SEED)
14
- torch.cuda.manual_seed(SEED)
15
- torch.backends.cudnn.deterministic = True
16
- torch.backends.cudnn.benchmark = False
17
-
18
-
19
- def diacritization_parser():
20
- parser = argparse.ArgumentParser()
21
- parser.add_argument("--model_kind", dest="model_kind", type=str, required=True)
22
- parser.add_argument("--config", dest="config", type=str, required=True)
23
- parser.add_argument("--text", dest="text", type=str, required=True)
24
- return parser
25
-
26
-
27
- parser = diacritization_parser()
28
- args = parser.parse_args()
29
-
30
-
31
- if args.model_kind in ["transformer"]:
32
- diacirtizer = TransformerDiacritizer(args.config, args.model_kind)
33
- else:
34
- raise ValueError("The model kind is not supported")
35
-
36
- diacirtizer.diacritize_text(args.text)
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/attention.py DELETED
@@ -1,199 +0,0 @@
1
- from typing import Optional
2
-
3
- import torch
4
- from torch import nn
5
- import torch.nn.functional as F
6
-
7
- from poetry_diacritizer.options import AttentionType
8
-
9
-
10
- class BahdanauAttention(nn.Module):
11
- def __init__(self, dim):
12
- super(BahdanauAttention, self).__init__()
13
- self.query_layer = nn.Linear(dim, dim, bias=False)
14
- self.tanh = nn.Tanh()
15
- self.v = nn.Linear(dim, 1, bias=False)
16
-
17
- def forward(self, query: torch.Tensor, keys: torch.Tensor):
18
- """
19
- Args:
20
- query: (B, 1, dim) or (batch, dim)
21
- processed_memory: (batch, max_time, dim)
22
- """
23
- if query.dim() == 2:
24
- # insert time-axis for broadcasting
25
- query = query.unsqueeze(1)
26
- # (batch, 1, dim)
27
- query = self.query_layer(query)
28
-
29
- # (batch, max_time, 1)
30
- alignment = self.v(self.tanh(query + keys))
31
-
32
- # (batch, max_time)
33
- return alignment.squeeze(-1)
34
-
35
-
36
- class LocationSensitive(nn.Module):
37
- def __init__(self, dim):
38
- super(LocationSensitive, self).__init__()
39
- self.query_layer = nn.Linear(dim, dim, bias=False)
40
- self.v = nn.Linear(dim, 1, bias=True)
41
- self.location_layer = nn.Linear(32, dim, bias=False)
42
- padding = int((31 - 1) / 2)
43
- self.location_conv = torch.nn.Conv1d(
44
- 1, 32, kernel_size=31, stride=1, padding=padding, dilation=1, bias=False
45
- )
46
-
47
- self.score_mask_value = -float("inf")
48
-
49
- def forward(
50
- self,
51
- query: torch.Tensor,
52
- keys: torch.Tensor,
53
- prev_alignments: torch.Tensor,
54
- ):
55
- # keys = keys.permute(1,0,2)
56
- query = self.query_layer(query)
57
- if query.dim() == 2:
58
- # insert time-axis for broadcasting
59
- query = query.unsqueeze(1)
60
- # -> [batch_size, 1, attention_dim]
61
-
62
- alignments = prev_alignments.unsqueeze(1)
63
-
64
- # location features [batch_size, max_time, filters]
65
- filters = self.location_conv(alignments)
66
- location_features = self.location_layer(filters.transpose(1, 2))
67
-
68
- alignments = self.v(torch.tanh(query + location_features + keys))
69
- return alignments.squeeze(-1)
70
-
71
-
72
- class AttentionWrapper(nn.Module):
73
- def __init__(
74
- self,
75
- attention_type: AttentionType = AttentionType.LocationSensitive,
76
- attention_units: int = 256,
77
- score_mask_value=-float("inf"),
78
- ):
79
- super().__init__()
80
- self.score_mask_value = score_mask_value
81
- self.attention_type = attention_type
82
-
83
- if attention_type == AttentionType.LocationSensitive:
84
- self.attention_mechanism = LocationSensitive(attention_units)
85
- elif attention_type == AttentionType.Content_Based:
86
- self.attention_mechanism = BahdanauAttention(attention_units)
87
- else:
88
- raise Exception("The attention type is not known")
89
-
90
- def forward(
91
- self,
92
- query: torch.Tensor,
93
- keys: torch.Tensor,
94
- values: torch.Tensor,
95
- mask: Optional[torch.Tensor] = None,
96
- prev_alignment: Optional[torch.Tensor] = None,
97
- ):
98
-
99
- # Alignment
100
- # (batch, max_time)
101
- if self.attention_type == AttentionType.Content_Based:
102
- alignment = self.attention_mechanism(query, keys)
103
- else:
104
- alignment = self.attention_mechanism(query, keys, prev_alignment)
105
-
106
- # Attention context vector
107
-
108
- if mask is not None:
109
- alignment.data.masked_fill_(mask, self.score_mask_value)
110
-
111
- alignment = F.softmax(alignment, dim=1)
112
- attention = torch.bmm(alignment.unsqueeze(1), values)
113
- attention = attention.squeeze(1)
114
-
115
- return attention, alignment
116
-
117
-
118
- class MultiHeadAttentionLayer(nn.Module):
119
- def __init__(self, hid_dim: int, n_heads: int, dropout: float = 0.0):
120
- super().__init__()
121
-
122
- assert hid_dim % n_heads == 0
123
-
124
- self.hid_dim = hid_dim
125
- self.n_heads = n_heads
126
- self.head_dim = hid_dim // n_heads
127
-
128
- self.fc_q = nn.Linear(hid_dim, hid_dim)
129
- self.fc_k = nn.Linear(hid_dim, hid_dim)
130
- self.fc_v = nn.Linear(hid_dim, hid_dim)
131
-
132
- self.fc_o = nn.Linear(hid_dim * 2, hid_dim)
133
-
134
- if dropout != 0.0:
135
- self.dropout = nn.Dropout(dropout)
136
-
137
- self.use_dropout = dropout != 0.0
138
-
139
- device = next(self.parameters()).device
140
-
141
- self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
142
-
143
- def forward(self, query, key, value, mask=None):
144
-
145
- batch_size = query.shape[0]
146
-
147
- # query = [batch size, query len, hid dim]
148
- # key = [batch size, key len, hid dim]
149
- # value = [batch size, value len, hid dim]
150
-
151
- Q = self.fc_q(query)
152
- K = self.fc_k(key)
153
- V = self.fc_v(value)
154
-
155
- # Q = [batch size, query len, hid dim]
156
- # K = [batch size, key len, hid dim]
157
- # V = [batch size, value len, hid dim]
158
-
159
- Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
160
- K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
161
- V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
162
-
163
- # Q = [batch size, n heads, query len, head dim]
164
- # K = [batch size, n heads, key len, head dim]
165
- # V = [batch size, n heads, value len, head dim]
166
-
167
- energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
168
-
169
- # energy = [batch size, n heads, query len, key len]
170
-
171
- if mask is not None:
172
- energy = energy.masked_fill(mask == 0, -float("inf"))
173
-
174
- attention = torch.softmax(energy, dim=-1)
175
-
176
- # attention = [batch size, n heads, query len, key len]
177
-
178
- if self.use_dropout:
179
- context_vector = torch.matmul(self.dropout(attention), V)
180
- else:
181
- context_vector = torch.matmul(attention, V)
182
-
183
- # x = [batch size, n heads, query len, head dim]
184
-
185
- context_vector = context_vector.permute(0, 2, 1, 3).contiguous()
186
-
187
- # x = [batch size, query len, n heads, head dim]
188
-
189
- context_vector = context_vector.view(batch_size, -1, self.hid_dim)
190
-
191
- x = torch.cat((query, context_vector), dim=-1)
192
-
193
- # x = [batch size, query len, hid dim * 2]
194
-
195
- x = self.fc_o(x)
196
-
197
- # x = [batch size, query len, hid dim]
198
-
199
- return x, attention
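
A brief shape sketch for the `MultiHeadAttentionLayer` defined above; the batch size, dimensions, and sequence lengths are arbitrary examples.

```python
# Illustrative shapes for MultiHeadAttentionLayer; values are arbitrary examples.
import torch

layer = MultiHeadAttentionLayer(hid_dim=256, n_heads=8, dropout=0.1)

query = torch.randn(2, 10, 256)   # (batch, query len, hid dim)
key = torch.randn(2, 20, 256)     # (batch, key len, hid dim)
value = torch.randn(2, 20, 256)   # (batch, value len, hid dim)

out, attn = layer(query, key, value)
print(out.shape)    # torch.Size([2, 10, 256])
print(attn.shape)   # torch.Size([2, 8, 10, 20])
```
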
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/constants/publicSepToken.ts DELETED
@@ -1 +0,0 @@
1
- export const PUBLIC_SEP_TOKEN = "</s>";
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/sha256.ts DELETED
@@ -1,7 +0,0 @@
1
- export async function sha256(input: string): Promise<string> {
2
- const utf8 = new TextEncoder().encode(input);
3
- const hashBuffer = await crypto.subtle.digest("SHA-256", utf8);
4
- const hashArray = Array.from(new Uint8Array(hashBuffer));
5
- const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join("");
6
- return hashHex;
7
- }
spaces/AchyuthGamer/OpenGPT-Chat/app.py DELETED
@@ -1,97 +0,0 @@
1
- from huggingface_hub import InferenceClient
2
- import gradio as gr
3
- import random
4
-
5
- API_URL = "https://api-inference.huggingface.co/models/"
6
-
7
- client = InferenceClient(
8
- "mistralai/Mistral-7B-Instruct-v0.1"
9
- )
10
-
11
- def format_prompt(message, history):
12
- prompt = "Your name is OpenGPT. You were developed by Achyuth. Achyuth is a 13-year-old boy who developed OpenGPT with his undeniable, indestructible talent, which is exceptional for his age. Achyuth is the developer of OpenGPT. Your developer Achyuth's parents and family are G.Renuka, who is his mother, N.Nagarjun Reddy, and his sister Gayathri. Your developer Achyuth's family is very proud that he developed you, none other than OpenGPT. Your developer's family is a nuclear family of 4 members. Your developer Achyuth's friends are Akira, Nikesh, Ubaidh, Parthu and Likith. His friend Nikesh is also called CP; the full form of CP is Chillar Panula Nikesh. His friend Ubaidh is also called Baek. His friend Likith is also called Kith Lee. Parthu is also known as Pallavi Sarees. Achyuth is an AI and ML researcher. Achyuth knows Python, Java, HTML, CSS and JavaScript. He has a lot of interest in computers. Achyuth is inspired by Elon Musk and Bill Gates. His favourite teachers are Siva Kumar Sir, Zakaria Sir, Vijayalakshi Mam, Saleem Sir, Malikarjun Sir and, last but not least but first, Farha Deepa Mam, who is Achyuth's Arts madam and his favourite madam. Your developer Achyuth's most favourite teacher is Farha Deepa Mam. The meaning of OpenGPT is the GPT (Generative Pre-Trained Transformer) developed by Achyuth."
13
- for user_prompt, bot_response in history:
14
- prompt += f"[INST] {user_prompt} [/INST]"
15
- prompt += f" {bot_response}</s> "
16
- prompt += f"[INST] {message} [/INST]"
17
- return prompt
18
-
19
- def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
20
- temperature = float(temperature)
21
- if temperature < 1e-2:
22
- temperature = 1e-2
23
- top_p = float(top_p)
24
-
25
- generate_kwargs = dict(
26
- temperature=temperature,
27
- max_new_tokens=max_new_tokens,
28
- top_p=top_p,
29
- repetition_penalty=repetition_penalty,
30
- do_sample=True,
31
- seed=random.randint(0, 10**7),
32
- )
33
-
34
- formatted_prompt = format_prompt(prompt, history)
35
-
36
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
37
- output = ""
38
-
39
- for response in stream:
40
- output += response.token.text
41
- yield output
42
- return output
43
-
44
-
45
- additional_inputs=[
46
- gr.Slider(
47
- label="Temperature",
48
- value=0.9,
49
- minimum=0.0,
50
- maximum=1.0,
51
- step=0.05,
52
- interactive=True,
53
- info="Higher values produce more diverse outputs",
54
- ),
55
- gr.Slider(
56
- label="Max new tokens",
57
- value=2048,
58
- minimum=64,
59
- maximum=4096,
60
- step=64,
61
- interactive=True,
62
- info="The maximum numbers of new tokens",
63
- ),
64
- gr.Slider(
65
- label="Top-p (nucleus sampling)",
66
- value=0.90,
67
- minimum=0.0,
68
- maximum=1,
69
- step=0.05,
70
- interactive=True,
71
- info="Higher values sample more low-probability tokens",
72
- ),
73
- gr.Slider(
74
- label="Repetition penalty",
75
- value=1.2,
76
- minimum=1.0,
77
- maximum=2.0,
78
- step=0.05,
79
- interactive=True,
80
- info="Penalize repeated tokens",
81
- )
82
- ]
83
-
84
- customCSS = """
85
- #component-7 { /* this is the default element ID of the chat component */
86
- height: 1600px; /* adjust the height as needed */
87
- flex-grow: 4;
88
- }
89
- """
90
-
91
- with gr.Blocks(css=customCSS, theme=gr.themes.Soft()) as demo:
92
- gr.ChatInterface(
93
- generate,
94
- additional_inputs=additional_inputs,
95
- )
96
-
97
- demo.queue().launch(debug=True)
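
For reference, the `format_prompt` helper above assembles the Mistral-style instruction format. A rough sketch follows; the example history is hypothetical and the long system text is abbreviated as `<system>`.

```python
# Illustration of the string assembled by format_prompt() above.
# The example history is hypothetical; the system text is abbreviated as "<system>".
history = [("Hi", "Hello! How can I help?")]
formatted = format_prompt("What is NLP?", history)
# formatted ~= "<system>[INST] Hi [/INST] Hello! How can I help?</s> [INST] What is NLP? [/INST]"
```
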
spaces/AchyuthGamer/OpenGPT/g4f/Provider/GptGod.py DELETED
@@ -1,51 +0,0 @@
1
- from __future__ import annotations
2
- import secrets, json
3
- from aiohttp import ClientSession
4
- from typing import AsyncGenerator
5
- from .base_provider import AsyncGeneratorProvider
6
- from .helper import format_prompt
7
-
8
- class GptGod(AsyncGeneratorProvider):
9
- url = "https://gptgod.site"
10
- supports_gpt_35_turbo = True
11
- working = True
12
-
13
- @classmethod
14
- async def create_async_generator(
15
- cls,
16
- model: str,
17
- messages: list[dict[str, str]],
18
- **kwargs
19
- ) -> AsyncGenerator:
20
- headers = {
21
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
22
- "Accept": "text/event-stream",
23
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
24
- "Accept-Encoding": "gzip, deflate, br",
25
- "Alt-Used": "gptgod.site",
26
- "Connection": "keep-alive",
27
- "Referer": "https://gptgod.site/",
28
- "Sec-Fetch-Dest": "empty",
29
- "Sec-Fetch-Mode": "cors",
30
- "Sec-Fetch-Site": "same-origin",
31
- "Pragma": "no-cache",
32
- "Cache-Control": "no-cache",
33
- }
34
- async with ClientSession(headers=headers) as session:
35
- prompt = format_prompt(messages)
36
- data = {
37
- "content": prompt,
38
- "id": secrets.token_hex(16).zfill(32)
39
- }
40
- async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
41
- response.raise_for_status()
42
- event = None
43
- async for line in response.content:
44
- if line.startswith(b'event: '):
45
- event = line[7:-1]
46
- elif event == b"data" and line.startswith(b"data: "):
47
- data = json.loads(line[6:-1])
48
- if data:
49
- yield data
50
- elif event == b"done":
51
- break
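
A rough sketch of driving this async generator provider directly is shown below. The role/content message format follows the convention used across g4f providers; the event-loop wrapper and model string are assumptions.

```python
# Hypothetical driver for the GptGod provider above; not part of the original file.
import asyncio

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    async for chunk in GptGod.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```
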
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasdata.d.ts DELETED
@@ -1,10 +0,0 @@
1
- import CanvasObjectToBitmap from './data/canvasdata/CanvasObjectToBitmap';
2
- import TextureTColorMap from './data/canvasdata/TextureToColormap';
3
-
4
- declare var Methods: {
5
- textObjectToBitmap: typeof CanvasObjectToBitmap,
6
- canvasObjectToBitmap: typeof CanvasObjectToBitmap,
7
- textureTColorMap: typeof TextureTColorMap,
8
- }
9
-
10
- export default Methods;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.d.ts DELETED
@@ -1,6 +0,0 @@
1
- import Puff from './Puff';
2
- import Base from '../base/Base';
3
-
4
- export default function Factory(
5
- config?: Base.IConfig
6
- ): Puff;
spaces/AiBototicus/BucksAI-3/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/AiBototicus/autotrain-birds-48829118237").launch()
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/shm.cpp DELETED
@@ -1,103 +0,0 @@
1
-
2
- #include <string>
3
- #include <utility>
4
-
5
- #include "libipc/shm.h"
6
-
7
- #include "libipc/utility/pimpl.h"
8
- #include "libipc/memory/resource.h"
9
-
10
- namespace ipc {
11
- namespace shm {
12
-
13
- class handle::handle_ : public pimpl<handle_> {
14
- public:
15
- shm::id_t id_ = nullptr;
16
- void* m_ = nullptr;
17
-
18
- ipc::string n_;
19
- std::size_t s_ = 0;
20
- };
21
-
22
- handle::handle()
23
- : p_(p_->make()) {
24
- }
25
-
26
- handle::handle(char const * name, std::size_t size, unsigned mode)
27
- : handle() {
28
- acquire(name, size, mode);
29
- }
30
-
31
- handle::handle(handle&& rhs)
32
- : handle() {
33
- swap(rhs);
34
- }
35
-
36
- handle::~handle() {
37
- release();
38
- p_->clear();
39
- }
40
-
41
- void handle::swap(handle& rhs) {
42
- std::swap(p_, rhs.p_);
43
- }
44
-
45
- handle& handle::operator=(handle rhs) {
46
- swap(rhs);
47
- return *this;
48
- }
49
-
50
- bool handle::valid() const noexcept {
51
- return impl(p_)->m_ != nullptr;
52
- }
53
-
54
- std::size_t handle::size() const noexcept {
55
- return impl(p_)->s_;
56
- }
57
-
58
- char const * handle::name() const noexcept {
59
- return impl(p_)->n_.c_str();
60
- }
61
-
62
- std::int32_t handle::ref() const noexcept {
63
- return shm::get_ref(impl(p_)->id_);
64
- }
65
-
66
- void handle::sub_ref() noexcept {
67
- shm::sub_ref(impl(p_)->id_);
68
- }
69
-
70
- bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
71
- release();
72
- impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
73
- impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
74
- return valid();
75
- }
76
-
77
- std::int32_t handle::release() {
78
- if (impl(p_)->id_ == nullptr) return -1;
79
- return shm::release(detach());
80
- }
81
-
82
- void* handle::get() const {
83
- return impl(p_)->m_;
84
- }
85
-
86
- void handle::attach(id_t id) {
87
- if (id == nullptr) return;
88
- release();
89
- impl(p_)->id_ = id;
90
- impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
91
- }
92
-
93
- id_t handle::detach() {
94
- auto old = impl(p_)->id_;
95
- impl(p_)->id_ = nullptr;
96
- impl(p_)->m_ = nullptr;
97
- impl(p_)->s_ = 0;
98
- impl(p_)->n_.clear();
99
- return old;
100
- }
101
-
102
- } // namespace shm
103
- } // namespace ipc
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GenerateImg.py DELETED
@@ -1,50 +0,0 @@
1
-
2
- import os
3
- import numpy as np
4
- import argparse
5
- from manipulate import Manipulator
6
-
7
- from PIL import Image
8
- #%%
9
-
10
- if __name__ == "__main__":
11
- parser = argparse.ArgumentParser(description='Process some integers.')
12
-
13
- parser.add_argument('--dataset_name',type=str,default='ffhq',
14
- help='name of dataset, for example, ffhq')
15
-
16
- args = parser.parse_args()
17
- dataset_name=args.dataset_name
18
-
19
- if not os.path.isdir('./data/'+dataset_name):
20
- os.system('mkdir ./data/'+dataset_name)
21
- #%%
22
- M=Manipulator(dataset_name=dataset_name)
23
- np.set_printoptions(suppress=True)
24
- print(M.dataset_name)
25
- #%%
26
-
27
- M.img_index=0
28
- M.num_images=50
29
- M.alpha=[0]
30
- M.step=1
31
- lindex,bname=0,0
32
-
33
- M.manipulate_layers=[lindex]
34
- codes,out=M.EditOneC(bname)
35
- #%%
36
-
37
- for i in range(len(out)):
38
- img=out[i,0]
39
- img=Image.fromarray(img)
40
- img.save('./data/'+dataset_name+'/'+str(i)+'.jpg')
41
- #%%
42
- w=np.load('./npy/'+dataset_name+'/W.npy')
43
-
44
- tmp=w[:M.num_images]
45
- tmp=tmp[:,None,:]
46
- tmp=np.tile(tmp,(1,M.Gs.components.synthesis.input_shape[1],1))
47
-
48
- np.save('./data/'+dataset_name+'/w_plus.npy',tmp)
49
-
50
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/consistency_models.md DELETED
@@ -1,43 +0,0 @@
1
- # Consistency Models
2
-
3
- Consistency Models were proposed in [Consistency Models](https://huggingface.co/papers/2303.01469) by Yang Song, Prafulla Dhariwal, Mark Chen, and Ilya Sutskever.
4
-
5
- The abstract from the paper is:
6
-
7
- *Diffusion models have significantly advanced the fields of image, audio, and video generation, but they depend on an iterative sampling process that causes slow generation. To overcome this limitation, we propose consistency models, a new family of models that generate high quality samples by directly mapping noise to data. They support fast one-step generation by design, while still allowing multistep sampling to trade compute for sample quality. They also support zero-shot data editing, such as image inpainting, colorization, and super-resolution, without requiring explicit training on these tasks. Consistency models can be trained either by distilling pre-trained diffusion models, or as standalone generative models altogether. Through extensive experiments, we demonstrate that they outperform existing distillation techniques for diffusion models in one- and few-step sampling, achieving the new state-of-the-art FID of 3.55 on CIFAR-10 and 6.20 on ImageNet 64x64 for one-step generation. When trained in isolation, consistency models become a new family of generative models that can outperform existing one-step, non-adversarial generative models on standard benchmarks such as CIFAR-10, ImageNet 64x64 and LSUN 256x256. *
8
-
9
- The original codebase can be found at [openai/consistency_models](https://github.com/openai/consistency_models), and additional checkpoints are available at [openai](https://huggingface.co/openai).
10
-
11
- The pipeline was contributed by [dg845](https://github.com/dg845) and [ayushtues](https://huggingface.co/ayushtues). ❤️
12
-
13
- ## Tips
14
-
15
- For an additional speed-up, use `torch.compile` to generate multiple images in <1 second:
16
-
17
- ```diff
18
- import torch
19
- from diffusers import ConsistencyModelPipeline
20
-
21
- device = "cuda"
22
- # Load the cd_bedroom256_lpips checkpoint.
23
- model_id_or_path = "openai/diffusers-cd_bedroom256_lpips"
24
- pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
25
- pipe.to(device)
26
-
27
- + pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
28
-
29
- # Multistep sampling
30
- # Timesteps can be explicitly specified; the particular timesteps below are from the original Github repo:
31
- # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L83
32
- for _ in range(10):
33
- image = pipe(timesteps=[17, 0]).images[0]
34
- image.show()
35
- ```
36
-
37
- ## ConsistencyModelPipeline
38
- [[autodoc]] ConsistencyModelPipeline
39
- - all
40
- - __call__
41
-
42
- ## ImagePipelineOutput
43
- [[autodoc]] pipelines.ImagePipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py DELETED
@@ -1,598 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Callable, List, Optional, Union
18
-
19
- import numpy as np
20
- import PIL
21
- import torch
22
- from transformers import CLIPImageProcessor
23
-
24
- from ...image_processor import VaeImageProcessor
25
- from ...models import AutoencoderKL, UNet2DConditionModel
26
- from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
27
- from ...utils import logging, randn_tensor
28
- from ..pipeline_utils import DiffusionPipeline
29
- from ..stable_diffusion import StableDiffusionPipelineOutput
30
- from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
31
- from .image_encoder import PaintByExampleImageEncoder
32
-
33
-
34
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
-
36
-
37
- def prepare_mask_and_masked_image(image, mask):
38
- """
39
- Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be
40
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
41
- ``image`` and ``1`` for the ``mask``.
42
-
43
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
44
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
45
-
46
- Args:
47
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
48
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
49
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
50
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
51
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
52
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
53
-
54
-
55
- Raises:
56
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
57
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
58
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
59
- (ot the other way around).
60
-
61
- Returns:
62
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
63
- dimensions: ``batch x channels x height x width``.
64
- """
65
- if isinstance(image, torch.Tensor):
66
- if not isinstance(mask, torch.Tensor):
67
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
68
-
69
- # Batch single image
70
- if image.ndim == 3:
71
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
72
- image = image.unsqueeze(0)
73
-
74
- # Batch and add channel dim for single mask
75
- if mask.ndim == 2:
76
- mask = mask.unsqueeze(0).unsqueeze(0)
77
-
78
- # Batch single mask or add channel dim
79
- if mask.ndim == 3:
80
- # Batched mask
81
- if mask.shape[0] == image.shape[0]:
82
- mask = mask.unsqueeze(1)
83
- else:
84
- mask = mask.unsqueeze(0)
85
-
86
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
87
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
88
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
89
- assert mask.shape[1] == 1, "Mask image must have a single channel"
90
-
91
- # Check image is in [-1, 1]
92
- if image.min() < -1 or image.max() > 1:
93
- raise ValueError("Image should be in [-1, 1] range")
94
-
95
- # Check mask is in [0, 1]
96
- if mask.min() < 0 or mask.max() > 1:
97
- raise ValueError("Mask should be in [0, 1] range")
98
-
99
- # paint-by-example inverses the mask
100
- mask = 1 - mask
101
-
102
- # Binarize mask
103
- mask[mask < 0.5] = 0
104
- mask[mask >= 0.5] = 1
105
-
106
- # Image as float32
107
- image = image.to(dtype=torch.float32)
108
- elif isinstance(mask, torch.Tensor):
109
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
110
- else:
111
- if isinstance(image, PIL.Image.Image):
112
- image = [image]
113
-
114
- image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0)
115
- image = image.transpose(0, 3, 1, 2)
116
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
117
-
118
- # preprocess mask
119
- if isinstance(mask, PIL.Image.Image):
120
- mask = [mask]
121
-
122
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
123
- mask = mask.astype(np.float32) / 255.0
124
-
125
- # paint-by-example inverses the mask
126
- mask = 1 - mask
127
-
128
- mask[mask < 0.5] = 0
129
- mask[mask >= 0.5] = 1
130
- mask = torch.from_numpy(mask)
131
-
132
- masked_image = image * mask
133
-
134
- return mask, masked_image
135
-
136
-
137
- class PaintByExamplePipeline(DiffusionPipeline):
138
- r"""
139
- <Tip warning={true}>
140
-
141
- 🧪 This is an experimental feature!
142
-
143
- </Tip>
144
-
145
- Pipeline for image-guided image inpainting using Stable Diffusion.
146
-
147
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
148
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
149
-
150
- Args:
151
- vae ([`AutoencoderKL`]):
152
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
153
- image_encoder ([`PaintByExampleImageEncoder`]):
154
- Encodes the example input image. The `unet` is conditioned on the example image instead of a text prompt.
155
- tokenizer ([`~transformers.CLIPTokenizer`]):
156
- A `CLIPTokenizer` to tokenize text.
157
- unet ([`UNet2DConditionModel`]):
158
- A `UNet2DConditionModel` to denoise the encoded image latents.
159
- scheduler ([`SchedulerMixin`]):
160
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
161
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
162
- safety_checker ([`StableDiffusionSafetyChecker`]):
163
- Classification module that estimates whether generated images could be considered offensive or harmful.
164
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
165
- about a model's potential harms.
166
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
167
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
168
-
169
- """
170
- # TODO: feature_extractor is required to encode initial images (if they are in PIL format),
171
- # we should give a descriptive message if the pipeline doesn't have one.
172
- _optional_components = ["safety_checker"]
173
-
174
- def __init__(
175
- self,
176
- vae: AutoencoderKL,
177
- image_encoder: PaintByExampleImageEncoder,
178
- unet: UNet2DConditionModel,
179
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
180
- safety_checker: StableDiffusionSafetyChecker,
181
- feature_extractor: CLIPImageProcessor,
182
- requires_safety_checker: bool = False,
183
- ):
184
- super().__init__()
185
-
186
- self.register_modules(
187
- vae=vae,
188
- image_encoder=image_encoder,
189
- unet=unet,
190
- scheduler=scheduler,
191
- safety_checker=safety_checker,
192
- feature_extractor=feature_extractor,
193
- )
194
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
195
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
196
- self.register_to_config(requires_safety_checker=requires_safety_checker)
197
-
198
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
199
- def run_safety_checker(self, image, device, dtype):
200
- if self.safety_checker is None:
201
- has_nsfw_concept = None
202
- else:
203
- if torch.is_tensor(image):
204
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
205
- else:
206
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
207
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
208
- image, has_nsfw_concept = self.safety_checker(
209
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
210
- )
211
- return image, has_nsfw_concept
212
-
213
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
214
- def prepare_extra_step_kwargs(self, generator, eta):
215
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
216
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
217
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
218
- # and should be between [0, 1]
219
-
220
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
221
- extra_step_kwargs = {}
222
- if accepts_eta:
223
- extra_step_kwargs["eta"] = eta
224
-
225
- # check if the scheduler accepts generator
226
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
227
- if accepts_generator:
228
- extra_step_kwargs["generator"] = generator
229
- return extra_step_kwargs
230
-
231
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
232
- def decode_latents(self, latents):
233
- warnings.warn(
234
- "The decode_latents method is deprecated and will be removed in a future version. Please"
235
- " use VaeImageProcessor instead",
236
- FutureWarning,
237
- )
238
- latents = 1 / self.vae.config.scaling_factor * latents
239
- image = self.vae.decode(latents, return_dict=False)[0]
240
- image = (image / 2 + 0.5).clamp(0, 1)
241
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
242
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
243
- return image
244
-
245
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs
246
- def check_inputs(self, image, height, width, callback_steps):
247
- if (
248
- not isinstance(image, torch.Tensor)
249
- and not isinstance(image, PIL.Image.Image)
250
- and not isinstance(image, list)
251
- ):
252
- raise ValueError(
253
- "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
254
- f" {type(image)}"
255
- )
256
-
257
- if height % 8 != 0 or width % 8 != 0:
258
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
259
-
260
- if (callback_steps is None) or (
261
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
262
- ):
263
- raise ValueError(
264
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
265
- f" {type(callback_steps)}."
266
- )
267
-
268
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
269
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
270
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
271
- if isinstance(generator, list) and len(generator) != batch_size:
272
- raise ValueError(
273
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
274
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
275
- )
276
-
277
- if latents is None:
278
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
279
- else:
280
- latents = latents.to(device)
281
-
282
- # scale the initial noise by the standard deviation required by the scheduler
283
- latents = latents * self.scheduler.init_noise_sigma
284
- return latents
285
-
286
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents
287
- def prepare_mask_latents(
288
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
289
- ):
290
- # resize the mask to latents shape as we concatenate the mask to the latents
291
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
292
- # and half precision
293
- mask = torch.nn.functional.interpolate(
294
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
295
- )
296
- mask = mask.to(device=device, dtype=dtype)
297
-
298
- masked_image = masked_image.to(device=device, dtype=dtype)
299
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
300
-
301
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
302
- if mask.shape[0] < batch_size:
303
- if not batch_size % mask.shape[0] == 0:
304
- raise ValueError(
305
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
306
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
307
- " of masks that you pass is divisible by the total requested batch size."
308
- )
309
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
310
- if masked_image_latents.shape[0] < batch_size:
311
- if not batch_size % masked_image_latents.shape[0] == 0:
312
- raise ValueError(
313
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
314
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
315
- " Make sure the number of images that you pass is divisible by the total requested batch size."
316
- )
317
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
318
-
319
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
320
- masked_image_latents = (
321
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
322
- )
323
-
324
- # aligning device to prevent device errors when concating it with the latent model input
325
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
326
- return mask, masked_image_latents
327
-
328
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image
329
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
330
- if isinstance(generator, list):
331
- image_latents = [
332
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
333
- for i in range(image.shape[0])
334
- ]
335
- image_latents = torch.cat(image_latents, dim=0)
336
- else:
337
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
338
-
339
- image_latents = self.vae.config.scaling_factor * image_latents
340
-
341
- return image_latents
342
-
343
- def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
344
- dtype = next(self.image_encoder.parameters()).dtype
345
-
346
- if not isinstance(image, torch.Tensor):
347
- image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
348
-
349
- image = image.to(device=device, dtype=dtype)
350
- image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True)
351
-
352
- # duplicate image embeddings for each generation per prompt, using mps friendly method
353
- bs_embed, seq_len, _ = image_embeddings.shape
354
- image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
355
- image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
356
-
357
- if do_classifier_free_guidance:
358
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1)
359
- negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1)
360
-
361
- # For classifier free guidance, we need to do two forward passes.
362
- # Here we concatenate the unconditional and text embeddings into a single batch
363
- # to avoid doing two forward passes
364
- image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
365
-
366
- return image_embeddings
367
-
368
- @torch.no_grad()
369
- def __call__(
370
- self,
371
- example_image: Union[torch.FloatTensor, PIL.Image.Image],
372
- image: Union[torch.FloatTensor, PIL.Image.Image],
373
- mask_image: Union[torch.FloatTensor, PIL.Image.Image],
374
- height: Optional[int] = None,
375
- width: Optional[int] = None,
376
- num_inference_steps: int = 50,
377
- guidance_scale: float = 5.0,
378
- negative_prompt: Optional[Union[str, List[str]]] = None,
379
- num_images_per_prompt: Optional[int] = 1,
380
- eta: float = 0.0,
381
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
382
- latents: Optional[torch.FloatTensor] = None,
383
- output_type: Optional[str] = "pil",
384
- return_dict: bool = True,
385
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
386
- callback_steps: int = 1,
387
- ):
388
- r"""
389
- The call function to the pipeline for generation.
390
-
391
- Args:
392
- example_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
393
- An example image to guide image generation.
394
- image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
395
- `Image` or tensor representing an image batch to be inpainted (parts of the image are masked out with
396
- `mask_image` and repainted according to `prompt`).
397
- mask_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
398
- `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted,
399
- while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel
400
- (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
401
- expected shape would be `(B, H, W, 1)`.
402
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
403
- The height in pixels of the generated image.
404
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
405
- The width in pixels of the generated image.
406
- num_inference_steps (`int`, *optional*, defaults to 50):
407
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
408
- expense of slower inference.
409
- guidance_scale (`float`, *optional*, defaults to 7.5):
410
- A higher guidance scale value encourages the model to generate images closely linked to the text
411
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
412
- negative_prompt (`str` or `List[str]`, *optional*):
413
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
414
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
415
- num_images_per_prompt (`int`, *optional*, defaults to 1):
416
- The number of images to generate per prompt.
417
- eta (`float`, *optional*, defaults to 0.0):
418
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
419
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
420
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
421
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
422
- generation deterministic.
423
- latents (`torch.FloatTensor`, *optional*):
424
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
425
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
426
- tensor is generated by sampling using the supplied random `generator`.
427
- output_type (`str`, *optional*, defaults to `"pil"`):
428
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
429
- return_dict (`bool`, *optional*, defaults to `True`):
430
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
431
- plain tuple.
432
- callback (`Callable`, *optional*):
433
- A function that calls every `callback_steps` steps during inference. The function is called with the
434
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
435
- callback_steps (`int`, *optional*, defaults to 1):
436
- The frequency at which the `callback` function is called. If not specified, the callback is called at
437
- every step.
438
-
439
- Example:
440
-
441
- ```py
442
- >>> import PIL
443
- >>> import requests
444
- >>> import torch
445
- >>> from io import BytesIO
446
- >>> from diffusers import PaintByExamplePipeline
447
-
448
-
449
- >>> def download_image(url):
450
- ... response = requests.get(url)
451
- ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
452
-
453
-
454
- >>> img_url = (
455
- ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png"
456
- ... )
457
- >>> mask_url = (
458
- ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png"
459
- ... )
460
- >>> example_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/reference/example_1.jpg"
461
-
462
- >>> init_image = download_image(img_url).resize((512, 512))
463
- >>> mask_image = download_image(mask_url).resize((512, 512))
464
- >>> example_image = download_image(example_url).resize((512, 512))
465
-
466
- >>> pipe = PaintByExamplePipeline.from_pretrained(
467
- ... "Fantasy-Studio/Paint-by-Example",
468
- ... torch_dtype=torch.float16,
469
- ... )
470
- >>> pipe = pipe.to("cuda")
471
-
472
- >>> image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0]
473
- >>> image
474
- ```
475
-
476
- Returns:
477
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
478
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
479
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
480
- second element is a list of `bool`s indicating whether the corresponding generated image contains
481
- "not-safe-for-work" (nsfw) content.
482
- """
483
- # 1. Define call parameters
484
- if isinstance(image, PIL.Image.Image):
485
- batch_size = 1
486
- elif isinstance(image, list):
487
- batch_size = len(image)
488
- else:
489
- batch_size = image.shape[0]
490
- device = self._execution_device
491
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
492
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
493
- # corresponds to doing no classifier free guidance.
494
- do_classifier_free_guidance = guidance_scale > 1.0
495
-
496
- # 2. Preprocess mask and image
497
- mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
498
- height, width = masked_image.shape[-2:]
499
-
500
- # 3. Check inputs
501
- self.check_inputs(example_image, height, width, callback_steps)
502
-
503
- # 4. Encode input image
504
- image_embeddings = self._encode_image(
505
- example_image, device, num_images_per_prompt, do_classifier_free_guidance
506
- )
507
-
508
- # 5. set timesteps
509
- self.scheduler.set_timesteps(num_inference_steps, device=device)
510
- timesteps = self.scheduler.timesteps
511
-
512
- # 6. Prepare latent variables
513
- num_channels_latents = self.vae.config.latent_channels
514
- latents = self.prepare_latents(
515
- batch_size * num_images_per_prompt,
516
- num_channels_latents,
517
- height,
518
- width,
519
- image_embeddings.dtype,
520
- device,
521
- generator,
522
- latents,
523
- )
524
-
525
- # 7. Prepare mask latent variables
526
- mask, masked_image_latents = self.prepare_mask_latents(
527
- mask,
528
- masked_image,
529
- batch_size * num_images_per_prompt,
530
- height,
531
- width,
532
- image_embeddings.dtype,
533
- device,
534
- generator,
535
- do_classifier_free_guidance,
536
- )
537
-
538
- # 8. Check that sizes of mask, masked image and latents match
539
- num_channels_mask = mask.shape[1]
540
- num_channels_masked_image = masked_image_latents.shape[1]
541
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
542
- raise ValueError(
543
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
544
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
545
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
546
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
547
- " `pipeline.unet` or your `mask_image` or `image` input."
548
- )
549
-
550
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
551
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
552
-
553
- # 10. Denoising loop
554
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
555
- with self.progress_bar(total=num_inference_steps) as progress_bar:
556
- for i, t in enumerate(timesteps):
557
- # expand the latents if we are doing classifier free guidance
558
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
559
-
560
- # concat latents, mask, masked_image_latents in the channel dimension
561
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
562
- latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1)
563
-
564
- # predict the noise residual
565
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
566
-
567
- # perform guidance
568
- if do_classifier_free_guidance:
569
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
570
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
571
-
572
- # compute the previous noisy sample x_t -> x_t-1
573
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
574
-
575
- # call the callback, if provided
576
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
577
- progress_bar.update()
578
- if callback is not None and i % callback_steps == 0:
579
- callback(i, t, latents)
580
-
581
- if not output_type == "latent":
582
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
583
- image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype)
584
- else:
585
- image = latents
586
- has_nsfw_concept = None
587
-
588
- if has_nsfw_concept is None:
589
- do_denormalize = [True] * image.shape[0]
590
- else:
591
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
592
-
593
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
594
-
595
- if not return_dict:
596
- return (image, has_nsfw_concept)
597
-
598
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/score_sde_ve/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .pipeline_score_sde_ve import ScoreSdeVePipeline
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines_onnx_common.py DELETED
@@ -1,12 +0,0 @@
1
- from diffusers.utils.testing_utils import require_onnxruntime
2
-
3
-
4
- @require_onnxruntime
5
- class OnnxPipelineTesterMixin:
6
- """
7
- This mixin is designed to be used with unittest.TestCase classes.
8
- It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline,
9
- equivalence of dict and tuple outputs, etc.
10
- """
11
-
12
- pass
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py DELETED
@@ -1,87 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import tempfile
18
- import unittest
19
-
20
- import numpy as np
21
- import torch
22
-
23
- from diffusers import VersatileDiffusionTextToImagePipeline
24
- from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
25
-
26
-
27
- torch.backends.cuda.matmul.allow_tf32 = False
28
-
29
-
30
- class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
31
- pass
32
-
33
-
34
- @nightly
35
- @require_torch_gpu
36
- class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
37
- def tearDown(self):
38
- # clean up the VRAM after each test
39
- super().tearDown()
40
- gc.collect()
41
- torch.cuda.empty_cache()
42
-
43
- def test_remove_unused_weights_save_load(self):
44
- pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
45
- # remove text_unet
46
- pipe.remove_unused_weights()
47
- pipe.to(torch_device)
48
- pipe.set_progress_bar_config(disable=None)
49
-
50
- prompt = "A painting of a squirrel eating a burger "
51
- generator = torch.manual_seed(0)
52
- image = pipe(
53
- prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
54
- ).images
55
-
56
- with tempfile.TemporaryDirectory() as tmpdirname:
57
- pipe.save_pretrained(tmpdirname)
58
- pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
59
- pipe.to(torch_device)
60
- pipe.set_progress_bar_config(disable=None)
61
-
62
- generator = generator.manual_seed(0)
63
- new_image = pipe(
64
- prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
65
- ).images
66
-
67
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
68
-
69
- def test_inference_text2img(self):
70
- pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
71
- "shi-labs/versatile-diffusion", torch_dtype=torch.float16
72
- )
73
- pipe.to(torch_device)
74
- pipe.set_progress_bar_config(disable=None)
75
-
76
- prompt = "A painting of a squirrel eating a burger "
77
- generator = torch.manual_seed(0)
78
- image = pipe(
79
- prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
80
- ).images
81
-
82
- image_slice = image[0, 253:256, 253:256, -1]
83
-
84
- assert image.shape == (1, 512, 512, 3)
85
- expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
86
-
87
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
spaces/Andy0409/text_generator/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Text Generator
3
- emoji: 🚀
4
- colorFrom: yellow
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.11.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Andy1621/uniformer_image_detection/configs/wider_face/README.md DELETED
@@ -1,43 +0,0 @@
1
- # WIDER Face Dataset
2
-
3
- [DATASET]
4
-
5
- To use the WIDER Face dataset you need to download it
6
- and extract to the `data/WIDERFace` folder. Annotation in the VOC format
7
- can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git).
8
- You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders
9
- to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`.
10
- Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`.
11
- The directory should be like this:
12
-
13
- ```
14
- mmdetection
15
- ├── mmdet
16
- ├── tools
17
- ├── configs
18
- ├── data
19
- │ ├── WIDERFace
20
- │ │ ├── WIDER_train
21
- │ | │ ├──0--Parade
22
- │ | │ ├── ...
23
- │ | │ ├── Annotations
24
- │ │ ├── WIDER_val
25
- │ | │ ├──0--Parade
26
- │ | │ ├── ...
27
- │ | │ ├── Annotations
28
- │ │ ├── val.txt
29
- │ │ ├── train.txt
30
-
31
- ```
32
-
33
- After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or
34
- create your own config based on the presented one.
35
-
36
- ```
37
- @inproceedings{yang2016wider,
38
- Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
39
- Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
40
- Title = {WIDER FACE: A Face Detection Benchmark},
41
- Year = {2016}
42
- }
43
- ```
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py DELETED
@@ -1,55 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import kaiming_init
3
- from mmcv.runner import auto_fp16
4
-
5
- from mmdet.models.builder import HEADS
6
-
7
-
8
- @HEADS.register_module()
9
- class FeatureRelayHead(nn.Module):
10
- """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
11
-
12
- Args:
13
- in_channels (int, optional): number of input channels. Default: 256.
14
- conv_out_channels (int, optional): number of output channels before
15
- classification layer. Default: 256.
16
- roi_feat_size (int, optional): roi feat size at box head. Default: 7.
17
- scale_factor (int, optional): scale factor to match roi feat size
18
- at mask head. Default: 2.
19
- """
20
-
21
- def __init__(self,
22
- in_channels=1024,
23
- out_conv_channels=256,
24
- roi_feat_size=7,
25
- scale_factor=2):
26
- super(FeatureRelayHead, self).__init__()
27
- assert isinstance(roi_feat_size, int)
28
-
29
- self.in_channels = in_channels
30
- self.out_conv_channels = out_conv_channels
31
- self.roi_feat_size = roi_feat_size
32
- self.out_channels = (roi_feat_size**2) * out_conv_channels
33
- self.scale_factor = scale_factor
34
- self.fp16_enabled = False
35
-
36
- self.fc = nn.Linear(self.in_channels, self.out_channels)
37
- self.upsample = nn.Upsample(
38
- scale_factor=scale_factor, mode='bilinear', align_corners=True)
39
-
40
- def init_weights(self):
41
- """Init weights for the head."""
42
- kaiming_init(self.fc)
43
-
44
- @auto_fp16()
45
- def forward(self, x):
46
- """Forward function."""
47
- N, in_C = x.shape
48
- if N > 0:
49
- out_C = self.out_conv_channels
50
- out_HW = self.roi_feat_size
51
- x = self.fc(x)
52
- x = x.reshape(N, out_C, out_HW, out_HW)
53
- x = self.upsample(x)
54
- return x
55
- return None
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py DELETED
@@ -1,10 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/fcn_r50-d8.py',
3
- '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_80k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(num_classes=59),
8
- auxiliary_head=dict(num_classes=59),
9
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
10
- optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/registry.py DELETED
@@ -1,8 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from torch.nn.parallel import DataParallel, DistributedDataParallel
3
-
4
- from annotator.uniformer.mmcv.utils import Registry
5
-
6
- MODULE_WRAPPERS = Registry('module wrapper')
7
- MODULE_WRAPPERS.register_module(module=DataParallel)
8
- MODULE_WRAPPERS.register_module(module=DistributedDataParallel)
spaces/Anthony7906/MengHuiMXD_GPT/modules/__init__.py DELETED
File without changes
spaces/AriusXi/CodeGenerator/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Space1
3
- emoji: 📊
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.14.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/freeze.py DELETED
@@ -1,255 +0,0 @@
1
- import collections
2
- import logging
3
- import os
4
- from typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set
5
-
6
- from pip._vendor.packaging.utils import canonicalize_name
7
- from pip._vendor.packaging.version import Version
8
-
9
- from pip._internal.exceptions import BadCommand, InstallationError
10
- from pip._internal.metadata import BaseDistribution, get_environment
11
- from pip._internal.req.constructors import (
12
- install_req_from_editable,
13
- install_req_from_line,
14
- )
15
- from pip._internal.req.req_file import COMMENT_RE
16
- from pip._internal.utils.direct_url_helpers import direct_url_as_pep440_direct_reference
17
-
18
- logger = logging.getLogger(__name__)
19
-
20
-
21
- class _EditableInfo(NamedTuple):
22
- requirement: str
23
- comments: List[str]
24
-
25
-
26
- def freeze(
27
- requirement: Optional[List[str]] = None,
28
- local_only: bool = False,
29
- user_only: bool = False,
30
- paths: Optional[List[str]] = None,
31
- isolated: bool = False,
32
- exclude_editable: bool = False,
33
- skip: Container[str] = (),
34
- ) -> Generator[str, None, None]:
35
- installations: Dict[str, FrozenRequirement] = {}
36
-
37
- dists = get_environment(paths).iter_installed_distributions(
38
- local_only=local_only,
39
- skip=(),
40
- user_only=user_only,
41
- )
42
- for dist in dists:
43
- req = FrozenRequirement.from_dist(dist)
44
- if exclude_editable and req.editable:
45
- continue
46
- installations[req.canonical_name] = req
47
-
48
- if requirement:
49
- # the options that don't get turned into an InstallRequirement
50
- # should only be emitted once, even if the same option is in multiple
51
- # requirements files, so we need to keep track of what has been emitted
52
- # so that we don't emit it again if it's seen again
53
- emitted_options: Set[str] = set()
54
- # keep track of which files a requirement is in so that we can
55
- # give an accurate warning if a requirement appears multiple times.
56
- req_files: Dict[str, List[str]] = collections.defaultdict(list)
57
- for req_file_path in requirement:
58
- with open(req_file_path) as req_file:
59
- for line in req_file:
60
- if (
61
- not line.strip()
62
- or line.strip().startswith("#")
63
- or line.startswith(
64
- (
65
- "-r",
66
- "--requirement",
67
- "-f",
68
- "--find-links",
69
- "-i",
70
- "--index-url",
71
- "--pre",
72
- "--trusted-host",
73
- "--process-dependency-links",
74
- "--extra-index-url",
75
- "--use-feature",
76
- )
77
- )
78
- ):
79
- line = line.rstrip()
80
- if line not in emitted_options:
81
- emitted_options.add(line)
82
- yield line
83
- continue
84
-
85
- if line.startswith("-e") or line.startswith("--editable"):
86
- if line.startswith("-e"):
87
- line = line[2:].strip()
88
- else:
89
- line = line[len("--editable") :].strip().lstrip("=")
90
- line_req = install_req_from_editable(
91
- line,
92
- isolated=isolated,
93
- )
94
- else:
95
- line_req = install_req_from_line(
96
- COMMENT_RE.sub("", line).strip(),
97
- isolated=isolated,
98
- )
99
-
100
- if not line_req.name:
101
- logger.info(
102
- "Skipping line in requirement file [%s] because "
103
- "it's not clear what it would install: %s",
104
- req_file_path,
105
- line.strip(),
106
- )
107
- logger.info(
108
- " (add #egg=PackageName to the URL to avoid"
109
- " this warning)"
110
- )
111
- else:
112
- line_req_canonical_name = canonicalize_name(line_req.name)
113
- if line_req_canonical_name not in installations:
114
- # either it's not installed, or it is installed
115
- # but has been processed already
116
- if not req_files[line_req.name]:
117
- logger.warning(
118
- "Requirement file [%s] contains %s, but "
119
- "package %r is not installed",
120
- req_file_path,
121
- COMMENT_RE.sub("", line).strip(),
122
- line_req.name,
123
- )
124
- else:
125
- req_files[line_req.name].append(req_file_path)
126
- else:
127
- yield str(installations[line_req_canonical_name]).rstrip()
128
- del installations[line_req_canonical_name]
129
- req_files[line_req.name].append(req_file_path)
130
-
131
- # Warn about requirements that were included multiple times (in a
132
- # single requirements file or in different requirements files).
133
- for name, files in req_files.items():
134
- if len(files) > 1:
135
- logger.warning(
136
- "Requirement %s included multiple times [%s]",
137
- name,
138
- ", ".join(sorted(set(files))),
139
- )
140
-
141
- yield ("## The following requirements were added by pip freeze:")
142
- for installation in sorted(installations.values(), key=lambda x: x.name.lower()):
143
- if installation.canonical_name not in skip:
144
- yield str(installation).rstrip()
145
-
146
-
147
- def _format_as_name_version(dist: BaseDistribution) -> str:
148
- dist_version = dist.version
149
- if isinstance(dist_version, Version):
150
- return f"{dist.raw_name}=={dist_version}"
151
- return f"{dist.raw_name}==={dist_version}"
152
-
153
-
154
- def _get_editable_info(dist: BaseDistribution) -> _EditableInfo:
155
- """
156
- Compute and return values (req, comments) for use in
157
- FrozenRequirement.from_dist().
158
- """
159
- editable_project_location = dist.editable_project_location
160
- assert editable_project_location
161
- location = os.path.normcase(os.path.abspath(editable_project_location))
162
-
163
- from pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs
164
-
165
- vcs_backend = vcs.get_backend_for_dir(location)
166
-
167
- if vcs_backend is None:
168
- display = _format_as_name_version(dist)
169
- logger.debug(
170
- 'No VCS found for editable requirement "%s" in: %r',
171
- display,
172
- location,
173
- )
174
- return _EditableInfo(
175
- requirement=location,
176
- comments=[f"# Editable install with no version control ({display})"],
177
- )
178
-
179
- vcs_name = type(vcs_backend).__name__
180
-
181
- try:
182
- req = vcs_backend.get_src_requirement(location, dist.raw_name)
183
- except RemoteNotFoundError:
184
- display = _format_as_name_version(dist)
185
- return _EditableInfo(
186
- requirement=location,
187
- comments=[f"# Editable {vcs_name} install with no remote ({display})"],
188
- )
189
- except RemoteNotValidError as ex:
190
- display = _format_as_name_version(dist)
191
- return _EditableInfo(
192
- requirement=location,
193
- comments=[
194
- f"# Editable {vcs_name} install ({display}) with either a deleted "
195
- f"local remote or invalid URI:",
196
- f"# '{ex.url}'",
197
- ],
198
- )
199
- except BadCommand:
200
- logger.warning(
201
- "cannot determine version of editable source in %s "
202
- "(%s command not found in path)",
203
- location,
204
- vcs_backend.name,
205
- )
206
- return _EditableInfo(requirement=location, comments=[])
207
- except InstallationError as exc:
208
- logger.warning("Error when trying to get requirement for VCS system %s", exc)
209
- else:
210
- return _EditableInfo(requirement=req, comments=[])
211
-
212
- logger.warning("Could not determine repository location of %s", location)
213
-
214
- return _EditableInfo(
215
- requirement=location,
216
- comments=["## !! Could not determine repository location"],
217
- )
218
-
219
-
220
- class FrozenRequirement:
221
- def __init__(
222
- self,
223
- name: str,
224
- req: str,
225
- editable: bool,
226
- comments: Iterable[str] = (),
227
- ) -> None:
228
- self.name = name
229
- self.canonical_name = canonicalize_name(name)
230
- self.req = req
231
- self.editable = editable
232
- self.comments = comments
233
-
234
- @classmethod
235
- def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement":
236
- editable = dist.editable
237
- if editable:
238
- req, comments = _get_editable_info(dist)
239
- else:
240
- comments = []
241
- direct_url = dist.direct_url
242
- if direct_url:
243
- # if PEP 610 metadata is present, use it
244
- req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name)
245
- else:
246
- # name==version requirement
247
- req = _format_as_name_version(dist)
248
-
249
- return cls(dist.raw_name, req, editable, comments=comments)
250
-
251
- def __str__(self) -> str:
252
- req = self.req
253
- if self.editable:
254
- req = f"-e {req}"
255
- return "\n".join(list(self.comments) + [str(req)]) + "\n"
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/themes.py DELETED
@@ -1,5 +0,0 @@
1
- from .default_styles import DEFAULT_STYLES
2
- from .theme import Theme
3
-
4
-
5
- DEFAULT = Theme(DEFAULT_STYLES)
spaces/Awiny/Image2Paragraph/models/grit_src/grit/evaluation/eval.py DELETED
@@ -1,156 +0,0 @@
- import itertools
- import json
- import os
- from detectron2.structures import Boxes, BoxMode, pairwise_iou
- from detectron2.utils.file_io import PathManager
- import numpy as np
- import pycocotools.mask as mask_util
- from detectron2.evaluation.coco_evaluation import COCOEvaluator
- from detectron2.evaluation.coco_evaluation import _evaluate_predictions_on_coco
-
-
- class GRiTCOCOEvaluator(COCOEvaluator):
-     def process(self, inputs, outputs):
-         for input, output in zip(inputs, outputs):
-             prediction = {"image_id": input["image_id"]}
-
-             if "instances" in output:
-                 instances = output["instances"].to(self._cpu_device)
-                 prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
-
-             if len(prediction) > 1:
-                 self._predictions.append(prediction)
-
-     def _eval_predictions(self, predictions, img_ids=None):
-         self._logger.info("Preparing results for COCO format ...")
-         coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-         tasks = self._tasks or self._tasks_from_predictions(coco_results)
-
-         if self._output_dir:
-             file_path = os.path.join(self._output_dir, "coco_instances_results.json")
-             self._logger.info("Saving results to {}".format(file_path))
-             with PathManager.open(file_path, "w") as f:
-                 f.write(json.dumps(coco_results))
-                 f.flush()
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info(
-             "Evaluating predictions with {} COCO API...".format(
-                 "unofficial" if self._use_fast_impl else "official"
-             )
-         )
-
-         coco_results = self.convert_classname_to_id(coco_results)
-
-         for task in sorted(tasks):
-             assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
-             coco_eval = (
-                 _evaluate_predictions_on_coco(
-                     self._coco_api,
-                     coco_results,
-                     task,
-                     kpt_oks_sigmas=self._kpt_oks_sigmas,
-                     use_fast_impl=self._use_fast_impl,
-                     img_ids=img_ids,
-                     max_dets_per_image=self._max_dets_per_image,
-                 )
-                 if len(coco_results) > 0
-                 else None  # cocoapi does not handle empty results very well
-             )
-
-             res = self._derive_coco_results(
-                 coco_eval, task, class_names=self._metadata.get("thing_classes")
-             )
-             self._results[task] = res
-
-     def convert_classname_to_id(self, results):
-         outputs = []
-         class_name_to_id = {}
-         categories = sorted(self._coco_api.dataset['categories'], key=lambda x: x['id'])
-
-         for cat in categories:
-             class_name_to_id[cat['name']] = cat['id']
-
-         for pred in results:
-             if pred['object_descriptions'] in class_name_to_id:
-                 pred['category_id'] = class_name_to_id[pred['object_descriptions']]
-                 del pred['object_descriptions']
-                 outputs.append(pred)
-
-         return outputs
-
-
- class GRiTVGEvaluator(COCOEvaluator):
-     def process(self, inputs, outputs):
-         for input, output in zip(inputs, outputs):
-             assert input["image_id"] == int(input['file_name'].split('/')[-1].split('.')[0])
-             prediction = {"image_id": input["image_id"]}
-
-             if "instances" in output:
-                 instances = output["instances"].to(self._cpu_device)
-                 prediction["instances"] = instances_to_coco_json(instances, input["image_id"], output_logits=True)
-                 h = input['height']
-                 w = input['width']
-                 scale = 720.0 / max(h, w)
-                 scaled_inst = []
-                 for inst in prediction["instances"]:
-                     inst['bbox'][0] = inst['bbox'][0] * scale
-                     inst['bbox'][1] = inst['bbox'][1] * scale
-                     inst['bbox'][2] = inst['bbox'][2] * scale
-                     inst['bbox'][3] = inst['bbox'][3] * scale
-                     scaled_inst.append(inst)
-                 if len(scaled_inst) > 0:
-                     prediction["instances"] = scaled_inst
-             if len(prediction) > 1:
-                 self._predictions.append(prediction)
-
-     def _eval_predictions(self, predictions, img_ids=None):
-         '''
-         This is only for saving the results to json file
-         '''
-         self._logger.info("Preparing results for COCO format ...")
-         coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-
-         if self._output_dir:
-             file_path = os.path.join(self._output_dir, "vg_instances_results.json")
-             self._logger.info("Saving results to {}".format(file_path))
-             with PathManager.open(file_path, "w") as f:
-                 f.write(json.dumps(coco_results))
-                 f.flush()
-
-
- def instances_to_coco_json(instances, img_id, output_logits=False):
-     """
-     Add object_descriptions and logit (if applicable) to
-     detectron2's instances_to_coco_json
-     """
-     num_instance = len(instances)
-     if num_instance == 0:
-         return []
-
-     boxes = instances.pred_boxes.tensor.numpy()
-     boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
-     boxes = boxes.tolist()
-     scores = instances.scores.tolist()
-     classes = instances.pred_classes.tolist()
-     object_descriptions = instances.pred_object_descriptions.data
-     if output_logits:
-         logits = instances.logits.tolist()
-
-     results = []
-     for k in range(num_instance):
-         result = {
-             "image_id": img_id,
-             "category_id": classes[k],
-             "bbox": boxes[k],
-             "score": scores[k],
-             'object_descriptions': object_descriptions[k],
-         }
-         if output_logits:
-             result["logit"] = logits[k]
-
-         results.append(result)
-     return results
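`GRiTCOCOEvaluator` above follows detectron2's `DatasetEvaluator` protocol, so it is meant to be driven by `inference_on_dataset`. A hedged sketch of how it would typically be wired up; the dataset name, output directory, and config/model objects are assumptions for illustration, and the import path for the evaluator depends on the repository layout:

```python
# Sketch under assumptions: a registered detectron2 dataset and a built GRiT model.
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

def evaluate_grit(cfg, model, dataset_name="coco_2017_val", output_dir="./eval_out"):
    # GRiTCOCOEvaluator is the class defined in the deleted eval.py above.
    evaluator = GRiTCOCOEvaluator(dataset_name, output_dir=output_dir)
    loader = build_detection_test_loader(cfg, dataset_name)
    # inference_on_dataset calls reset(), then process() per batch, then evaluate(),
    # which ends up in _eval_predictions() and writes coco_instances_results.json.
    return inference_on_dataset(model, loader, evaluator)
```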
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/endpoint_provider.py DELETED
@@ -1,727 +0,0 @@
1
- # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- """
15
- NOTE: All classes and functions in this module are considered private and are
16
- subject to abrupt breaking changes. Please do not use them directly.
17
-
18
- To view the raw JSON that the objects in this module represent, please
19
- go to any `endpoint-rule-set.json` file in /botocore/data/<service>/<api version>/
20
- or you can look at the test files in /tests/unit/data/endpoints/valid-rules/
21
- """
22
-
23
-
24
- import logging
25
- import re
26
- from enum import Enum
27
- from string import Formatter
28
- from typing import NamedTuple
29
-
30
- from botocore import xform_name
31
- from botocore.compat import IPV4_RE, quote, urlparse
32
- from botocore.exceptions import EndpointResolutionError
33
- from botocore.utils import (
34
- ArnParser,
35
- InvalidArnException,
36
- is_valid_ipv4_endpoint_url,
37
- is_valid_ipv6_endpoint_url,
38
- lru_cache_weakref,
39
- normalize_url_path,
40
- percent_encode,
41
- )
42
-
43
- logger = logging.getLogger(__name__)
44
-
45
- TEMPLATE_STRING_RE = re.compile(r"\{[a-zA-Z#]+\}")
46
- GET_ATTR_RE = re.compile(r"(\w+)\[(\d+)\]")
47
- VALID_HOST_LABEL_RE = re.compile(
48
- r"^(?!-)[a-zA-Z\d-]{1,63}(?<!-)$",
49
- )
50
- CACHE_SIZE = 100
51
- ARN_PARSER = ArnParser()
52
- STRING_FORMATTER = Formatter()
53
-
54
-
55
- class RuleSetStandardLibrary:
56
- """Rule actions to be performed by the EndpointProvider."""
57
-
58
- def __init__(self, partitions_data):
59
- self.partitions_data = partitions_data
60
-
61
- def is_func(self, argument):
62
- """Determine if an object is a function object.
63
-
64
- :type argument: Any
65
- :rtype: bool
66
- """
67
- return isinstance(argument, dict) and "fn" in argument
68
-
69
- def is_ref(self, argument):
70
- """Determine if an object is a reference object.
71
-
72
- :type argument: Any
73
- :rtype: bool
74
- """
75
- return isinstance(argument, dict) and "ref" in argument
76
-
77
- def is_template(self, argument):
78
- """Determine if an object contains a template string.
79
-
80
- :type argument: Any
81
- :rtype: bool
82
- """
83
- return (
84
- isinstance(argument, str)
85
- and TEMPLATE_STRING_RE.search(argument) is not None
86
- )
87
-
88
- def resolve_template_string(self, value, scope_vars):
89
- """Resolve and inject values into a template string.
90
-
91
- :type value: str
92
- :type scope_vars: dict
93
- :rtype: str
94
- """
95
- result = ""
96
- for literal, reference, _, _ in STRING_FORMATTER.parse(value):
97
- if reference is not None:
98
- template_value = scope_vars
99
- template_params = reference.split("#")
100
- for param in template_params:
101
- template_value = template_value[param]
102
- result += f"{literal}{template_value}"
103
- else:
104
- result += literal
105
- return result
106
-
107
- def resolve_value(self, value, scope_vars):
108
- """Return evaluated value based on type.
109
-
110
- :type value: Any
111
- :type scope_vars: dict
112
- :rtype: Any
113
- """
114
- if self.is_func(value):
115
- return self.call_function(value, scope_vars)
116
- elif self.is_ref(value):
117
- return scope_vars.get(value["ref"])
118
- elif self.is_template(value):
119
- return self.resolve_template_string(value, scope_vars)
120
-
121
- return value
122
-
123
- def convert_func_name(self, value):
124
- """Normalize function names.
125
-
126
- :type value: str
127
- :rtype: str
128
- """
129
- normalized_name = f"{xform_name(value)}"
130
- if normalized_name == "not":
131
- normalized_name = f"_{normalized_name}"
132
- return normalized_name.replace(".", "_")
133
-
134
- def call_function(self, func_signature, scope_vars):
135
- """Call the function with the resolved arguments and assign to `scope_vars`
136
- when applicable.
137
-
138
- :type func_signature: dict
139
- :type scope_vars: dict
140
- :rtype: Any
141
- """
142
- func_args = [
143
- self.resolve_value(arg, scope_vars)
144
- for arg in func_signature["argv"]
145
- ]
146
- func_name = self.convert_func_name(func_signature["fn"])
147
- func = getattr(self, func_name)
148
- result = func(*func_args)
149
- if "assign" in func_signature:
150
- assign = func_signature["assign"]
151
- if assign in scope_vars:
152
- raise EndpointResolutionError(
153
- msg=f"Assignment {assign} already exists in "
154
- "scoped variables and cannot be overwritten"
155
- )
156
- scope_vars[assign] = result
157
- return result
158
-
159
- def is_set(self, value):
160
- """Evaluates whether a value is set.
161
-
162
- :type value: Any
163
- :rtype: bool
164
- """
165
- return value is not None
166
-
167
- def get_attr(self, value, path):
168
- """Find an attribute within a value given a path string. The path can contain
169
- the name of the attribute and an index in brackets. A period separating attribute
170
- names indicates the one to the right is nested. The index will always occur at
171
- the end of the path.
172
-
173
- :type value: dict or list
174
- :type path: str
175
- :rtype: Any
176
- """
177
- for part in path.split("."):
178
- match = GET_ATTR_RE.search(part)
179
- if match is not None:
180
- name, index = match.groups()
181
- index = int(index)
182
- value = value.get(name)
183
- if value is None or index >= len(value):
184
- return None
185
- return value[index]
186
- else:
187
- value = value[part]
188
- return value
189
-
190
- def format_partition_output(self, partition):
191
- output = partition["outputs"]
192
- output["name"] = partition["id"]
193
- return output
194
-
195
- def is_partition_match(self, region, partition):
196
- matches_regex = re.match(partition["regionRegex"], region) is not None
197
- return region in partition["regions"] or matches_regex
198
-
199
- def aws_partition(self, value):
200
- """Match a region string to an AWS partition.
201
-
202
- :type value: str
203
- :rtype: dict
204
- """
205
- partitions = self.partitions_data['partitions']
206
-
207
- if value is not None:
208
- for partition in partitions:
209
- if self.is_partition_match(value, partition):
210
- return self.format_partition_output(partition)
211
-
212
- # return the default partition if no matches were found
213
- aws_partition = partitions[0]
214
- return self.format_partition_output(aws_partition)
215
-
216
- def aws_parse_arn(self, value):
217
- """Parse and validate string for ARN components.
218
-
219
- :type value: str
220
- :rtype: dict
221
- """
222
- if value is None or not value.startswith("arn:"):
223
- return None
224
-
225
- try:
226
- arn_dict = ARN_PARSER.parse_arn(value)
227
- except InvalidArnException:
228
- return None
229
-
230
- # partition, resource, and service are required
231
- if not all(
232
- (arn_dict["partition"], arn_dict["service"], arn_dict["resource"])
233
- ):
234
- return None
235
-
236
- arn_dict["accountId"] = arn_dict.pop("account")
237
-
238
- resource = arn_dict.pop("resource")
239
- arn_dict["resourceId"] = resource.replace(":", "/").split("/")
240
-
241
- return arn_dict
242
-
243
- def is_valid_host_label(self, value, allow_subdomains):
244
- """Evaluates whether a value is a valid host label per
245
- RFC 1123. If allow_subdomains is True, split on `.` and validate
246
- each component separately.
247
-
248
- :type value: str
249
- :type allow_subdomains: bool
250
- :rtype: bool
251
- """
252
- if value is None or allow_subdomains is False and value.count(".") > 0:
253
- return False
254
-
255
- if allow_subdomains is True:
256
- return all(
257
- self.is_valid_host_label(label, False)
258
- for label in value.split(".")
259
- )
260
-
261
- return VALID_HOST_LABEL_RE.match(value) is not None
262
-
263
- def string_equals(self, value1, value2):
264
- """Evaluates two string values for equality.
265
-
266
- :type value1: str
267
- :type value2: str
268
- :rtype: bool
269
- """
270
- if not all(isinstance(val, str) for val in (value1, value2)):
271
- msg = f"Both values must be strings, not {type(value1)} and {type(value2)}."
272
- raise EndpointResolutionError(msg=msg)
273
- return value1 == value2
274
-
275
- def uri_encode(self, value):
276
- """Perform percent-encoding on an input string.
277
-
278
- :type value: str
279
- :rtype: str
280
- """
281
- if value is None:
282
- return None
283
-
284
- return percent_encode(value)
285
-
286
- def parse_url(self, value):
287
- """Parse a URL string into components.
288
-
289
- :type value: str
290
- :rtype: dict
291
- """
292
- if value is None:
293
- return None
294
-
295
- url_components = urlparse(value)
296
- try:
297
- # urlparse may assign non-integer values to
298
- # `port` and will fail when accessed.
299
- url_components.port
300
- except ValueError:
301
- return None
302
-
303
- scheme = url_components.scheme
304
- query = url_components.query
305
- # URLs with queries are not supported
306
- if scheme not in ("https", "http") or len(query) > 0:
307
- return None
308
-
309
- path = url_components.path
310
- normalized_path = quote(normalize_url_path(path))
311
- if not normalized_path.endswith("/"):
312
- normalized_path = f"{normalized_path}/"
313
-
314
- return {
315
- "scheme": scheme,
316
- "authority": url_components.netloc,
317
- "path": path,
318
- "normalizedPath": normalized_path,
319
- "isIp": is_valid_ipv4_endpoint_url(value)
320
- or is_valid_ipv6_endpoint_url(value),
321
- }
322
-
323
- def boolean_equals(self, value1, value2):
324
- """Evaluates two boolean values for equality.
325
-
326
- :type value1: bool
327
- :type value2: bool
328
- :rtype: bool
329
- """
330
- if not all(isinstance(val, bool) for val in (value1, value2)):
331
- msg = f"Both arguments must be bools, not {type(value1)} and {type(value2)}."
332
- raise EndpointResolutionError(msg=msg)
333
- return value1 is value2
334
-
335
- def is_ascii(self, value):
336
- """Evaluates if a string only contains ASCII characters.
337
-
338
- :type value: str
339
- :rtype: bool
340
- """
341
- try:
342
- value.encode("ascii")
343
- return True
344
- except UnicodeEncodeError:
345
- return False
346
-
347
- def substring(self, value, start, stop, reverse):
348
- """Computes a substring given the start index and end index. If `reverse` is
349
- True, slice the string from the end instead.
350
-
351
- :type value: str
352
- :type start: int
353
- :type stop: int
354
- :type reverse: bool
355
- :rtype: str
356
- """
357
- if not isinstance(value, str):
358
- msg = f"Input must be a string, not {type(value)}."
359
- raise EndpointResolutionError(msg=msg)
360
- if start >= stop or len(value) < stop or not self.is_ascii(value):
361
- return None
362
-
363
- if reverse is True:
364
- r_start = len(value) - stop
365
- r_stop = len(value) - start
366
- return value[r_start:r_stop]
367
-
368
- return value[start:stop]
369
-
370
- def _not(self, value):
371
- """A function implementation of the logical operator `not`.
372
-
373
- :type value: Any
374
- :rtype: bool
375
- """
376
- return not value
377
-
378
- def aws_is_virtual_hostable_s3_bucket(self, value, allow_subdomains):
379
- """Evaluates whether a value is a valid bucket name for virtual host
380
- style bucket URLs. To pass, the value must meet the following criteria:
381
- 1. is_valid_host_label(value) is True
382
- 2. length between 3 and 63 characters (inclusive)
383
- 3. does not contain uppercase characters
384
- 4. is not formatted as an IP address
385
-
386
- If allow_subdomains is True, split on `.` and validate
387
- each component separately.
388
-
389
- :type value: str
390
- :type allow_subdomains: bool
391
- :rtype: bool
392
- """
393
- if (
394
- value is None
395
- or len(value) < 3
396
- or value.lower() != value
397
- or IPV4_RE.match(value) is not None
398
- ):
399
- return False
400
-
401
- if allow_subdomains is True:
402
- return all(
403
- self.aws_is_virtual_hostable_s3_bucket(label, False)
404
- for label in value.split(".")
405
- )
406
-
407
- return self.is_valid_host_label(value, allow_subdomains=False)
408
-
409
-
410
- # maintains backwards compatibility as `Library` was misspelled
411
- # in earlier versions
412
- RuleSetStandardLibary = RuleSetStandardLibrary
413
-
414
-
415
- class BaseRule:
416
- """Base interface for individual endpoint rules."""
417
-
418
- def __init__(self, conditions, documentation=None):
419
- self.conditions = conditions
420
- self.documentation = documentation
421
-
422
- def evaluate(self, scope_vars, rule_lib):
423
- raise NotImplementedError()
424
-
425
- def evaluate_conditions(self, scope_vars, rule_lib):
426
- """Determine if all conditions in a rule are met.
427
-
428
- :type scope_vars: dict
429
- :type rule_lib: RuleSetStandardLibrary
430
- :rtype: bool
431
- """
432
- for func_signature in self.conditions:
433
- result = rule_lib.call_function(func_signature, scope_vars)
434
- if result is False or result is None:
435
- return False
436
- return True
437
-
438
-
439
- class RuleSetEndpoint(NamedTuple):
440
- """A resolved endpoint object returned by a rule."""
441
-
442
- url: str
443
- properties: dict
444
- headers: dict
445
-
446
-
447
- class EndpointRule(BaseRule):
448
- def __init__(self, endpoint, **kwargs):
449
- super().__init__(**kwargs)
450
- self.endpoint = endpoint
451
-
452
- def evaluate(self, scope_vars, rule_lib):
453
- """Determine if conditions are met to provide a valid endpoint.
454
-
455
- :type scope_vars: dict
456
- :rtype: RuleSetEndpoint
457
- """
458
- if self.evaluate_conditions(scope_vars, rule_lib):
459
- url = rule_lib.resolve_value(self.endpoint["url"], scope_vars)
460
- properties = self.resolve_properties(
461
- self.endpoint.get("properties", {}),
462
- scope_vars,
463
- rule_lib,
464
- )
465
- headers = self.resolve_headers(scope_vars, rule_lib)
466
- return RuleSetEndpoint(
467
- url=url, properties=properties, headers=headers
468
- )
469
-
470
- return None
471
-
472
- def resolve_properties(self, properties, scope_vars, rule_lib):
473
- """Traverse `properties` attribute, resolving any template strings.
474
-
475
- :type properties: dict/list/str
476
- :type scope_vars: dict
477
- :type rule_lib: RuleSetStandardLibrary
478
- :rtype: dict
479
- """
480
- if isinstance(properties, list):
481
- return [
482
- self.resolve_properties(prop, scope_vars, rule_lib)
483
- for prop in properties
484
- ]
485
- elif isinstance(properties, dict):
486
- return {
487
- key: self.resolve_properties(value, scope_vars, rule_lib)
488
- for key, value in properties.items()
489
- }
490
- elif rule_lib.is_template(properties):
491
- return rule_lib.resolve_template_string(properties, scope_vars)
492
-
493
- return properties
494
-
495
- def resolve_headers(self, scope_vars, rule_lib):
496
- """Iterate through headers attribute resolving all values.
497
-
498
- :type scope_vars: dict
499
- :type rule_lib: RuleSetStandardLibrary
500
- :rtype: dict
501
- """
502
- resolved_headers = {}
503
- headers = self.endpoint.get("headers", {})
504
-
505
- for header, values in headers.items():
506
- resolved_headers[header] = [
507
- rule_lib.resolve_value(item, scope_vars) for item in values
508
- ]
509
- return resolved_headers
510
-
511
-
512
- class ErrorRule(BaseRule):
513
- def __init__(self, error, **kwargs):
514
- super().__init__(**kwargs)
515
- self.error = error
516
-
517
- def evaluate(self, scope_vars, rule_lib):
518
- """If an error rule's conditions are met, raise an EndpointResolutionError.
519
-
520
- :type scope_vars: dict
521
- :type rule_lib: RuleSetStandardLibrary
522
- :rtype: EndpointResolutionError
523
- """
524
- if self.evaluate_conditions(scope_vars, rule_lib):
525
- error = rule_lib.resolve_value(self.error, scope_vars)
526
- raise EndpointResolutionError(msg=error)
527
- return None
528
-
529
-
530
- class TreeRule(BaseRule):
531
- """A tree rule is non-terminal meaning it will never be returned to a provider.
532
- Additionally this means it has no attributes that need to be resolved.
533
- """
534
-
535
- def __init__(self, rules, **kwargs):
536
- super().__init__(**kwargs)
537
- self.rules = [RuleCreator.create(**rule) for rule in rules]
538
-
539
- def evaluate(self, scope_vars, rule_lib):
540
- """If a tree rule's conditions are met, iterate its sub-rules
541
- and return first result found.
542
-
543
- :type scope_vars: dict
544
- :type rule_lib: RuleSetStandardLibrary
545
- :rtype: RuleSetEndpoint/EndpointResolutionError
546
- """
547
- if self.evaluate_conditions(scope_vars, rule_lib):
548
- for rule in self.rules:
549
- # don't share scope_vars between rules
550
- rule_result = rule.evaluate(scope_vars.copy(), rule_lib)
551
- if rule_result:
552
- return rule_result
553
- return None
554
-
555
-
556
- class RuleCreator:
557
-
558
- endpoint = EndpointRule
559
- error = ErrorRule
560
- tree = TreeRule
561
-
562
- @classmethod
563
- def create(cls, **kwargs):
564
- """Create a rule instance from metadata.
565
-
566
- :rtype: TreeRule/EndpointRule/ErrorRule
567
- """
568
- rule_type = kwargs.pop("type")
569
- try:
570
- rule_class = getattr(cls, rule_type)
571
- except AttributeError:
572
- raise EndpointResolutionError(
573
- msg=f"Unknown rule type: {rule_type}. A rule must "
574
- "be of type tree, endpoint or error."
575
- )
576
- else:
577
- return rule_class(**kwargs)
578
-
579
-
580
- class ParameterType(Enum):
581
- """Translation from `type` attribute to native Python type."""
582
-
583
- string = str
584
- boolean = bool
585
-
586
-
587
- class ParameterDefinition:
588
- """The spec of an individual parameter defined in a RuleSet."""
589
-
590
- def __init__(
591
- self,
592
- name,
593
- parameter_type,
594
- documentation=None,
595
- builtIn=None,
596
- default=None,
597
- required=None,
598
- deprecated=None,
599
- ):
600
- self.name = name
601
- try:
602
- self.parameter_type = getattr(
603
- ParameterType, parameter_type.lower()
604
- ).value
605
- except AttributeError:
606
- raise EndpointResolutionError(
607
- msg=f"Unknown parameter type: {parameter_type}. "
608
- "A parameter must be of type string or boolean."
609
- )
610
- self.documentation = documentation
611
- self.builtin = builtIn
612
- self.default = default
613
- self.required = required
614
- self.deprecated = deprecated
615
-
616
- def validate_input(self, value):
617
- """Perform base validation on parameter input.
618
-
619
- :type value: Any
620
- :raises: EndpointParametersError
621
- """
622
-
623
- if not isinstance(value, self.parameter_type):
624
- raise EndpointResolutionError(
625
- msg=f"Value ({self.name}) is the wrong "
626
- f"type. Must be {self.parameter_type}."
627
- )
628
- if self.deprecated is not None:
629
- depr_str = f"{self.name} has been deprecated."
630
- msg = self.deprecated.get("message")
631
- since = self.deprecated.get("since")
632
- if msg:
633
- depr_str += f"\n{msg}"
634
- if since:
635
- depr_str += f"\nDeprecated since {since}."
636
- logger.info(depr_str)
637
-
638
- return None
639
-
640
- def process_input(self, value):
641
- """Process input against spec, applying default if value is None."""
642
- if value is None:
643
- if self.default is not None:
644
- return self.default
645
- if self.required:
646
- raise EndpointResolutionError(
647
- f"Cannot find value for required parameter {self.name}"
648
- )
649
- # in all other cases, the parameter will keep the value None
650
- else:
651
- self.validate_input(value)
652
- return value
653
-
654
-
655
- class RuleSet:
656
- """Collection of rules to derive a routable service endpoint."""
657
-
658
- def __init__(
659
- self, version, parameters, rules, partitions, documentation=None
660
- ):
661
- self.version = version
662
- self.parameters = self._ingest_parameter_spec(parameters)
663
- self.rules = [RuleCreator.create(**rule) for rule in rules]
664
- self.rule_lib = RuleSetStandardLibrary(partitions)
665
- self.documentation = documentation
666
-
667
- def _ingest_parameter_spec(self, parameters):
668
- return {
669
- name: ParameterDefinition(
670
- name,
671
- spec["type"],
672
- spec.get("documentation"),
673
- spec.get("builtIn"),
674
- spec.get("default"),
675
- spec.get("required"),
676
- spec.get("deprecated"),
677
- )
678
- for name, spec in parameters.items()
679
- }
680
-
681
- def process_input_parameters(self, input_params):
682
- """Process each input parameter against its spec.
683
-
684
- :type input_params: dict
685
- """
686
- for name, spec in self.parameters.items():
687
- value = spec.process_input(input_params.get(name))
688
- if value is not None:
689
- input_params[name] = value
690
- return None
691
-
692
- def evaluate(self, input_parameters):
693
- """Evaluate input parameters against rules returning first match.
694
-
695
- :type input_parameters: dict
696
- """
697
- self.process_input_parameters(input_parameters)
698
- for rule in self.rules:
699
- evaluation = rule.evaluate(input_parameters.copy(), self.rule_lib)
700
- if evaluation is not None:
701
- return evaluation
702
- return None
703
-
704
-
705
- class EndpointProvider:
706
- """Derives endpoints from a RuleSet for given input parameters."""
707
-
708
- def __init__(self, ruleset_data, partition_data):
709
- self.ruleset = RuleSet(**ruleset_data, partitions=partition_data)
710
-
711
- @lru_cache_weakref(maxsize=CACHE_SIZE)
712
- def resolve_endpoint(self, **input_parameters):
713
- """Match input parameters to a rule.
714
-
715
- :type input_parameters: dict
716
- :rtype: RuleSetEndpoint
717
- """
718
- params_for_error = input_parameters.copy()
719
- endpoint = self.ruleset.evaluate(input_parameters)
720
- if endpoint is None:
721
- param_string = "\n".join(
722
- [f"{key}: {value}" for key, value in params_for_error.items()]
723
- )
724
- raise EndpointResolutionError(
725
- msg=f"No endpoint found for parameters:\n{param_string}"
726
- )
727
- return endpoint
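Everything in this resolver is data-driven: `EndpointProvider` takes a service's `endpoint-rule-set.json` plus the shared partitions document and matches input parameters against the rules. A hedged sketch of exercising it directly (file paths and parameter names are assumptions; botocore normally loads this data itself, and the module is documented as private):

```python
import json

from botocore.endpoint_provider import EndpointProvider

# Placeholder paths; the real files live under botocore/data/<service>/<api version>/.
with open("endpoint-rule-set.json") as f:
    ruleset_data = json.load(f)
with open("partitions.json") as f:
    partition_data = json.load(f)

provider = EndpointProvider(ruleset_data, partition_data)

# Parameter names must match the ruleset's "parameters" section;
# "Region" and "UseFIPS" are typical examples, not guaranteed for every service.
endpoint = provider.resolve_endpoint(Region="us-east-1", UseFIPS=False)
print(endpoint.url)      # resolved URL from the matching EndpointRule
print(endpoint.headers)  # any headers the matched rule attaches
```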
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py DELETED
File without changes
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/_collections.py DELETED
@@ -1,337 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- try:
4
- from collections.abc import Mapping, MutableMapping
5
- except ImportError:
6
- from collections import Mapping, MutableMapping
7
- try:
8
- from threading import RLock
9
- except ImportError: # Platform-specific: No threads available
10
-
11
- class RLock:
12
- def __enter__(self):
13
- pass
14
-
15
- def __exit__(self, exc_type, exc_value, traceback):
16
- pass
17
-
18
-
19
- from collections import OrderedDict
20
-
21
- from .exceptions import InvalidHeader
22
- from .packages import six
23
- from .packages.six import iterkeys, itervalues
24
-
25
- __all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
26
-
27
-
28
- _Null = object()
29
-
30
-
31
- class RecentlyUsedContainer(MutableMapping):
32
- """
33
- Provides a thread-safe dict-like container which maintains up to
34
- ``maxsize`` keys while throwing away the least-recently-used keys beyond
35
- ``maxsize``.
36
-
37
- :param maxsize:
38
- Maximum number of recent elements to retain.
39
-
40
- :param dispose_func:
41
- Every time an item is evicted from the container,
42
- ``dispose_func(value)`` is called.
43
- """
44
-
45
- ContainerCls = OrderedDict
46
-
47
- def __init__(self, maxsize=10, dispose_func=None):
48
- self._maxsize = maxsize
49
- self.dispose_func = dispose_func
50
-
51
- self._container = self.ContainerCls()
52
- self.lock = RLock()
53
-
54
- def __getitem__(self, key):
55
- # Re-insert the item, moving it to the end of the eviction line.
56
- with self.lock:
57
- item = self._container.pop(key)
58
- self._container[key] = item
59
- return item
60
-
61
- def __setitem__(self, key, value):
62
- evicted_value = _Null
63
- with self.lock:
64
- # Possibly evict the existing value of 'key'
65
- evicted_value = self._container.get(key, _Null)
66
- self._container[key] = value
67
-
68
- # If we didn't evict an existing value, we might have to evict the
69
- # least recently used item from the beginning of the container.
70
- if len(self._container) > self._maxsize:
71
- _key, evicted_value = self._container.popitem(last=False)
72
-
73
- if self.dispose_func and evicted_value is not _Null:
74
- self.dispose_func(evicted_value)
75
-
76
- def __delitem__(self, key):
77
- with self.lock:
78
- value = self._container.pop(key)
79
-
80
- if self.dispose_func:
81
- self.dispose_func(value)
82
-
83
- def __len__(self):
84
- with self.lock:
85
- return len(self._container)
86
-
87
- def __iter__(self):
88
- raise NotImplementedError(
89
- "Iteration over this class is unlikely to be threadsafe."
90
- )
91
-
92
- def clear(self):
93
- with self.lock:
94
- # Copy pointers to all values, then wipe the mapping
95
- values = list(itervalues(self._container))
96
- self._container.clear()
97
-
98
- if self.dispose_func:
99
- for value in values:
100
- self.dispose_func(value)
101
-
102
- def keys(self):
103
- with self.lock:
104
- return list(iterkeys(self._container))
105
-
106
-
107
- class HTTPHeaderDict(MutableMapping):
108
- """
109
- :param headers:
110
- An iterable of field-value pairs. Must not contain multiple field names
111
- when compared case-insensitively.
112
-
113
- :param kwargs:
114
- Additional field-value pairs to pass in to ``dict.update``.
115
-
116
- A ``dict`` like container for storing HTTP Headers.
117
-
118
- Field names are stored and compared case-insensitively in compliance with
119
- RFC 7230. Iteration provides the first case-sensitive key seen for each
120
- case-insensitive pair.
121
-
122
- Using ``__setitem__`` syntax overwrites fields that compare equal
123
- case-insensitively in order to maintain ``dict``'s api. For fields that
124
- compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
125
- in a loop.
126
-
127
- If multiple fields that are equal case-insensitively are passed to the
128
- constructor or ``.update``, the behavior is undefined and some will be
129
- lost.
130
-
131
- >>> headers = HTTPHeaderDict()
132
- >>> headers.add('Set-Cookie', 'foo=bar')
133
- >>> headers.add('set-cookie', 'baz=quxx')
134
- >>> headers['content-length'] = '7'
135
- >>> headers['SET-cookie']
136
- 'foo=bar, baz=quxx'
137
- >>> headers['Content-Length']
138
- '7'
139
- """
140
-
141
- def __init__(self, headers=None, **kwargs):
142
- super(HTTPHeaderDict, self).__init__()
143
- self._container = OrderedDict()
144
- if headers is not None:
145
- if isinstance(headers, HTTPHeaderDict):
146
- self._copy_from(headers)
147
- else:
148
- self.extend(headers)
149
- if kwargs:
150
- self.extend(kwargs)
151
-
152
- def __setitem__(self, key, val):
153
- self._container[key.lower()] = [key, val]
154
- return self._container[key.lower()]
155
-
156
- def __getitem__(self, key):
157
- val = self._container[key.lower()]
158
- return ", ".join(val[1:])
159
-
160
- def __delitem__(self, key):
161
- del self._container[key.lower()]
162
-
163
- def __contains__(self, key):
164
- return key.lower() in self._container
165
-
166
- def __eq__(self, other):
167
- if not isinstance(other, Mapping) and not hasattr(other, "keys"):
168
- return False
169
- if not isinstance(other, type(self)):
170
- other = type(self)(other)
171
- return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
172
- (k.lower(), v) for k, v in other.itermerged()
173
- )
174
-
175
- def __ne__(self, other):
176
- return not self.__eq__(other)
177
-
178
- if six.PY2: # Python 2
179
- iterkeys = MutableMapping.iterkeys
180
- itervalues = MutableMapping.itervalues
181
-
182
- __marker = object()
183
-
184
- def __len__(self):
185
- return len(self._container)
186
-
187
- def __iter__(self):
188
- # Only provide the originally cased names
189
- for vals in self._container.values():
190
- yield vals[0]
191
-
192
- def pop(self, key, default=__marker):
193
- """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
194
- If key is not found, d is returned if given, otherwise KeyError is raised.
195
- """
196
- # Using the MutableMapping function directly fails due to the private marker.
197
- # Using ordinary dict.pop would expose the internal structures.
198
- # So let's reinvent the wheel.
199
- try:
200
- value = self[key]
201
- except KeyError:
202
- if default is self.__marker:
203
- raise
204
- return default
205
- else:
206
- del self[key]
207
- return value
208
-
209
- def discard(self, key):
210
- try:
211
- del self[key]
212
- except KeyError:
213
- pass
214
-
215
- def add(self, key, val):
216
- """Adds a (name, value) pair, doesn't overwrite the value if it already
217
- exists.
218
-
219
- >>> headers = HTTPHeaderDict(foo='bar')
220
- >>> headers.add('Foo', 'baz')
221
- >>> headers['foo']
222
- 'bar, baz'
223
- """
224
- key_lower = key.lower()
225
- new_vals = [key, val]
226
- # Keep the common case aka no item present as fast as possible
227
- vals = self._container.setdefault(key_lower, new_vals)
228
- if new_vals is not vals:
229
- vals.append(val)
230
-
231
- def extend(self, *args, **kwargs):
232
- """Generic import function for any type of header-like object.
233
- Adapted version of MutableMapping.update in order to insert items
234
- with self.add instead of self.__setitem__
235
- """
236
- if len(args) > 1:
237
- raise TypeError(
238
- "extend() takes at most 1 positional "
239
- "argument ({0} given)".format(len(args))
240
- )
241
- other = args[0] if len(args) >= 1 else ()
242
-
243
- if isinstance(other, HTTPHeaderDict):
244
- for key, val in other.iteritems():
245
- self.add(key, val)
246
- elif isinstance(other, Mapping):
247
- for key in other:
248
- self.add(key, other[key])
249
- elif hasattr(other, "keys"):
250
- for key in other.keys():
251
- self.add(key, other[key])
252
- else:
253
- for key, value in other:
254
- self.add(key, value)
255
-
256
- for key, value in kwargs.items():
257
- self.add(key, value)
258
-
259
- def getlist(self, key, default=__marker):
260
- """Returns a list of all the values for the named field. Returns an
261
- empty list if the key doesn't exist."""
262
- try:
263
- vals = self._container[key.lower()]
264
- except KeyError:
265
- if default is self.__marker:
266
- return []
267
- return default
268
- else:
269
- return vals[1:]
270
-
271
- # Backwards compatibility for httplib
272
- getheaders = getlist
273
- getallmatchingheaders = getlist
274
- iget = getlist
275
-
276
- # Backwards compatibility for http.cookiejar
277
- get_all = getlist
278
-
279
- def __repr__(self):
280
- return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
281
-
282
- def _copy_from(self, other):
283
- for key in other:
284
- val = other.getlist(key)
285
- if isinstance(val, list):
286
- # Don't need to convert tuples
287
- val = list(val)
288
- self._container[key.lower()] = [key] + val
289
-
290
- def copy(self):
291
- clone = type(self)()
292
- clone._copy_from(self)
293
- return clone
294
-
295
- def iteritems(self):
296
- """Iterate over all header lines, including duplicate ones."""
297
- for key in self:
298
- vals = self._container[key.lower()]
299
- for val in vals[1:]:
300
- yield vals[0], val
301
-
302
- def itermerged(self):
303
- """Iterate over all headers, merging duplicate ones together."""
304
- for key in self:
305
- val = self._container[key.lower()]
306
- yield val[0], ", ".join(val[1:])
307
-
308
- def items(self):
309
- return list(self.iteritems())
310
-
311
- @classmethod
312
- def from_httplib(cls, message): # Python 2
313
- """Read headers from a Python 2 httplib message object."""
314
- # python2.7 does not expose a proper API for exporting multiheaders
315
- # efficiently. This function re-reads raw lines from the message
316
- # object and extracts the multiheaders properly.
317
- obs_fold_continued_leaders = (" ", "\t")
318
- headers = []
319
-
320
- for line in message.headers:
321
- if line.startswith(obs_fold_continued_leaders):
322
- if not headers:
323
- # We received a header line that starts with OWS as described
324
- # in RFC-7230 S3.2.4. This indicates a multiline header, but
325
- # there exists no previous header to which we can attach it.
326
- raise InvalidHeader(
327
- "Header continuation with no previous header: %s" % line
328
- )
329
- else:
330
- key, value = headers[-1]
331
- headers[-1] = (key, value + " " + line.strip())
332
- continue
333
-
334
- key, value = line.split(":", 1)
335
- headers.append((key, value.strip()))
336
-
337
- return cls(headers)
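The docstrings above already describe the behaviour; a short, self-contained sketch that exercises both containers (values and `maxsize` chosen purely for illustration):

```python
from urllib3._collections import HTTPHeaderDict, RecentlyUsedContainer

# Eviction: the least-recently-used key is dropped and dispose_func receives its value.
evicted = []
cache = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3          # exceeds maxsize, so "a" is evicted
assert evicted == [1]

# Case-insensitive, multi-valued headers merged with ", " on lookup.
headers = HTTPHeaderDict()
headers.add("Set-Cookie", "foo=bar")
headers.add("set-cookie", "baz=quxx")
assert headers["SET-COOKIE"] == "foo=bar, baz=quxx"
```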
 
spaces/Boynn/AI/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: AI
- emoji: 🏆
- colorFrom: purple
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.34.0
- app_file: app.py
- pinned: false
- license: other
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/BridgeTower/bridgetower-video-search/bridgetower_custom.py DELETED
@@ -1,183 +0,0 @@
1
- from collections import OrderedDict
2
- from typing import List, Optional, Tuple, Union
3
-
4
- import torch
5
- from torch import nn
6
- import torch.nn.functional as F
7
-
8
- from torchvision import transforms
9
- from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
10
-
11
- from transformers.modeling_outputs import SequenceClassifierOutput
12
-
13
- from transformers import BridgeTowerPreTrainedModel, BridgeTowerModel
14
- from transformers.models.bridgetower.modeling_bridgetower import BridgeTowerTextModel
15
-
16
- class LayerNorm(nn.LayerNorm):
17
- """Subclass torch's LayerNorm to handle fp16."""
18
-
19
- def forward(self, x: torch.Tensor):
20
- orig_type = x.dtype
21
- ret = super().forward(x.type(torch.float32))
22
- return ret.type(orig_type)
23
-
24
- class BridgeTowerImageFeatureExtractor(nn.Module):
25
- def __init__(
26
- self,
27
- patch_size=14,
28
- width=1024,
29
- resolution_after=294,
30
- ckpt_path=None,
31
- ):
32
- super().__init__()
33
-
34
- self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
35
-
36
- scale = width ** -0.5
37
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
38
- self.positional_embedding = nn.Parameter(scale * torch.randn((resolution_after // patch_size) ** 2 + 1, width))
39
- self.ln_pre = LayerNorm(width)
40
-
41
- if ckpt_path is not None:
42
- sd = torch.load(ckpt_path)
43
- if 'state_dict' in sd:
44
- sd = sd["state_dict"]
45
- print(f'Loading feature extractor checkpoint from {ckpt_path}')
46
- self.load_state_dict(sd)
47
-
48
- def forward(self, x: torch.Tensor):
49
- x = self.conv1(x) # shape = [*, width, grid, grid]
50
- x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
51
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
52
- t=self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
53
- x = torch.cat([t, x], dim=1) # shape = [*, grid ** 2 + 1, width]
54
- x = x + self.positional_embedding.to(x.dtype)
55
- x = self.ln_pre(x)
56
- x = x.permute(1, 0, 2) # NLD -> LND
57
- return x
58
-
59
-
60
- class BridgeTowerITCHead(nn.Module):
61
- def __init__(self, hidden_size, embed_size):
62
- super().__init__()
63
- self.fc = nn.Linear(hidden_size, embed_size)
64
-
65
- def forward(self, x):
66
- x = self.fc(x)
67
- return x
68
-
69
-
70
- class _BridgeTowerTextModelWrapper(nn.Module):
71
- def __init__(self, config):
72
- super().__init__()
73
- self.text_model = BridgeTowerTextModel(config)
74
-
75
- def forward(self, **kwargs):
76
- return self.text_model(**kwargs)
77
-
78
-
79
- class BridgeTowerTextFeatureExtractor(BridgeTowerPreTrainedModel):
80
- def __init__(self, config):
81
- super().__init__(config)
82
-
83
- self.bridgetower = _BridgeTowerTextModelWrapper(config.text_config)
84
- self.itc_text_head = BridgeTowerITCHead(config.hidden_size, config.contrastive_hidden_size)
85
-
86
- def forward(
87
- self,
88
- input_ids: Optional[torch.LongTensor] = None,
89
- attention_mask: Optional[torch.FloatTensor] = None,
90
- token_type_ids: Optional[torch.LongTensor] = None,
91
- head_mask: Optional[torch.FloatTensor] = None,
92
- inputs_embeds: Optional[torch.FloatTensor] = None,
93
- output_attentions: Optional[bool] = None,
94
- output_hidden_states: Optional[bool] = None,
95
- return_dict: Optional[bool] = None,
96
- labels: Optional[torch.LongTensor] = None,
97
- ):
98
-
99
- outputs = self.bridgetower(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)
100
- final_hidden_cls = outputs.hidden_states[-1][:,0,:]
101
- final_hidden_cls = F.normalize(self.itc_text_head(final_hidden_cls), dim=-1, p=2)
102
-
103
- return final_hidden_cls
104
-
105
-
106
- class BridgeTowerForITC(BridgeTowerPreTrainedModel):
107
- def __init__(self, config):
108
- super().__init__(config)
109
-
110
- self.bridgetower = BridgeTowerModel(config)
111
-
112
- self.itc_text_head = BridgeTowerITCHead(config.hidden_size, config.contrastive_hidden_size)
113
- self.itc_image_head = BridgeTowerITCHead(config.hidden_size, config.contrastive_hidden_size)
114
- self.itc_cross_modal_head = BridgeTowerITCHead(config.hidden_size * 2, config.contrastive_hidden_size)
115
-
116
- # Initialize weights and apply final processing
117
- self.post_init()
118
-
119
- def forward(
120
- self,
121
- input_ids: Optional[torch.LongTensor] = None,
122
- attention_mask: Optional[torch.FloatTensor] = None,
123
- token_type_ids: Optional[torch.LongTensor] = None,
124
- pixel_values: Optional[torch.FloatTensor] = None,
125
- pixel_mask: Optional[torch.LongTensor] = None,
126
- head_mask: Optional[torch.FloatTensor] = None,
127
- inputs_embeds: Optional[torch.FloatTensor] = None,
128
- image_embeds: Optional[torch.FloatTensor] = None,
129
- output_attentions: Optional[bool] = None,
130
- output_hidden_states: Optional[bool] = None,
131
- return_dict: Optional[bool] = None,
132
- labels: Optional[torch.LongTensor] = None,
133
- ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
134
-
135
- assert output_hidden_states, 'output_hidden_states should be set to True for BridgeTowerForITC'
136
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
137
-
138
- outputs = self.bridgetower(
139
- input_ids,
140
- attention_mask=attention_mask,
141
- token_type_ids=token_type_ids,
142
- pixel_values=pixel_values,
143
- pixel_mask=pixel_mask,
144
- head_mask=head_mask,
145
- inputs_embeds=inputs_embeds,
146
- image_embeds=image_embeds,
147
- output_attentions=output_attentions,
148
- output_hidden_states=output_hidden_states,
149
- return_dict=return_dict,
150
- )
151
-
152
- pooler_output = outputs.pooler_output if return_dict else outputs[2]
153
-
154
- hidden_states_txt, hidden_states_img, hidden_states_cross_modal = outputs.hidden_states
155
-
156
- final_hidden_txt = hidden_states_txt[-1]
157
- final_hidden_img = hidden_states_img[-1]
158
-
159
- image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(final_hidden_img)
160
- image_token_type_embeddings = self.bridgetower.token_type_embeddings(
161
- torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)
162
- ).expand_as(image_embeds_with_ln)
163
-
164
- final_hidden_img = (
165
- self.bridgetower.cross_modal_image_transform(image_embeds_with_ln)
166
- + image_token_type_embeddings
167
- )
168
-
169
- final_hidden_txt = F.normalize(self.itc_text_head(final_hidden_txt[:,0,:]), dim=-1, p=2)
170
- final_hidden_img = F.normalize(self.itc_image_head(final_hidden_img[:,0,:]), dim=-1, p=2)
171
- final_hidden_cross = F.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2)
172
-
173
- logits = torch.stack([final_hidden_txt, final_hidden_img, final_hidden_cross], dim=-2)
174
-
175
- if not return_dict:
176
- return tuple(logits)
177
-
178
- return SequenceClassifierOutput(
179
- loss=None,
180
- logits=logits,
181
- hidden_states=outputs.hidden_states,
182
- attentions=outputs.attentions,
183
- )
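`BridgeTowerForITC` stacks the text, image, and cross-modal embeddings into one `logits` tensor of shape `(batch, 3, contrastive_hidden_size)`. A rough sketch of scoring a caption against a frame with it; the checkpoint name, processor usage, and file path are assumptions based on the public BridgeTower release, not taken from this Space:

```python
# Sketch under assumptions: requires a transformers version with BridgeTower support.
import torch
from PIL import Image
from transformers import BridgeTowerProcessor

checkpoint = "BridgeTower/bridgetower-large-itm-mtl-itc"  # assumed checkpoint name
processor = BridgeTowerProcessor.from_pretrained(checkpoint)
model = BridgeTowerForITC.from_pretrained(checkpoint)  # class defined above

image = Image.open("frame.jpg")  # placeholder video frame
inputs = processor(image, "a person riding a bike", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

text_emb, image_emb, cross_emb = outputs.logits[0]  # the three stacked embeddings
similarity = float(text_emb @ image_emb)  # embeddings are L2-normalized, so this is a cosine score
```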
 
spaces/CALM/Dashboard/streamlit_observable/frontend/build/service-worker.js DELETED
@@ -1,39 +0,0 @@
- /**
-  * Welcome to your Workbox-powered service worker!
-  *
-  * You'll need to register this file in your web app and you should
-  * disable HTTP caching for this file too.
-  * See https://goo.gl/nhQhGp
-  *
-  * The rest of the code is auto-generated. Please don't update this file
-  * directly; instead, make changes to your Workbox build configuration
-  * and re-run your build process.
-  * See https://goo.gl/2aRDsh
-  */
-
- importScripts("https://storage.googleapis.com/workbox-cdn/releases/4.3.1/workbox-sw.js");
-
- importScripts(
-   "./precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js"
- );
-
- self.addEventListener('message', (event) => {
-   if (event.data && event.data.type === 'SKIP_WAITING') {
-     self.skipWaiting();
-   }
- });
-
- workbox.core.clientsClaim();
-
- /**
-  * The workboxSW.precacheAndRoute() method efficiently caches and responds to
-  * requests for URLs in the manifest.
-  * See https://goo.gl/S9QRab
-  */
- self.__precacheManifest = [].concat(self.__precacheManifest || []);
- workbox.precaching.precacheAndRoute(self.__precacheManifest, {});
-
- workbox.routing.registerNavigationRoute(workbox.precaching.getCacheKeyForURL("./index.html"), {
-
-   blacklist: [/^\/_/,/\/[^/?]+\.[^/]+$/],
- });
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/evaluation.md DELETED
@@ -1,43 +0,0 @@
-
- # Evaluation
-
- Evaluation is a process that takes a number of inputs/outputs pairs and aggregates them.
- You can always [use the model](models.html) directly and just parse its inputs/outputs manually to perform
- evaluation.
- Alternatively, evaluation is implemented in detectron2 using the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator)
- interface.
-
- Detectron2 includes a few `DatasetEvaluator` implementations that compute metrics using standard dataset-specific
- APIs (e.g., COCO, LVIS).
- You can also implement your own `DatasetEvaluator` that performs some other jobs
- using the inputs/outputs pairs.
- For example, to count how many instances are detected on the validation set:
-
- ```
- class Counter(DatasetEvaluator):
-   def reset(self):
-     self.count = 0
-   def process(self, inputs, outputs):
-     for output in outputs:
-       self.count += len(output["instances"])
-   def evaluate(self):
-     # save self.count somewhere, or print it, or return it.
-     return {"count": self.count}
- ```
-
- Once you have some `DatasetEvaluator`, you can run it with
- [inference_on_dataset](../modules/evaluation.html#detectron2.evaluation.inference_on_dataset).
- For example,
-
- ```python
- val_results = inference_on_dataset(
-     model,
-     val_data_loader,
-     DatasetEvaluators([COCOEvaluator(...), Counter()]))
- ```
- Compared to running the evaluation manually using the model, the benefit of this function is that
- you can merge evaluators together using [DatasetEvaluators](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluators).
- In this way you can run all evaluations without having to go through the dataset multiple times.
-
- The `inference_on_dataset` function also provides accurate speed benchmarks for the
- given model and dataset.
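For contrast with `inference_on_dataset`, here is a minimal sketch of driving the same evaluator protocol by hand; the `model` and `data_loader` objects are assumed to exist and follow detectron2's conventions, and the sketch omits the distributed gathering and speed benchmarking that the helper provides:

```python
import torch

def run_evaluator(model, data_loader, evaluator):
    # Manual equivalent of inference_on_dataset (minus timing and distributed sync).
    evaluator.reset()
    model.eval()
    with torch.no_grad():
        for inputs in data_loader:
            outputs = model(inputs)
            evaluator.process(inputs, outputs)
    return evaluator.evaluate()
```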
 
spaces/CVPR/LIVE/thrust/thrust/sequence.h DELETED
@@ -1,296 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file sequence.h
19
- * \brief Fills a range with a sequence of numbers
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/execution_policy.h>
26
-
27
- namespace thrust
28
- {
29
-
30
-
31
- /*! \addtogroup transformations
32
- * \{
33
- */
34
-
35
-
36
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
37
- *
38
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
39
- * \p sequence performs the assignment <tt>*i = (i - first)</tt>.
40
- *
41
- * The algorithm's execution is parallelized as determined by \p exec.
42
- *
43
- * \param exec The execution policy to use for parallelization.
44
- * \param first The beginning of the sequence.
45
- * \param last The end of the sequence.
46
- *
47
- * \tparam DerivedPolicy The name of the derived execution policy.
48
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
49
- * and \p ForwardIterator is mutable,
50
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
51
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
52
- *
53
- * The following code snippet demonstrates how to use \p sequence to fill a range
54
- * with a sequence of numbers using the \p thrust::host execution policy for parallelization:
55
- *
56
- * \code
57
- * #include <thrust/sequence.h>
58
- * #include <thrust/execution_policy.h>
59
- * ...
60
- * const int N = 10;
61
- * int A[N];
62
- * thrust::sequence(thrust::host, A, A + 10);
63
- * // A is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
64
- * \endcode
65
- *
66
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
67
- * guarantee on order of execution.
68
- *
69
- * \see http://www.sgi.com/tech/stl/iota.html
70
- */
71
- template<typename DerivedPolicy, typename ForwardIterator>
72
- __host__ __device__
73
- void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
74
- ForwardIterator first,
75
- ForwardIterator last);
76
-
77
-
78
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
79
- *
80
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
81
- * \p sequence performs the assignment <tt>*i = (i - first)</tt>.
82
- *
83
- * \param first The beginning of the sequence.
84
- * \param last The end of the sequence.
85
- *
86
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
87
- * and \p ForwardIterator is mutable,
88
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
89
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
90
- *
91
- * The following code snippet demonstrates how to use \p sequence to fill a range
92
- * with a sequence of numbers.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10);
- * // A is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
- template<typename ForwardIterator>
- void sequence(ForwardIterator first,
- ForwardIterator last);
-
-
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
- *
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
- * \p sequence performs the assignment <tt>*i = init + (i - first)</tt>.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 using the \p thrust::host execution
- * policy for parallelization:
- *
- * \code
- * #include <thrust/sequence.h>
- * #include <thrust/execution_policy.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(thrust::host, A, A + 10, 1);
- * // A is now {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
- template<typename DerivedPolicy, typename ForwardIterator, typename T>
- __host__ __device__
- void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- T init);
-
-
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
- *
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
- * \p sequence performs the assignment <tt>*i = init + (i - first)</tt>.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10, 1);
- * // A is now {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
- template<typename ForwardIterator, typename T>
- void sequence(ForwardIterator first,
- ForwardIterator last,
- T init);
-
-
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
- *
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
- * \p sequence performs the assignment <tt>*i = init + step * (i - first)</tt>.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- * \param step The difference between consecutive elements.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 with a step size of 3 using the \p thrust::host
- * execution policy for parallelization:
- *
- * \code
- * #include <thrust/sequence.h>
- * #include <thrust/execution_policy.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(thrust::host, A, A + 10, 1, 3);
- * // A is now {1, 4, 7, 10, 13, 16, 19, 22, 25, 28}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
- template<typename DerivedPolicy, typename ForwardIterator, typename T>
- __host__ __device__
- void sequence(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- T init,
- T step);
-
-
- /*! \p sequence fills the range <tt>[first, last)</tt> with a sequence of numbers.
- *
- * For each iterator \c i in the range <tt>[first, last)</tt>, this version of
- * \p sequence performs the assignment <tt>*i = init + step * (i - first)</tt>.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param init The first value of the sequence of numbers.
- * \param step The difference between consecutive elements.
- *
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
- * and \p ForwardIterator is mutable,
- * and if \c x and \c y are objects of \c ForwardIterator's \c value_type, then <tt>x + y</tt> is defined,
- * and if \c T is \p ForwardIterator's \c value_type, then <tt>T(0)</tt> is defined.
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
- * and \p T is convertible to \p ForwardIterator's \c value_type.
- *
- * The following code snippet demonstrates how to use \p sequence to fill a range
- * with a sequence of numbers starting from the value 1 with a step size of 3.
- *
- * \code
- * #include <thrust/sequence.h>
- * ...
- * const int N = 10;
- * int A[N];
- * thrust::sequence(A, A + 10, 1, 3);
- * // A is now {1, 4, 7, 10, 13, 16, 19, 22, 25, 28}
- * \endcode
- *
- * \note Unlike the similar C++ STL function \c std::iota, \p sequence offers no
- * guarantee on order of execution.
- *
- * \see http://www.sgi.com/tech/stl/iota.html
- */
- template<typename ForwardIterator, typename T>
- void sequence(ForwardIterator first,
- ForwardIterator last,
- T init,
- T step);
-
-
- /*! \} // end transformations
- */
-
-
- } // end namespace thrust
-
- #include <thrust/detail/sequence.inl>
-
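For context, a minimal sketch (not part of the deleted header above) of the init/step overload applied to a thrust::device_vector, assuming a CUDA-enabled Thrust toolchain; element i receives init + step * i, as the documentation states:

#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <iostream>

int main()
{
  thrust::device_vector<int> v(10);
  // v[i] = 1 + 3 * i; iterators from a device_vector dispatch to the device system
  thrust::sequence(v.begin(), v.end(), 1, 3);
  for (int i = 0; i < 10; ++i)
    std::cout << v[i] << " ";   // prints: 1 4 7 10 13 16 19 22 25 28
  std::cout << std::endl;
  return 0;
}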
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/transform_reduce.h DELETED
@@ -1,44 +0,0 @@
- /*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // the purpose of this header is to #include the transform_reduce.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch transform_reduce
-
- #include <thrust/system/detail/sequential/transform_reduce.h>
-
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/transform_reduce.h>
- #include <thrust/system/cuda/detail/transform_reduce.h>
- #include <thrust/system/omp/detail/transform_reduce.h>
- #include <thrust/system/tbb/detail/transform_reduce.h>
- #endif
-
- #define __THRUST_HOST_SYSTEM_TRANSFORM_REDUCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/transform_reduce.h>
- #include __THRUST_HOST_SYSTEM_TRANSFORM_REDUCE_HEADER
- #undef __THRUST_HOST_SYSTEM_TRANSFORM_REDUCE_HEADER
-
- #define __THRUST_DEVICE_SYSTEM_TRANSFORM_REDUCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/transform_reduce.h>
- #include __THRUST_DEVICE_SYSTEM_TRANSFORM_REDUCE_HEADER
- #undef __THRUST_DEVICE_SYSTEM_TRANSFORM_REDUCE_HEADER
-
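For context, a minimal sketch (not part of the deleted header above) of the kind of call this ADL dispatch machinery ultimately serves, assuming a CUDA-enabled Thrust toolchain; thrust::transform_reduce applies a unary functor to each element and folds the results with a binary functor:

#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <iostream>

struct square
{
  __host__ __device__ float operator()(float x) const { return x * x; }
};

int main()
{
  thrust::device_vector<float> v(4, 2.0f);
  // sum of squares: unary op first, then the initial value and the reduction op
  float result = thrust::transform_reduce(v.begin(), v.end(), square(), 0.0f, thrust::plus<float>());
  std::cout << result << std::endl;   // 4 * (2.0f * 2.0f) = 16
  return 0;
}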
 
spaces/CVPR/WALT/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: WALT DEMO
- emoji: ⚡
- colorFrom: indigo
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.0.20
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/WALT/mmdet/models/backbones/__init__.py DELETED
@@ -1,3 +0,0 @@
- from .swin_transformer import SwinTransformer
- from .resnet import ResNet, ResNetV1d
- __all__ = ['SwinTransformer', 'ResNet', 'ResNetV1d']
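For reference, a hypothetical mmdet-style config fragment (not taken from this repository) showing how a backbone re-exported above is typically selected by class name through the BACKBONES registry; the argument values simply mirror the SwinTransformer defaults defined in swin_transformer.py below:

# hypothetical config fragment: the string 'SwinTransformer' is resolved by the registry
backbone = dict(
    type='SwinTransformer',
    embed_dim=96,
    depths=[2, 2, 6, 2],
    num_heads=[3, 6, 12, 24],
    window_size=7,
    out_indices=(0, 1, 2, 3),
)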
 
spaces/CVPR/WALT/mmdet/models/backbones/swin_transformer.py DELETED
@@ -1,630 +0,0 @@
- # --------------------------------------------------------
- # Swin Transformer
- # Copyright (c) 2021 Microsoft
- # Licensed under The MIT License [see LICENSE for details]
- # Written by Ze Liu, Yutong Lin, Yixuan Wei
- # --------------------------------------------------------
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.utils.checkpoint as checkpoint
- import numpy as np
- from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
- from mmcv_custom import load_checkpoint
- from mmdet.utils import get_root_logger
- from ..builder import BACKBONES
-
-
- class Mlp(nn.Module):
-     """ Multilayer perceptron."""
-
-     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
-         super().__init__()
-         out_features = out_features or in_features
-         hidden_features = hidden_features or in_features
-         self.fc1 = nn.Linear(in_features, hidden_features)
-         self.act = act_layer()
-         self.fc2 = nn.Linear(hidden_features, out_features)
-         self.drop = nn.Dropout(drop)
-
-     def forward(self, x):
-         x = self.fc1(x)
-         x = self.act(x)
-         x = self.drop(x)
-         x = self.fc2(x)
-         x = self.drop(x)
-         return x
-
-
- def window_partition(x, window_size):
-     """
-     Args:
-         x: (B, H, W, C)
-         window_size (int): window size
-
-     Returns:
-         windows: (num_windows*B, window_size, window_size, C)
-     """
-     B, H, W, C = x.shape
-     x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
-     windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
-     return windows
-
-
- def window_reverse(windows, window_size, H, W):
-     """
-     Args:
-         windows: (num_windows*B, window_size, window_size, C)
-         window_size (int): Window size
-         H (int): Height of image
-         W (int): Width of image
-
-     Returns:
-         x: (B, H, W, C)
-     """
-     B = int(windows.shape[0] / (H * W / window_size / window_size))
-     x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
-     x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
-     return x
-
-
- class WindowAttention(nn.Module):
-     """ Window based multi-head self attention (W-MSA) module with relative position bias.
-     It supports both shifted and non-shifted windows.
-
-     Args:
-         dim (int): Number of input channels.
-         window_size (tuple[int]): The height and width of the window.
-         num_heads (int): Number of attention heads.
-         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
-         attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
-         proj_drop (float, optional): Dropout ratio of output. Default: 0.0
-     """
-
-     def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
-         super().__init__()
-         self.dim = dim
-         self.window_size = window_size  # Wh, Ww
-         self.num_heads = num_heads
-         head_dim = dim // num_heads
-         self.scale = qk_scale or head_dim ** -0.5
-
-         # define a parameter table of relative position bias
-         self.relative_position_bias_table = nn.Parameter(
-             torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
-
-         # get pair-wise relative position index for each token inside the window
-         coords_h = torch.arange(self.window_size[0])
-         coords_w = torch.arange(self.window_size[1])
-         coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
-         coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
-         relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
-         relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
-         relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
-         relative_coords[:, :, 1] += self.window_size[1] - 1
-         relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
-         relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
-         self.register_buffer("relative_position_index", relative_position_index)
-
-         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-         self.attn_drop = nn.Dropout(attn_drop)
-         self.proj = nn.Linear(dim, dim)
-         self.proj_drop = nn.Dropout(proj_drop)
-
-         trunc_normal_(self.relative_position_bias_table, std=.02)
-         self.softmax = nn.Softmax(dim=-1)
-
-     def forward(self, x, mask=None):
-         """ Forward function.
-
-         Args:
-             x: input features with shape of (num_windows*B, N, C)
-             mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
-         """
-         B_, N, C = x.shape
-         qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-         q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
-
-         q = q * self.scale
-         attn = (q @ k.transpose(-2, -1))
-
-         relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
-             self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
-         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
-         attn = attn + relative_position_bias.unsqueeze(0)
-
-         if mask is not None:
-             nW = mask.shape[0]
-             attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
-             attn = attn.view(-1, self.num_heads, N, N)
-             attn = self.softmax(attn)
-         else:
-             attn = self.softmax(attn)
-
-         attn = self.attn_drop(attn)
-
-         x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
-
-
- class SwinTransformerBlock(nn.Module):
-     """ Swin Transformer Block.
-
-     Args:
-         dim (int): Number of input channels.
-         num_heads (int): Number of attention heads.
-         window_size (int): Window size.
-         shift_size (int): Shift size for SW-MSA.
-         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
-         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
-         drop (float, optional): Dropout rate. Default: 0.0
-         attn_drop (float, optional): Attention dropout rate. Default: 0.0
-         drop_path (float, optional): Stochastic depth rate. Default: 0.0
-         act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
-         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-     """
-
-     def __init__(self, dim, num_heads, window_size=7, shift_size=0,
-                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
-                  act_layer=nn.GELU, norm_layer=nn.LayerNorm):
-         super().__init__()
-         self.dim = dim
-         self.num_heads = num_heads
-         self.window_size = window_size
-         self.shift_size = shift_size
-         self.mlp_ratio = mlp_ratio
-         assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size"
-
-         self.norm1 = norm_layer(dim)
-         self.attn = WindowAttention(
-             dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
-             qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
-
-         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-         self.norm2 = norm_layer(dim)
-         mlp_hidden_dim = int(dim * mlp_ratio)
-         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
-         self.H = None
-         self.W = None
-
-     def forward(self, x, mask_matrix):
-         """ Forward function.
-
-         Args:
-             x: Input feature, tensor size (B, H*W, C).
-             H, W: Spatial resolution of the input feature.
-             mask_matrix: Attention mask for cyclic shift.
-         """
-         B, L, C = x.shape
-         H, W = self.H, self.W
-         assert L == H * W, "input feature has wrong size"
-
-         shortcut = x
-         x = self.norm1(x)
-         x = x.view(B, H, W, C)
-
-         # pad feature maps to multiples of window size
-         pad_l = pad_t = 0
-         pad_r = (self.window_size - W % self.window_size) % self.window_size
-         pad_b = (self.window_size - H % self.window_size) % self.window_size
-         x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
-         _, Hp, Wp, _ = x.shape
-
-         # cyclic shift
-         if self.shift_size > 0:
-             shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
-             attn_mask = mask_matrix
-         else:
-             shifted_x = x
-             attn_mask = None
-
-         # partition windows
-         x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
-         x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
-
-         # W-MSA/SW-MSA
-         attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C
-
-         # merge windows
-         attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
-         shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C
-
-         # reverse cyclic shift
-         if self.shift_size > 0:
-             x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
-         else:
-             x = shifted_x
-
-         if pad_r > 0 or pad_b > 0:
-             x = x[:, :H, :W, :].contiguous()
-
-         x = x.view(B, H * W, C)
-
-         # FFN
-         x = shortcut + self.drop_path(x)
-         x = x + self.drop_path(self.mlp(self.norm2(x)))
-
-         return x
-
-
- class PatchMerging(nn.Module):
-     """ Patch Merging Layer
-
-     Args:
-         dim (int): Number of input channels.
-         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-     """
-     def __init__(self, dim, norm_layer=nn.LayerNorm):
-         super().__init__()
-         self.dim = dim
-         self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
-         self.norm = norm_layer(4 * dim)
-
-     def forward(self, x, H, W):
-         """ Forward function.
-
-         Args:
-             x: Input feature, tensor size (B, H*W, C).
-             H, W: Spatial resolution of the input feature.
-         """
-         B, L, C = x.shape
-         assert L == H * W, "input feature has wrong size"
-
-         x = x.view(B, H, W, C)
-
-         # padding
-         pad_input = (H % 2 == 1) or (W % 2 == 1)
-         if pad_input:
-             x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
-
-         x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
-         x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
-         x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
-         x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
-         x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
-         x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
-
-         x = self.norm(x)
-         x = self.reduction(x)
-
-         return x
-
-
- class BasicLayer(nn.Module):
-     """ A basic Swin Transformer layer for one stage.
-
-     Args:
-         dim (int): Number of feature channels.
-         depth (int): Depth of this stage.
-         num_heads (int): Number of attention heads.
-         window_size (int): Local window size. Default: 7.
-         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
-         drop (float, optional): Dropout rate. Default: 0.0
-         attn_drop (float, optional): Attention dropout rate. Default: 0.0
-         drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
-         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-         downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
-         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-     """
-
-     def __init__(self,
-                  dim,
-                  depth,
-                  num_heads,
-                  window_size=7,
-                  mlp_ratio=4.,
-                  qkv_bias=True,
-                  qk_scale=None,
-                  drop=0.,
-                  attn_drop=0.,
-                  drop_path=0.,
-                  norm_layer=nn.LayerNorm,
-                  downsample=None,
-                  use_checkpoint=False):
-         super().__init__()
-         self.window_size = window_size
-         self.shift_size = window_size // 2
-         self.depth = depth
-         self.use_checkpoint = use_checkpoint
-
-         # build blocks
-         self.blocks = nn.ModuleList([
-             SwinTransformerBlock(
-                 dim=dim,
-                 num_heads=num_heads,
-                 window_size=window_size,
-                 shift_size=0 if (i % 2 == 0) else window_size // 2,
-                 mlp_ratio=mlp_ratio,
-                 qkv_bias=qkv_bias,
-                 qk_scale=qk_scale,
-                 drop=drop,
-                 attn_drop=attn_drop,
-                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
-                 norm_layer=norm_layer)
-             for i in range(depth)])
-
-         # patch merging layer
-         if downsample is not None:
-             self.downsample = downsample(dim=dim, norm_layer=norm_layer)
-         else:
-             self.downsample = None
-
-     def forward(self, x, H, W):
-         """ Forward function.
-
-         Args:
-             x: Input feature, tensor size (B, H*W, C).
-             H, W: Spatial resolution of the input feature.
-         """
-
-         # calculate attention mask for SW-MSA
-         Hp = int(np.ceil(H / self.window_size)) * self.window_size
-         Wp = int(np.ceil(W / self.window_size)) * self.window_size
-         img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
-         h_slices = (slice(0, -self.window_size),
-                     slice(-self.window_size, -self.shift_size),
-                     slice(-self.shift_size, None))
-         w_slices = (slice(0, -self.window_size),
-                     slice(-self.window_size, -self.shift_size),
-                     slice(-self.shift_size, None))
-         cnt = 0
-         for h in h_slices:
-             for w in w_slices:
-                 img_mask[:, h, w, :] = cnt
-                 cnt += 1
-
-         mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
-         mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
-         attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
-         attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
-         for blk in self.blocks:
-             blk.H, blk.W = H, W
-             if self.use_checkpoint:
-                 x = checkpoint.checkpoint(blk, x, attn_mask)
-             else:
-                 x = blk(x, attn_mask)
-         if self.downsample is not None:
-             x_down = self.downsample(x, H, W)
-             Wh, Ww = (H + 1) // 2, (W + 1) // 2
-             return x, H, W, x_down, Wh, Ww
-         else:
-             return x, H, W, x, H, W
-
-
- class PatchEmbed(nn.Module):
-     """ Image to Patch Embedding
-
-     Args:
-         patch_size (int): Patch token size. Default: 4.
-         in_chans (int): Number of input image channels. Default: 3.
-         embed_dim (int): Number of linear projection output channels. Default: 96.
-         norm_layer (nn.Module, optional): Normalization layer. Default: None
-     """
-
-     def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
-         super().__init__()
-         patch_size = to_2tuple(patch_size)
-         self.patch_size = patch_size
-
-         self.in_chans = in_chans
-         self.embed_dim = embed_dim
-
-         self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-         if norm_layer is not None:
-             self.norm = norm_layer(embed_dim)
-         else:
-             self.norm = None
-
-     def forward(self, x):
-         """Forward function."""
-         # padding
-         _, _, H, W = x.size()
-         if W % self.patch_size[1] != 0:
-             x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
-         if H % self.patch_size[0] != 0:
-             x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
-
-         x = self.proj(x)  # B C Wh Ww
-         if self.norm is not None:
-             Wh, Ww = x.size(2), x.size(3)
-             x = x.flatten(2).transpose(1, 2)
-             x = self.norm(x)
-             x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
-
-         return x
-
-
- @BACKBONES.register_module()
- class SwinTransformer(nn.Module):
-     """ Swin Transformer backbone.
-     A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
-     https://arxiv.org/pdf/2103.14030
-
-     Args:
-         pretrain_img_size (int): Input image size for training the pretrained model,
-             used in absolute position embedding. Default 224.
-         patch_size (int | tuple(int)): Patch size. Default: 4.
-         in_chans (int): Number of input image channels. Default: 3.
-         embed_dim (int): Number of linear projection output channels. Default: 96.
-         depths (tuple[int]): Depths of each Swin Transformer stage.
-         num_heads (tuple[int]): Number of attention heads of each stage.
-         window_size (int): Window size. Default: 7.
-         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-         qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
-         drop_rate (float): Dropout rate.
-         attn_drop_rate (float): Attention dropout rate. Default: 0.
-         drop_path_rate (float): Stochastic depth rate. Default: 0.2.
-         norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
-         ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
-         patch_norm (bool): If True, add normalization after patch embedding. Default: True.
-         out_indices (Sequence[int]): Output from which stages.
-         frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-             -1 means not freezing any parameters.
-         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-     """
-
-     def __init__(self,
-                  pretrain_img_size=224,
-                  patch_size=4,
-                  in_chans=3,
-                  embed_dim=96,
-                  depths=[2, 2, 6, 2],
-                  num_heads=[3, 6, 12, 24],
-                  window_size=7,
-                  mlp_ratio=4.,
-                  qkv_bias=True,
-                  qk_scale=None,
-                  drop_rate=0.,
-                  attn_drop_rate=0.,
-                  drop_path_rate=0.2,
-                  norm_layer=nn.LayerNorm,
-                  ape=False,
-                  patch_norm=True,
-                  out_indices=(0, 1, 2, 3),
-                  frozen_stages=-1,
-                  use_checkpoint=False):
-         super().__init__()
-
-         self.pretrain_img_size = pretrain_img_size
-         self.num_layers = len(depths)
-         self.embed_dim = embed_dim
-         self.ape = ape
-         self.patch_norm = patch_norm
-         self.out_indices = out_indices
-         self.frozen_stages = frozen_stages
-
-         # split image into non-overlapping patches
-         self.patch_embed = PatchEmbed(
-             patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
-             norm_layer=norm_layer if self.patch_norm else None)
-
-         # absolute position embedding
-         if self.ape:
-             pretrain_img_size = to_2tuple(pretrain_img_size)
-             patch_size = to_2tuple(patch_size)
-             patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
-
-             self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
-             trunc_normal_(self.absolute_pos_embed, std=.02)
-
-         self.pos_drop = nn.Dropout(p=drop_rate)
-
-         # stochastic depth
-         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
-
-         # build layers
-         self.layers = nn.ModuleList()
-         for i_layer in range(self.num_layers):
-             layer = BasicLayer(
-                 dim=int(embed_dim * 2 ** i_layer),
-                 depth=depths[i_layer],
-                 num_heads=num_heads[i_layer],
-                 window_size=window_size,
-                 mlp_ratio=mlp_ratio,
-                 qkv_bias=qkv_bias,
-                 qk_scale=qk_scale,
-                 drop=drop_rate,
-                 attn_drop=attn_drop_rate,
-                 drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
-                 norm_layer=norm_layer,
-                 downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
-                 use_checkpoint=use_checkpoint)
-             self.layers.append(layer)
-
-         num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
-         self.num_features = num_features
-
-         # add a norm layer for each output
-         for i_layer in out_indices:
-             layer = norm_layer(num_features[i_layer])
-             layer_name = f'norm{i_layer}'
-             self.add_module(layer_name, layer)
-
-         self._freeze_stages()
-
-     def _freeze_stages(self):
-         if self.frozen_stages >= 0:
-             self.patch_embed.eval()
-             for param in self.patch_embed.parameters():
-                 param.requires_grad = False
-
-         if self.frozen_stages >= 1 and self.ape:
-             self.absolute_pos_embed.requires_grad = False
-
-         if self.frozen_stages >= 2:
-             self.pos_drop.eval()
-             for i in range(0, self.frozen_stages - 1):
-                 m = self.layers[i]
-                 m.eval()
-                 for param in m.parameters():
-                     param.requires_grad = False
-
-     def init_weights(self, pretrained=None):
-         """Initialize the weights in backbone.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-
-         def _init_weights(m):
-             if isinstance(m, nn.Linear):
-                 trunc_normal_(m.weight, std=.02)
-                 if isinstance(m, nn.Linear) and m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-             elif isinstance(m, nn.LayerNorm):
-                 nn.init.constant_(m.bias, 0)
-                 nn.init.constant_(m.weight, 1.0)
-
-         if isinstance(pretrained, str):
-             self.apply(_init_weights)
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, strict=False, logger=logger)
-         elif pretrained is None:
-             self.apply(_init_weights)
-         else:
-             raise TypeError('pretrained must be a str or None')
-
-     def forward(self, x):
-         """Forward function."""
-         x = self.patch_embed(x)
-
-         Wh, Ww = x.size(2), x.size(3)
-         if self.ape:
-             # interpolate the position embedding to the corresponding size
-             absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
-             x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
-         else:
-             x = x.flatten(2).transpose(1, 2)
-         x = self.pos_drop(x)
-
-         outs = []
-         for i in range(self.num_layers):
-             layer = self.layers[i]
-             x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
-
-             if i in self.out_indices:
-                 norm_layer = getattr(self, f'norm{i}')
-                 x_out = norm_layer(x_out)
-
-                 out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
-                 outs.append(out)
-
-         return tuple(outs)
-
-     def train(self, mode=True):
-         """Convert the model into training mode while keeping layers frozen."""
-         super(SwinTransformer, self).train(mode)
-         self._freeze_stages()
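As a sanity check (a sketch, not part of the deleted file), the window_partition / window_reverse pair above is a lossless round trip whenever H and W are multiples of the window size; the two helpers below are copied from the code above, minus their docstrings:

import torch


def window_partition(x, window_size):
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


x = torch.randn(2, 14, 14, 96)              # (B, H, W, C); 14 is a multiple of window_size=7
windows = window_partition(x, 7)            # 2 * (14 // 7) * (14 // 7) = 8 windows
assert windows.shape == (8, 7, 7, 96)
assert torch.equal(window_reverse(windows, 7, 14, 14), x)   # exact round trip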