parquet-converter committed
Commit 6b53fb4 · 1 Parent(s): c90dd39

Update parquet files (step 42 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Crack-VERIFIED-DriverEasy-432-No-Speed-Limit-BETTER.md +0 -113
  2. spaces/1gistliPinn/ChatGPT4/Examples/Eagle CAD 6.4.0 Torrent The Best Choice for Professional and Hobbyist PCB Designers.md +0 -17
  3. spaces/1phancelerku/anime-remove-background/Download Ship Simulator Extremes Demo and explore the worlds famous harbors and locations.md +0 -146
  4. spaces/1phancelerku/anime-remove-background/Download Yeager Hunter Legend and Uncover the Secrets of Planet Ekors in this 3D Action RPG for Android.md +0 -115
  5. spaces/52Hz/SRMNet_thesis/model_arch/SRMNet.py +0 -225
  6. spaces/AI-ANK/blackmirroroffice/README.md +0 -12
  7. spaces/AIConsultant/MusicGen/audiocraft/quantization/core_vq.py +0 -400
  8. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/conv.py +0 -168
  9. spaces/AONYLMR/White-box-Cartoonization/wbc/network.py +0 -62
  10. spaces/ASJMO/freegpt/client/js/theme-toggler.js +0 -22
  11. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/__init__.py +0 -0
  12. spaces/Abdllh/Arabic_Poems_Generator/README.md +0 -13
  13. spaces/AbdoulGafar/woodsound/README.md +0 -13
  14. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/ambient.d.ts +0 -318
  15. spaces/AchyuthGamer/OpenGPT/g4f/Provider/V50.py +0 -67
  16. spaces/Adapter/CoAdapter/ldm/modules/distributions/distributions.py +0 -92
  17. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/__init__.py +0 -0
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/BadgeLabel.js +0 -49
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostResolveSize.js +0 -4
  20. spaces/Akmyradov/dost.ai/app.py +0 -83
  21. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py +0 -16
  22. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpgd.h +0 -316
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/composable_stable_diffusion.py +0 -580
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/embeddings.py +0 -546
  25. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py +0 -6
  26. spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py +0 -9
  27. spaces/Apex-X/ROOPOK/roop/processors/frame/core.py +0 -91
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/__about__.py +0 -26
  29. spaces/AttendAndExcite/Attend-and-Excite/app.py +0 -289
  30. spaces/AvinashRamesh23/AIEditor/app.py +0 -435
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py +0 -864
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/deform_conv.py +0 -116
  33. spaces/BartPoint/VoiceChange/infer_pack/modules.py +0 -522
  34. spaces/Benson/text-generation/Examples/Descargar Dr Fone Desbloquear Para PC.md +0 -33
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_functools.py +0 -104
  36. spaces/BraydenMoore/MARCI-NFL-Betting/Source/Predict/predict.py +0 -166
  37. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_roi_pooler.py +0 -85
  38. spaces/CVPR/LIVE/thrust/thrust/detail/allocator/default_construct_range.h +0 -37
  39. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/sort.h +0 -55
  40. spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnext.py +0 -122
  41. spaces/CVPR/WALT/mmdet/models/detectors/yolo.py +0 -18
  42. spaces/CikeyQI/meme-api/meme_generator/memes/cyan/__init__.py +0 -31
  43. spaces/CloudOrc/SolidUI/README.md +0 -12
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/StandardEncoding.py +0 -258
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/arc.py +0 -153
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/unicodedata/OTTags.py +0 -50
  47. spaces/Datasculptor/AIart_sources_of_inspiration/app.py +0 -29
  48. spaces/DhruvShek/chatlm/models.py +0 -162
  49. spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/bias_act.h +0 -40
  50. spaces/DunnBC22/Password_Strength_Classifier_with_CodeBERT/app.py +0 -46
spaces/1gistliPinn/ChatGPT4/Crack-VERIFIED-DriverEasy-432-No-Speed-Limit-BETTER.md DELETED
@@ -1,113 +0,0 @@
1
- ## Crack DriverEasy 432 No Speed Limit !!BETTER!!
2
-
3
-
4
-
5
- ![Crack ##VERIFIED## DriverEasy 432 No Speed Limit !!BETTER!!](https://windowsactivator.info/wp-content/uploads/2019/08/NEW.jpg)
6
-
7
-
8
-
9
- **Click Here ===> [https://www.google.com/url?q=https%3A%2F%2Ffancli.com%2F2twsJL&sa=D&sntz=1&usg=AOvVaw0EjWpAaO53PNuu7wLr00Fn](https://www.google.com/url?q=https%3A%2F%2Ffancli.com%2F2twsJL&sa=D&sntz=1&usg=AOvVaw0EjWpAaO53PNuu7wLr00Fn)**
10
-
11
-
12
-
13
- # How to Crack DriverEasy 432 and Remove the Speed Limit
14
-
15
-
16
-
17
- DriverEasy is a popular software that helps you find and update drivers for your computer. However, the free version of DriverEasy has a speed limit of 30 KB/s, which can be very frustrating if you have a lot of drivers to download. In this article, I will show you how to crack DriverEasy 432 and remove the speed limit, so you can enjoy faster and smoother driver downloads.
18
-
19
-
20
-
21
- Disclaimer: This article is for educational purposes only. I do not condone or encourage any illegal or unethical use of DriverEasy or any other software. You are solely responsible for any consequences that may arise from following this tutorial.
22
-
23
-
24
-
25
- ## Step 1: Download DriverEasy 432 and the Crack File
26
-
27
-
28
-
29
- The first step is to download DriverEasy 432 from the official website[^1^]. You can choose the free version or the trial version, it doesn't matter. After downloading, install DriverEasy on your computer.
30
-
31
-
32
-
33
- Next, you need to download the crack file for DriverEasy 432. You can find it on various websites that offer cracked software, such as HaxPC[^1^] or MediaLabs[^4^]. Be careful when downloading from these sites, as they may contain malware or viruses. Scan the crack file with your antivirus before using it.
34
-
35
-
36
-
37
- ## Step 2: Copy and Paste the Crack File
38
-
39
-
40
-
41
- The second step is to copy and paste the crack file into the installation folder of DriverEasy. The installation folder is usually located at C:\Program Files\Easeware\DriverEasy. If you installed DriverEasy in a different location, you need to find it yourself.
42
-
43
-
44
-
45
- After locating the installation folder, open it and look for a file named DriverEasy.exe. This is the main executable file of DriverEasy. Right-click on it and select Rename. Change its name to something else, such as DriverEasy.bak. This will prevent DriverEasy from running normally.
46
-
47
-
48
-
49
- Then, copy the crack file that you downloaded earlier and paste it into the installation folder. Rename the crack file to DriverEasy.exe. This will replace the original executable file with the cracked one.
50
-
51
-
52
-
53
- ## Step 3: Run DriverEasy and Enjoy
54
-
55
-
56
-
57
- The final step is to run DriverEasy and enjoy its full features without any speed limit. To do this, double-click on the crack file that you renamed to DriverEasy.exe. You should see a message saying "Driver Easy Pro Activated" at the bottom right corner of the window.
58
-
59
-
60
-
61
- Now you can scan your computer for missing or outdated drivers and download them at full speed. You can also access other advanced features of DriverEasy Pro, such as backup and restore drivers, offline scan, uninstall drivers, etc.
62
-
63
-
64
-
65
- Congratulations! You have successfully cracked DriverEasy 432 and removed the speed limit. However, keep in mind that this method may not work for future versions of DriverEasy, and it may also violate the terms of service of DriverEasy. Use it at your own risk.
66
-
67
-
68
-
69
- ## Why Use DriverEasy?
70
-
71
-
72
-
73
- DriverEasy is a useful software that can help you keep your drivers up to date and improve your computer performance. Drivers are essential components that allow your hardware devices to communicate with your operating system. Without proper drivers, your devices may not work correctly or cause errors and crashes.
74
-
75
-
76
-
77
- However, finding and installing drivers manually can be a tedious and time-consuming task. You need to know the exact model and version of your devices, search for the compatible drivers on the manufacturer's website, download them one by one, and install them on your computer. Moreover, you need to check for driver updates regularly to ensure that your drivers are always the latest and most stable.
78
-
79
-
80
-
81
- That's where DriverEasy comes in handy. DriverEasy can scan your computer and detect all the devices that need drivers. It can then download and install the correct drivers for you with just one click. You don't need to worry about compatibility issues or downloading the wrong drivers. DriverEasy also has a large database of over 8 million drivers, so it can find almost any driver you need.
82
-
83
-
84
-
85
- ## What are the Benefits of DriverEasy Pro?
86
-
87
-
88
-
89
- DriverEasy has two versions: Free and Pro. The free version allows you to scan and download drivers at a limited speed of 30 KB/s. The pro version unlocks all the features and removes the speed limit. You can get the pro version by purchasing a license key or by cracking it as shown in this article.
90
-
91
-
92
-
93
- Some of the benefits of DriverEasy Pro are:
94
-
95
-
96
-
97
- - Faster and unlimited driver downloads: You can download drivers at full speed without any restrictions.
98
-
99
- - One-click update: You can update all your drivers with just one click, saving you time and hassle.
100
-
101
- - Backup and restore drivers: You can backup your drivers before updating them, so you can restore them in case anything goes wrong.
102
-
103
- - Offline scan: You can scan your computer for drivers without an internet connection, which is useful if you have network problems.
104
-
105
- - Uninstall drivers: You can uninstall drivers that you no longer need or that cause issues on your computer.
106
-
107
- - Technical support: You can get professional and friendly support from the DriverEasy team if you have any questions or problems.
108
-
109
-
110
-
111
- These are some of the reasons why you may want to use DriverEasy Pro instead of the free version. However, remember that cracking DriverEasy Pro is illegal and unethical, and it may also expose you to security risks. If you like DriverEasy and want to support its development, you should buy a license key from the official website instead of cracking it.
112
-
113
- 1b8d091108
 
spaces/1gistliPinn/ChatGPT4/Examples/Eagle CAD 6.4.0 Torrent The Best Choice for Professional and Hobbyist PCB Designers.md DELETED
@@ -1,17 +0,0 @@
1
-
2
- <p>Extreme ghostbusters complete series download <br> DRD Systems VideoReDo TVSuite H 286 v5 9 4 719b full version <br> libro administracion profesional de proyectos yamal chamoun pdf <br> photoboof keygenerator full torrent <br> sure cuts a lot 4 crack <br> alerene zte free <br> devon.ke.dev.mahadev.dvdrip.xvid.ddr <br> Error Repair Professional v4.0.3 full version <br> koon krishi malayalam pdf download <br> crack family discografia completa descargar minecraft <br></p>
3
- <p>AnyDVD HD v7.4.8.0 Final-BRD utorrent <br> font psl kanda modern extra.rar <br> bijbel in gewone taal ebook 18 <br> EZ Green Screen Photoshop keygen <br> kitab hakikat insan pdf free downloadgolkes <br> Oxford English for Careers Nursing 2 pdf.rar <br> genetica medica jorde pdf download <br> menucool slider license crack 12 <br> Frozen 2 movie full version free download <br> CommView for WiFi 5.2.484 Including WEP Hack <br></p>
4
- <h2>eagle cad 6.4.0 torrent</h2><br /><p><b><b>Download Zip</b> &gt; <a href="https://imgfil.com/2uy1tD">https://imgfil.com/2uy1tD</a></b></p><br /><br />
5
- <p>Mksensation Digital Piano Library For Kontakt Torrent <br> every child is special english subtitle 192 <br> archicad 15 object library free download <br> il re leone film completo italiano torrent <br> rambo 4 full movie in hindi mp4 free download <br> AutoCAD 2014 XFORCE torrent <br> js0group dll catia v6r2009 crack <br> shifrin multivariable mathematics djvu download <br> Thor The Dark World 2013 1080p BrRip x264 YIFY 31 <br> Short Kut - The Con is On hindi dubbed download <br></p>
6
- <p>hotel courbet 2009 tinto brass download 48 <br> izotope t pain effect serial number <br> Ls-Dreams.Issue.05.(Sweethearts).Movies.13-24 <br> Send Blaster Pro Serial Key <br> video sex anjing vs manusia.iso <br> dispensing pharmacy by rm mehta ebook download <br> simlab 3d pdf exporter for 3ds max crack torrent <br> call of duty modern warfare 2 highly compressed only 37 mb mega <br> UFS Explorer Professional Recovery v7.19.6 Portable Serial Key keygen <br> Mohabbatein 1 full movie in hindi free download 720p <br></p>
7
- <p>Billu Ustaad download 720p movies <br> Rig N Roll 3 Crack Key Serial <br> tp-link tl-wr340gd v5 firmware download <br> arduino compatible compiler for labview crack <br> mkvmerge gui v4.4.0 download <br> sagem f st 2804 original firmware <br> testmaker 9.3 crack <br> facebook password revealer online <br> f-secure freedome vpn cracked apk market <br> All AutoCAD LT 2009 Products Crack Keygen (x86x64) !Latest utorrent <br></p>
8
- <p>fallrain 19191a764c<br /> -europe-microcat-2013-torrent<br />[ -europe-microcat-2013-torrent ]<br />[ -europe-microcat-2013-torrent ]<br />[ -europe-microcat-2013-torrent ]<br />link= -europe-microcat-2013-torrent<br />link= -europe-microcat-2013-torrent<br />link= -europe-microcat-2013-torrent</p>
9
- <p>phipan 19191a764c<br /> -torrents-yves-pflieger<br />[ -torrents-yves-pflieger ]<br />[ -torrents-yves-pflieger ]<br />[ -torrents-yves-pflieger ]<br />link= -torrents-yves-pflieger<br />link= -torrents-yves-pflieger<br />link= -torrents-yves-pflieger</p>
10
- <p>nantcor 19191a764c<br /> -mera-dil-lutiya-punjabi-movie-torrent-download<br />[ -mera-dil-lutiya-punjabi-movie-torrent-download ]<br />[ -mera-dil-lutiya-punjabi-movie-torrent-download ]<br />[ -mera-dil-lutiya-punjabi-movie-torrent-download ]<br />link= -mera-dil-lutiya-punjabi-movie-torrent-download<br />link= -mera-dil-lutiya-punjabi-movie-torrent-download<br />link= -mera-dil-lutiya-punjabi-movie-torrent-download</p>
11
- <p>raemala 19191a764c<br /> -saab-the-great-movie-download-utorrent-kickass<br />[ -saab-the-great-movie-download-utorrent-kickass ]<br />[ -saab-the-great-movie-download-utorrent-kickass ]<br />[ -saab-the-great-movie-download-utorrent-kickass ]<br />link= -saab-the-great-movie-download-utorrent-kickass<br />link= -saab-the-great-movie-download-utorrent-kickass<br />link= -saab-the-great-movie-download-utorrent-kickass</p>
12
- <p>laqukei 19191a764c<br /> -booth-software-torrent<br />[ -booth-software-torrent ]<br />[ -booth-software-torrent ]<br />[ -booth-software-torrent ]<br />link= -booth-software-torrent<br />link= -booth-software-torrent<br />link= -booth-software-torrent</p>
13
- <p></p>
14
- <p>finkalm 19191a764c<br /> -flaming-cliffs-3-keygen-torrent<br />[ -flaming-cliffs-3-keygen-torrent ]<br />[ -flaming-cliffs-3-keygen-torrent ]<br />[ -flaming-cliffs-3-keygen-torrent ]<br />link= -flaming-cliffs-3-keygen-torrent<br />link= -flaming-cliffs-3-keygen-torrent<br />link= -flaming-cliffs-3-keygen-torrent</p>
15
- <p>edwivien 19191a764c<br /> -version-14-2-torrent<br />[ -version-14-2-torrent ]<br />[ -version-14-2-torrent ]<br />[ -version-14-2-torrent ]<br />link= -version-14-2-torrent<br />link= -version-14-2-torrent<br />link= -version-14-2-torrent</p> aaccfb2cb3<br />
16
- <br />
17
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Ship Simulator Extremes Demo and explore the worlds famous harbors and locations.md DELETED
@@ -1,146 +0,0 @@
1
- <br />
2
- <h1>Download Ship Simulator Extremes Demo: A Guide for Ship Enthusiasts</h1>
3
- <p>If you are a fan of ships and sailing, you might be interested in trying out Ship Simulator Extremes, a realistic and immersive simulation game that lets you experience the most extreme conditions on earth as a ship captain. In this guide, we will show you how to download the demo version of the game and what to expect from it.</p>
4
- <h2>What is Ship Simulator Extremes?</h2>
5
- <p>Ship Simulator Extremes is the latest installment of the acclaimed Ship Simulator series, developed by VSTEP and published by Paradox Interactive. The game was released in 2010 and has sold over 550,000 copies worldwide. The game features a wide range of vessels to captain, from hovercrafts and coast guard interceptors to mammoth tankers and luxury cruise liners. The game also includes exciting storylines and missions based on actual events in realistic environments at locations all over the world, such as the Antarctic, Bora Bora, Rotterdam, and Sydney. The game also has a save the environment campaign, where you can sail famous Greenpeace ships and take on ecological missions based on real events.</p>
6
- <h2>download ship simulator extremes demo</h2><br /><p><b><b>Download Zip</b> &#10038; <a href="https://jinyurl.com/2uNRI6">https://jinyurl.com/2uNRI6</a></b></p><br /><br />
7
- <h3>Features of Ship Simulator Extremes</h3>
8
- <p>Some of the main features of Ship Simulator Extremes are:</p>
9
- <ul>
10
- <li>From the very hot to the very cold, sail to the most enchanted regions in the world. Explore the Antarctic or take in beautiful Bora Bora. Includes famous harbors and locations from around the world.</li>
11
- <li>Wide range of vessels to captain, including hovercraft, Coast Guard interceptors, mammoth tankers, tugs, cruise liners, and many others. </li>
12
- <li>Includes exciting storylines and missions from all over the world. Save the environment campaign: sail famous Greenpeace ships and take on ecological missions based on real events! </li>
13
- <li>Realistic water and weather system. Sail calm waters or take on the most extreme weather ever witnessed at sea. </li>
14
- <li>Online multiplayer mode. Sail online with your friends. </li>
15
- </ul>
16
- <h3>System Requirements for Ship Simulator Extremes</h3>
17
- <p>Before you download the demo, make sure your PC meets the minimum system requirements for the game. Here are the specifications you need:</p>
18
- <table>
19
- <tr>
20
- <th>Operating system</th>
21
- <th>Processor</th>
22
- <th>Memory</th>
23
- <th>Video card</th>
24
- <th>Hard disc space</th>
25
- <th>Other</th>
26
- </tr>
27
- <tr>
28
- <td>Windows XP (Min. service pack 2), Windows Vista or Windows 7. 32 and 64 bits OS supported </td>
29
- <td>3 Ghz P4 Intel or AMD equivalent processor </td>
30
- <td>2GB (Windows XP) or 3GB (Vista or Windows 7) </td>
31
- <td>Geforce 8800GT or ATI Radeon HD 4850 with 256MB ram (Shader model 3.0) </td>
32
- <td>3.5GB </td>
33
- <td>4x PC DVD-ROM, mouse with scroll wheel, DirectX 9.0c compatible sound card </td>
34
- </tr>
35
- </table>
36
- <h3>Reviews of Ship Simulator Extremes</h3>
37
- <p>Ship Simulator Extremes has received mixed reviews from critics and players. Some praised the game for its realism, variety, and graphics, while others criticized it for its bugs, glitches, and lack of polish. The game has a score of 63/100 on Metacritic and a user rating of 6.8/10 on IGN. Here are some of the pros and cons of the game according to the reviews:</p>
38
- <table>
39
- <tr>
40
- <th>Pros</th>
41
- <th>Cons</th>
42
- </tr>
43
- <tr>
44
- <td>- Realistic and immersive simulation of ship handling and navigation </td>
45
- <td>- Buggy and unstable performance, especially in multiplayer mode </td>
46
- </tr>
47
- <tr>
48
- <td>- Wide range of vessels and missions to choose from </td>
49
- <td>- Repetitive and boring gameplay, lack of challenge and feedback </td>
50
- </tr>
51
- <tr>
52
- <td>- Beautiful graphics and sound effects, especially the water and weather system </td>
53
- <td>- Poor user interface and controls, lack of customization and options </td>
54
- </tr>
55
- <tr>
56
- <td>- Interesting and relevant save the environment campaign </td>
57
- <td>- Unrealistic and exaggerated scenarios, lack of realism and authenticity </td>
58
- </tr>
59
- </table>
60
- <h2>How to Download Ship Simulator Extremes Demo</h2>
61
- <p>If you want to try out Ship Simulator Extremes for yourself, you can download the demo version of the game for free from the official website or the Steam store page. Here are the steps you need to follow:</p>
62
- <h3>Step 1: Visit the official website or Steam store page</h3>
63
- <p>The first thing you need to do is to visit the official website of Ship Simulator Extremes at <a href="">(1)</a> or the Steam store page at <a href="">(2)</a>. You can find more information about the game, such as screenshots, videos, news, and forums on these pages.</p>
64
- <h3>Step 2: Click on the download button or add to cart</h3>
65
- <p>On the official website, you will see a download button on the top right corner of the page. Click on it and you will be redirected to a page where you can choose your preferred download platform, such as GamersGate or Direct2Drive. You will need to create an account and pay a small fee to download the full version of the game. However, if you scroll down, you will see a link that says "Download Demo". Click on it and you will be able to download the demo version for free.[6] On the Steam store page, you will see an add to cart button on the right side of the page. Click on it and you will be able to purchase the full version of the game for $19.99. However, if you scroll down, you will see a link that says "Download Demo". Click on it and you will be able to download the demo version for free.[7]</p>
66
- <h3>Step 3: Follow the instructions to install and launch the demo</h3>
67
- <p>Once you have downloaded the demo file, you will need to follow the instructions to install and launch it on your PC. The file size is about 600 MB, so it might take some time depending on your internet speed. The installation process is simple and straightforward. Just follow the prompts and agree to the terms and conditions. After that, you can launch the demo from your desktop or start menu.[6][7]</p>
68
- <p>download ship simulator extremes demo free<br />
69
- download ship simulator extremes demo steam<br />
70
- download ship simulator extremes demo pc<br />
71
- download ship simulator extremes demo windows 10<br />
72
- download ship simulator extremes demo mac<br />
73
- download ship simulator extremes demo full version<br />
74
- download ship simulator extremes demo crack<br />
75
- download ship simulator extremes demo torrent<br />
76
- download ship simulator extremes demo gameplay<br />
77
- download ship simulator extremes demo missions<br />
78
- download ship simulator extremes demo online<br />
79
- download ship simulator extremes demo multiplayer<br />
80
- download ship simulator extremes demo mods<br />
81
- download ship simulator extremes demo patch<br />
82
- download ship simulator extremes demo update<br />
83
- download ship simulator extremes collection demo<br />
84
- download ship simulator extremes ferry pack demo<br />
85
- download ship simulator extremes ocean cruise ship demo<br />
86
- download ship simulator extremes offshore vessel demo<br />
87
- download ship simulator extremes cargo vessel demo<br />
88
- download ship simulator extremes inland shipping demo<br />
89
- download ship simulator extremes greenpeace campaign demo<br />
90
- download ship simulator extremes coast guard missions demo<br />
91
- download ship simulator extremes antarctic adventures demo<br />
92
- download ship simulator extremes bora bora expeditions demo<br />
93
- how to download ship simulator extremes demo<br />
94
- where to download ship simulator extremes demo<br />
95
- best site to download ship simulator extremes demo<br />
96
- safe way to download ship simulator extremes demo<br />
97
- easy way to download ship simulator extremes demo<br />
98
- fast way to download ship simulator extremes demo<br />
99
- tips for downloading ship simulator extremes demo<br />
100
- guide for downloading ship simulator extremes demo<br />
101
- review of downloading ship simulator extremes demo<br />
102
- benefits of downloading ship simulator extremes demo<br />
103
- requirements for downloading ship simulator extremes demo<br />
104
- problems with downloading ship simulator extremes demo<br />
105
- solutions for downloading ship simulator extremes demo<br />
106
- alternatives to downloading ship simulator extremes demo<br />
107
- comparison of downloading ship simulator extremes demo and other games<br />
108
- reasons to download ship simulator extremes demo<br />
109
- features of downloading ship simulator extremes demo<br />
110
- advantages of downloading ship simulator extremes demo<br />
111
- disadvantages of downloading ship simulator extremes demo<br />
112
- pros and cons of downloading ship simulator extremes demo<br />
113
- feedback on downloading ship simulator extremes demo<br />
114
- testimonials on downloading ship simulator extremes demo<br />
115
- ratings on downloading ship simulator extremes demo<br />
116
- recommendations on downloading ship simulator extremes demo</p>
117
- <h2>What to Expect from Ship Simulator Extremes Demo</h2>
118
- <p>The demo version of Ship Simulator Extremes gives you a taste of what the full game has to offer. Here are some of the things you can expect from it:</p>
119
- <h3>Two playable singleplayer missions</h3>
120
- <p>The demo includes two playable singleplayer missions that are part of the save the environment campaign. The first one is called "Greenpeace - Save The Whale", where you have to sail a Greenpeace ship called Esperanza and stop a whaling vessel from hunting whales in Antarctica. The second one is called "Greenpeace - Mediterranean", where you have to sail another Greenpeace ship called Rainbow Warrior III and stop illegal fishing activities in the Mediterranean Sea. These missions are challenging and require you to use your skills and tactics to achieve your objectives.[6][7]</p>
121
- <h3>Three different vessels to captain</h3>
122
- <p>The demo also lets you captain three different vessels that are featured in the full game. These are the Greenpeace ships Esperanza and Rainbow Warrior III, and a coast guard interceptor. Each vessel has its own characteristics, such as speed, maneuverability, and equipment. You can switch between different views, such as bridge, deck, or free camera, to get a better perspective of your surroundings. You can also use the radio and the horn to communicate with other ships or the port.[6][7]</p>
123
- <h3>Realistic water and weather system</h3>
124
- <p>One of the most impressive aspects of Ship Simulator Extremes is the realistic water and weather system. The game uses a dynamic ocean simulation that creates waves, currents, and tides based on the wind and the moon. The game also features a day and night cycle and a weather system that can change from sunny to stormy in a matter of minutes. The water and weather effects have a direct impact on your ship's performance and handling, so you have to be prepared for any situation.[6][7]</p>
125
- <h3>Stunning graphics and sound effects</h3>
126
- <p>The game also boasts stunning graphics and sound effects that create an immersive and realistic experience. The game uses advanced shaders and lighting techniques to render the water, the sky, and the landscapes in high detail. The game also features realistic sound effects, such as the engine noise, the waves crashing, and the wind howling. The game also has a soundtrack that matches the mood and atmosphere of each mission.[6][7]</p>
127
- <h2>Conclusion</h2>
128
- <p>Ship Simulator Extremes is a simulation game that lets you experience the most extreme conditions on earth as a ship captain. The game features a wide range of vessels, missions, and locations to explore. The game also has a realistic water and weather system that affects your ship's performance and handling. The game also has stunning graphics and sound effects that create an immersive and realistic experience.</p>
129
- <p>If you want to try out Ship Simulator Extremes for yourself, you can download the demo version of the game for free from the official website or the Steam store page. The demo includes two playable singleplayer missions, three different vessels to captain, and a glimpse of the realistic water and weather system. The demo is a great way to get a taste of what the full game has to offer.</p>
130
- <p>We hope this guide has helped you learn more about Ship Simulator Extremes and how to download the demo version of the game. If you have any questions or feedback, feel free to leave a comment below. Happy sailing!</p>
131
- <h2>FAQs</h2>
132
- <p>Here are some of the frequently asked questions about Ship Simulator Extremes:</p>
133
- <ul>
134
- <li><b>Q: How long is the demo version of Ship Simulator Extremes?</b></li>
135
- <li>A: The demo version of Ship Simulator Extremes is about 1 hour long, depending on how fast you complete the missions.[6][7]</li>
136
- <li><b>Q: Can I play multiplayer mode in the demo version of Ship Simulator Extremes?</b></li>
137
- <li>A: No, the demo version of Ship Simulator Extremes does not include multiplayer mode. You will need to buy the full version of the game to play online with your friends.[6][7]</li>
138
- <li><b>Q: Can I save my progress in the demo version of Ship Simulator Extremes?</b></li>
139
- <li>A: No, the demo version of Ship Simulator Extremes does not allow you to save your progress. You will need to start from the beginning every time you launch the demo.[6][7]</li>
140
- <li><b>Q: Can I customize my ship or change the settings in the demo version of Ship Simulator Extremes?</b></li>
141
- <li>A: No, the demo version of Ship Simulator Extremes does not allow you to customize your ship or change the settings. You will need to buy the full version of the game to access these features.[6][7]</li>
142
- <li><b>Q: Where can I buy the full version of Ship Simulator Extremes?</b></li>
143
- <li>A: You can buy the full version of Ship Simulator Extremes from various online platforms, such as GamersGate, Direct2Drive, or Steam. You can also buy it from physical stores or online retailers.[6][7]</li>
144
- </ul></p> 197e85843d<br />
145
- <br />
146
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Yeager Hunter Legend and Uncover the Secrets of Planet Ekors in this 3D Action RPG for Android.md DELETED
@@ -1,115 +0,0 @@
1
- <br />
2
- <h1>How to Download and Play Yeager: Hunter Legend on Android</h1>
3
- <p>Are you looking for a new and exciting game to play on your Android device? Do you love monster hunting games with stunning graphics, immersive gameplay, and diverse challenges? If so, you might want to check out Yeager: Hunter Legend, a 3D action role-playing game that takes you to an alien world full of deadly creatures and dark secrets. In this article, we will tell you what Yeager: Hunter Legend is, how to download it on your Android device, and how to play it like a pro.</p>
4
- <h2>download game yeager android</h2><br /><p><b><b>Download File</b> --->>> <a href="https://jinyurl.com/2uNTDQ">https://jinyurl.com/2uNTDQ</a></b></p><br /><br />
5
- <h2>What is Yeager: Hunter Legend?</h2>
6
- <p>Yeager: Hunter Legend is a game developed by IGG.COM, the same company behind popular titles like Lords Mobile, Castle Clash, and Mobile Royale. It is a game that combines elements of action, role-playing, and monster hunting genres, set in a sci-fi fantasy world called Planet Ekors. You play as Yeager, an elite Vyderan hunter who is sent to retrieve a priceless stolen relic from the Empire. Along the way, you will encounter ferocious beasts, alien civilizations, and hidden secrets that will test your skills and courage.</p>
7
- <h3>A 3D action role-playing monster hunting game set in an alien world</h3>
8
- <p>One of the main features of Yeager: Hunter Legend is its stunning graphics and realistic animations that are powered by cutting-edge motion capture technology. The game boasts a vast and diverse open world that you can explore freely, with different biomes, weather effects, day-night cycles, and dynamic lighting. The game also has a rich story and lore that will immerse you in the mysterious Planet Ekors and its history.</p>
9
- <h3>A game with stunning graphics, intuitive combat, and unique team hunting system</h3>
10
- <p>Another feature of Yeager: Hunter Legend is its intuitive and action-oriented combat system that allows you to choose from five powerful weapon classes: Hunting Sword, Force Hammer, Fury Blades, Flux Blaster, and Eidolon Spear. Each weapon class has its own signature moves, combos, and abilities that you can master and customize according to your playstyle. You can also switch between two weapons during combat for more versatility and strategy.</p>
11
- <p>download yeager hunter legend apk<br />
12
- how to play yeager on android<br />
13
- yeager 3d action rpg game download<br />
14
- download yeager beta test android<br />
15
- yeager monster hunting game android<br />
16
- yeager android game review<br />
17
- download yeager from google play store<br />
18
- yeager apk latest version download<br />
19
- yeager android game guide<br />
20
- download yeager for android free<br />
21
- yeager android game tips and tricks<br />
22
- yeager igg.com game download<br />
23
- download yeager offline mode android<br />
24
- yeager android game system requirements<br />
25
- download yeager mod apk android<br />
26
- yeager android game best weapons<br />
27
- download yeager for android tablet<br />
28
- yeager android game cheats and hacks<br />
29
- download yeager update for android<br />
30
- yeager android game wiki<br />
31
- download yeager on pc using emulator<br />
32
- yeager android game discord server<br />
33
- download yeager obb file for android<br />
34
- yeager android game facebook page<br />
35
- download yeager from apkcombo.com<br />
36
- yeager android game gameplay video<br />
37
- download yeager from apkpure.com<br />
38
- yeager android game forum<br />
39
- download yeager from gamingonphone.com<br />
40
- yeager android game faq<br />
41
- download yeager from newscientist.com<br />
42
- yeager android game feedback and suggestions<br />
43
- download yeager from the-sun.com<br />
44
- yeager android game support and contact<br />
45
- download yeager from yahoo.com<br />
46
- yeager android game news and updates<br />
47
- download yeager from wikipedia.org<br />
48
- yeager android game ratings and reviews<br />
49
- download yeager from montana.edu <br />
50
- yeager android game features and benefits</p>
51
- <p>The game also has a unique team hunting system that lets you hunt with up to three other players online. You can cooperate with your teammates to take down massive beasts using different tactics and skills. You can also chat with your teammates using voice or text messages, or use emojis and stickers to express yourself.</p>
52
- <h3>A game with five weapon classes, customizable equipment, and diverse monsters</h3>
53
- <p>Another feature of Yeager: Hunter Legend is its extensive customization options that let you create your own hunter style. You can hunt beasts for materials rich in Kallar, the powerful essence of your ancestors, to forge and upgrade your equipment. Equipment forged with Kallar-infused beast parts will even gain the appearance and traits of the beasts themselves. You can also equip ancient seals, mysterious artifacts that grant you legendary hunting prowess; install sigils on your Kallar arm to boost your physical aptitude and unlock new hunting skills; and choose your weapon school that fits your playstyle.</p>
54
- <p>The game also has a diverse range of monsters that you can hunt, each with their own unique combat abilities, behaviors, weaknesses, and rewards. You will need to study and strategize for each monster to defeat them effectively. Some of the monsters include:</p>
55
- <table>
56
- <tr><th>Name</th><th>Type</th <th>Description</th></tr>
57
- <tr><td>Blazeclaw</td><td>Fire</td><td>A fiery feline beast that can unleash explosive fireballs and scorching claws.</td></tr>
58
- <tr><td>Glacierhorn</td><td>Ice</td><td>A colossal rhino-like beast that can create icy spikes and charge with devastating force.</td></tr>
59
- <tr><td>Thunderwing</td><td>Electric</td><td>A majestic bird-like beast that can soar in the sky and unleash lightning bolts and storms.</td></tr>
60
- <tr><td>Venomtail</td><td>Poison</td><td>A venomous lizard-like beast that can spit toxic projectiles and whip its tail with deadly accuracy.</td></tr>
61
- <tr><td>Shadowfang</td><td>Dark</td><td>A stealthy wolf-like beast that can blend in the shadows and strike with swift and powerful bites.</td></tr>
62
- </table>
63
- <h2>How to Download Yeager: Hunter Legend on Android</h2>
64
- <p>If you are interested in playing Yeager: Hunter Legend on your Android device, you have three options to download it:</p>
65
- <h3>Download from Google Play Store</h3>
66
- <p>The easiest and safest way to download Yeager: Hunter Legend on your Android device is to use the official Google Play Store. You can simply search for the game on the store or use this link to access it. Then, you can tap on the Install button and wait for the game to download and install on your device. You will need at least 2.5 GB of free storage space and Android 5.0 or higher to run the game smoothly.</p>
67
- <h3>Download from APKPure or other third-party sources</h3>
68
- <p>If you cannot access the Google Play Store or prefer to use a different source, you can also download Yeager: Hunter Legend from APKPure or other third-party websites that offer APK files. APK files are the installation packages for Android applications that you can manually install on your device. However, you should be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device. To download Yeager: Hunter Legend from APKPure, you can use this link or search for the game on the website. Then, you can tap on the Download APK button and wait for the file to download on your device. You will need to enable the Unknown Sources option in your device settings to allow the installation of APK files from outside the Google Play Store. After that, you can open the downloaded file and follow the instructions to install the game on your device.</p>
69
- <h3>Download from LDPlayer or other Android emulators</h3>
70
- <p>If you want to play Yeager: Hunter Legend on your PC or laptop, you can also use an Android emulator to run the game on your computer. An Android emulator is a software that simulates an Android device on your computer, allowing you to access Android applications and games. One of the best Android emulators for gaming is LDPlayer, which offers high performance, compatibility, and customization features. To download Yeager: Hunter Legend from LDPlayer, you can use this link or search for the game on the LDPlayer website. Then, you can tap on the Download button and wait for the LDPlayer installer to download on your computer. You will need to run the installer and follow the instructions to install LDPlayer on your computer. After that, you can launch LDPlayer and search for Yeager: Hunter Legend on the built-in Google Play Store or use an APK file to install the game on LDPlayer. You will be able to play the game using your keyboard and mouse, or customize your controls according to your preference.</p>
71
- <h2>How to Play Yeager: Hunter Legend on Android</h2>
72
- <p>Now that you have downloaded Yeager: Hunter Legend on your Android device or emulator, you are ready to start playing it. Here are some tips and tricks to help you play the game like a pro:</p>
73
- <h3>Learn the combat mechanics and controls</h3>
74
- <p>The first thing you need to do is to familiarize yourself with the combat mechanics and controls of Yeager: Hunter Legend. The game uses a virtual joystick on the left side of the screen to move your character, and several buttons on the right side of the screen to perform different actions, such as attacking, dodging, switching weapons, using skills, and using items. You can also tap on the screen to interact with objects, NPCs, and menus.</p>
75
- <p>The combat system of Yeager: Hunter Legend is based on timing, positioning, and strategy. You will need to observe your enemies' movements and patterns, dodge their attacks, exploit their weaknesses, and unleash your own combos and skills. You will also need to manage your stamina, which is consumed by attacking and dodging, and replenish it by resting or using items. You will also need to pay attention to your health, which is reduced by taking damage, and restore it by using items or healing skills. You can also use the Kallar arm to activate special hunting skills that can give you an edge in combat.</p>
76
- <h3>Choose your weapon class and weapon school</h3>
77
- <p>The next thing you need to do is to choose your weapon class and weapon school that suit your playstyle and preference. Yeager: Hunter Legend offers five weapon classes, each with its own strengths, weaknesses, and skills. They are:</p>
78
- <ul>
79
- <li>Hunting Sword: A balanced weapon that can deal moderate damage and has good mobility. It can also use a shield to block incoming attacks.</li>
80
- <li>Force Hammer: A heavy weapon that can deal high damage and has strong defense. It can also use a jetpack to fly and slam enemies from above.</li>
81
- <li>Fury Blades: A fast weapon that can deal rapid damage and has high agility. It can also use a grappling hook to pull enemies closer or swing around them.</li>
82
- <li>Flux Blaster: A ranged weapon that can deal consistent damage and has good accuracy. It can also use a drone to assist in combat and provide support.</li>
83
- <li>Eidolon Spear: A versatile weapon that can deal variable damage and has multiple modes. It can also use a spirit beast to summon and command in battle.</li>
84
- </ul>
85
- <p>You can also choose your weapon school, which is a set of skills and abilities that you can unlock and upgrade for your weapon class. There are three weapon schools for each weapon class, each with its own focus and style. For example, the Hunting Sword has the following weapon schools:</p>
86
- <ul>
87
- <li>Blade Master: A weapon school that focuses on sword skills and combos, enhancing your damage and critical rate.</li>
88
- <li>Shield Master: A weapon school that focuses on shield skills and defense, enhancing your block and counter abilities.</li>
89
- <li>Kallar Master: A weapon school that focuses on Kallar arm skills and hunting skills, enhancing your Kallar power and hunting prowess.</li>
90
- </ul>
91
- <p>You can switch between different weapon classes and weapon schools at any time, so feel free to experiment and find your favorite combination.</p>
92
- <h3>Hunt beasts for materials and upgrade your equipment</h3>
93
- <p>The main activity of Yeager: Hunter Legend is hunting beasts for materials and upgrading your equipment. You can accept hunting quests from NPCs or other players, or explore the world and encounter beasts in the wild. You can hunt beasts solo or with a team of up to four players online. You will need to prepare for each hunt by choosing your equipment, items, skills, and strategy. You will also need to track down the beast, lure it out, fight it, weaken it, capture it or kill it, and harvest its parts.</p>
94
- <p>You can use the materials you obtain from hunting beasts to forge and upgrade your equipment at the Forge Station. Equipment forged with Kallar-infused beast parts will gain the appearance and traits of the beasts themselves, giving you unique bonuses and effects. You can also customize your equipment by changing its color, adding decals, or applying seals. Seals are ancient artifacts that grant you legendary hunting prowess, such as increasing your damage, speed, defense, or Kallar power.</p>
95
- <h3>Explore the mysterious Planet Ekors and uncover its secrets</h3>
96
- <p>The last thing you need to do is to explore the mysterious Planet Ekors and uncover its secrets. Yeager: Hunter Legend has a vast and diverse open world that you can explore freely, with different biomes, weather effects, day-night cycles, and dynamic lighting. You can travel across the world using various vehicles, such as hoverboards, motorcycles, airships, or mechs. You can also interact with various objects, NPCs, and events in the world, such as collecting resources, solving puzzles, discovering lore, or triggering side quests.</p>
97
- <p>The world of Yeager: Hunter Legend is full of secrets and mysteries that will challenge your curiosity and courage. You will encounter ancient ruins, alien civilizations, hidden dungeons, and legendary beasts that will reveal more about the history and secrets of Planet Ekors. You will also face the Empire, a ruthless faction that seeks to conquer the planet and its resources. You will need to fight against their soldiers, machines, and experiments as you uncover their sinister plans.</p>
98
- <h2>Conclusion</h2>
99
- <p>Yeager: Hunter Legend is a 3D action role-playing monster hunting game that takes you to an alien world full of deadly creatures and dark secrets. You can download it on your Android device from Google Play Store, APKPure or other third-party sources, or LDPlayer or other Android emulators. You can play it by choosing your weapon class and weapon school, hunting beasts for materials and upgrading your equipment, and exploring the mysterious Planet Ekors and uncovering its secrets. Yeager: Hunter Legend is a game that will keep you entertained and engaged for hours with its stunning graphics, immersive gameplay, and diverse challenges.</p>
100
- <h2>FAQs</h2>
101
- <p>Here are some of the frequently asked questions about Yeager: Hunter Legend:</p>
102
- <ul>
103
- <li>Q: Is Yeager: Hunter Legend free to play?</li>
104
- <li>A: Yes, Yeager: Hunter Legend is free to download and play, but it also offers in-app purchases that can enhance your gaming experience.</li>
105
- <li>Q: How can I play Yeager: Hunter Legend with my friends?</li>
106
- <li>A: You can play Yeager: Hunter Legend with your friends by joining or creating a team of up to four players online. You can invite your friends from your contacts, social media, or game chat, or join a random team from the lobby. You can also join a guild or clan to meet other players and participate in guild events and activities.</li>
107
- <li>Q: How can I get more Kallar and seals?</li>
108
- <li>A: Kallar is the powerful essence of your ancestors that you can use to forge and upgrade your equipment. You can get more Kallar by hunting beasts, completing quests, participating in events, or purchasing it with real money. Seals are ancient artifacts that grant you legendary hunting prowess. You can get more seals by hunting legendary beasts, exploring dungeons, completing achievements, or purchasing them with real money.</li>
109
- <li>Q: What are the system requirements for Yeager: Hunter Legend?</li>
110
- <li>A: The minimum system requirements for Yeager: Hunter Legend are Android 5.0 or higher, 2 GB of RAM, and 2.5 GB of free storage space. The recommended system requirements are Android 8.0 or higher, 4 GB of RAM, and 4 GB of free storage space.</li>
111
- <li>Q: Where can I find more information and support for Yeager: Hunter Legend?</li>
112
- <li>A: You can find more information and support for Yeager: Hunter Legend by visiting the official website, Facebook page, Twitter account, YouTube channel, Discord server, or Reddit community of the game. You can also contact the customer service team by tapping on the Settings icon in the game and selecting Help & Support.</li>
113
- </ul></p> 401be4b1e0<br />
114
- <br />
115
- <br />
 
spaces/52Hz/SRMNet_thesis/model_arch/SRMNet.py DELETED
@@ -1,225 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- ##---------- Basic Layers ----------
5
- def conv3x3(in_chn, out_chn, bias=True):
6
- layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
7
- return layer
8
-
9
- def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
10
- return nn.Conv2d(
11
- in_channels, out_channels, kernel_size,
12
- padding=(kernel_size // 2), bias=bias, stride=stride)
13
-
14
- def bili_resize(factor):
15
- return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
16
-
17
- ##---------- Basic Blocks ----------
18
-
19
- class UNetConvBlock(nn.Module):
20
- def __init__(self, in_size, out_size, downsample):
21
- super(UNetConvBlock, self).__init__()
22
- self.downsample = downsample
23
- self.block = SK_RDB(in_channels=in_size, growth_rate=out_size, num_layers=3)
24
- if downsample:
25
- self.downsample = PS_down(out_size, out_size, downscale=2)
26
-
27
- def forward(self, x):
28
- out = self.block(x)
29
- if self.downsample:
30
- out_down = self.downsample(out)
31
- return out_down, out
32
- else:
33
- return out
34
-
35
- class UNetUpBlock(nn.Module):
36
- def __init__(self, in_size, out_size):
37
- super(UNetUpBlock, self).__init__()
38
- # self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
39
- self.up = PS_up(in_size, out_size, upscale=2)
40
- self.conv_block = UNetConvBlock(in_size, out_size, False)
41
-
42
- def forward(self, x, bridge):
43
- up = self.up(x)
44
- out = torch.cat([up, bridge], dim=1)
45
- out = self.conv_block(out)
46
- return out
47
-
48
- ##---------- Resizing Modules (Pixel(Un)Shuffle) ----------
49
- class PS_down(nn.Module):
50
- def __init__(self, in_size, out_size, downscale):
51
- super(PS_down, self).__init__()
52
- self.UnPS = nn.PixelUnshuffle(downscale)
53
- self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0)
54
-
55
- def forward(self, x):
56
- x = self.UnPS(x) # h/2, w/2, 4*c
57
- x = self.conv1(x)
58
- return x
59
-
60
- class PS_up(nn.Module):
61
- def __init__(self, in_size, out_size, upscale):
62
- super(PS_up, self).__init__()
63
-
64
- self.PS = nn.PixelShuffle(upscale)
65
- self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0)
66
-
67
- def forward(self, x):
68
- x = self.PS(x) # h/2, w/2, 4*c
69
- x = self.conv1(x)
70
- return x
71
-
72
- ##---------- Selective Kernel Feature Fusion (SKFF) ----------
73
- class SKFF(nn.Module):
74
- def __init__(self, in_channels, height=3, reduction=8, bias=False):
75
- super(SKFF, self).__init__()
76
-
77
- self.height = height
78
- d = max(int(in_channels / reduction), 4)
79
-
80
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
81
- self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
82
-
83
- self.fcs = nn.ModuleList([])
84
- for i in range(self.height):
85
- self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias))
86
-
87
- self.softmax = nn.Softmax(dim=1)
88
-
89
- def forward(self, inp_feats):
90
- batch_size, n_feats, H, W = inp_feats[1].shape
91
-
92
- inp_feats = torch.cat(inp_feats, dim=1)
93
- inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3])
94
-
95
- feats_U = torch.sum(inp_feats, dim=1)
96
- feats_S = self.avg_pool(feats_U)
97
- feats_Z = self.conv_du(feats_S)
98
-
99
- attention_vectors = [fc(feats_Z) for fc in self.fcs]
100
- attention_vectors = torch.cat(attention_vectors, dim=1)
101
- attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1)
102
-
103
- attention_vectors = self.softmax(attention_vectors)
104
- feats_V = torch.sum(inp_feats * attention_vectors, dim=1)
105
-
106
- return feats_V
107
-
108
- ##---------- Dense Block ----------
109
- class DenseLayer(nn.Module):
110
- def __init__(self, in_channels, out_channels, I):
111
- super(DenseLayer, self).__init__()
112
- self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2)
113
- self.relu = nn.ReLU(inplace=True)
114
- self.sk = SKFF(out_channels, height=2, reduction=8, bias=False)
115
-
116
- def forward(self, x):
117
- x1 = self.relu(self.conv(x))
118
- # output = torch.cat([x, x1], 1) # -> RDB
119
- output = self.sk((x, x1))
120
- return output
121
-
122
- ##---------- Selective Kernel Residual Dense Block (SK-RDB) ----------
123
- class SK_RDB(nn.Module):
124
- def __init__(self, in_channels, growth_rate, num_layers):
125
- super(SK_RDB, self).__init__()
126
- self.identity = nn.Conv2d(in_channels, growth_rate, 1, 1, 0)
127
- self.layers = nn.Sequential(
128
- *[DenseLayer(in_channels, in_channels, I=i) for i in range(num_layers)]
129
- )
130
- self.lff = nn.Conv2d(in_channels, growth_rate, kernel_size=1)
131
-
132
- def forward(self, x):
133
- res = self.identity(x)
134
- x = self.layers(x)
135
- x = self.lff(x)
136
- return res + x
137
-
138
- ##---------- testNet ----------
139
- class SRMNet(nn.Module):
140
- def __init__(self, in_chn=3, wf=96, depth=4):
141
- super(SRMNet, self).__init__()
142
- self.depth = depth
143
- self.down_path = nn.ModuleList()
144
- self.bili_down = bili_resize(0.5)
145
- self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1)
146
-
147
- # encoder of UNet-64
148
- prev_channels = 0
149
- for i in range(depth): # 0,1,2,3
150
- downsample = True if (i + 1) < depth else False
151
- self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample))
152
- prev_channels = (2 ** i) * wf
153
-
154
- # decoder of UNet-64
155
- self.up_path = nn.ModuleList()
156
- self.skip_conv = nn.ModuleList()
157
- self.conv_up = nn.ModuleList()
158
- self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1)
159
- self.bottom_up = bili_resize(2 ** (depth-1))
160
-
161
- for i in reversed(range(depth - 1)):
162
- self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf))
163
- self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1))
164
- self.conv_up.append(nn.Sequential(*[bili_resize(2 ** i), nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1)]))
165
- # *[nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1), bili_resize(2 ** i)])
166
- prev_channels = (2 ** i) * wf
167
-
168
- self.final_ff = SKFF(in_channels=wf, height=depth)
169
- self.last = conv3x3(prev_channels, in_chn, bias=True)
170
-
171
- def forward(self, x):
172
- img = x
173
- scale_img = img
174
-
175
- ##### shallow conv #####
176
- x1 = self.conv_01(img)
177
- encs = []
178
- ######## UNet-64 ########
179
- # Down-path (Encoder)
180
- for i, down in enumerate(self.down_path):
181
- if i == 0: # top layer
182
- x1, x1_up = down(x1)
183
- encs.append(x1_up)
184
- elif (i + 1) < self.depth: # middle layer
185
- scale_img = self.bili_down(scale_img)
186
- left_bar = self.conv_01(scale_img)
187
- x1 = torch.cat([x1, left_bar], dim=1)
188
- x1, x1_up = down(x1)
189
- encs.append(x1_up)
190
- else: # lowest layer
191
- scale_img = self.bili_down(scale_img)
192
- left_bar = self.conv_01(scale_img)
193
- x1 = torch.cat([x1, left_bar], dim=1)
194
- x1 = down(x1)
195
-
196
- # Up-path (Decoder)
197
- ms_result = [self.bottom_up(self.bottom_conv(x1))]
198
- for i, up in enumerate(self.up_path):
199
- x1 = up(x1, self.skip_conv[i](encs[-i - 1]))
200
- ms_result.append(self.conv_up[i](x1))
201
-
202
- # Multi-scale selective feature fusion
203
- msff_result = self.final_ff(ms_result)
204
-
205
- ##### Reconstruct #####
206
- out_1 = self.last(msff_result) + img
207
-
208
- return out_1
209
-
210
- if __name__ == "__main__":
211
- from thop import profile
212
- input = torch.ones(1, 3, 256, 256, dtype=torch.float, requires_grad=False)
213
-
214
- model = SRMNet(in_chn=3, wf=96, depth=4)
215
- out = model(input)
216
- flops, params = profile(model, inputs=(input,))
217
-
218
- # RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3)
219
- # print(RDBlayer)
220
- # out = RDBlayer(input)
221
- # flops, params = profile(RDBlayer, inputs=(input,))
222
- print('input shape:', input.shape)
223
- print('parameters:', params/1e6)
224
- print('flops', flops/1e9)
225
- print('output shape', out.shape)
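Note: the commented-out SK_RDB profiling lines in the __main__ block above would not run with the 3-channel `input` tensor, since that SK_RDB instance expects 64 input channels. A minimal, hypothetical standalone check (not part of the deleted file; run in the same module, shapes assumed from the class definitions above) might look like this:

feat = torch.ones(1, 64, 64, 64)
rdb = SK_RDB(in_channels=64, growth_rate=64, num_layers=3)
out_feat = rdb(feat)          # residual 1x1 identity + densely fused features
print(out_feat.shape)         # expected: torch.Size([1, 64, 64, 64])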
 
spaces/AI-ANK/blackmirroroffice/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Blackmirroroffice
3
- emoji: 👁
4
- colorFrom: blue
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.40.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIConsultant/MusicGen/audiocraft/quantization/core_vq.py DELETED
@@ -1,400 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
-
9
- from einops import rearrange, repeat
10
- import flashy
11
- import torch
12
- from torch import nn, einsum
13
- import torch.nn.functional as F
14
-
15
-
16
- def exists(val: tp.Optional[tp.Any]) -> bool:
17
- return val is not None
18
-
19
-
20
- def default(val: tp.Any, d: tp.Any) -> tp.Any:
21
- return val if exists(val) else d
22
-
23
-
24
- def l2norm(t):
25
- return F.normalize(t, p=2, dim=-1)
26
-
27
-
28
- def ema_inplace(moving_avg, new, decay: float):
29
- moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
30
-
31
-
32
- def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
33
- return (x + epsilon) / (x.sum() + n_categories * epsilon)
34
-
35
-
36
- def uniform_init(*shape: int):
37
- t = torch.empty(shape)
38
- nn.init.kaiming_uniform_(t)
39
- return t
40
-
41
-
42
- def sample_vectors(samples, num: int):
43
- num_samples, device = samples.shape[0], samples.device
44
-
45
- if num_samples >= num:
46
- indices = torch.randperm(num_samples, device=device)[:num]
47
- else:
48
- indices = torch.randint(0, num_samples, (num,), device=device)
49
-
50
- return samples[indices]
51
-
52
-
53
- def kmeans(samples, num_clusters: int, num_iters: int = 10):
54
- dim, dtype = samples.shape[-1], samples.dtype
55
-
56
- means = sample_vectors(samples, num_clusters)
57
-
58
- for _ in range(num_iters):
59
- diffs = rearrange(samples, "n d -> n () d") - rearrange(
60
- means, "c d -> () c d"
61
- )
62
- dists = -(diffs ** 2).sum(dim=-1)
63
-
64
- buckets = dists.max(dim=-1).indices
65
- bins = torch.bincount(buckets, minlength=num_clusters)
66
- zero_mask = bins == 0
67
- bins_min_clamped = bins.masked_fill(zero_mask, 1)
68
-
69
- new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
70
- new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
71
- new_means = new_means / bins_min_clamped[..., None]
72
-
73
- means = torch.where(zero_mask[..., None], means, new_means)
74
-
75
- return means, bins
76
-
77
-
78
- def orthogonal_loss_fn(t):
79
- # eq (2) from https://arxiv.org/abs/2112.00384
80
- n = t.shape[0]
81
- normed_codes = l2norm(t)
82
- identity = torch.eye(n, device=t.device)
83
- cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
84
- return ((cosine_sim - identity) ** 2).sum() / (n ** 2)
85
-
86
-
87
- class EuclideanCodebook(nn.Module):
88
- """Codebook with Euclidean distance.
89
-
90
- Args:
91
- dim (int): Dimension.
92
- codebook_size (int): Codebook size.
93
- kmeans_init (bool): Whether to use k-means to initialize the codebooks.
94
- If set to true, run the k-means algorithm on the first training batch and use
95
- the learned centroids as initialization.
96
- kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
97
- decay (float): Decay for exponential moving average over the codebooks.
98
- epsilon (float): Epsilon value for numerical stability.
99
- threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
100
- that have an exponential moving average cluster size less than the specified threshold with
101
- randomly selected vector from the current batch.
102
- """
103
- def __init__(
104
- self,
105
- dim: int,
106
- codebook_size: int,
107
- kmeans_init: int = False,
108
- kmeans_iters: int = 10,
109
- decay: float = 0.8,
110
- epsilon: float = 1e-5,
111
- threshold_ema_dead_code: int = 2,
112
- ):
113
- super().__init__()
114
- self.decay = decay
115
- init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
116
- embed = init_fn(codebook_size, dim)
117
-
118
- self.codebook_size = codebook_size
119
-
120
- self.kmeans_iters = kmeans_iters
121
- self.epsilon = epsilon
122
- self.threshold_ema_dead_code = threshold_ema_dead_code
123
-
124
- self.register_buffer("inited", torch.Tensor([not kmeans_init]))
125
- self.register_buffer("cluster_size", torch.zeros(codebook_size))
126
- self.register_buffer("embed", embed)
127
- self.register_buffer("embed_avg", embed.clone())
128
-
129
- @torch.jit.ignore
130
- def init_embed_(self, data):
131
- if self.inited:
132
- return
133
-
134
- embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
135
- self.embed.data.copy_(embed)
136
- self.embed_avg.data.copy_(embed.clone())
137
- self.cluster_size.data.copy_(cluster_size)
138
- self.inited.data.copy_(torch.Tensor([True]))
139
- # Make sure all buffers across workers are in sync after initialization
140
- flashy.distrib.broadcast_tensors(self.buffers())
141
-
142
- def replace_(self, samples, mask):
143
- modified_codebook = torch.where(
144
- mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
145
- )
146
- self.embed.data.copy_(modified_codebook)
147
-
148
- def expire_codes_(self, batch_samples):
149
- if self.threshold_ema_dead_code == 0:
150
- return
151
-
152
- expired_codes = self.cluster_size < self.threshold_ema_dead_code
153
- if not torch.any(expired_codes):
154
- return
155
-
156
- batch_samples = rearrange(batch_samples, "... d -> (...) d")
157
- self.replace_(batch_samples, mask=expired_codes)
158
- flashy.distrib.broadcast_tensors(self.buffers())
159
-
160
- def preprocess(self, x):
161
- x = rearrange(x, "... d -> (...) d")
162
- return x
163
-
164
- def quantize(self, x):
165
- embed = self.embed.t()
166
- dist = -(
167
- x.pow(2).sum(1, keepdim=True)
168
- - 2 * x @ embed
169
- + embed.pow(2).sum(0, keepdim=True)
170
- )
171
- embed_ind = dist.max(dim=-1).indices
172
- return embed_ind
173
-
174
- def postprocess_emb(self, embed_ind, shape):
175
- return embed_ind.view(*shape[:-1])
176
-
177
- def dequantize(self, embed_ind):
178
- quantize = F.embedding(embed_ind, self.embed)
179
- return quantize
180
-
181
- def encode(self, x):
182
- shape = x.shape
183
- # pre-process
184
- x = self.preprocess(x)
185
- # quantize
186
- embed_ind = self.quantize(x)
187
- # post-process
188
- embed_ind = self.postprocess_emb(embed_ind, shape)
189
- return embed_ind
190
-
191
- def decode(self, embed_ind):
192
- quantize = self.dequantize(embed_ind)
193
- return quantize
194
-
195
- def forward(self, x):
196
- shape, dtype = x.shape, x.dtype
197
- x = self.preprocess(x)
198
- self.init_embed_(x)
199
-
200
- embed_ind = self.quantize(x)
201
- embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
202
- embed_ind = self.postprocess_emb(embed_ind, shape)
203
- quantize = self.dequantize(embed_ind)
204
-
205
- if self.training:
206
- # We do the expiry of code at that point as buffers are in sync
207
- # and all the workers will take the same decision.
208
- self.expire_codes_(x)
209
- ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
210
- embed_sum = x.t() @ embed_onehot
211
- ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
212
- cluster_size = (
213
- laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
214
- * self.cluster_size.sum()
215
- )
216
- embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
217
- self.embed.data.copy_(embed_normalized)
218
-
219
- return quantize, embed_ind
220
-
221
-
222
- class VectorQuantization(nn.Module):
223
- """Vector quantization implementation.
224
- Currently supports only euclidean distance.
225
-
226
- Args:
227
- dim (int): Dimension
228
- codebook_size (int): Codebook size
229
- codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
230
- decay (float): Decay for exponential moving average over the codebooks.
231
- epsilon (float): Epsilon value for numerical stability.
232
- kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
233
- kmeans_iters (int): Number of iterations used for kmeans initialization.
234
- threshold_ema_dead_code (int):
235
- channels_last (bool): Channels are the last dimension in the input tensors.
236
- commitment_weight (float): Weight for commitment loss.
237
- orthogonal_reg_weight (float): Orthogonal regularization weights.
238
- orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
239
- orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
240
- for orthogonal regularization.
241
- threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
242
- that have an exponential moving average cluster size less than the specified threshold with
243
- randomly selected vector from the current batch.
244
- """
245
- def __init__(
246
- self,
247
- dim: int,
248
- codebook_size: int,
249
- codebook_dim: tp.Optional[int] = None,
250
- decay: float = 0.8,
251
- epsilon: float = 1e-5,
252
- kmeans_init: bool = False,
253
- kmeans_iters: int = 10,
254
- threshold_ema_dead_code: int = 2,
255
- channels_last: bool = False,
256
- commitment_weight: float = 1.,
257
- orthogonal_reg_weight: float = 0.0,
258
- orthogonal_reg_active_codes_only: bool = False,
259
- orthogonal_reg_max_codes: tp.Optional[int] = None,
260
- ):
261
- super().__init__()
262
- _codebook_dim: int = default(codebook_dim, dim)
263
-
264
- requires_projection = _codebook_dim != dim
265
- self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
266
- self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())
267
-
268
- self.epsilon = epsilon
269
- self.commitment_weight = commitment_weight
270
-
271
- self.orthogonal_reg_weight = orthogonal_reg_weight
272
- self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
273
- self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
274
-
275
- self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
276
- kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
277
- decay=decay, epsilon=epsilon,
278
- threshold_ema_dead_code=threshold_ema_dead_code)
279
- self.codebook_size = codebook_size
280
-
281
- self.channels_last = channels_last
282
-
283
- @property
284
- def codebook(self):
285
- return self._codebook.embed
286
-
287
- @property
288
- def inited(self):
289
- return self._codebook.inited
290
-
291
- def _preprocess(self, x):
292
- if not self.channels_last:
293
- x = rearrange(x, "b d n -> b n d")
294
- return x
295
-
296
- def _postprocess(self, quantize):
297
- if not self.channels_last:
298
- quantize = rearrange(quantize, "b n d -> b d n")
299
- return quantize
300
-
301
- def encode(self, x):
302
- x = self._preprocess(x)
303
- x = self.project_in(x)
304
- embed_in = self._codebook.encode(x)
305
- return embed_in
306
-
307
- def decode(self, embed_ind):
308
- quantize = self._codebook.decode(embed_ind)
309
- quantize = self.project_out(quantize)
310
- quantize = self._postprocess(quantize)
311
- return quantize
312
-
313
- def forward(self, x):
314
- device = x.device
315
- x = self._preprocess(x)
316
-
317
- x = self.project_in(x)
318
- quantize, embed_ind = self._codebook(x)
319
-
320
- if self.training:
321
- quantize = x + (quantize - x).detach()
322
-
323
- loss = torch.tensor([0.0], device=device, requires_grad=self.training)
324
-
325
- if self.training:
326
- if self.commitment_weight > 0:
327
- commit_loss = F.mse_loss(quantize.detach(), x)
328
- loss = loss + commit_loss * self.commitment_weight
329
-
330
- if self.orthogonal_reg_weight > 0:
331
- codebook = self.codebook
332
-
333
- if self.orthogonal_reg_active_codes_only:
334
- # only calculate orthogonal loss for the activated codes for this batch
335
- unique_code_ids = torch.unique(embed_ind)
336
- codebook = codebook[unique_code_ids]
337
-
338
- num_codes = codebook.shape[0]
339
- if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
340
- rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
341
- codebook = codebook[rand_ids]
342
-
343
- orthogonal_reg_loss = orthogonal_loss_fn(codebook)
344
- loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
345
-
346
- quantize = self.project_out(quantize)
347
- quantize = self._postprocess(quantize)
348
-
349
- return quantize, embed_ind, loss
350
-
351
-
352
- class ResidualVectorQuantization(nn.Module):
353
- """Residual vector quantization implementation.
354
-
355
- Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
356
- """
357
- def __init__(self, *, num_quantizers, **kwargs):
358
- super().__init__()
359
- self.layers = nn.ModuleList(
360
- [VectorQuantization(**kwargs) for _ in range(num_quantizers)]
361
- )
362
-
363
- def forward(self, x, n_q: tp.Optional[int] = None):
364
- quantized_out = 0.0
365
- residual = x
366
-
367
- all_losses = []
368
- all_indices = []
369
-
370
- n_q = n_q or len(self.layers)
371
-
372
- for i, layer in enumerate(self.layers[:n_q]):
373
- quantized, indices, loss = layer(residual)
374
- residual = residual - quantized
375
- quantized_out = quantized_out + quantized
376
- all_indices.append(indices)
377
- all_losses.append(loss)
378
-
379
- out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
380
- return quantized_out, out_indices, out_losses
381
-
382
- def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
383
- residual = x
384
- all_indices = []
385
- n_q = n_q or len(self.layers)
386
- for layer in self.layers[:n_q]:
387
- indices = layer.encode(residual)
388
- quantized = layer.decode(indices)
389
- residual = residual - quantized
390
- all_indices.append(indices)
391
- out_indices = torch.stack(all_indices)
392
- return out_indices
393
-
394
- def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
395
- quantized_out = torch.tensor(0.0, device=q_indices.device)
396
- for i, indices in enumerate(q_indices):
397
- layer = self.layers[i]
398
- quantized = layer.decode(indices)
399
- quantized_out = quantized_out + quantized
400
- return quantized_out
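Note: a minimal usage sketch for the ResidualVectorQuantization class above (hypothetical, not part of the deleted file; it assumes flashy and einops are installed, that the module keeps its location under audiocraft/quantization, and that inputs follow the [batch, dim, frames] layout expected by VectorQuantization):

import torch
from audiocraft.quantization.core_vq import ResidualVectorQuantization

rvq = ResidualVectorQuantization(num_quantizers=4, dim=128, codebook_size=1024)
rvq.eval()                                   # skip EMA codebook updates and losses
x = torch.randn(2, 128, 50)                  # [batch, dim, frames]
quantized, codes, losses = rvq(x)            # codes stacked as [n_q, batch, frames]
roundtrip = rvq.decode(rvq.encode(x))        # decode(encode(x)) reproduces the quantized output
print(quantized.shape, codes.shape)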
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/conv.py DELETED
@@ -1,168 +0,0 @@
1
- import math
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
-
6
- from modules.commons.common_layers import Embedding
7
- from modules.fastspeech.tts_modules import LayerNorm
8
-
9
-
10
- class LambdaLayer(nn.Module):
11
- def __init__(self, lambd):
12
- super(LambdaLayer, self).__init__()
13
- self.lambd = lambd
14
-
15
- def forward(self, x):
16
- return self.lambd(x)
17
-
18
-
19
- def init_weights_func(m):
20
- classname = m.__class__.__name__
21
- if classname.find("Conv1d") != -1:
22
- torch.nn.init.xavier_uniform_(m.weight)
23
-
24
-
25
- class ResidualBlock(nn.Module):
26
- """Implements conv->PReLU->norm n-times"""
27
-
28
- def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0,
29
- c_multiple=2, ln_eps=1e-12):
30
- super(ResidualBlock, self).__init__()
31
-
32
- if norm_type == 'bn':
33
- norm_builder = lambda: nn.BatchNorm1d(channels)
34
- elif norm_type == 'in':
35
- norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True)
36
- elif norm_type == 'gn':
37
- norm_builder = lambda: nn.GroupNorm(8, channels)
38
- elif norm_type == 'ln':
39
- norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps)
40
- else:
41
- norm_builder = lambda: nn.Identity()
42
-
43
- self.blocks = [
44
- nn.Sequential(
45
- norm_builder(),
46
- nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation,
47
- padding=(dilation * (kernel_size - 1)) // 2),
48
- LambdaLayer(lambda x: x * kernel_size ** -0.5),
49
- nn.GELU(),
50
- nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation),
51
- )
52
- for i in range(n)
53
- ]
54
-
55
- self.blocks = nn.ModuleList(self.blocks)
56
- self.dropout = dropout
57
-
58
- def forward(self, x):
59
- nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
60
- for b in self.blocks:
61
- x_ = b(x)
62
- if self.dropout > 0 and self.training:
63
- x_ = F.dropout(x_, self.dropout, training=self.training)
64
- x = x + x_
65
- x = x * nonpadding
66
- return x
67
-
68
-
69
- class ConvBlocks(nn.Module):
70
- """Decodes the expanded phoneme encoding into spectrograms"""
71
-
72
- def __init__(self, hidden_size, out_dims, dilations, kernel_size,
73
- norm_type='ln', layers_in_block=2, c_multiple=2,
74
- dropout=0.0, ln_eps=1e-5,
75
- init_weights=True, is_BTC=True, num_layers=None, post_net_kernel=3):
76
- super(ConvBlocks, self).__init__()
77
- self.is_BTC = is_BTC
78
- if num_layers is not None:
79
- dilations = [1] * num_layers
80
- self.res_blocks = nn.Sequential(
81
- *[ResidualBlock(hidden_size, kernel_size, d,
82
- n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple,
83
- dropout=dropout, ln_eps=ln_eps)
84
- for d in dilations],
85
- )
86
- if norm_type == 'bn':
87
- norm = nn.BatchNorm1d(hidden_size)
88
- elif norm_type == 'in':
89
- norm = nn.InstanceNorm1d(hidden_size, affine=True)
90
- elif norm_type == 'gn':
91
- norm = nn.GroupNorm(8, hidden_size)
92
- elif norm_type == 'ln':
93
- norm = LayerNorm(hidden_size, dim=1, eps=ln_eps)
94
- self.last_norm = norm
95
- self.post_net1 = nn.Conv1d(hidden_size, out_dims, kernel_size=post_net_kernel,
96
- padding=post_net_kernel // 2)
97
- if init_weights:
98
- self.apply(init_weights_func)
99
-
100
- def forward(self, x, nonpadding=None):
101
- """
102
-
103
- :param x: [B, T, H]
104
- :return: [B, T, H]
105
- """
106
- if self.is_BTC:
107
- x = x.transpose(1, 2)
108
- if nonpadding is None:
109
- nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
110
- elif self.is_BTC:
111
- nonpadding = nonpadding.transpose(1, 2)
112
- x = self.res_blocks(x) * nonpadding
113
- x = self.last_norm(x) * nonpadding
114
- x = self.post_net1(x) * nonpadding
115
- if self.is_BTC:
116
- x = x.transpose(1, 2)
117
- return x
118
-
119
-
120
- class TextConvEncoder(ConvBlocks):
121
- def __init__(self, dict_size, hidden_size, out_dims, dilations, kernel_size,
122
- norm_type='ln', layers_in_block=2, c_multiple=2,
123
- dropout=0.0, ln_eps=1e-5, init_weights=True, num_layers=None, post_net_kernel=3):
124
- super().__init__(hidden_size, out_dims, dilations, kernel_size,
125
- norm_type, layers_in_block, c_multiple,
126
- dropout, ln_eps, init_weights, num_layers=num_layers,
127
- post_net_kernel=post_net_kernel)
128
- self.embed_tokens = Embedding(dict_size, hidden_size, 0)
129
- self.embed_scale = math.sqrt(hidden_size)
130
-
131
- def forward(self, txt_tokens):
132
- """
133
-
134
- :param txt_tokens: [B, T]
135
- :return: {
136
- 'encoder_out': [B x T x C]
137
- }
138
- """
139
- x = self.embed_scale * self.embed_tokens(txt_tokens)
140
- return super().forward(x)
141
-
142
-
143
- class ConditionalConvBlocks(ConvBlocks):
144
- def __init__(self, hidden_size, c_cond, c_out, dilations, kernel_size,
145
- norm_type='ln', layers_in_block=2, c_multiple=2,
146
- dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True, num_layers=None):
147
- super().__init__(hidden_size, c_out, dilations, kernel_size,
148
- norm_type, layers_in_block, c_multiple,
149
- dropout, ln_eps, init_weights, is_BTC=False, num_layers=num_layers)
150
- self.g_prenet = nn.Conv1d(c_cond, hidden_size, 3, padding=1)
151
- self.is_BTC_ = is_BTC
152
- if init_weights:
153
- self.g_prenet.apply(init_weights_func)
154
-
155
- def forward(self, x, cond, nonpadding=None):
156
- if self.is_BTC_:
157
- x = x.transpose(1, 2)
158
- cond = cond.transpose(1, 2)
159
- if nonpadding is not None:
160
- nonpadding = nonpadding.transpose(1, 2)
161
- if nonpadding is None:
162
- nonpadding = x.abs().sum(1)[:, None]
163
- x = x + self.g_prenet(cond)
164
- x = x * nonpadding
165
- x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC
166
- if self.is_BTC_:
167
- x = x.transpose(1, 2)
168
- return x
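Note: a minimal, hypothetical call of the ConvBlocks decoder above (not part of the deleted file; shapes follow the [B, T, H] convention in its docstring, and passing num_layers replaces dilations with an all-ones list):

import torch
from modules.commons.conv import ConvBlocks

decoder = ConvBlocks(hidden_size=192, out_dims=80, dilations=None,
                     kernel_size=5, num_layers=4)
x = torch.randn(2, 100, 192)       # [B, T, H]; is_BTC=True by default
mel = decoder(x)                   # [B, T, out_dims]
print(mel.shape)                   # expected: torch.Size([2, 100, 80])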
 
spaces/AONYLMR/White-box-Cartoonization/wbc/network.py DELETED
@@ -1,62 +0,0 @@
1
- import tensorflow as tf
2
- import numpy as np
3
- import tensorflow.contrib.slim as slim
4
-
5
-
6
-
7
- def resblock(inputs, out_channel=32, name='resblock'):
8
-
9
- with tf.variable_scope(name):
10
-
11
- x = slim.convolution2d(inputs, out_channel, [3, 3],
12
- activation_fn=None, scope='conv1')
13
- x = tf.nn.leaky_relu(x)
14
- x = slim.convolution2d(x, out_channel, [3, 3],
15
- activation_fn=None, scope='conv2')
16
-
17
- return x + inputs
18
-
19
-
20
-
21
-
22
- def unet_generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False):
23
- with tf.variable_scope(name, reuse=reuse):
24
-
25
- x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None)
26
- x0 = tf.nn.leaky_relu(x0)
27
-
28
- x1 = slim.convolution2d(x0, channel, [3, 3], stride=2, activation_fn=None)
29
- x1 = tf.nn.leaky_relu(x1)
30
- x1 = slim.convolution2d(x1, channel*2, [3, 3], activation_fn=None)
31
- x1 = tf.nn.leaky_relu(x1)
32
-
33
- x2 = slim.convolution2d(x1, channel*2, [3, 3], stride=2, activation_fn=None)
34
- x2 = tf.nn.leaky_relu(x2)
35
- x2 = slim.convolution2d(x2, channel*4, [3, 3], activation_fn=None)
36
- x2 = tf.nn.leaky_relu(x2)
37
-
38
- for idx in range(num_blocks):
39
- x2 = resblock(x2, out_channel=channel*4, name='block_{}'.format(idx))
40
-
41
- x2 = slim.convolution2d(x2, channel*2, [3, 3], activation_fn=None)
42
- x2 = tf.nn.leaky_relu(x2)
43
-
44
- h1, w1 = tf.shape(x2)[1], tf.shape(x2)[2]
45
- x3 = tf.image.resize_bilinear(x2, (h1*2, w1*2))
46
- x3 = slim.convolution2d(x3+x1, channel*2, [3, 3], activation_fn=None)
47
- x3 = tf.nn.leaky_relu(x3)
48
- x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None)
49
- x3 = tf.nn.leaky_relu(x3)
50
-
51
- h2, w2 = tf.shape(x3)[1], tf.shape(x3)[2]
52
- x4 = tf.image.resize_bilinear(x3, (h2*2, w2*2))
53
- x4 = slim.convolution2d(x4+x0, channel, [3, 3], activation_fn=None)
54
- x4 = tf.nn.leaky_relu(x4)
55
- x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None)
56
-
57
- return x4
58
-
59
- if __name__ == '__main__':
60
-
61
-
62
- pass
 
spaces/ASJMO/freegpt/client/js/theme-toggler.js DELETED
@@ -1,22 +0,0 @@
1
- var switch_theme_toggler = document.getElementById("theme-toggler");
2
-
3
- switch_theme_toggler.addEventListener("change", toggleTheme);
4
-
5
- function setTheme(themeName) {
6
- localStorage.setItem("theme", themeName);
7
- document.documentElement.className = themeName;
8
- }
9
-
10
- function toggleTheme() {
11
- var currentTheme = localStorage.getItem("theme");
12
- var newTheme = currentTheme === "theme-dark" ? "theme-light" : "theme-dark";
13
-
14
- setTheme(newTheme);
15
- switch_theme_toggler.checked = newTheme === "theme-dark";
16
- }
17
-
18
- (function () {
19
- var currentTheme = localStorage.getItem("theme") || "theme-dark";
20
- setTheme(currentTheme);
21
- switch_theme_toggler.checked = currentTheme === "theme-dark";
22
- })();
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/_base_/__init__.py DELETED
File without changes
spaces/Abdllh/Arabic_Poems_Generator/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Arabic Poems Generator
3
- emoji: 🏢
4
- colorFrom: red
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: Aalaa/Arabic_Poems_Generator
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AbdoulGafar/woodsound/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Woodsound
3
- emoji: 👁
4
- colorFrom: purple
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.45.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/ambient.d.ts DELETED
@@ -1,318 +0,0 @@
1
-
2
- // this file is generated — do not edit it
3
-
4
-
5
- /// <reference types="@sveltejs/kit" />
6
-
7
- /**
8
- * Environment variables [loaded by Vite](https://vitejs.dev/guide/env-and-mode.html#env-files) from `.env` files and `process.env`. Like [`$env/dynamic/private`](https://kit.svelte.dev/docs/modules#$env-dynamic-private), this module cannot be imported into client-side code. This module only includes variables that _do not_ begin with [`config.kit.env.publicPrefix`](https://kit.svelte.dev/docs/configuration#env) _and do_ start with [`config.kit.env.privatePrefix`](https://kit.svelte.dev/docs/configuration#env) (if configured).
9
- *
10
- * _Unlike_ [`$env/dynamic/private`](https://kit.svelte.dev/docs/modules#$env-dynamic-private), the values exported from this module are statically injected into your bundle at build time, enabling optimisations like dead code elimination.
11
- *
12
- * ```ts
13
- * import { API_KEY } from '$env/static/private';
14
- * ```
15
- *
16
- * Note that all environment variables referenced in your code should be declared (for example in an `.env` file), even if they don't have a value until the app is deployed:
17
- *
18
- * ```
19
- * MY_FEATURE_FLAG=""
20
- * ```
21
- *
22
- * You can override `.env` values from the command line like so:
23
- *
24
- * ```bash
25
- * MY_FEATURE_FLAG="enabled" npm run dev
26
- * ```
27
- */
28
- declare module '$env/static/private' {
29
- export const MONGODB_URL: string;
30
- export const MONGODB_DB_NAME: string;
31
- export const MONGODB_DIRECT_CONNECTION: string;
32
- export const COOKIE_NAME: string;
33
- export const HF_ACCESS_TOKEN: string;
34
- export const HF_API_ROOT: string;
35
- export const SERPER_API_KEY: string;
36
- export const SERPAPI_KEY: string;
37
- export const OPENID_CLIENT_ID: string;
38
- export const OPENID_CLIENT_SECRET: string;
39
- export const OPENID_SCOPES: string;
40
- export const OPENID_PROVIDER_URL: string;
41
- export const USE_CLIENT_CERTIFICATE: string;
42
- export const CERT_PATH: string;
43
- export const KEY_PATH: string;
44
- export const CA_PATH: string;
45
- export const CLIENT_KEY_PASSWORD: string;
46
- export const REJECT_UNAUTHORIZED: string;
47
- export const MODELS: string;
48
- export const OLD_MODELS: string;
49
- export const PARQUET_EXPORT_DATASET: string;
50
- export const PARQUET_EXPORT_HF_TOKEN: string;
51
- export const PARQUET_EXPORT_SECRET: string;
52
- export const RATE_LIMIT: string;
53
- export const MESSAGES_BEFORE_LOGIN: string;
54
- export const ACSetupSvcPort: string;
55
- export const ACSvcPort: string;
56
- export const ALLUSERSPROFILE: string;
57
- export const APPDATA: string;
58
- export const CHROME_CRASHPAD_PIPE_NAME: string;
59
- export const COLOR: string;
60
- export const COLORTERM: string;
61
- export const CommonProgramFiles: string;
62
- export const CommonProgramW6432: string;
63
- export const COMPUTERNAME: string;
64
- export const ComSpec: string;
65
- export const DriverData: string;
66
- export const EDITOR: string;
67
- export const EFC_38340: string;
68
- export const EnableLog: string;
69
- export const GIT_ASKPASS: string;
70
- export const HOME: string;
71
- export const HOMEDRIVE: string;
72
- export const HOMEPATH: string;
73
- export const INIT_CWD: string;
74
- export const LANG: string;
75
- export const LOCALAPPDATA: string;
76
- export const LOGONSERVER: string;
77
- export const NODE: string;
78
- export const NODE_ENV: string;
79
- export const NODE_EXE: string;
80
- export const NPM_CLI_JS: string;
81
- export const npm_command: string;
82
- export const npm_config_cache: string;
83
- export const npm_config_engine_strict: string;
84
- export const npm_config_globalconfig: string;
85
- export const npm_config_global_prefix: string;
86
- export const npm_config_init_module: string;
87
- export const npm_config_local_prefix: string;
88
- export const npm_config_metrics_registry: string;
89
- export const npm_config_node_gyp: string;
90
- export const npm_config_noproxy: string;
91
- export const npm_config_prefix: string;
92
- export const npm_config_userconfig: string;
93
- export const npm_config_user_agent: string;
94
- export const npm_execpath: string;
95
- export const npm_lifecycle_event: string;
96
- export const npm_lifecycle_script: string;
97
- export const npm_node_execpath: string;
98
- export const npm_package_json: string;
99
- export const npm_package_name: string;
100
- export const npm_package_version: string;
101
- export const NPM_PREFIX_NPM_CLI_JS: string;
102
- export const NUMBER_OF_PROCESSORS: string;
103
- export const OculusBase: string;
104
- export const OneDrive: string;
105
- export const OneDriveConsumer: string;
106
- export const ORIGINAL_XDG_CURRENT_DESKTOP: string;
107
- export const OS: string;
108
- export const Path: string;
109
- export const PATHEXT: string;
110
- export const PROCESSOR_ARCHITECTURE: string;
111
- export const PROCESSOR_IDENTIFIER: string;
112
- export const PROCESSOR_LEVEL: string;
113
- export const PROCESSOR_REVISION: string;
114
- export const ProgramData: string;
115
- export const ProgramFiles: string;
116
- export const ProgramW6432: string;
117
- export const PROMPT: string;
118
- export const PSModulePath: string;
119
- export const PUBLIC: string;
120
- export const RlsSvcPort: string;
121
- export const SESSIONNAME: string;
122
- export const SystemDrive: string;
123
- export const SystemRoot: string;
124
- export const TEMP: string;
125
- export const TERM_PROGRAM: string;
126
- export const TERM_PROGRAM_VERSION: string;
127
- export const TMP: string;
128
- export const USERDOMAIN: string;
129
- export const USERDOMAIN_ROAMINGPROFILE: string;
130
- export const USERNAME: string;
131
- export const USERPROFILE: string;
132
- export const VSCODE_GIT_ASKPASS_EXTRA_ARGS: string;
133
- export const VSCODE_GIT_ASKPASS_MAIN: string;
134
- export const VSCODE_GIT_ASKPASS_NODE: string;
135
- export const VSCODE_GIT_IPC_HANDLE: string;
136
- export const VSCODE_INJECTION: string;
137
- export const VSCODE_NONCE: string;
138
- export const windir: string;
139
- }
140
-
141
- /**
142
- * Similar to [`$env/static/private`](https://kit.svelte.dev/docs/modules#$env-static-private), except that it only includes environment variables that begin with [`config.kit.env.publicPrefix`](https://kit.svelte.dev/docs/configuration#env) (which defaults to `PUBLIC_`), and can therefore safely be exposed to client-side code.
143
- *
144
- * Values are replaced statically at build time.
145
- *
146
- * ```ts
147
- * import { PUBLIC_BASE_URL } from '$env/static/public';
148
- * ```
149
- */
150
- declare module '$env/static/public' {
151
- export const PUBLIC_ORIGIN: string;
152
- export const PUBLIC_SHARE_PREFIX: string;
153
- export const PUBLIC_GOOGLE_ANALYTICS_ID: string;
154
- export const PUBLIC_DEPRECATED_GOOGLE_ANALYTICS_ID: string;
155
- export const PUBLIC_ANNOUNCEMENT_BANNERS: string;
156
- export const PUBLIC_APP_NAME: string;
157
- export const PUBLIC_APP_ASSETS: string;
158
- export const PUBLIC_APP_COLOR: string;
159
- export const PUBLIC_APP_DATA_SHARING: string;
160
- export const PUBLIC_APP_DISCLAIMER: string;
161
- export const PUBLIC_VERSION: string;
162
- }
163
-
164
- /**
165
- * This module provides access to runtime environment variables, as defined by the platform you're running on. For example if you're using [`adapter-node`](https://github.com/sveltejs/kit/tree/master/packages/adapter-node) (or running [`vite preview`](https://kit.svelte.dev/docs/cli)), this is equivalent to `process.env`. This module only includes variables that _do not_ begin with [`config.kit.env.publicPrefix`](https://kit.svelte.dev/docs/configuration#env) _and do_ start with [`config.kit.env.privatePrefix`](https://kit.svelte.dev/docs/configuration#env) (if configured).
166
- *
167
- * This module cannot be imported into client-side code.
168
- *
169
- * ```ts
170
- * import { env } from '$env/dynamic/private';
171
- * console.log(env.DEPLOYMENT_SPECIFIC_VARIABLE);
172
- * ```
173
- *
174
- * > In `dev`, `$env/dynamic` always includes environment variables from `.env`. In `prod`, this behavior will depend on your adapter.
175
- */
176
- declare module '$env/dynamic/private' {
177
- export const env: {
178
- MONGODB_URL: string;
179
- MONGODB_DB_NAME: string;
180
- MONGODB_DIRECT_CONNECTION: string;
181
- COOKIE_NAME: string;
182
- HF_ACCESS_TOKEN: string;
183
- HF_API_ROOT: string;
184
- SERPER_API_KEY: string;
185
- SERPAPI_KEY: string;
186
- OPENID_CLIENT_ID: string;
187
- OPENID_CLIENT_SECRET: string;
188
- OPENID_SCOPES: string;
189
- OPENID_PROVIDER_URL: string;
190
- USE_CLIENT_CERTIFICATE: string;
191
- CERT_PATH: string;
192
- KEY_PATH: string;
193
- CA_PATH: string;
194
- CLIENT_KEY_PASSWORD: string;
195
- REJECT_UNAUTHORIZED: string;
196
- MODELS: string;
197
- OLD_MODELS: string;
198
- PARQUET_EXPORT_DATASET: string;
199
- PARQUET_EXPORT_HF_TOKEN: string;
200
- PARQUET_EXPORT_SECRET: string;
201
- RATE_LIMIT: string;
202
- MESSAGES_BEFORE_LOGIN: string;
203
- ACSetupSvcPort: string;
204
- ACSvcPort: string;
205
- ALLUSERSPROFILE: string;
206
- APPDATA: string;
207
- CHROME_CRASHPAD_PIPE_NAME: string;
208
- COLOR: string;
209
- COLORTERM: string;
210
- CommonProgramFiles: string;
211
- CommonProgramW6432: string;
212
- COMPUTERNAME: string;
213
- ComSpec: string;
214
- DriverData: string;
215
- EDITOR: string;
216
- EFC_38340: string;
217
- EnableLog: string;
218
- GIT_ASKPASS: string;
219
- HOME: string;
220
- HOMEDRIVE: string;
221
- HOMEPATH: string;
222
- INIT_CWD: string;
223
- LANG: string;
224
- LOCALAPPDATA: string;
225
- LOGONSERVER: string;
226
- NODE: string;
227
- NODE_ENV: string;
228
- NODE_EXE: string;
229
- NPM_CLI_JS: string;
230
- npm_command: string;
231
- npm_config_cache: string;
232
- npm_config_engine_strict: string;
233
- npm_config_globalconfig: string;
234
- npm_config_global_prefix: string;
235
- npm_config_init_module: string;
236
- npm_config_local_prefix: string;
237
- npm_config_metrics_registry: string;
238
- npm_config_node_gyp: string;
239
- npm_config_noproxy: string;
240
- npm_config_prefix: string;
241
- npm_config_userconfig: string;
242
- npm_config_user_agent: string;
243
- npm_execpath: string;
244
- npm_lifecycle_event: string;
245
- npm_lifecycle_script: string;
246
- npm_node_execpath: string;
247
- npm_package_json: string;
248
- npm_package_name: string;
249
- npm_package_version: string;
250
- NPM_PREFIX_NPM_CLI_JS: string;
251
- NUMBER_OF_PROCESSORS: string;
252
- OculusBase: string;
253
- OneDrive: string;
254
- OneDriveConsumer: string;
255
- ORIGINAL_XDG_CURRENT_DESKTOP: string;
256
- OS: string;
257
- Path: string;
258
- PATHEXT: string;
259
- PROCESSOR_ARCHITECTURE: string;
260
- PROCESSOR_IDENTIFIER: string;
261
- PROCESSOR_LEVEL: string;
262
- PROCESSOR_REVISION: string;
263
- ProgramData: string;
264
- ProgramFiles: string;
265
- ProgramW6432: string;
266
- PROMPT: string;
267
- PSModulePath: string;
268
- PUBLIC: string;
269
- RlsSvcPort: string;
270
- SESSIONNAME: string;
271
- SystemDrive: string;
272
- SystemRoot: string;
273
- TEMP: string;
274
- TERM_PROGRAM: string;
275
- TERM_PROGRAM_VERSION: string;
276
- TMP: string;
277
- USERDOMAIN: string;
278
- USERDOMAIN_ROAMINGPROFILE: string;
279
- USERNAME: string;
280
- USERPROFILE: string;
281
- VSCODE_GIT_ASKPASS_EXTRA_ARGS: string;
282
- VSCODE_GIT_ASKPASS_MAIN: string;
283
- VSCODE_GIT_ASKPASS_NODE: string;
284
- VSCODE_GIT_IPC_HANDLE: string;
285
- VSCODE_INJECTION: string;
286
- VSCODE_NONCE: string;
287
- windir: string;
288
- [key: `PUBLIC_${string}`]: undefined;
289
- [key: `${string}`]: string | undefined;
290
- }
291
- }
292
-
293
- /**
294
- * Similar to [`$env/dynamic/private`](https://kit.svelte.dev/docs/modules#$env-dynamic-private), but only includes variables that begin with [`config.kit.env.publicPrefix`](https://kit.svelte.dev/docs/configuration#env) (which defaults to `PUBLIC_`), and can therefore safely be exposed to client-side code.
295
- *
296
- * Note that public dynamic environment variables must all be sent from the server to the client, causing larger network requests — when possible, use `$env/static/public` instead.
297
- *
298
- * ```ts
299
- * import { env } from '$env/dynamic/public';
300
- * console.log(env.PUBLIC_DEPLOYMENT_SPECIFIC_VARIABLE);
301
- * ```
302
- */
303
- declare module '$env/dynamic/public' {
304
- export const env: {
305
- PUBLIC_ORIGIN: string;
306
- PUBLIC_SHARE_PREFIX: string;
307
- PUBLIC_GOOGLE_ANALYTICS_ID: string;
308
- PUBLIC_DEPRECATED_GOOGLE_ANALYTICS_ID: string;
309
- PUBLIC_ANNOUNCEMENT_BANNERS: string;
310
- PUBLIC_APP_NAME: string;
311
- PUBLIC_APP_ASSETS: string;
312
- PUBLIC_APP_COLOR: string;
313
- PUBLIC_APP_DATA_SHARING: string;
314
- PUBLIC_APP_DISCLAIMER: string;
315
- PUBLIC_VERSION: string;
316
- [key: `PUBLIC_${string}`]: string | undefined;
317
- }
318
- }
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/V50.py DELETED
@@ -1,67 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import uuid
4
-
5
- import requests
6
-
7
- from ..typing import Any, CreateResult
8
- from .base_provider import BaseProvider
9
-
10
-
11
- class V50(BaseProvider):
12
- url = 'https://p5.v50.ltd'
13
- supports_gpt_35_turbo = True
14
- supports_stream = False
15
- needs_auth = False
16
- working = False
17
-
18
- @staticmethod
19
- def create_completion(
20
- model: str,
21
- messages: list[dict[str, str]],
22
- stream: bool, **kwargs: Any) -> CreateResult:
23
-
24
- conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
25
- conversation += "\nassistant: "
26
-
27
- payload = {
28
- "prompt" : conversation,
29
- "options" : {},
30
- "systemMessage" : ".",
31
- "temperature" : kwargs.get("temperature", 0.4),
32
- "top_p" : kwargs.get("top_p", 0.4),
33
- "model" : model,
34
- "user" : str(uuid.uuid4())
35
- }
36
-
37
- headers = {
38
- 'authority' : 'p5.v50.ltd',
39
- 'accept' : 'application/json, text/plain, */*',
40
- 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
41
- 'content-type' : 'application/json',
42
- 'origin' : 'https://p5.v50.ltd',
43
- 'referer' : 'https://p5.v50.ltd/',
44
- 'sec-ch-ua-platform': '"Windows"',
45
- 'sec-fetch-dest' : 'empty',
46
- 'sec-fetch-mode' : 'cors',
47
- 'sec-fetch-site' : 'same-origin',
48
- 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
49
- }
50
- response = requests.post("https://p5.v50.ltd/api/chat-process",
51
- json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
52
-
53
- if "https://fk1.v50.ltd" not in response.text:
54
- yield response.text
55
-
56
- @classmethod
57
- @property
58
- def params(cls):
59
- params = [
60
- ("model", "str"),
61
- ("messages", "list[dict[str, str]]"),
62
- ("stream", "bool"),
63
- ("temperature", "float"),
64
- ("top_p", "int"),
65
- ]
66
- param = ", ".join([": ".join(p) for p in params])
67
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
spaces/Adapter/CoAdapter/ldm/modules/distributions/distributions.py DELETED
@@ -1,92 +0,0 @@
1
- import torch
2
- import numpy as np
3
-
4
-
5
- class AbstractDistribution:
6
- def sample(self):
7
- raise NotImplementedError()
8
-
9
- def mode(self):
10
- raise NotImplementedError()
11
-
12
-
13
- class DiracDistribution(AbstractDistribution):
14
- def __init__(self, value):
15
- self.value = value
16
-
17
- def sample(self):
18
- return self.value
19
-
20
- def mode(self):
21
- return self.value
22
-
23
-
24
- class DiagonalGaussianDistribution(object):
25
- def __init__(self, parameters, deterministic=False):
26
- self.parameters = parameters
27
- self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
28
- self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
29
- self.deterministic = deterministic
30
- self.std = torch.exp(0.5 * self.logvar)
31
- self.var = torch.exp(self.logvar)
32
- if self.deterministic:
33
- self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
34
-
35
- def sample(self):
36
- x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
37
- return x
38
-
39
- def kl(self, other=None):
40
- if self.deterministic:
41
- return torch.Tensor([0.])
42
- else:
43
- if other is None:
44
- return 0.5 * torch.sum(torch.pow(self.mean, 2)
45
- + self.var - 1.0 - self.logvar,
46
- dim=[1, 2, 3])
47
- else:
48
- return 0.5 * torch.sum(
49
- torch.pow(self.mean - other.mean, 2) / other.var
50
- + self.var / other.var - 1.0 - self.logvar + other.logvar,
51
- dim=[1, 2, 3])
52
-
53
- def nll(self, sample, dims=[1,2,3]):
54
- if self.deterministic:
55
- return torch.Tensor([0.])
56
- logtwopi = np.log(2.0 * np.pi)
57
- return 0.5 * torch.sum(
58
- logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
59
- dim=dims)
60
-
61
- def mode(self):
62
- return self.mean
63
-
64
-
65
- def normal_kl(mean1, logvar1, mean2, logvar2):
66
- """
67
- source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
68
- Compute the KL divergence between two gaussians.
69
- Shapes are automatically broadcasted, so batches can be compared to
70
- scalars, among other use cases.
71
- """
72
- tensor = None
73
- for obj in (mean1, logvar1, mean2, logvar2):
74
- if isinstance(obj, torch.Tensor):
75
- tensor = obj
76
- break
77
- assert tensor is not None, "at least one argument must be a Tensor"
78
-
79
- # Force variances to be Tensors. Broadcasting helps convert scalars to
80
- # Tensors, but it does not work for torch.exp().
81
- logvar1, logvar2 = [
82
- x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
83
- for x in (logvar1, logvar2)
84
- ]
85
-
86
- return 0.5 * (
87
- -1.0
88
- + logvar2
89
- - logvar1
90
- + torch.exp(logvar1 - logvar2)
91
- + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
92
- )
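Note: a short, hypothetical sketch of the DiagonalGaussianDistribution above (not part of the deleted file; the import path assumes the module stays at ldm/modules/distributions/distributions.py, and the channel dimension is split evenly into mean and log-variance halves):

import torch
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

params = torch.randn(2, 8, 16, 16)     # 8 channels -> 4 mean + 4 logvar
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample()                 # [2, 4, 16, 16]
kl = posterior.kl()                    # KL vs. standard normal, one value per sample
print(z.shape, kl.shape)               # torch.Size([2, 4, 16, 16]) torch.Size([2])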
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/BadgeLabel.js DELETED
@@ -1,49 +0,0 @@
1
- import OverlapSizer from '../overlapsizer/OverlapSizer.js';
2
-
3
- const GetValue = Phaser.Utils.Objects.GetValue;
4
- const BadgeKeys = {
5
- leftTop: 'left-top', centerTop: 'center-top', rightTop: 'right-top',
6
- leftCenter: 'left-center', center: 'center', rightCenter: 'right-center',
7
- leftBottom: 'left-bottom', centerBottom: 'center-bottom', rightBottom: 'right-bottom'
8
- }
9
-
10
- class Badge extends OverlapSizer {
11
- constructor(scene, config) {
12
- // Create sizer
13
- super(scene, config);
14
- this.type = 'rexBadge';
15
-
16
- // Add elements
17
- var background = GetValue(config, 'background', undefined);
18
- if (background) {
19
- this.addBackground(background);
20
- }
21
- this.addChildrenMap('background', background);
22
-
23
- // Base item
24
- var main = GetValue(config, 'main', undefined);
25
- if (main) {
26
- this.add(main, {
27
- key: 'main',
28
- align: 'center',
29
- expand: false,
30
- })
31
- }
32
- this.addChildrenMap('main', main);
33
-
34
- // Badges
35
- for (var key in BadgeKeys) {
36
- var badge = GetValue(config, key, undefined);
37
- if (badge) {
38
- this.add(badge, {
39
- key: key,
40
- align: BadgeKeys[key],
41
- expand: false,
42
- })
43
- this.addChildrenMap(key, badge);
44
- }
45
- }
46
- }
47
- }
48
-
49
- export default Badge;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostResolveSize.js DELETED
@@ -1,4 +0,0 @@
1
- var PostResolveSize = function (width, height) {
2
- }
3
-
4
- export default PostResolveSize;
 
spaces/Akmyradov/dost.ai/app.py DELETED
@@ -1,83 +0,0 @@
1
- import os
2
- import gradio as gr
3
- import whisper
4
- import openai
5
- import tempfile
6
- from neon_tts_plugin_coqui import CoquiTTS
7
-
8
- model = whisper.load_model("small")
9
-
10
- class Dost:
11
- LANGUAGES = list(CoquiTTS.langs.keys())
12
- coquiTTS = CoquiTTS()
13
- OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
14
- def __init__(self):
15
- self.convHistory = []
16
- self.voice = None
17
- self.result = []
18
-
19
- def recognize(self, audio):
20
- audio = whisper.load_audio(audio)
21
- audio = whisper.pad_or_trim(audio)
22
-
23
- mel = whisper.log_mel_spectrogram(audio).to(model.device)
24
-
25
- _, probs = model.detect_language(mel)
26
- lang = max(probs, key=probs.get)
27
-
28
- options = whisper.DecodingOptions(fp16 = False)
29
- result = whisper.decode(model, mel, options)
30
-
31
- print("-------------------RECOGNIZE---------------------")
32
- print(result)
33
- self.response(result.text, lang)
34
-
35
- def response(self, prompt, lang):
36
- response = openai.Completion.create(
37
- model="text-davinci-002",
38
- prompt=f"You: {prompt}Friend: ",
39
- temperature=0.5,
40
- max_tokens=60,
41
- top_p=1.0,
42
- frequency_penalty=0.5,
43
- presence_penalty=0.0,
44
- stop=["You:"]
45
- )
46
- choice = response['choices'][0]['text']
47
- print("-------------------RESPONSE---------------------")
48
- print(choice)
49
- self.convHistory.append((prompt, choice))
50
- self.result.append(self.convHistory)
51
- print(self.convHistory[0])
52
- print(type(self.convHistory[0]))
53
- self.say(choice, lang)
54
-
55
- def say(self, text, language):
56
- coqui_langs = ['en' ,'es' ,'fr' ,'de' ,'pl' ,'uk' ,'ro' ,'hu' ,'bg' ,'nl' ,'fi' ,'sl' ,'lv' ,'ga']
57
- if language not in coqui_langs:
58
- language = 'en'
59
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
60
- self.coquiTTS.get_tts(text, fp, speaker = {"language" : language})
61
- print("-------------------AUDIOOUTPUT---------------------")
62
- print("DONE", fp.name)
63
- self.result.append(fp.name)
64
-
65
- def start(self, audio, state):
66
- self.convHistory = state
67
- self.result = []
68
- self.recognize(audio)
69
- print(self.result)
70
- return tuple(self.result)
71
-
72
- dost = Dost()
73
- with gr.Blocks() as demo:
74
- state = gr.State([])
75
- with gr.Row():
76
- with gr.Column():
77
- input_audio = gr.Audio(source="microphone", type="filepath")
78
- btn = gr.Button("Submit")
79
- conversation = gr.Chatbot(value=dost.convHistory)
80
- output_audio = gr.Audio(label="AI voice response")
81
- btn.click(dost.start, inputs=[input_audio, state], outputs=[conversation, output_audio])
82
-
83
- demo.launch(debug=True)
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py DELETED
@@ -1,16 +0,0 @@
1
- import importlib
2
- import os.path as osp
3
-
4
-
5
- def get_config(config_file):
6
- assert config_file.startswith('configs/'), 'config file setting must start with configs/'
7
- temp_config_name = osp.basename(config_file)
8
- temp_module_name = osp.splitext(temp_config_name)[0]
9
- config = importlib.import_module("configs.base")
10
- cfg = config.config
11
- config = importlib.import_module("configs.%s" % temp_module_name)
12
- job_cfg = config.config
13
- cfg.update(job_cfg)
14
- if cfg.output is None:
15
- cfg.output = osp.join('work_dirs', temp_module_name)
16
- return cfg
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpgd.h DELETED
@@ -1,316 +0,0 @@
1
- // jpgd.h - C++ class for JPEG decompression.
2
- // Public domain, Rich Geldreich <[email protected]>
3
- #ifndef JPEG_DECODER_H
4
- #define JPEG_DECODER_H
5
-
6
- #include <stdlib.h>
7
- #include <stdio.h>
8
- #include <setjmp.h>
9
-
10
- namespace jpgd
11
- {
12
- typedef unsigned char uint8;
13
- typedef signed short int16;
14
- typedef unsigned short uint16;
15
- typedef unsigned int uint;
16
- typedef signed int int32;
17
-
18
- // Loads a JPEG image from a memory buffer or a file.
19
- // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
20
- // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB).
21
- // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
22
- // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
23
- // BEGIN EPIC MOD
24
- //unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
25
- unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
26
- // END EPIC MOD
27
- unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
28
-
29
- // Success/failure error codes.
30
- enum jpgd_status
31
- {
32
- JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
33
- JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
34
- JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
35
- JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
36
- JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
37
- JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
38
- JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
39
- JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
40
- JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
41
- };
42
-
43
- // Input stream interface.
44
- // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
45
- // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
46
- // It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer.
47
- // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
48
- class jpeg_decoder_stream
49
- {
50
- public:
51
- jpeg_decoder_stream() { }
52
- virtual ~jpeg_decoder_stream() { }
53
-
54
- // The read() method is called when the internal input buffer is empty.
55
- // Parameters:
56
- // pBuf - input buffer
57
- // max_bytes_to_read - maximum bytes that can be written to pBuf
58
- // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
59
- // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
60
- // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
61
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
62
- };
63
-
64
- // stdio FILE stream class.
65
- class jpeg_decoder_file_stream : public jpeg_decoder_stream
66
- {
67
- jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
68
- jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
69
-
70
- FILE *m_pFile;
71
- bool m_eof_flag, m_error_flag;
72
-
73
- public:
74
- jpeg_decoder_file_stream();
75
- virtual ~jpeg_decoder_file_stream();
76
-
77
- bool open(const char *Pfilename);
78
- void close();
79
-
80
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
81
- };
82
-
83
- // Memory stream class.
84
- class jpeg_decoder_mem_stream : public jpeg_decoder_stream
85
- {
86
- const uint8 *m_pSrc_data;
87
- uint m_ofs, m_size;
88
-
89
- public:
90
- jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
91
- jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
92
-
93
- virtual ~jpeg_decoder_mem_stream() { }
94
-
95
- bool open(const uint8 *pSrc_data, uint size);
96
- void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
97
-
98
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
99
- };
100
-
101
- // Loads JPEG file from a jpeg_decoder_stream.
102
- unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);
103
-
104
- enum
105
- {
106
- JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
107
- JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
108
- };
109
-
110
- typedef int16 jpgd_quant_t;
111
- typedef int16 jpgd_block_t;
112
-
113
- class jpeg_decoder
114
- {
115
- public:
116
- // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
117
- // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
118
- jpeg_decoder(jpeg_decoder_stream *pStream);
119
-
120
- ~jpeg_decoder();
121
-
122
- // Call this method after constructing the object to begin decompression.
123
- // If JPGD_SUCCESS is returned you may then call decode() on each scanline.
124
- int begin_decoding();
125
-
126
- // Returns the next scan line.
127
- // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
128
- // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
129
- // Returns JPGD_SUCCESS if a scan line has been returned.
130
- // Returns JPGD_DONE if all scan lines have been returned.
131
- // Returns JPGD_FAILED if an error occurred. Call get_error_code() for more info.
132
- int decode(const void** pScan_line, uint* pScan_line_len);
133
-
134
- inline jpgd_status get_error_code() const { return m_error_code; }
135
-
136
- inline int get_width() const { return m_image_x_size; }
137
- inline int get_height() const { return m_image_y_size; }
138
-
139
- inline int get_num_components() const { return m_comps_in_frame; }
140
-
141
- inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
142
- inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }
143
-
144
- // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
145
- inline int get_total_bytes_read() const { return m_total_bytes_read; }
146
-
147
- private:
148
- jpeg_decoder(const jpeg_decoder &);
149
- jpeg_decoder &operator =(const jpeg_decoder &);
150
-
151
- typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);
152
-
153
- struct huff_tables
154
- {
155
- bool ac_table;
156
- uint look_up[256];
157
- uint look_up2[256];
158
- uint8 code_size[256];
159
- uint tree[512];
160
- };
161
-
162
- struct coeff_buf
163
- {
164
- uint8 *pData;
165
- int block_num_x, block_num_y;
166
- int block_len_x, block_len_y;
167
- int block_size;
168
- };
169
-
170
- struct mem_block
171
- {
172
- mem_block *m_pNext;
173
- size_t m_used_count;
174
- size_t m_size;
175
- char m_data[1];
176
- };
177
-
178
- jmp_buf m_jmp_state;
179
- mem_block *m_pMem_blocks;
180
- int m_image_x_size;
181
- int m_image_y_size;
182
- jpeg_decoder_stream *m_pStream;
183
- int m_progressive_flag;
184
- uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
185
- uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size
186
- uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size
187
- jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
188
- int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
189
- int m_comps_in_frame; // # of components in frame
190
- int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor
191
- int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor
192
- int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector
193
- int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID
194
- int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
195
- int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
196
- int m_comps_in_scan; // # of components in scan
197
- int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan
198
- int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector
199
- int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector
200
- int m_spectral_start; // spectral selection start
201
- int m_spectral_end; // spectral selection end
202
- int m_successive_low; // successive approximation low
203
- int m_successive_high; // successive approximation high
204
- int m_max_mcu_x_size; // MCU's max. X size in pixels
205
- int m_max_mcu_y_size; // MCU's max. Y size in pixels
206
- int m_blocks_per_mcu;
207
- int m_max_blocks_per_row;
208
- int m_mcus_per_row, m_mcus_per_col;
209
- int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
210
- int m_total_lines_left; // total # lines left in image
211
- int m_mcu_lines_left; // total # lines left in this MCU
212
- int m_real_dest_bytes_per_scan_line;
213
- int m_dest_bytes_per_scan_line; // rounded up
214
- int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y)
215
- huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
216
- coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
217
- coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
218
- int m_eob_run;
219
- int m_block_y_mcu[JPGD_MAX_COMPONENTS];
220
- uint8* m_pIn_buf_ofs;
221
- int m_in_buf_left;
222
- int m_tem_flag;
223
- bool m_eof_flag;
224
- uint8 m_in_buf_pad_start[128];
225
- uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
226
- uint8 m_in_buf_pad_end[128];
227
- int m_bits_left;
228
- uint m_bit_buf;
229
- int m_restart_interval;
230
- int m_restarts_left;
231
- int m_next_restart_num;
232
- int m_max_mcus_per_row;
233
- int m_max_blocks_per_mcu;
234
- int m_expanded_blocks_per_mcu;
235
- int m_expanded_blocks_per_row;
236
- int m_expanded_blocks_per_component;
237
- bool m_freq_domain_chroma_upsample;
238
- int m_max_mcus_per_col;
239
- uint m_last_dc_val[JPGD_MAX_COMPONENTS];
240
- jpgd_block_t* m_pMCU_coefficients;
241
- int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
242
- uint8* m_pSample_buf;
243
- int m_crr[256];
244
- int m_cbb[256];
245
- int m_crg[256];
246
- int m_cbg[256];
247
- uint8* m_pScan_line_0;
248
- uint8* m_pScan_line_1;
249
- jpgd_status m_error_code;
250
- bool m_ready_flag;
251
- int m_total_bytes_read;
252
-
253
- void free_all_blocks();
254
- // BEGIN EPIC MOD
255
- UE_NORETURN void stop_decoding(jpgd_status status);
256
- // END EPIC MOD
257
- void *alloc(size_t n, bool zero = false);
258
- void word_clear(void *p, uint16 c, uint n);
259
- void prep_in_buffer();
260
- void read_dht_marker();
261
- void read_dqt_marker();
262
- void read_sof_marker();
263
- void skip_variable_marker();
264
- void read_dri_marker();
265
- void read_sos_marker();
266
- int next_marker();
267
- int process_markers();
268
- void locate_soi_marker();
269
- void locate_sof_marker();
270
- int locate_sos_marker();
271
- void init(jpeg_decoder_stream * pStream);
272
- void create_look_ups();
273
- void fix_in_buffer();
274
- void transform_mcu(int mcu_row);
275
- void transform_mcu_expand(int mcu_row);
276
- coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
277
- inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
278
- void load_next_row();
279
- void decode_next_row();
280
- void make_huff_table(int index, huff_tables *pH);
281
- void check_quant_tables();
282
- void check_huff_tables();
283
- void calc_mcu_block_order();
284
- int init_scan();
285
- void init_frame();
286
- void process_restart();
287
- void decode_scan(pDecode_block_func decode_block_func);
288
- void init_progressive();
289
- void init_sequential();
290
- void decode_start();
291
- void decode_init(jpeg_decoder_stream * pStream);
292
- void H2V2Convert();
293
- void H2V1Convert();
294
- void H1V2Convert();
295
- void H1V1Convert();
296
- void gray_convert();
297
- void expanded_convert();
298
- void find_eoi();
299
- inline uint get_char();
300
- inline uint get_char(bool *pPadding_flag);
301
- inline void stuff_char(uint8 q);
302
- inline uint8 get_octet();
303
- inline uint get_bits(int num_bits);
304
- inline uint get_bits_no_markers(int numbits);
305
- inline int huff_decode(huff_tables *pH);
306
- inline int huff_decode(huff_tables *pH, int& extrabits);
307
- static inline uint8 clamp(int i);
308
- static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
309
- static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
310
- static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
311
- static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
312
- };
313
-
314
- } // namespace jpgd
315
-
316
- #endif // JPEG_DECODER_H
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/composable_stable_diffusion.py DELETED
@@ -1,580 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Callable, List, Optional, Union
17
-
18
- import torch
19
- from packaging import version
20
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
-
22
- from diffusers import DiffusionPipeline
23
- from diffusers.configuration_utils import FrozenDict
24
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
25
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
26
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
27
- from diffusers.schedulers import (
28
- DDIMScheduler,
29
- DPMSolverMultistepScheduler,
30
- EulerAncestralDiscreteScheduler,
31
- EulerDiscreteScheduler,
32
- LMSDiscreteScheduler,
33
- PNDMScheduler,
34
- )
35
- from diffusers.utils import deprecate, is_accelerate_available, logging
36
-
37
-
38
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
-
40
-
41
- class ComposableStableDiffusionPipeline(DiffusionPipeline):
42
- r"""
43
- Pipeline for text-to-image generation using Stable Diffusion.
44
-
45
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
46
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
47
-
48
- Args:
49
- vae ([`AutoencoderKL`]):
50
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
51
- text_encoder ([`CLIPTextModel`]):
52
- Frozen text-encoder. Stable Diffusion uses the text portion of
53
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
54
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
55
- tokenizer (`CLIPTokenizer`):
56
- Tokenizer of class
57
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
58
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
59
- scheduler ([`SchedulerMixin`]):
60
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
61
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
62
- safety_checker ([`StableDiffusionSafetyChecker`]):
63
- Classification module that estimates whether generated images could be considered offensive or harmful.
64
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
65
- feature_extractor ([`CLIPImageProcessor`]):
66
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
67
- """
68
- _optional_components = ["safety_checker", "feature_extractor"]
69
-
70
- def __init__(
71
- self,
72
- vae: AutoencoderKL,
73
- text_encoder: CLIPTextModel,
74
- tokenizer: CLIPTokenizer,
75
- unet: UNet2DConditionModel,
76
- scheduler: Union[
77
- DDIMScheduler,
78
- PNDMScheduler,
79
- LMSDiscreteScheduler,
80
- EulerDiscreteScheduler,
81
- EulerAncestralDiscreteScheduler,
82
- DPMSolverMultistepScheduler,
83
- ],
84
- safety_checker: StableDiffusionSafetyChecker,
85
- feature_extractor: CLIPImageProcessor,
86
- requires_safety_checker: bool = True,
87
- ):
88
- super().__init__()
89
-
90
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
91
- deprecation_message = (
92
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
93
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
94
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
95
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
96
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
97
- " file"
98
- )
99
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
100
- new_config = dict(scheduler.config)
101
- new_config["steps_offset"] = 1
102
- scheduler._internal_dict = FrozenDict(new_config)
103
-
104
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
105
- deprecation_message = (
106
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
107
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
108
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
109
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
110
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
111
- )
112
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
113
- new_config = dict(scheduler.config)
114
- new_config["clip_sample"] = False
115
- scheduler._internal_dict = FrozenDict(new_config)
116
-
117
- if safety_checker is None and requires_safety_checker:
118
- logger.warning(
119
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
120
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
121
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
122
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
123
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
124
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
125
- )
126
-
127
- if safety_checker is not None and feature_extractor is None:
128
- raise ValueError(
129
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
130
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
131
- )
132
-
133
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
134
- version.parse(unet.config._diffusers_version).base_version
135
- ) < version.parse("0.9.0.dev0")
136
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
137
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
138
- deprecation_message = (
139
- "The configuration file of the unet has set the default `sample_size` to smaller than"
140
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
141
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
142
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
143
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
144
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
145
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
146
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
147
- " the `unet/config.json` file"
148
- )
149
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
150
- new_config = dict(unet.config)
151
- new_config["sample_size"] = 64
152
- unet._internal_dict = FrozenDict(new_config)
153
-
154
- self.register_modules(
155
- vae=vae,
156
- text_encoder=text_encoder,
157
- tokenizer=tokenizer,
158
- unet=unet,
159
- scheduler=scheduler,
160
- safety_checker=safety_checker,
161
- feature_extractor=feature_extractor,
162
- )
163
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
164
- self.register_to_config(requires_safety_checker=requires_safety_checker)
165
-
166
- def enable_vae_slicing(self):
167
- r"""
168
- Enable sliced VAE decoding.
169
-
170
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
171
- steps. This is useful to save some memory and allow larger batch sizes.
172
- """
173
- self.vae.enable_slicing()
174
-
175
- def disable_vae_slicing(self):
176
- r"""
177
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
178
- computing decoding in one step.
179
- """
180
- self.vae.disable_slicing()
181
-
182
- def enable_sequential_cpu_offload(self, gpu_id=0):
183
- r"""
184
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
185
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
186
- `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
187
- """
188
- if is_accelerate_available():
189
- from accelerate import cpu_offload
190
- else:
191
- raise ImportError("Please install accelerate via `pip install accelerate`")
192
-
193
- device = torch.device(f"cuda:{gpu_id}")
194
-
195
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
196
- if cpu_offloaded_model is not None:
197
- cpu_offload(cpu_offloaded_model, device)
198
-
199
- if self.safety_checker is not None:
200
- # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
201
- # fix by only offloading self.safety_checker for now
202
- cpu_offload(self.safety_checker.vision_model, device)
203
-
204
- @property
205
- def _execution_device(self):
206
- r"""
207
- Returns the device on which the pipeline's models will be executed. After calling
208
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
209
- hooks.
210
- """
211
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
212
- return self.device
213
- for module in self.unet.modules():
214
- if (
215
- hasattr(module, "_hf_hook")
216
- and hasattr(module._hf_hook, "execution_device")
217
- and module._hf_hook.execution_device is not None
218
- ):
219
- return torch.device(module._hf_hook.execution_device)
220
- return self.device
221
-
222
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
223
- r"""
224
- Encodes the prompt into text encoder hidden states.
225
-
226
- Args:
227
- prompt (`str` or `list(int)`):
228
- prompt to be encoded
229
- device: (`torch.device`):
230
- torch device
231
- num_images_per_prompt (`int`):
232
- number of images that should be generated per prompt
233
- do_classifier_free_guidance (`bool`):
234
- whether to use classifier free guidance or not
235
- negative_prompt (`str` or `List[str]`):
236
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
237
- if `guidance_scale` is less than `1`).
238
- """
239
- batch_size = len(prompt) if isinstance(prompt, list) else 1
240
-
241
- text_inputs = self.tokenizer(
242
- prompt,
243
- padding="max_length",
244
- max_length=self.tokenizer.model_max_length,
245
- truncation=True,
246
- return_tensors="pt",
247
- )
248
- text_input_ids = text_inputs.input_ids
249
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
250
-
251
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
252
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
253
- logger.warning(
254
- "The following part of your input was truncated because CLIP can only handle sequences up to"
255
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
256
- )
257
-
258
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
259
- attention_mask = text_inputs.attention_mask.to(device)
260
- else:
261
- attention_mask = None
262
-
263
- text_embeddings = self.text_encoder(
264
- text_input_ids.to(device),
265
- attention_mask=attention_mask,
266
- )
267
- text_embeddings = text_embeddings[0]
268
-
269
- # duplicate text embeddings for each generation per prompt, using mps friendly method
270
- bs_embed, seq_len, _ = text_embeddings.shape
271
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
272
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
273
-
274
- # get unconditional embeddings for classifier free guidance
275
- if do_classifier_free_guidance:
276
- uncond_tokens: List[str]
277
- if negative_prompt is None:
278
- uncond_tokens = [""] * batch_size
279
- elif type(prompt) is not type(negative_prompt):
280
- raise TypeError(
281
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
282
- f" {type(prompt)}."
283
- )
284
- elif isinstance(negative_prompt, str):
285
- uncond_tokens = [negative_prompt]
286
- elif batch_size != len(negative_prompt):
287
- raise ValueError(
288
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
289
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
290
- " the batch size of `prompt`."
291
- )
292
- else:
293
- uncond_tokens = negative_prompt
294
-
295
- max_length = text_input_ids.shape[-1]
296
- uncond_input = self.tokenizer(
297
- uncond_tokens,
298
- padding="max_length",
299
- max_length=max_length,
300
- truncation=True,
301
- return_tensors="pt",
302
- )
303
-
304
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
305
- attention_mask = uncond_input.attention_mask.to(device)
306
- else:
307
- attention_mask = None
308
-
309
- uncond_embeddings = self.text_encoder(
310
- uncond_input.input_ids.to(device),
311
- attention_mask=attention_mask,
312
- )
313
- uncond_embeddings = uncond_embeddings[0]
314
-
315
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
316
- seq_len = uncond_embeddings.shape[1]
317
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
318
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
319
-
320
- # For classifier free guidance, we need to do two forward passes.
321
- # Here we concatenate the unconditional and text embeddings into a single batch
322
- # to avoid doing two forward passes
323
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
324
-
325
- return text_embeddings
326
-
327
- def run_safety_checker(self, image, device, dtype):
328
- if self.safety_checker is not None:
329
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
330
- image, has_nsfw_concept = self.safety_checker(
331
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
332
- )
333
- else:
334
- has_nsfw_concept = None
335
- return image, has_nsfw_concept
336
-
337
- def decode_latents(self, latents):
338
- latents = 1 / 0.18215 * latents
339
- image = self.vae.decode(latents).sample
340
- image = (image / 2 + 0.5).clamp(0, 1)
341
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
342
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
343
- return image
344
-
345
- def prepare_extra_step_kwargs(self, generator, eta):
346
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
347
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
348
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
349
- # and should be between [0, 1]
350
-
351
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
352
- extra_step_kwargs = {}
353
- if accepts_eta:
354
- extra_step_kwargs["eta"] = eta
355
-
356
- # check if the scheduler accepts generator
357
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
358
- if accepts_generator:
359
- extra_step_kwargs["generator"] = generator
360
- return extra_step_kwargs
361
-
362
- def check_inputs(self, prompt, height, width, callback_steps):
363
- if not isinstance(prompt, str) and not isinstance(prompt, list):
364
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
365
-
366
- if height % 8 != 0 or width % 8 != 0:
367
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
368
-
369
- if (callback_steps is None) or (
370
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
371
- ):
372
- raise ValueError(
373
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
374
- f" {type(callback_steps)}."
375
- )
376
-
377
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
378
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
379
- if latents is None:
380
- if device.type == "mps":
381
- # randn does not work reproducibly on mps
382
- latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
383
- else:
384
- latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
385
- else:
386
- if latents.shape != shape:
387
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
388
- latents = latents.to(device)
389
-
390
- # scale the initial noise by the standard deviation required by the scheduler
391
- latents = latents * self.scheduler.init_noise_sigma
392
- return latents
393
-
394
- @torch.no_grad()
395
- def __call__(
396
- self,
397
- prompt: Union[str, List[str]],
398
- height: Optional[int] = None,
399
- width: Optional[int] = None,
400
- num_inference_steps: int = 50,
401
- guidance_scale: float = 7.5,
402
- negative_prompt: Optional[Union[str, List[str]]] = None,
403
- num_images_per_prompt: Optional[int] = 1,
404
- eta: float = 0.0,
405
- generator: Optional[torch.Generator] = None,
406
- latents: Optional[torch.FloatTensor] = None,
407
- output_type: Optional[str] = "pil",
408
- return_dict: bool = True,
409
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
410
- callback_steps: int = 1,
411
- weights: Optional[str] = "",
412
- ):
413
- r"""
414
- Function invoked when calling the pipeline for generation.
415
-
416
- Args:
417
- prompt (`str` or `List[str]`):
418
- The prompt or prompts to guide the image generation.
419
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
420
- The height in pixels of the generated image.
421
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
422
- The width in pixels of the generated image.
423
- num_inference_steps (`int`, *optional*, defaults to 50):
424
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
425
- expense of slower inference.
426
- guidance_scale (`float`, *optional*, defaults to 7.5):
427
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
428
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
429
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
430
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
431
- usually at the expense of lower image quality.
432
- negative_prompt (`str` or `List[str]`, *optional*):
433
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
434
- if `guidance_scale` is less than `1`).
435
- num_images_per_prompt (`int`, *optional*, defaults to 1):
436
- The number of images to generate per prompt.
437
- eta (`float`, *optional*, defaults to 0.0):
438
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
439
- [`schedulers.DDIMScheduler`], will be ignored for others.
440
- generator (`torch.Generator`, *optional*):
441
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
442
- deterministic.
443
- latents (`torch.FloatTensor`, *optional*):
444
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
445
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
446
- tensor will ge generated by sampling using the supplied random `generator`.
447
- output_type (`str`, *optional*, defaults to `"pil"`):
448
- The output format of the generate image. Choose between
449
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
450
- return_dict (`bool`, *optional*, defaults to `True`):
451
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
452
- plain tuple.
453
- callback (`Callable`, *optional*):
454
- A function that will be called every `callback_steps` steps during inference. The function will be
455
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
456
- callback_steps (`int`, *optional*, defaults to 1):
457
- The frequency at which the `callback` function will be called. If not specified, the callback will be
458
- called at every step.
459
-
460
- Returns:
461
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
462
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
463
- When returning a tuple, the first element is a list with the generated images, and the second element is a
464
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
465
- (nsfw) content, according to the `safety_checker`.
466
- """
467
- # 0. Default height and width to unet
468
- height = height or self.unet.config.sample_size * self.vae_scale_factor
469
- width = width or self.unet.config.sample_size * self.vae_scale_factor
470
-
471
- # 1. Check inputs. Raise error if not correct
472
- self.check_inputs(prompt, height, width, callback_steps)
473
-
474
- # 2. Define call parameters
475
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
476
- device = self._execution_device
477
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
478
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
479
- # corresponds to doing no classifier free guidance.
480
- do_classifier_free_guidance = guidance_scale > 1.0
481
-
482
- if "|" in prompt:
483
- prompt = [x.strip() for x in prompt.split("|")]
484
- print(f"composing {prompt}...")
485
-
486
- if not weights:
487
- # specify weights for prompts (excluding the unconditional score)
488
- print("using equal positive weights (conjunction) for all prompts...")
489
- weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
490
- else:
491
- # set prompt weight for each
492
- num_prompts = len(prompt) if isinstance(prompt, list) else 1
493
- weights = [float(w.strip()) for w in weights.split("|")]
494
- # guidance scale as the default
495
- if len(weights) < num_prompts:
496
- weights.append(guidance_scale)
497
- else:
498
- weights = weights[:num_prompts]
499
- assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
500
- weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
501
- else:
502
- weights = guidance_scale
503
-
504
- # 3. Encode input prompt
505
- text_embeddings = self._encode_prompt(
506
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
507
- )
508
-
509
- # 4. Prepare timesteps
510
- self.scheduler.set_timesteps(num_inference_steps, device=device)
511
- timesteps = self.scheduler.timesteps
512
-
513
- # 5. Prepare latent variables
514
- num_channels_latents = self.unet.config.in_channels
515
- latents = self.prepare_latents(
516
- batch_size * num_images_per_prompt,
517
- num_channels_latents,
518
- height,
519
- width,
520
- text_embeddings.dtype,
521
- device,
522
- generator,
523
- latents,
524
- )
525
-
526
- # composable diffusion
527
- if isinstance(prompt, list) and batch_size == 1:
528
- # remove extra unconditional embedding
529
- # N = one unconditional embed + conditional embeds
530
- text_embeddings = text_embeddings[len(prompt) - 1 :]
531
-
532
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
533
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
534
-
535
- # 7. Denoising loop
536
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
537
- with self.progress_bar(total=num_inference_steps) as progress_bar:
538
- for i, t in enumerate(timesteps):
539
- # expand the latents if we are doing classifier free guidance
540
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
541
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
542
-
543
- # predict the noise residual
544
- noise_pred = []
545
- for j in range(text_embeddings.shape[0]):
546
- noise_pred.append(
547
- self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
548
- )
549
- noise_pred = torch.cat(noise_pred, dim=0)
550
-
551
- # perform guidance
552
- if do_classifier_free_guidance:
553
- noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
554
- noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
555
- dim=0, keepdims=True
556
- )
557
-
558
- # compute the previous noisy sample x_t -> x_t-1
559
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
560
-
561
- # call the callback, if provided
562
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
563
- progress_bar.update()
564
- if callback is not None and i % callback_steps == 0:
565
- callback(i, t, latents)
566
-
567
- # 8. Post-processing
568
- image = self.decode_latents(latents)
569
-
570
- # 9. Run safety checker
571
- image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
572
-
573
- # 10. Convert to PIL
574
- if output_type == "pil":
575
- image = self.numpy_to_pil(image)
576
-
577
- if not return_dict:
578
- return (image, has_nsfw_concept)
579
-
580
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
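For orientation, a minimal usage sketch of the pipeline deleted above follows. It is illustrative only and not part of the file: the checkpoint id, output path, and the assumption that this file resolves through diffusers' community-pipeline mechanism under the name `composable_stable_diffusion` are all hypothetical. The `|`-separated prompts are scored separately and combined in the denoising loop as eps = eps_uncond + sum_i w_i * (eps_i - eps_uncond), which is what the weighted sum over `noise_pred_text - noise_pred_uncond` implements.

```python
# Hedged usage sketch (assumes the "composable_stable_diffusion" community
# pipeline name loads this file and that the CompVis/stable-diffusion-v1-4
# checkpoint is reachable; all paths and prompts are illustrative).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Prompts are composed with "|"; per-prompt guidance weights use the same separator.
image = pipe(
    prompt="a red sports car | a snowy mountain road",
    weights="7.5 | 7.5",
    num_inference_steps=50,
).images[0]
image.save("composed.png")  # hypothetical output path
```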
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/embeddings.py DELETED
@@ -1,546 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- import math
15
- from typing import Optional
16
-
17
- import numpy as np
18
- import torch
19
- from torch import nn
20
-
21
- from .activations import get_activation
22
-
23
-
24
- def get_timestep_embedding(
25
- timesteps: torch.Tensor,
26
- embedding_dim: int,
27
- flip_sin_to_cos: bool = False,
28
- downscale_freq_shift: float = 1,
29
- scale: float = 1,
30
- max_period: int = 10000,
31
- ):
32
- """
33
- This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
34
-
35
- :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional.
- :param embedding_dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
39
- """
40
- assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
41
-
42
- half_dim = embedding_dim // 2
43
- exponent = -math.log(max_period) * torch.arange(
44
- start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
45
- )
46
- exponent = exponent / (half_dim - downscale_freq_shift)
47
-
48
- emb = torch.exp(exponent)
49
- emb = timesteps[:, None].float() * emb[None, :]
50
-
51
- # scale embeddings
52
- emb = scale * emb
53
-
54
- # concat sine and cosine embeddings
55
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
56
-
57
- # flip sine and cosine embeddings
58
- if flip_sin_to_cos:
59
- emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
60
-
61
- # zero pad
62
- if embedding_dim % 2 == 1:
63
- emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
64
- return emb
65
-
66
-
67
- def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
68
- """
69
- grid_size: int of the grid height and width
- return: pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
71
- """
72
- grid_h = np.arange(grid_size, dtype=np.float32)
73
- grid_w = np.arange(grid_size, dtype=np.float32)
74
- grid = np.meshgrid(grid_w, grid_h) # here w goes first
75
- grid = np.stack(grid, axis=0)
76
-
77
- grid = grid.reshape([2, 1, grid_size, grid_size])
78
- pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
79
- if cls_token and extra_tokens > 0:
80
- pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
81
- return pos_embed
82
-
83
-
84
- def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
85
- if embed_dim % 2 != 0:
86
- raise ValueError("embed_dim must be divisible by 2")
87
-
88
- # use half of dimensions to encode grid_h
89
- emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
90
- emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
91
-
92
- emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
93
- return emb
94
-
95
-
96
- def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
97
- """
98
- embed_dim: output dimension for each position
- pos: a list of positions to be encoded: size (M,)
- out: (M, D)
99
- """
100
- if embed_dim % 2 != 0:
101
- raise ValueError("embed_dim must be divisible by 2")
102
-
103
- omega = np.arange(embed_dim // 2, dtype=np.float64)
104
- omega /= embed_dim / 2.0
105
- omega = 1.0 / 10000**omega # (D/2,)
106
-
107
- pos = pos.reshape(-1) # (M,)
108
- out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
109
-
110
- emb_sin = np.sin(out) # (M, D/2)
111
- emb_cos = np.cos(out) # (M, D/2)
112
-
113
- emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
114
- return emb
115
-
116
-
117
- class PatchEmbed(nn.Module):
118
- """2D Image to Patch Embedding"""
119
-
120
- def __init__(
121
- self,
122
- height=224,
123
- width=224,
124
- patch_size=16,
125
- in_channels=3,
126
- embed_dim=768,
127
- layer_norm=False,
128
- flatten=True,
129
- bias=True,
130
- ):
131
- super().__init__()
132
-
133
- num_patches = (height // patch_size) * (width // patch_size)
134
- self.flatten = flatten
135
- self.layer_norm = layer_norm
136
-
137
- self.proj = nn.Conv2d(
138
- in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
139
- )
140
- if layer_norm:
141
- self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
142
- else:
143
- self.norm = None
144
-
145
- pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5))
146
- self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
147
-
148
- def forward(self, latent):
149
- latent = self.proj(latent)
150
- if self.flatten:
151
- latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
152
- if self.layer_norm:
153
- latent = self.norm(latent)
154
- return latent + self.pos_embed
155
-
156
-
157
- class TimestepEmbedding(nn.Module):
158
- def __init__(
159
- self,
160
- in_channels: int,
161
- time_embed_dim: int,
162
- act_fn: str = "silu",
163
- out_dim: int = None,
164
- post_act_fn: Optional[str] = None,
165
- cond_proj_dim=None,
166
- ):
167
- super().__init__()
168
-
169
- self.linear_1 = nn.Linear(in_channels, time_embed_dim)
170
-
171
- if cond_proj_dim is not None:
172
- self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
173
- else:
174
- self.cond_proj = None
175
-
176
- self.act = get_activation(act_fn)
177
-
178
- if out_dim is not None:
179
- time_embed_dim_out = out_dim
180
- else:
181
- time_embed_dim_out = time_embed_dim
182
- self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)
183
-
184
- if post_act_fn is None:
185
- self.post_act = None
186
- else:
187
- self.post_act = get_activation(post_act_fn)
188
-
189
- def forward(self, sample, condition=None):
190
- if condition is not None:
191
- sample = sample + self.cond_proj(condition)
192
- sample = self.linear_1(sample)
193
-
194
- if self.act is not None:
195
- sample = self.act(sample)
196
-
197
- sample = self.linear_2(sample)
198
-
199
- if self.post_act is not None:
200
- sample = self.post_act(sample)
201
- return sample
202
-
203
-
204
- class Timesteps(nn.Module):
205
- def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
206
- super().__init__()
207
- self.num_channels = num_channels
208
- self.flip_sin_to_cos = flip_sin_to_cos
209
- self.downscale_freq_shift = downscale_freq_shift
210
-
211
- def forward(self, timesteps):
212
- t_emb = get_timestep_embedding(
213
- timesteps,
214
- self.num_channels,
215
- flip_sin_to_cos=self.flip_sin_to_cos,
216
- downscale_freq_shift=self.downscale_freq_shift,
217
- )
218
- return t_emb
219
-
220
-
221
- class GaussianFourierProjection(nn.Module):
222
- """Gaussian Fourier embeddings for noise levels."""
223
-
224
- def __init__(
225
- self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
226
- ):
227
- super().__init__()
228
- self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
229
- self.log = log
230
- self.flip_sin_to_cos = flip_sin_to_cos
231
-
232
- if set_W_to_weight:
233
- # to delete later
234
- self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
235
-
236
- self.weight = self.W
237
-
238
- def forward(self, x):
239
- if self.log:
240
- x = torch.log(x)
241
-
242
- x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi
243
-
244
- if self.flip_sin_to_cos:
245
- out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
246
- else:
247
- out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
248
- return out
249
-
250
-
251
- class ImagePositionalEmbeddings(nn.Module):
252
- """
253
- Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the
254
- height and width of the latent space.
255
-
256
- For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092
257
-
258
- For VQ-diffusion:
259
-
260
- Output vector embeddings are used as input for the transformer.
261
-
262
- Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE.
263
-
264
- Args:
265
- num_embed (`int`):
266
- Number of embeddings for the latent pixels embeddings.
267
- height (`int`):
268
- Height of the latent image i.e. the number of height embeddings.
269
- width (`int`):
270
- Width of the latent image i.e. the number of width embeddings.
271
- embed_dim (`int`):
272
- Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
273
- """
274
-
275
- def __init__(
276
- self,
277
- num_embed: int,
278
- height: int,
279
- width: int,
280
- embed_dim: int,
281
- ):
282
- super().__init__()
283
-
284
- self.height = height
285
- self.width = width
286
- self.num_embed = num_embed
287
- self.embed_dim = embed_dim
288
-
289
- self.emb = nn.Embedding(self.num_embed, embed_dim)
290
- self.height_emb = nn.Embedding(self.height, embed_dim)
291
- self.width_emb = nn.Embedding(self.width, embed_dim)
292
-
293
- def forward(self, index):
294
- emb = self.emb(index)
295
-
296
- height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height))
297
-
298
- # 1 x H x D -> 1 x H x 1 x D
299
- height_emb = height_emb.unsqueeze(2)
300
-
301
- width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width))
302
-
303
- # 1 x W x D -> 1 x 1 x W x D
304
- width_emb = width_emb.unsqueeze(1)
305
-
306
- pos_emb = height_emb + width_emb
307
-
308
- # 1 x H x W x D -> 1 x L x D
309
- pos_emb = pos_emb.view(1, self.height * self.width, -1)
310
-
311
- emb = emb + pos_emb[:, : emb.shape[1], :]
312
-
313
- return emb
314
-
315
-
316
- class LabelEmbedding(nn.Module):
317
- """
318
- Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
319
-
320
- Args:
321
- num_classes (`int`): The number of classes.
322
- hidden_size (`int`): The size of the vector embeddings.
323
- dropout_prob (`float`): The probability of dropping a label.
324
- """
325
-
326
- def __init__(self, num_classes, hidden_size, dropout_prob):
327
- super().__init__()
328
- use_cfg_embedding = dropout_prob > 0
329
- self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
330
- self.num_classes = num_classes
331
- self.dropout_prob = dropout_prob
332
-
333
- def token_drop(self, labels, force_drop_ids=None):
334
- """
335
- Drops labels to enable classifier-free guidance.
336
- """
337
- if force_drop_ids is None:
338
- drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
339
- else:
340
- drop_ids = torch.tensor(force_drop_ids == 1)
341
- labels = torch.where(drop_ids, self.num_classes, labels)
342
- return labels
343
-
344
- def forward(self, labels: torch.LongTensor, force_drop_ids=None):
345
- use_dropout = self.dropout_prob > 0
346
- if (self.training and use_dropout) or (force_drop_ids is not None):
347
- labels = self.token_drop(labels, force_drop_ids)
348
- embeddings = self.embedding_table(labels)
349
- return embeddings
350
-
351
-
352
- class TextImageProjection(nn.Module):
353
- def __init__(
354
- self,
355
- text_embed_dim: int = 1024,
356
- image_embed_dim: int = 768,
357
- cross_attention_dim: int = 768,
358
- num_image_text_embeds: int = 10,
359
- ):
360
- super().__init__()
361
-
362
- self.num_image_text_embeds = num_image_text_embeds
363
- self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
364
- self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim)
365
-
366
- def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
367
- batch_size = text_embeds.shape[0]
368
-
369
- # image
370
- image_text_embeds = self.image_embeds(image_embeds)
371
- image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
372
-
373
- # text
374
- text_embeds = self.text_proj(text_embeds)
375
-
376
- return torch.cat([image_text_embeds, text_embeds], dim=1)
377
-
378
-
379
- class ImageProjection(nn.Module):
380
- def __init__(
381
- self,
382
- image_embed_dim: int = 768,
383
- cross_attention_dim: int = 768,
384
- num_image_text_embeds: int = 32,
385
- ):
386
- super().__init__()
387
-
388
- self.num_image_text_embeds = num_image_text_embeds
389
- self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
390
- self.norm = nn.LayerNorm(cross_attention_dim)
391
-
392
- def forward(self, image_embeds: torch.FloatTensor):
393
- batch_size = image_embeds.shape[0]
394
-
395
- # image
396
- image_embeds = self.image_embeds(image_embeds)
397
- image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
398
- image_embeds = self.norm(image_embeds)
399
- return image_embeds
400
-
401
-
402
- class CombinedTimestepLabelEmbeddings(nn.Module):
403
- def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1):
404
- super().__init__()
405
-
406
- self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1)
407
- self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
408
- self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob)
409
-
410
- def forward(self, timestep, class_labels, hidden_dtype=None):
411
- timesteps_proj = self.time_proj(timestep)
412
- timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
413
-
414
- class_labels = self.class_embedder(class_labels) # (N, D)
415
-
416
- conditioning = timesteps_emb + class_labels # (N, D)
417
-
418
- return conditioning
419
-
420
-
421
- class TextTimeEmbedding(nn.Module):
422
- def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64):
423
- super().__init__()
424
- self.norm1 = nn.LayerNorm(encoder_dim)
425
- self.pool = AttentionPooling(num_heads, encoder_dim)
426
- self.proj = nn.Linear(encoder_dim, time_embed_dim)
427
- self.norm2 = nn.LayerNorm(time_embed_dim)
428
-
429
- def forward(self, hidden_states):
430
- hidden_states = self.norm1(hidden_states)
431
- hidden_states = self.pool(hidden_states)
432
- hidden_states = self.proj(hidden_states)
433
- hidden_states = self.norm2(hidden_states)
434
- return hidden_states
435
-
436
-
437
- class TextImageTimeEmbedding(nn.Module):
438
- def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536):
439
- super().__init__()
440
- self.text_proj = nn.Linear(text_embed_dim, time_embed_dim)
441
- self.text_norm = nn.LayerNorm(time_embed_dim)
442
- self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
443
-
444
- def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
445
- # text
446
- time_text_embeds = self.text_proj(text_embeds)
447
- time_text_embeds = self.text_norm(time_text_embeds)
448
-
449
- # image
450
- time_image_embeds = self.image_proj(image_embeds)
451
-
452
- return time_image_embeds + time_text_embeds
453
-
454
-
455
- class ImageTimeEmbedding(nn.Module):
456
- def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
457
- super().__init__()
458
- self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
459
- self.image_norm = nn.LayerNorm(time_embed_dim)
460
-
461
- def forward(self, image_embeds: torch.FloatTensor):
462
- # image
463
- time_image_embeds = self.image_proj(image_embeds)
464
- time_image_embeds = self.image_norm(time_image_embeds)
465
- return time_image_embeds
466
-
467
-
468
- class ImageHintTimeEmbedding(nn.Module):
469
- def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
470
- super().__init__()
471
- self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
472
- self.image_norm = nn.LayerNorm(time_embed_dim)
473
- self.input_hint_block = nn.Sequential(
474
- nn.Conv2d(3, 16, 3, padding=1),
475
- nn.SiLU(),
476
- nn.Conv2d(16, 16, 3, padding=1),
477
- nn.SiLU(),
478
- nn.Conv2d(16, 32, 3, padding=1, stride=2),
479
- nn.SiLU(),
480
- nn.Conv2d(32, 32, 3, padding=1),
481
- nn.SiLU(),
482
- nn.Conv2d(32, 96, 3, padding=1, stride=2),
483
- nn.SiLU(),
484
- nn.Conv2d(96, 96, 3, padding=1),
485
- nn.SiLU(),
486
- nn.Conv2d(96, 256, 3, padding=1, stride=2),
487
- nn.SiLU(),
488
- nn.Conv2d(256, 4, 3, padding=1),
489
- )
490
-
491
- def forward(self, image_embeds: torch.FloatTensor, hint: torch.FloatTensor):
492
- # image
493
- time_image_embeds = self.image_proj(image_embeds)
494
- time_image_embeds = self.image_norm(time_image_embeds)
495
- hint = self.input_hint_block(hint)
496
- return time_image_embeds, hint
497
-
498
-
499
- class AttentionPooling(nn.Module):
500
- # Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54
501
-
502
- def __init__(self, num_heads, embed_dim, dtype=None):
503
- super().__init__()
504
- self.dtype = dtype
505
- self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5)
506
- self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
507
- self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
508
- self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
509
- self.num_heads = num_heads
510
- self.dim_per_head = embed_dim // self.num_heads
511
-
512
- def forward(self, x):
513
- bs, length, width = x.size()
514
-
515
- def shape(x):
516
- # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
517
- x = x.view(bs, -1, self.num_heads, self.dim_per_head)
518
- # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
519
- x = x.transpose(1, 2)
520
- # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
521
- x = x.reshape(bs * self.num_heads, -1, self.dim_per_head)
522
- # (bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length)
523
- x = x.transpose(1, 2)
524
- return x
525
-
526
- class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype)
527
- x = torch.cat([class_token, x], dim=1) # (bs, length+1, width)
528
-
529
- # (bs*n_heads, class_token_length, dim_per_head)
530
- q = shape(self.q_proj(class_token))
531
- # (bs*n_heads, length+class_token_length, dim_per_head)
532
- k = shape(self.k_proj(x))
533
- v = shape(self.v_proj(x))
534
-
535
- # (bs*n_heads, class_token_length, length+class_token_length):
536
- scale = 1 / math.sqrt(math.sqrt(self.dim_per_head))
537
- weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards
538
- weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
539
-
540
- # (bs*n_heads, dim_per_head, class_token_length)
541
- a = torch.einsum("bts,bcs->bct", weight, v)
542
-
543
- # (bs, length+1, width)
544
- a = a.reshape(bs, -1, 1).transpose(1, 2)
545
-
546
- return a[:, 0, :] # cls_token
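For orientation, the attention pooling deleted above collapses a (batch, length, width) token sequence into a single (batch, width) vector by attending from a mean-derived class token. Below is a minimal, self-contained sketch of that shape contract; the `TinyAttentionPooling` name and the 2x77x768 test input are illustrative choices, not part of the original file.

import math

import torch
from torch import nn


class TinyAttentionPooling(nn.Module):
    """Mirrors the deleted AttentionPooling logic for a quick shape check."""

    def __init__(self, num_heads: int, embed_dim: int):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.num_heads = num_heads
        self.dim_per_head = embed_dim // num_heads

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        bs, length, width = x.size()

        def shape(t):
            # (bs, length, width) -> (bs * n_heads, dim_per_head, length)
            t = t.view(bs, -1, self.num_heads, self.dim_per_head).transpose(1, 2)
            return t.reshape(bs * self.num_heads, -1, self.dim_per_head).transpose(1, 2)

        class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype)
        x = torch.cat([class_token, x], dim=1)          # (bs, length + 1, width)
        q = shape(self.q_proj(class_token))             # query only from the class token
        k, v = shape(self.k_proj(x)), shape(self.v_proj(x))
        scale = 1 / math.sqrt(math.sqrt(self.dim_per_head))
        weight = torch.softmax(torch.einsum("bct,bcs->bts", q * scale, k * scale), dim=-1)
        a = torch.einsum("bts,bcs->bct", weight, v).reshape(bs, -1, 1).transpose(1, 2)
        return a[:, 0, :]                               # pooled (bs, width) vector


pooled = TinyAttentionPooling(num_heads=8, embed_dim=768)(torch.randn(2, 77, 768))
print(pooled.shape)  # torch.Size([2, 768])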
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py DELETED
@@ -1,6 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
4
- ]
5
- model = dict(
6
- decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
 
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './fcn_hr18_480x480_40k_pascal_context.py'
2
- model = dict(
3
- pretrained='open-mmlab://msra/hrnetv2_w18_small',
4
- backbone=dict(
5
- extra=dict(
6
- stage1=dict(num_blocks=(2, )),
7
- stage2=dict(num_blocks=(2, 2)),
8
- stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
9
- stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
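As a quick illustration of how such a derived config is consumed, the sketch below loads it with the `Config` helper from mmcv 1.x and inspects the merged result. The relative path and an installed mmcv are assumptions; in MMSegmentation itself, `build_segmentor(cfg.model, ...)` would typically take over from here.

from mmcv import Config

# Hypothetical location; point this at wherever the mmsegmentation configs live.
cfg = Config.fromfile("configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py")

# The child file is merged on top of its `_base_` parent, so the small-HRNet
# overrides (hrnetv2_w18_small weights, reduced block counts) show up here.
print(cfg.model.pretrained)
print(cfg.model.backbone.extra.stage3.num_blocks)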
 
spaces/Apex-X/ROOPOK/roop/processors/frame/core.py DELETED
@@ -1,91 +0,0 @@
1
- import os
2
- import sys
3
- import importlib
4
- import psutil
5
- from concurrent.futures import ThreadPoolExecutor, as_completed
6
- from queue import Queue
7
- from types import ModuleType
8
- from typing import Any, List, Callable
9
- from tqdm import tqdm
10
-
11
- import roop
12
-
13
- FRAME_PROCESSORS_MODULES: List[ModuleType] = []
14
- FRAME_PROCESSORS_INTERFACE = [
15
- 'pre_check',
16
- 'pre_start',
17
- 'process_frame',
18
- 'process_frames',
19
- 'process_image',
20
- 'process_video',
21
- 'post_process'
22
- ]
23
-
24
-
25
- def load_frame_processor_module(frame_processor: str) -> Any:
26
- try:
27
- frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
28
- for method_name in FRAME_PROCESSORS_INTERFACE:
29
- if not hasattr(frame_processor_module, method_name):
30
- raise NotImplementedError
31
- except ModuleNotFoundError:
32
- sys.exit(f'Frame processor {frame_processor} not found.')
33
- except NotImplementedError:
34
- sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
35
- return frame_processor_module
36
-
37
-
38
- def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
39
- global FRAME_PROCESSORS_MODULES
40
-
41
- if not FRAME_PROCESSORS_MODULES:
42
- for frame_processor in frame_processors:
43
- frame_processor_module = load_frame_processor_module(frame_processor)
44
- FRAME_PROCESSORS_MODULES.append(frame_processor_module)
45
- return FRAME_PROCESSORS_MODULES
46
-
47
-
48
- def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
49
- with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
50
- futures = []
51
- queue = create_queue(temp_frame_paths)
52
- queue_per_future = max(len(temp_frame_paths) // roop.globals.execution_threads, 1)
53
- while not queue.empty():
54
- future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
55
- futures.append(future)
56
- for future in as_completed(futures):
57
- future.result()
58
-
59
-
60
- def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
61
- queue: Queue[str] = Queue()
62
- for frame_path in temp_frame_paths:
63
- queue.put(frame_path)
64
- return queue
65
-
66
-
67
- def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
68
- queues = []
69
- for _ in range(queue_per_future):
70
- if not queue.empty():
71
- queues.append(queue.get())
72
- return queues
73
-
74
-
75
- def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
76
- progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
77
- total = len(frame_paths)
78
- with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
79
- multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
80
-
81
-
82
- def update_progress(progress: Any = None) -> None:
83
- process = psutil.Process(os.getpid())
84
- memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
85
- progress.set_postfix({
86
- 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
87
- 'execution_providers': roop.globals.execution_providers,
88
- 'execution_threads': roop.globals.execution_threads
89
- })
90
- progress.refresh()
91
- progress.update(1)
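The heart of `multi_process_frame` / `pick_queue` above is a plain producer/consumer split: all frame paths go into a queue, the queue is drained in per-future chunks, and each chunk is handed to a thread pool. A dependency-free sketch of that pattern follows; the dummy frame names, the counting `update` callback, and the stand-in `process_frames` are all hypothetical.

from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from typing import Callable, List


def process_frames(source: str, frames: List[str], update: Callable[[], None]) -> None:
    # Stand-in for a real frame processor: real work would read/write each image.
    for frame in frames:
        _ = (source, frame)
        update()


def multi_process(source: str, frame_paths: List[str], threads: int = 4) -> None:
    queue: Queue = Queue()
    for path in frame_paths:
        queue.put(path)
    chunk_size = max(len(frame_paths) // threads, 1)
    done: List[int] = []
    with ThreadPoolExecutor(max_workers=threads) as executor:
        futures = []
        while not queue.empty():
            chunk = []
            for _ in range(chunk_size):
                if not queue.empty():
                    chunk.append(queue.get())
            futures.append(executor.submit(process_frames, source, chunk, lambda: done.append(1)))
        for future in as_completed(futures):
            future.result()  # re-raise any worker exception
    print(f"processed {len(done)} of {len(frame_paths)} frames")


multi_process("face.jpg", [f"frame_{i:04d}.png" for i in range(10)])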
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/__about__.py DELETED
@@ -1,26 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- __all__ = [
6
- "__title__",
7
- "__summary__",
8
- "__uri__",
9
- "__version__",
10
- "__author__",
11
- "__email__",
12
- "__license__",
13
- "__copyright__",
14
- ]
15
-
16
- __title__ = "packaging"
17
- __summary__ = "Core utilities for Python packages"
18
- __uri__ = "https://github.com/pypa/packaging"
19
-
20
- __version__ = "21.3"
21
-
22
- __author__ = "Donald Stufft and individual contributors"
23
- __email__ = "[email protected]"
24
-
25
- __license__ = "BSD-2-Clause or Apache-2.0"
26
- __copyright__ = "2014-2019 %s" % __author__
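This file only declares static metadata for the vendored copy of `packaging`. When the same fields are needed at runtime for an installed distribution, the standard-library `importlib.metadata` (Python 3.8+) exposes them without touching dunder attributes; a small illustration, using `packaging` itself as the example distribution:

from importlib.metadata import metadata, version

print(version("packaging"))              # e.g. "21.3"
print(metadata("packaging")["Summary"])  # "Core utilities for Python packages"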
 
spaces/AttendAndExcite/Attend-and-Excite/app.py DELETED
@@ -1,289 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- from __future__ import annotations
4
-
5
- import os
6
- import random
7
-
8
- import gradio as gr
9
- import numpy as np
10
- import PIL.Image
11
- import spaces
12
- import torch
13
- from diffusers import StableDiffusionAttendAndExcitePipeline, StableDiffusionPipeline
14
-
15
- DESCRIPTION = """\
16
- # Attend-and-Excite
17
-
18
- This is a demo for [Attend-and-Excite](https://arxiv.org/abs/2301.13826).
19
- Attend-and-Excite performs attention-based generative semantic guidance to mitigate subject neglect in Stable Diffusion.
20
- Select a prompt and a set of indices matching the subjects you wish to strengthen (the `Check token indices` cell can help map between a word and its index).
21
- """
22
-
23
- if not torch.cuda.is_available():
24
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
25
-
26
- if torch.cuda.is_available():
27
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
28
- model_id = "CompVis/stable-diffusion-v1-4"
29
- ax_pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(model_id)
30
- ax_pipe.to(device)
31
- sd_pipe = StableDiffusionPipeline.from_pretrained(model_id)
32
- sd_pipe.to(device)
33
-
34
-
35
- MAX_INFERENCE_STEPS = 100
36
- MAX_SEED = np.iinfo(np.int32).max
37
-
38
-
39
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
40
- if randomize_seed:
41
- seed = random.randint(0, MAX_SEED)
42
- return seed
43
-
44
-
45
- def get_token_table(prompt: str) -> list[tuple[int, str]]:
46
- tokens = [ax_pipe.tokenizer.decode(t) for t in ax_pipe.tokenizer(prompt)["input_ids"]]
47
- tokens = tokens[1:-1]
48
- return list(enumerate(tokens, start=1))
49
-
50
-
51
- @spaces.GPU
52
- def run(
53
- prompt: str,
54
- indices_to_alter_str: str,
55
- seed: int = 0,
56
- apply_attend_and_excite: bool = True,
57
- num_inference_steps: int = 50,
58
- guidance_scale: float = 7.5,
59
- scale_factor: int = 20,
60
- thresholds: dict[int, float] = {
61
- 10: 0.5,
62
- 20: 0.8,
63
- },
64
- max_iter_to_alter: int = 25,
65
- ) -> PIL.Image.Image:
66
- if num_inference_steps > MAX_INFERENCE_STEPS:
67
- raise gr.Error(f"Number of steps cannot exceed {MAX_INFERENCE_STEPS}.")
68
-
69
- generator = torch.Generator(device=device).manual_seed(seed)
70
- if apply_attend_and_excite:
71
- try:
72
- token_indices = list(map(int, indices_to_alter_str.split(",")))
73
- except Exception:
74
- raise ValueError("Invalid token indices.")
75
- out = ax_pipe(
76
- prompt=prompt,
77
- token_indices=token_indices,
78
- guidance_scale=guidance_scale,
79
- generator=generator,
80
- num_inference_steps=num_inference_steps,
81
- max_iter_to_alter=max_iter_to_alter,
82
- thresholds=thresholds,
83
- scale_factor=scale_factor,
84
- )
85
- else:
86
- out = sd_pipe(
87
- prompt=prompt,
88
- guidance_scale=guidance_scale,
89
- generator=generator,
90
- num_inference_steps=num_inference_steps,
91
- )
92
- return out.images[0]
93
-
94
-
95
- def process_example(
96
- prompt: str,
97
- indices_to_alter_str: str,
98
- seed: int,
99
- apply_attend_and_excite: bool,
100
- ) -> tuple[list[tuple[int, str]], PIL.Image.Image]:
101
- token_table = get_token_table(prompt)
102
- result = run(
103
- prompt=prompt,
104
- indices_to_alter_str=indices_to_alter_str,
105
- seed=seed,
106
- apply_attend_and_excite=apply_attend_and_excite,
107
- )
108
- return token_table, result
109
-
110
-
111
- with gr.Blocks(css="style.css") as demo:
112
- gr.Markdown(DESCRIPTION)
113
- gr.DuplicateButton(
114
- value="Duplicate Space for private use",
115
- elem_id="duplicate-button",
116
- visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
117
- )
118
-
119
- with gr.Row():
120
- with gr.Column():
121
- prompt = gr.Text(
122
- label="Prompt",
123
- max_lines=1,
124
- placeholder="A pod of dolphins leaping out of the water in an ocean with a ship on the background",
125
- )
126
- with gr.Accordion(label="Check token indices", open=False):
127
- show_token_indices_button = gr.Button("Show token indices")
128
- token_indices_table = gr.Dataframe(label="Token indices", headers=["Index", "Token"], col_count=2)
129
- token_indices_str = gr.Text(
130
- label="Token indices (a comma-separated list indices of the tokens you wish to alter)",
131
- max_lines=1,
132
- placeholder="4,16",
133
- )
134
- apply_attend_and_excite = gr.Checkbox(label="Apply Attend-and-Excite", value=True)
135
- seed = gr.Slider(
136
- label="Seed",
137
- minimum=0,
138
- maximum=MAX_SEED,
139
- step=1,
140
- value=0,
141
- )
142
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
143
- num_inference_steps = gr.Slider(
144
- label="Number of inference steps",
145
- minimum=1,
146
- maximum=MAX_INFERENCE_STEPS,
147
- step=1,
148
- value=50,
149
- )
150
- guidance_scale = gr.Slider(
151
- label="Guidance scale",
152
- minimum=0,
153
- maximum=50,
154
- step=0.1,
155
- value=7.5,
156
- )
157
- run_button = gr.Button("Generate")
158
- with gr.Column():
159
- result = gr.Image(label="Result")
160
-
161
- with gr.Row():
162
- examples = [
163
- [
164
- "A mouse and a red car",
165
- "2,6",
166
- 2098,
167
- True,
168
- ],
169
- [
170
- "A mouse and a red car",
171
- "2,6",
172
- 2098,
173
- False,
174
- ],
175
- [
176
- "A horse and a dog",
177
- "2,5",
178
- 123,
179
- True,
180
- ],
181
- [
182
- "A horse and a dog",
183
- "2,5",
184
- 123,
185
- False,
186
- ],
187
- [
188
- "A painting of an elephant with glasses",
189
- "5,7",
190
- 123,
191
- True,
192
- ],
193
- [
194
- "A painting of an elephant with glasses",
195
- "5,7",
196
- 123,
197
- False,
198
- ],
199
- [
200
- "A playful kitten chasing a butterfly in a wildflower meadow",
201
- "3,6,10",
202
- 123,
203
- True,
204
- ],
205
- [
206
- "A playful kitten chasing a butterfly in a wildflower meadow",
207
- "3,6,10",
208
- 123,
209
- False,
210
- ],
211
- [
212
- "A grizzly bear catching a salmon in a crystal clear river surrounded by a forest",
213
- "2,6,15",
214
- 123,
215
- True,
216
- ],
217
- [
218
- "A grizzly bear catching a salmon in a crystal clear river surrounded by a forest",
219
- "2,6,15",
220
- 123,
221
- False,
222
- ],
223
- [
224
- "A pod of dolphins leaping out of the water in an ocean with a ship on the background",
225
- "4,16",
226
- 123,
227
- True,
228
- ],
229
- [
230
- "A pod of dolphins leaping out of the water in an ocean with a ship on the background",
231
- "4,16",
232
- 123,
233
- False,
234
- ],
235
- ]
236
- gr.Examples(
237
- examples=examples,
238
- inputs=[
239
- prompt,
240
- token_indices_str,
241
- seed,
242
- apply_attend_and_excite,
243
- ],
244
- outputs=[
245
- token_indices_table,
246
- result,
247
- ],
248
- fn=process_example,
249
- cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
250
- examples_per_page=20,
251
- )
252
-
253
- show_token_indices_button.click(
254
- fn=get_token_table,
255
- inputs=prompt,
256
- outputs=token_indices_table,
257
- queue=False,
258
- api_name="get-token-table",
259
- )
260
-
261
- gr.on(
262
- triggers=[prompt.submit, token_indices_str.submit, run_button.click],
263
- fn=randomize_seed_fn,
264
- inputs=[seed, randomize_seed],
265
- outputs=seed,
266
- queue=False,
267
- api_name=False,
268
- ).then(
269
- fn=get_token_table,
270
- inputs=prompt,
271
- outputs=token_indices_table,
272
- queue=False,
273
- api_name=False,
274
- ).then(
275
- fn=run,
276
- inputs=[
277
- prompt,
278
- token_indices_str,
279
- seed,
280
- apply_attend_and_excite,
281
- num_inference_steps,
282
- guidance_scale,
283
- ],
284
- outputs=result,
285
- api_name="run",
286
- )
287
-
288
- if __name__ == "__main__":
289
- demo.queue(max_size=20).launch()
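Outside the Gradio UI, the generation path above reduces to a single pipeline call. A minimal script mirroring the parameters the demo passes (it assumes a CUDA device, a working diffusers install, and network access to pull the CompVis/stable-diffusion-v1-4 checkpoint):

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.to("cuda")

generator = torch.Generator(device="cuda").manual_seed(123)
image = pipe(
    prompt="A horse and a dog",
    token_indices=[2, 5],        # tokens to strengthen, as in the examples above
    guidance_scale=7.5,
    num_inference_steps=50,
    max_iter_to_alter=25,
    thresholds={10: 0.5, 20: 0.8},
    scale_factor=20,
    generator=generator,
).images[0]
image.save("horse_and_dog.png")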
 
spaces/AvinashRamesh23/AIEditor/app.py DELETED
@@ -1,435 +0,0 @@
1
- import streamlit as st
2
- import whisper
3
- import re
4
- from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
5
- from moviepy.editor import *
6
- import math
7
- from stable_whisper import modify_model,results_to_word_srt
8
- import asyncio
9
- from deepgram import Deepgram
10
- from typing import Dict
11
- import os
12
- import moviepy.editor as mp
13
- from pytube import YouTube
14
- from time import sleep
15
- import pandas as pd
16
-
17
- import calendar
18
- import time
19
-
20
- current_GMT = time.gmtime()
21
-
22
- time_stamp = calendar.timegm(current_GMT)
23
-
24
- st.title('AI Editor for Content Creators!')
25
-
26
- @st.cache(suppress_st_warning=True)
27
- #load whisper model
28
- def load_model(model_selected):
29
- #load medium model
30
- model = whisper.load_model(model_selected)
31
- # modify model to get word timestamp
32
- modify_model(model)
33
- return model
34
-
35
- #transcribe
36
- @st.cache(suppress_st_warning=True)
37
- def transcribe_video(vid,model_selected):
38
- model = load_model(model_selected)
39
- options = whisper.DecodingOptions(fp16=False,language="English")
40
- result = model.transcribe(vid, **options.__dict__)
41
- result['srt'] = whisper_result_to_srt(result)
42
- return result
43
-
44
- #srt generation
45
- def whisper_result_to_srt(result):
46
- text = []
47
- for i,s in enumerate(result['segments']):
48
- text.append(str(i+1))
49
- time_start = s['start']
50
- hours, minutes, seconds = int(time_start/3600), (time_start/60) % 60, (time_start) % 60
51
- timestamp_start = "%02d:%02d:%06.3f" % (hours, minutes, seconds)
52
- timestamp_start = timestamp_start.replace('.',',')
53
- time_end = s['end']
54
- hours, minutes, seconds = int(time_end/3600), (time_end/60) % 60, (time_end) % 60
55
- timestamp_end = "%02d:%02d:%06.3f" % (hours, minutes, seconds)
56
- timestamp_end = timestamp_end.replace('.',',')
57
- text.append(timestamp_start + " --> " + timestamp_end)
58
- text.append(s['text'].strip() + "\n")
59
- return "\n".join(text)
60
-
61
- #compute speaking_time
62
- async def compute_speaking_time(transcript_data: Dict,data:str) -> None:
63
- if 'results' in transcript_data:
64
- transcript = transcript_data['results']['channels'][0]['alternatives'][0]['words']
65
- total_speaker_time = {}
66
- speaker_words = []
67
- current_speaker = -1
68
-
69
- for speaker in transcript:
70
- speaker_number = speaker["speaker"]
71
-
72
- if speaker_number is not current_speaker:
73
- current_speaker = speaker_number
74
- speaker_words.append([speaker_number, [], 0])
75
-
76
- try:
77
- total_speaker_time[speaker_number][1] += 1
78
- except KeyError:
79
- total_speaker_time[speaker_number] = [0,1]
80
-
81
- get_word = speaker["word"]
82
- speaker_words[-1][1].append(get_word)
83
-
84
- total_speaker_time[speaker_number][0] += speaker["end"] - speaker["start"]
85
- speaker_words[-1][2] += speaker["end"] - speaker["start"]
86
-
87
- for speaker, words, time_amount in speaker_words:
88
- print(f"Speaker {speaker}: {' '.join(words)}")
89
- data+=f"\nSpeaker {speaker}: {' '.join(words)}"
90
- print(f"Speaker {speaker}: {time_amount}")
91
- data+=f"\nSpeaker {speaker}: {time_amount}"
92
-
93
-
94
- for speaker, (total_time, amount) in total_speaker_time.items():
95
- print(f"Speaker {speaker} avg time per phrase: {total_time/amount} ")
96
- data+=f"\nSpeaker {speaker} avg time per phrase: {total_time/amount} "
97
- print(f"Total time of conversation: {total_time}")
98
- data+=f"\nTotal time of conversation: {total_time}"
99
- return transcript,data
100
-
101
- #extract audio from video
102
- def extract_write_audio(vd):
103
- my_clip = mp.VideoFileClip(f'{vd}')
104
- my_clip.audio.write_audiofile(f"audio.wav")
105
-
106
- #speaker diarization workflow
107
- async def speaker_diarization_flow(PATH_TO_FILE):
108
- audio = extract_write_audio(PATH_TO_FILE)
109
- data = ''
110
- DEEPGRAM_API_KEY = "3dc39bf904babb858390455b1a1399e221bf87f8"
111
- deepgram = Deepgram(DEEPGRAM_API_KEY)
112
- with open(PATH_TO_FILE, 'rb') as audio:
113
- source = {'buffer': audio, 'mimetype': 'audio/wav'}
114
- transcription = await deepgram.transcription.prerecorded(source, {'punctuate': True, 'diarize': True})
115
- transcript,final_data = await compute_speaking_time(transcription,data)
116
- return final_data
117
-
118
- # speaker diarization main function
119
- async def speaker_diarization(PATH_TO_FILE):
120
- data = await speaker_diarization_flow(PATH_TO_FILE)
121
- print("data is", data)
122
- return data
123
-
124
- #find filler words
125
- def filler_words_finder(result_data):
126
- word_map_prior_edit=set()
127
- word_map_after_edit=set()
128
- #my filler words sample
129
- filler_words={'um','ah','you know','mmm','mmm','er','uh','Hmm','actually','basically','seriously','mhm','uh huh','uh','huh','ooh','aah','ooh'}
130
- filler_words_timestamp=set()
131
- for keys in result_data:
132
- if keys == 'segments':
133
- prev=0
134
- for i in result_data[keys]:
135
- for word in i['whole_word_timestamps']:
136
- lower_case = re.sub(r'\W','',word['word'].lower())
137
- word_map_prior_edit.add(word['timestamp'])
138
- if lower_case in filler_words or lower_case.startswith(('hm','aa','mm','oo')):
139
- st.write(word['word'].lower(),word['timestamp'])
140
- print(word['word'].lower(),word['timestamp'])
141
- filler_words_timestamp.add(word['timestamp'])
142
- prev=word['timestamp']
143
- continue
144
- word_map_after_edit.add((prev,word['timestamp']))
145
- prev=word['timestamp']
146
- return word_map_after_edit, filler_words_timestamp
147
-
148
- def merge_overlapping_time_intervals(intervals):
149
- stack = []
150
- result=[intervals[0]]
151
-
152
- for interval in intervals:
153
- interval2=result[-1]
154
-
155
- if overlap(interval,interval2):
156
- result[-1] = [min(interval[0],interval2[0]),max(interval[1],interval2[1])]
157
- else:
158
- result.append(interval)
159
-
160
- return result
161
-
162
- def overlap(interval1,interval2):
163
- return min(interval1[1],interval2[1])-max(interval1[0],interval2[0]) >= 0
164
-
165
- #assembly ai endpoints
166
- import requests
167
- transcript_endpoint = "https://api.assemblyai.com/v2/transcript"
168
- upload_endpoint = "https://api.assemblyai.com/v2/upload"
169
-
170
- headers = {
171
- "authorization": "05e515bf6b474966bc48bbdd1448b3cf",
172
- "content-type": "application/json"
173
- }
174
-
175
- def upload_to_AssemblyAI(save_location):
176
- CHUNK_SIZE = 5242880
177
- def read_file(filename):
178
- with open(filename, 'rb') as _file:
179
- while True:
180
- print("chunk uploaded")
181
- data = _file.read(CHUNK_SIZE)
182
- if not data:
183
- break
184
- yield data
185
-
186
- upload_response = requests.post(
187
- upload_endpoint,
188
- headers=headers, data=read_file(save_location)
189
- )
190
- print(upload_response.json())
191
- audio_url = upload_response.json()['upload_url']
192
- print('Uploaded to', audio_url)
193
- return audio_url
194
-
195
-
196
- def start_analysis(audio_url,type):
197
- ## Start transcription job of audio file
198
- data = {
199
- 'audio_url': audio_url,
200
- 'iab_categories': True,
201
- 'content_safety': True,
202
- "summarization": True,
203
- "summary_type": "bullets",
204
- "summary_model":type
205
- }
206
- if type=='conversational':
207
- data["speaker_labels"]= True
208
-
209
- transcript_response = requests.post(transcript_endpoint, json=data, headers=headers)
210
- print(transcript_response.json())
211
- transcript_id = transcript_response.json()['id']
212
- polling_endpoint = transcript_endpoint + "/" + transcript_id
213
- print("Transcribing at", polling_endpoint)
214
- return polling_endpoint
215
-
216
- def get_analysis_results(polling_endpoint):
217
- status = 'submitted'
218
-
219
- while True:
220
- print(status)
221
- polling_response = requests.get(polling_endpoint, headers=headers)
222
- status = polling_response.json()['status']
223
- # st.write(polling_response.json())
224
- # st.write(status)
225
- if status == 'submitted' or status == 'processing' or status == 'queued':
226
- print('not ready yet')
227
- sleep(10)
228
-
229
- elif status == 'completed':
230
- print('creating transcript')
231
- return polling_response
232
- break
233
-
234
- else:
235
- print('error')
236
- return False
237
- break
238
-
239
- def pii_redact(audiourl,options):
240
- print(options,audiourl)
241
- endpoint = "https://api.assemblyai.com/v2/transcript"
242
- json = {
243
- "audio_url": audiourl,
244
- "redact_pii": True,
245
- "redact_pii_audio": True,
246
- "redact_pii_policies": options
247
- }
248
-
249
- headers = {
250
- "authorization": "05e515bf6b474966bc48bbdd1448b3cf",
251
- "content-type": "application/json",
252
- }
253
-
254
- response = requests.post(endpoint, json=json, headers=headers)
255
- print(response.json())
256
- transcript_id = response.json()['id']
257
- polling_endpoint = endpoint + "/" + transcript_id
258
- return polling_endpoint
259
-
260
- def pii_redact_audio(polling_endpoint):
261
- status = 'submitted'
262
- headers = {
263
- "authorization": "05e515bf6b474966bc48bbdd1448b3cf",
264
- "content-type": "application/json",
265
- }
266
- while True:
267
- print(status)
268
- polling_response = requests.get(polling_endpoint, headers=headers)
269
- status = polling_response.json()['status']
270
- if status == 'submitted' or status == 'processing' or status == 'queued':
271
- print('not ready yet')
272
- sleep(10)
273
-
274
- elif status == 'completed':
275
- print('creating transcript')
276
- return polling_response
277
- break
278
-
279
- else:
280
- print('error')
281
- return False
282
- break
283
-
284
- def download_redact_audio(pooling_enpoint):
285
- headers = {
286
- "authorization": "05e515bf6b474966bc48bbdd1448b3cf",
287
- "content-type": "application/json",
288
- }
289
-
290
- redacted_audio_response = requests.get(pooling_enpoint + "/redacted-audio",headers=headers)
291
- print(redacted_audio_response.json())
292
- redacted_audio = requests.get(redacted_audio_response.json()['redacted_audio_url'])
293
- with open('redacted_audio.mp3', 'wb') as f:
294
- f.write(redacted_audio.content)
295
-
296
- def redact_audio_video_display(vd,audio):
297
- audioclip = AudioFileClip(audio)
298
- clip = VideoFileClip(vd)
299
- videoclip = clip.set_audio(audioclip)
300
- videoclip.write_videofile("Redacted_video.mp4")
301
- st.video("Redacted_video.mp4")
302
-
303
- async def main(uploaded_video,model_selected):
304
- try:
305
- vid = uploaded_video.name
306
- with open(vid, mode='wb') as f:
307
- f.write(uploaded_video.read()) # save video to disk
308
- except:
309
- with st.spinner('Downloading YouTube Video'):
310
- yt = YouTube(uploaded_video)
311
- title=yt.title
312
- vid = f"{title}.mp4"
313
- yt.streams.filter(file_extension="mp4").get_by_resolution("360p").download(filename=vid)
314
- finally:
315
- name = vid.split('.')[0]
316
- preview = st.video(vid)
317
- #extracting the transcription result
318
- with st.spinner('Transcribing Video, Wait for it...'):
319
- result = transcribe_video(vid,model_selected)
320
- st.text_area("Edit Transcript",result["text"])
321
- col1, col2, col3, col4, col5, col6 = st.columns([1,1,1,1,1,1])
322
- tab1, tab2, tab3, tab4, tab5, tab6 = st.tabs(["Remove Filler Words","Edit Video" ,"Download SRT", "Perform Speaker Diarization","Content Analyzer","PII redactation"])
323
-
324
- with tab1:
325
- filler_word = st.button('Edit/Remove Filler Words with a click of a button')
326
- if filler_word:
327
- with st.spinner(text="In progress..."):
328
- word_map_after_edit, filler_words_timestamp = filler_words_finder(result)
329
- final_intervals = merge_overlapping_time_intervals(sorted(list(word_map_after_edit)))
330
- subclips=[]
331
- for start,end in final_intervals:
332
- clip = VideoFileClip(vid)
333
- tmp = clip.subclip(start,(end - end*0.1))
334
- subclips.append(tmp)
335
- #concatenate subclips without filler words
336
- final_clip = concatenate_videoclips(subclips)
337
- final_clip.write_videofile(f"remove_{vid}")
338
- preview = st.video(f"remove_{vid}")
339
-
340
- with tab2:
341
- save = st.button('Edit')
342
-
343
- with tab3:
344
- download = st.download_button('Download SRT', result['srt'],f'{name}.srt')
345
- if download:
346
- st.write('Thanks for downloading!')
347
-
348
- with tab4:
349
- identify_download_speaker = st.button('Perform Speaker Diarization')
350
- if identify_download_speaker:
351
- with st.spinner(text="In progress..."):
352
- results = await speaker_diarization(vid)
353
- download_speaker = st.download_button("download speaker_diarization",results,'diarization_stats.txt')
354
- if download_speaker:
355
- st.write('Thanks for downloading!')
356
-
357
- with tab5:
358
- type = st.selectbox('Summary Type?',('informative', 'conversational', 'catchy'))
359
- Analyze_content = st.button("Start Content Analysis")
360
- if Analyze_content:
361
- with st.spinner(text="In progress..."):
362
- audio = extract_write_audio(vid)
363
- audio_url = upload_to_AssemblyAI("audio.wav")
364
- # start analysis of the file
365
- polling_endpoint = start_analysis(audio_url,type)
366
- # receive the results
367
- results = get_analysis_results(polling_endpoint)
368
-
369
- # separate analysis results
370
- summary = results.json()['summary']
371
- content_moderation = results.json()["content_safety_labels"]
372
- topic_labels = results.json()["iab_categories_result"]
373
-
374
- my_expander1 = st.expander(label='Summary')
375
- my_expander2 = st.expander(label='Content Moderation')
376
- my_expander3 = st.expander(label='Topic Discussed')
377
-
378
- with my_expander1:
379
- st.header("Video summary")
380
- st.write(summary)
381
-
382
- with my_expander2:
383
- st.header("Sensitive content")
384
- if content_moderation['summary'] != {}:
385
- st.subheader('🚨 Mention of the following sensitive topics detected.')
386
- moderation_df = pd.DataFrame(content_moderation['summary'].items())
387
- moderation_df.columns = ['topic','confidence']
388
- st.dataframe(moderation_df, use_container_width=True)
389
- else:
390
- st.subheader('✅ All clear! No sensitive content detected.')
391
-
392
- with my_expander3:
393
- st.header("Topics discussed")
394
- topics_df = pd.DataFrame(topic_labels['summary'].items())
395
- topics_df.columns = ['topic','confidence']
396
- topics_df["topic"] = topics_df["topic"].str.split(">")
397
- expanded_topics = topics_df.topic.apply(pd.Series).add_prefix('topic_level_')
398
- topics_df = topics_df.join(expanded_topics).drop('topic', axis=1).sort_values(['confidence'], ascending=False).fillna('')
399
- st.dataframe(topics_df, use_container_width=True)
400
-
401
- with tab6:
402
- options = st.multiselect('Select Policies to redact from video',["medical_process","medical_condition","blood_type","drug","injury","number_sequence","email_address","date_of_birth","phone_number","us_social_security_number","credit_card_number","credit_card_expiration","credit_card_cvv","date","nationality","event","language","location","money_amount","person_name","person_age","organization","political_affiliation","occupation","religion","drivers_license","banking_information"],["person_name", 'credit_card_number'])
403
- Perform_redact = st.button("Start PII Redaction")
404
- if Perform_redact:
405
- with st.spinner(text="In progress..."):
406
- audio = extract_write_audio(vid)
407
- audio_url = upload_to_AssemblyAI("audio.wav")
408
- print(audio_url)
409
- print([ x for x in options ])
410
- polling_endpoint = pii_redact(audio_url,options)
411
- results = pii_redact_audio(polling_endpoint)
412
- download_redact_audio(polling_endpoint)
413
- redact_audio_video_display(vid,"redacted_audio.mp3")
414
-
415
- Model_type = st.sidebar.selectbox("Choose Model",('Tiny - Best for Srt generation', 'Base - Best suited for various AI services', 'Medium - Use this model for filler word removal'),0)
416
- upload_video = st.sidebar.file_uploader("Upload mp4 file",type=["mp4","mpeg"])
417
- youtube_url = st.sidebar.text_input("Enter a youtube video url")
418
- # submit_button = st.sidebar.button("Extract Youtube Video")
419
-
420
- if Model_type.startswith("Tiny"):
421
- model_selected = 'tiny.en'
422
- if Model_type.startswith("Base"):
423
- model_selected = 'base.en'
424
- if Model_type.startswith("Small"):
425
- model_selected = 'small.en'
426
- if Model_type.startswith("Medium"):
427
- model_selected = 'medium.en'
428
-
429
- if youtube_url:
430
- asyncio.run(main(youtube_url,model_selected))
431
-
432
- if upload_video:
433
- asyncio.run(main(upload_video,model_selected))
434
-
435
- st.sidebar.write("Kindly upload or provide a YouTube link with less than a minute of video for faster performance and to avoid excess usage of the free tier.")
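The filler-word cut itself hinges on the `overlap` / `merge_overlapping_time_intervals` pair above: the keep-segments between detected fillers are merged before `subclip` is applied. A behaviorally equivalent, dependency-free sketch with made-up timestamps:

from typing import List


def overlap(a: List[float], b: List[float]) -> bool:
    return min(a[1], b[1]) - max(a[0], b[0]) >= 0


def merge_overlapping_time_intervals(intervals: List[List[float]]) -> List[List[float]]:
    result = [list(intervals[0])]
    for interval in intervals[1:]:
        last = result[-1]
        if overlap(interval, last):
            result[-1] = [min(interval[0], last[0]), max(interval[1], last[1])]
        else:
            result.append(list(interval))
    return result


# Keep-segments around two removed filler words, e.g. near 3.2s and 7.75s.
print(merge_overlapping_time_intervals([[0.0, 3.1], [3.4, 7.8], [7.7, 12.0]]))
# -> [[0.0, 3.1], [3.4, 12.0]]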
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py DELETED
@@ -1,864 +0,0 @@
1
-
2
- import math
3
- import json
4
- import copy
5
- from typing import List, Dict
6
- import numpy as np
7
- import torch
8
- from torch import nn
9
- from torch.nn import functional as F
10
-
11
- from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
12
- from detectron2.layers import ShapeSpec, cat
13
- from detectron2.structures import Instances, Boxes
14
- from detectron2.modeling import detector_postprocess
15
- from detectron2.utils.comm import get_world_size
16
- from detectron2.config import configurable
17
-
18
- from ..layers.heatmap_focal_loss import heatmap_focal_loss_jit
19
- from ..layers.heatmap_focal_loss import binary_heatmap_focal_loss
20
- from ..layers.iou_loss import IOULoss
21
- from ..layers.ml_nms import ml_nms
22
- from ..debug import debug_train, debug_test
23
- from .utils import reduce_sum, _transpose
24
- from .centernet_head import CenterNetHead
25
-
26
- __all__ = ["CenterNet"]
27
-
28
- INF = 100000000
29
-
30
- @PROPOSAL_GENERATOR_REGISTRY.register()
31
- class CenterNet(nn.Module):
32
- @configurable
33
- def __init__(self,
34
- # input_shape: Dict[str, ShapeSpec],
35
- in_channels=256,
36
- *,
37
- num_classes=80,
38
- in_features=("p3", "p4", "p5", "p6", "p7"),
39
- strides=(8, 16, 32, 64, 128),
40
- score_thresh=0.05,
41
- hm_min_overlap=0.8,
42
- loc_loss_type='giou',
43
- min_radius=4,
44
- hm_focal_alpha=0.25,
45
- hm_focal_beta=4,
46
- loss_gamma=2.0,
47
- reg_weight=2.0,
48
- not_norm_reg=True,
49
- with_agn_hm=False,
50
- only_proposal=False,
51
- as_proposal=False,
52
- not_nms=False,
53
- pos_weight=1.,
54
- neg_weight=1.,
55
- sigmoid_clamp=1e-4,
56
- ignore_high_fp=-1.,
57
- center_nms=False,
58
- sizes_of_interest=[[0,80],[64,160],[128,320],[256,640],[512,10000000]],
59
- more_pos=False,
60
- more_pos_thresh=0.2,
61
- more_pos_topk=9,
62
- pre_nms_topk_train=1000,
63
- pre_nms_topk_test=1000,
64
- post_nms_topk_train=100,
65
- post_nms_topk_test=100,
66
- nms_thresh_train=0.6,
67
- nms_thresh_test=0.6,
68
- no_reduce=False,
69
- debug=False,
70
- vis_thresh=0.5,
71
- pixel_mean=[103.530,116.280,123.675],
72
- pixel_std=[1.0,1.0,1.0],
73
- device='cuda',
74
- centernet_head=None,
75
- ):
76
- super().__init__()
77
- self.num_classes = num_classes
78
- self.in_features = in_features
79
- self.strides = strides
80
- self.score_thresh = score_thresh
81
- self.min_radius = min_radius
82
- self.hm_focal_alpha = hm_focal_alpha
83
- self.hm_focal_beta = hm_focal_beta
84
- self.loss_gamma = loss_gamma
85
- self.reg_weight = reg_weight
86
- self.not_norm_reg = not_norm_reg
87
- self.with_agn_hm = with_agn_hm
88
- self.only_proposal = only_proposal
89
- self.as_proposal = as_proposal
90
- self.not_nms = not_nms
91
- self.pos_weight = pos_weight
92
- self.neg_weight = neg_weight
93
- self.sigmoid_clamp = sigmoid_clamp
94
- self.ignore_high_fp = ignore_high_fp
95
- self.center_nms = center_nms
96
- self.sizes_of_interest = sizes_of_interest
97
- self.more_pos = more_pos
98
- self.more_pos_thresh = more_pos_thresh
99
- self.more_pos_topk = more_pos_topk
100
- self.pre_nms_topk_train = pre_nms_topk_train
101
- self.pre_nms_topk_test = pre_nms_topk_test
102
- self.post_nms_topk_train = post_nms_topk_train
103
- self.post_nms_topk_test = post_nms_topk_test
104
- self.nms_thresh_train = nms_thresh_train
105
- self.nms_thresh_test = nms_thresh_test
106
- self.no_reduce = no_reduce
107
- self.debug = debug
108
- self.vis_thresh = vis_thresh
109
- if self.center_nms:
110
- self.not_nms = True
111
- self.iou_loss = IOULoss(loc_loss_type)
112
- assert (not self.only_proposal) or self.with_agn_hm
113
- # delta for rendering heatmap
114
- self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)
115
- if centernet_head is None:
116
- self.centernet_head = CenterNetHead(
117
- in_channels=in_channels,
118
- num_levels=len(in_features),
119
- with_agn_hm=with_agn_hm,
120
- only_proposal=only_proposal)
121
- else:
122
- self.centernet_head = centernet_head
123
- if self.debug:
124
- pixel_mean = torch.Tensor(pixel_mean).to(
125
- torch.device(device)).view(3, 1, 1)
126
- pixel_std = torch.Tensor(pixel_std).to(
127
- torch.device(device)).view(3, 1, 1)
128
- self.denormalizer = lambda x: x * pixel_std + pixel_mean
129
-
130
- @classmethod
131
- def from_config(cls, cfg, input_shape):
132
- ret = {
133
- # 'input_shape': input_shape,
134
- 'in_channels': input_shape[
135
- cfg.MODEL.CENTERNET.IN_FEATURES[0]].channels,
136
- 'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,
137
- 'in_features': cfg.MODEL.CENTERNET.IN_FEATURES,
138
- 'strides': cfg.MODEL.CENTERNET.FPN_STRIDES,
139
- 'score_thresh': cfg.MODEL.CENTERNET.INFERENCE_TH,
140
- 'loc_loss_type': cfg.MODEL.CENTERNET.LOC_LOSS_TYPE,
141
- 'hm_min_overlap': cfg.MODEL.CENTERNET.HM_MIN_OVERLAP,
142
- 'min_radius': cfg.MODEL.CENTERNET.MIN_RADIUS,
143
- 'hm_focal_alpha': cfg.MODEL.CENTERNET.HM_FOCAL_ALPHA,
144
- 'hm_focal_beta': cfg.MODEL.CENTERNET.HM_FOCAL_BETA,
145
- 'loss_gamma': cfg.MODEL.CENTERNET.LOSS_GAMMA,
146
- 'reg_weight': cfg.MODEL.CENTERNET.REG_WEIGHT,
147
- 'not_norm_reg': cfg.MODEL.CENTERNET.NOT_NORM_REG,
148
- 'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
149
- 'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
150
- 'as_proposal': cfg.MODEL.CENTERNET.AS_PROPOSAL,
151
- 'not_nms': cfg.MODEL.CENTERNET.NOT_NMS,
152
- 'pos_weight': cfg.MODEL.CENTERNET.POS_WEIGHT,
153
- 'neg_weight': cfg.MODEL.CENTERNET.NEG_WEIGHT,
154
- 'sigmoid_clamp': cfg.MODEL.CENTERNET.SIGMOID_CLAMP,
155
- 'ignore_high_fp': cfg.MODEL.CENTERNET.IGNORE_HIGH_FP,
156
- 'center_nms': cfg.MODEL.CENTERNET.CENTER_NMS,
157
- 'sizes_of_interest': cfg.MODEL.CENTERNET.SOI,
158
- 'more_pos': cfg.MODEL.CENTERNET.MORE_POS,
159
- 'more_pos_thresh': cfg.MODEL.CENTERNET.MORE_POS_THRESH,
160
- 'more_pos_topk': cfg.MODEL.CENTERNET.MORE_POS_TOPK,
161
- 'pre_nms_topk_train': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN,
162
- 'pre_nms_topk_test': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TEST,
163
- 'post_nms_topk_train': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN,
164
- 'post_nms_topk_test': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TEST,
165
- 'nms_thresh_train': cfg.MODEL.CENTERNET.NMS_TH_TRAIN,
166
- 'nms_thresh_test': cfg.MODEL.CENTERNET.NMS_TH_TEST,
167
- 'no_reduce': cfg.MODEL.CENTERNET.NO_REDUCE,
168
- 'debug': cfg.DEBUG,
169
- 'vis_thresh': cfg.VIS_THRESH,
170
- 'pixel_mean': cfg.MODEL.PIXEL_MEAN,
171
- 'pixel_std': cfg.MODEL.PIXEL_STD,
172
- 'device': cfg.MODEL.DEVICE,
173
- 'centernet_head': CenterNetHead(
174
- cfg, [input_shape[f] for f in cfg.MODEL.CENTERNET.IN_FEATURES]),
175
- }
176
- return ret
177
-
178
-
179
- def forward(self, images, features_dict, gt_instances):
180
- features = [features_dict[f] for f in self.in_features]
181
- clss_per_level, reg_pred_per_level, agn_hm_pred_per_level = \
182
- self.centernet_head(features)
183
- grids = self.compute_grids(features)
184
- shapes_per_level = grids[0].new_tensor(
185
- [(x.shape[2], x.shape[3]) for x in reg_pred_per_level])
186
-
187
- if not self.training:
188
- return self.inference(
189
- images, clss_per_level, reg_pred_per_level,
190
- agn_hm_pred_per_level, grids)
191
- else:
192
- pos_inds, labels, reg_targets, flattened_hms = \
193
- self._get_ground_truth(
194
- grids, shapes_per_level, gt_instances)
195
- # logits_pred: M x F, reg_pred: M x 4, agn_hm_pred: M
196
- logits_pred, reg_pred, agn_hm_pred = self._flatten_outputs(
197
- clss_per_level, reg_pred_per_level, agn_hm_pred_per_level)
198
-
199
- if self.more_pos:
200
- # add more pixels as positive if \
201
- # 1. they are within the center3x3 region of an object
202
- # 2. their regression losses are small (<self.more_pos_thresh)
203
- pos_inds, labels = self._add_more_pos(
204
- reg_pred, gt_instances, shapes_per_level)
205
-
206
- losses = self.losses(
207
- pos_inds, labels, reg_targets, flattened_hms,
208
- logits_pred, reg_pred, agn_hm_pred)
209
-
210
- proposals = None
211
- if self.only_proposal:
212
- agn_hm_pred_per_level = [x.sigmoid() for x in agn_hm_pred_per_level]
213
- proposals = self.predict_instances(
214
- grids, agn_hm_pred_per_level, reg_pred_per_level,
215
- images.image_sizes, [None for _ in agn_hm_pred_per_level])
216
- elif self.as_proposal: # category specific bbox as agnostic proposals
217
- clss_per_level = [x.sigmoid() for x in clss_per_level]
218
- proposals = self.predict_instances(
219
- grids, clss_per_level, reg_pred_per_level,
220
- images.image_sizes, agn_hm_pred_per_level)
221
- if self.only_proposal or self.as_proposal:
222
- for p in range(len(proposals)):
223
- proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
224
- proposals[p].objectness_logits = proposals[p].get('scores')
225
- proposals[p].remove('pred_boxes')
226
- proposals[p].remove('scores')
227
- proposals[p].remove('pred_classes')
228
-
229
- if self.debug:
230
- debug_train(
231
- [self.denormalizer(x) for x in images],
232
- gt_instances, flattened_hms, reg_targets,
233
- labels, pos_inds, shapes_per_level, grids, self.strides)
234
- return proposals, losses
235
-
236
-
237
- def losses(
238
- self, pos_inds, labels, reg_targets, flattened_hms,
239
- logits_pred, reg_pred, agn_hm_pred):
240
- '''
241
- Inputs:
242
- pos_inds: N
243
- labels: N
244
- reg_targets: M x 4
245
- flattened_hms: M x C
246
- logits_pred: M x C
247
- reg_pred: M x 4
248
- agn_hm_pred: M x 1 or None
249
- N: number of positive locations in all images
250
- M: number of pixels from all FPN levels
251
- C: number of classes
252
- '''
253
- assert (torch.isfinite(reg_pred).all().item())
254
- num_pos_local = pos_inds.numel()
255
- num_gpus = get_world_size()
256
- if self.no_reduce:
257
- total_num_pos = num_pos_local * num_gpus
258
- else:
259
- total_num_pos = reduce_sum(
260
- pos_inds.new_tensor([num_pos_local])).item()
261
- num_pos_avg = max(total_num_pos / num_gpus, 1.0)
262
-
263
- losses = {}
264
- if not self.only_proposal:
265
- pos_loss, neg_loss = heatmap_focal_loss_jit(
266
- logits_pred, flattened_hms, pos_inds, labels,
267
- alpha=self.hm_focal_alpha,
268
- beta=self.hm_focal_beta,
269
- gamma=self.loss_gamma,
270
- reduction='sum',
271
- sigmoid_clamp=self.sigmoid_clamp,
272
- ignore_high_fp=self.ignore_high_fp,
273
- )
274
- pos_loss = self.pos_weight * pos_loss / num_pos_avg
275
- neg_loss = self.neg_weight * neg_loss / num_pos_avg
276
- losses['loss_centernet_pos'] = pos_loss
277
- losses['loss_centernet_neg'] = neg_loss
278
-
279
- reg_inds = torch.nonzero(reg_targets.max(dim=1)[0] >= 0).squeeze(1)
280
- reg_pred = reg_pred[reg_inds]
281
- reg_targets_pos = reg_targets[reg_inds]
282
- reg_weight_map = flattened_hms.max(dim=1)[0]
283
- reg_weight_map = reg_weight_map[reg_inds]
284
- reg_weight_map = reg_weight_map * 0 + 1 \
285
- if self.not_norm_reg else reg_weight_map
286
- if self.no_reduce:
287
- reg_norm = max(reg_weight_map.sum(), 1)
288
- else:
289
- reg_norm = max(reduce_sum(reg_weight_map.sum()).item() / num_gpus, 1)
290
-
291
- reg_loss = self.reg_weight * self.iou_loss(
292
- reg_pred, reg_targets_pos, reg_weight_map,
293
- reduction='sum') / reg_norm
294
- losses['loss_centernet_loc'] = reg_loss
295
-
296
- if self.with_agn_hm:
297
- cat_agn_heatmap = flattened_hms.max(dim=1)[0] # M
298
- agn_pos_loss, agn_neg_loss = binary_heatmap_focal_loss(
299
- agn_hm_pred, cat_agn_heatmap, pos_inds,
300
- alpha=self.hm_focal_alpha,
301
- beta=self.hm_focal_beta,
302
- gamma=self.loss_gamma,
303
- sigmoid_clamp=self.sigmoid_clamp,
304
- ignore_high_fp=self.ignore_high_fp,
305
- )
306
- agn_pos_loss = self.pos_weight * agn_pos_loss / num_pos_avg
307
- agn_neg_loss = self.neg_weight * agn_neg_loss / num_pos_avg
308
- losses['loss_centernet_agn_pos'] = agn_pos_loss
309
- losses['loss_centernet_agn_neg'] = agn_neg_loss
310
-
311
- if self.debug:
312
- print('losses', losses)
313
- print('total_num_pos', total_num_pos)
314
- return losses
315
-
316
-
317
- def compute_grids(self, features):
318
- grids = []
319
- for level, feature in enumerate(features):
320
- h, w = feature.size()[-2:]
321
- shifts_x = torch.arange(
322
- 0, w * self.strides[level],
323
- step=self.strides[level],
324
- dtype=torch.float32, device=feature.device)
325
- shifts_y = torch.arange(
326
- 0, h * self.strides[level],
327
- step=self.strides[level],
328
- dtype=torch.float32, device=feature.device)
329
- shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
330
- shift_x = shift_x.reshape(-1)
331
- shift_y = shift_y.reshape(-1)
332
- grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \
333
- self.strides[level] // 2
334
- grids.append(grids_per_level)
335
- return grids
336
-
337
-
338
- def _get_ground_truth(self, grids, shapes_per_level, gt_instances):
339
- '''
340
- Input:
341
- grids: list of tensors [(hl x wl, 2)]_l
342
- shapes_per_level: list of tuples L x 2:
343
- gt_instances: gt instances
344
- Return:
345
- pos_inds: N
346
- labels: N
347
- reg_targets: M x 4
348
- flattened_hms: M x C or M x 1
349
- N: number of objects in all images
350
- M: number of pixels from all FPN levels
351
- '''
352
-
353
- # get positive pixel index
354
- if not self.more_pos:
355
- pos_inds, labels = self._get_label_inds(
356
- gt_instances, shapes_per_level)
357
- else:
358
- pos_inds, labels = None, None
359
- heatmap_channels = self.num_classes
360
- L = len(grids)
361
- num_loc_list = [len(loc) for loc in grids]
362
- strides = torch.cat([
363
- shapes_per_level.new_ones(num_loc_list[l]) * self.strides[l] \
364
- for l in range(L)]).float() # M
365
- reg_size_ranges = torch.cat([
366
- shapes_per_level.new_tensor(self.sizes_of_interest[l]).float().view(
367
- 1, 2).expand(num_loc_list[l], 2) for l in range(L)]) # M x 2
368
- grids = torch.cat(grids, dim=0) # M x 2
369
- M = grids.shape[0]
370
-
371
- reg_targets = []
372
- flattened_hms = []
373
- for i in range(len(gt_instances)): # images
374
- boxes = gt_instances[i].gt_boxes.tensor # N x 4
375
- area = gt_instances[i].gt_boxes.area() # N
376
- gt_classes = gt_instances[i].gt_classes # N in [0, self.num_classes]
377
-
378
- N = boxes.shape[0]
379
- if N == 0:
380
- reg_targets.append(grids.new_zeros((M, 4)) - INF)
381
- flattened_hms.append(
382
- grids.new_zeros((
383
- M, 1 if self.only_proposal else heatmap_channels)))
384
- continue
385
-
386
- l = grids[:, 0].view(M, 1) - boxes[:, 0].view(1, N) # M x N
387
- t = grids[:, 1].view(M, 1) - boxes[:, 1].view(1, N) # M x N
388
- r = boxes[:, 2].view(1, N) - grids[:, 0].view(M, 1) # M x N
389
- b = boxes[:, 3].view(1, N) - grids[:, 1].view(M, 1) # M x N
390
- reg_target = torch.stack([l, t, r, b], dim=2) # M x N x 4
391
-
392
- centers = ((boxes[:, [0, 1]] + boxes[:, [2, 3]]) / 2) # N x 2
393
- centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2
394
- strides_expanded = strides.view(M, 1, 1).expand(M, N, 2)
395
- centers_discret = ((centers_expanded / strides_expanded).int() * \
396
- strides_expanded).float() + strides_expanded / 2 # M x N x 2
397
-
398
- is_peak = (((grids.view(M, 1, 2).expand(M, N, 2) - \
399
- centers_discret) ** 2).sum(dim=2) == 0) # M x N
400
- is_in_boxes = reg_target.min(dim=2)[0] > 0 # M x N
401
- is_center3x3 = self.get_center3x3(
402
- grids, centers, strides) & is_in_boxes # M x N
403
- is_cared_in_the_level = self.assign_reg_fpn(
404
- reg_target, reg_size_ranges) # M x N
405
- reg_mask = is_center3x3 & is_cared_in_the_level # M x N
406
-
407
- dist2 = ((grids.view(M, 1, 2).expand(M, N, 2) - \
408
- centers_expanded) ** 2).sum(dim=2) # M x N
409
- dist2[is_peak] = 0
410
- radius2 = self.delta ** 2 * 2 * area # N
411
- radius2 = torch.clamp(
412
- radius2, min=self.min_radius ** 2)
413
- weighted_dist2 = dist2 / radius2.view(1, N).expand(M, N) # M x N
414
- reg_target = self._get_reg_targets(
415
- reg_target, weighted_dist2.clone(), reg_mask, area) # M x 4
416
-
417
- if self.only_proposal:
418
- flattened_hm = self._create_agn_heatmaps_from_dist(
419
- weighted_dist2.clone()) # M x 1
420
- else:
421
- flattened_hm = self._create_heatmaps_from_dist(
422
- weighted_dist2.clone(), gt_classes,
423
- channels=heatmap_channels) # M x C
424
-
425
- reg_targets.append(reg_target)
426
- flattened_hms.append(flattened_hm)
427
-
428
- # transpose im first training_targets to level first ones
429
- reg_targets = _transpose(reg_targets, num_loc_list)
430
- flattened_hms = _transpose(flattened_hms, num_loc_list)
431
- for l in range(len(reg_targets)):
432
- reg_targets[l] = reg_targets[l] / float(self.strides[l])
433
- reg_targets = cat([x for x in reg_targets], dim=0) # MB x 4
434
- flattened_hms = cat([x for x in flattened_hms], dim=0) # MB x C
435
-
436
- return pos_inds, labels, reg_targets, flattened_hms
437
-
438
-
439
- def _get_label_inds(self, gt_instances, shapes_per_level):
440
- '''
441
- Inputs:
442
- gt_instances: [n_i], sum n_i = N
443
- shapes_per_level: L x 2 [(h_l, w_l)]_L
444
- Returns:
445
- pos_inds: N'
446
- labels: N'
447
- '''
448
- pos_inds = []
449
- labels = []
450
- L = len(self.strides)
451
- B = len(gt_instances)
452
- shapes_per_level = shapes_per_level.long()
453
- loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L
454
- level_bases = []
455
- s = 0
456
- for l in range(L):
457
- level_bases.append(s)
458
- s = s + B * loc_per_level[l]
459
- level_bases = shapes_per_level.new_tensor(level_bases).long() # L
460
- strides_default = shapes_per_level.new_tensor(self.strides).float() # L
461
- for im_i in range(B):
462
- targets_per_im = gt_instances[im_i]
463
- bboxes = targets_per_im.gt_boxes.tensor # n x 4
464
- n = bboxes.shape[0]
465
- centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2
466
- centers = centers.view(n, 1, 2).expand(n, L, 2)
467
- strides = strides_default.view(1, L, 1).expand(n, L, 2)
468
- centers_inds = (centers / strides).long() # n x L x 2
469
- Ws = shapes_per_level[:, 1].view(1, L).expand(n, L)
470
- pos_ind = level_bases.view(1, L).expand(n, L) + \
471
- im_i * loc_per_level.view(1, L).expand(n, L) + \
472
- centers_inds[:, :, 1] * Ws + \
473
- centers_inds[:, :, 0] # n x L
474
- is_cared_in_the_level = self.assign_fpn_level(bboxes)
475
- pos_ind = pos_ind[is_cared_in_the_level].view(-1)
476
- label = targets_per_im.gt_classes.view(
477
- n, 1).expand(n, L)[is_cared_in_the_level].view(-1)
478
-
479
- pos_inds.append(pos_ind) # n'
480
- labels.append(label) # n'
481
- pos_inds = torch.cat(pos_inds, dim=0).long()
482
- labels = torch.cat(labels, dim=0)
483
- return pos_inds, labels # N, N
484
-
485
-
486
- def assign_fpn_level(self, boxes):
487
- '''
488
- Inputs:
489
- boxes: n x 4
490
- size_ranges: L x 2
491
- Return:
492
- is_cared_in_the_level: n x L
493
- '''
494
- size_ranges = boxes.new_tensor(
495
- self.sizes_of_interest).view(len(self.sizes_of_interest), 2) # L x 2
496
- crit = ((boxes[:, 2:] - boxes[:, :2]) **2).sum(dim=1) ** 0.5 / 2 # n
497
- n, L = crit.shape[0], size_ranges.shape[0]
498
- crit = crit.view(n, 1).expand(n, L)
499
- size_ranges_expand = size_ranges.view(1, L, 2).expand(n, L, 2)
500
- is_cared_in_the_level = (crit >= size_ranges_expand[:, :, 0]) & \
501
- (crit <= size_ranges_expand[:, :, 1])
502
- return is_cared_in_the_level
503
-
504
-
505
- def assign_reg_fpn(self, reg_targets_per_im, size_ranges):
506
- '''
507
- TODO (Xingyi): merge it with assign_fpn_level
508
- Inputs:
509
- reg_targets_per_im: M x N x 4
510
- size_ranges: M x 2
511
- '''
512
- crit = ((reg_targets_per_im[:, :, :2] + \
513
- reg_targets_per_im[:, :, 2:])**2).sum(dim=2) ** 0.5 / 2 # M x N
514
- is_cared_in_the_level = (crit >= size_ranges[:, [0]]) & \
515
- (crit <= size_ranges[:, [1]])
516
- return is_cared_in_the_level
517
-
518
-
519
- def _get_reg_targets(self, reg_targets, dist, mask, area):
520
- '''
521
- reg_targets (M x N x 4): long tensor
522
- dist (M x N)
523
- is_*: M x N
524
- '''
525
- dist[mask == 0] = INF * 1.0
526
- min_dist, min_inds = dist.min(dim=1) # M
527
- reg_targets_per_im = reg_targets[
528
- range(len(reg_targets)), min_inds] # M x N x 4 --> M x 4
529
- reg_targets_per_im[min_dist == INF] = - INF
530
- return reg_targets_per_im
531
-
532
-
533
- def _create_heatmaps_from_dist(self, dist, labels, channels):
534
- '''
535
- dist: M x N
536
- labels: N
537
- return:
538
- heatmaps: M x C
539
- '''
540
- heatmaps = dist.new_zeros((dist.shape[0], channels))
541
- for c in range(channels):
542
- inds = (labels == c) # N
543
- if inds.int().sum() == 0:
544
- continue
545
- heatmaps[:, c] = torch.exp(-dist[:, inds].min(dim=1)[0])
546
- zeros = heatmaps[:, c] < 1e-4
547
- heatmaps[zeros, c] = 0
548
- return heatmaps
549
-
550
-
551
- def _create_agn_heatmaps_from_dist(self, dist):
552
- '''
553
- TODO (Xingyi): merge it with _create_heatmaps_from_dist
554
- dist: M x N
555
- return:
556
- heatmaps: M x 1
557
- '''
558
- heatmaps = dist.new_zeros((dist.shape[0], 1))
559
- heatmaps[:, 0] = torch.exp(-dist.min(dim=1)[0])
560
- zeros = heatmaps < 1e-4
561
- heatmaps[zeros] = 0
562
- return heatmaps
563
-
564
-
565
- def _flatten_outputs(self, clss, reg_pred, agn_hm_pred):
566
- # Reshape: (N, F, Hl, Wl) -> (N, Hl, Wl, F) -> (sum_l N*Hl*Wl, F)
567
- clss = cat([x.permute(0, 2, 3, 1).reshape(-1, x.shape[1]) \
568
- for x in clss], dim=0) if clss[0] is not None else None
569
- reg_pred = cat(
570
- [x.permute(0, 2, 3, 1).reshape(-1, 4) for x in reg_pred], dim=0)
571
- agn_hm_pred = cat([x.permute(0, 2, 3, 1).reshape(-1) \
572
- for x in agn_hm_pred], dim=0) if self.with_agn_hm else None
573
- return clss, reg_pred, agn_hm_pred
574
-
575
-
576
- def get_center3x3(self, locations, centers, strides):
577
- '''
578
- Inputs:
579
- locations: M x 2
580
- centers: N x 2
581
- strides: M
582
- '''
583
- M, N = locations.shape[0], centers.shape[0]
584
- locations_expanded = locations.view(M, 1, 2).expand(M, N, 2) # M x N x 2
585
- centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2
586
- strides_expanded = strides.view(M, 1, 1).expand(M, N, 2) # M x N
587
- centers_discret = ((centers_expanded / strides_expanded).int() * \
588
- strides_expanded).float() + strides_expanded / 2 # M x N x 2
589
- dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs()
590
- dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs()
591
- return (dist_x <= strides_expanded[:, :, 0]) & \
592
- (dist_y <= strides_expanded[:, :, 0])
593
-
594
-
595
- def inference(self, images, clss_per_level, reg_pred_per_level,
596
- agn_hm_pred_per_level, grids):
597
- logits_pred = [x.sigmoid() if x is not None else None \
598
- for x in clss_per_level]
599
- agn_hm_pred_per_level = [x.sigmoid() if x is not None else None \
600
- for x in agn_hm_pred_per_level]
601
-
602
- if self.only_proposal:
603
- proposals = self.predict_instances(
604
- grids, agn_hm_pred_per_level, reg_pred_per_level,
605
- images.image_sizes, [None for _ in agn_hm_pred_per_level])
606
- else:
607
- proposals = self.predict_instances(
608
- grids, logits_pred, reg_pred_per_level,
609
- images.image_sizes, agn_hm_pred_per_level)
610
- if self.as_proposal or self.only_proposal:
611
- for p in range(len(proposals)):
612
- proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
613
- proposals[p].objectness_logits = proposals[p].get('scores')
614
- proposals[p].remove('pred_boxes')
615
-
616
- if self.debug:
617
- debug_test(
618
- [self.denormalizer(x) for x in images],
619
- logits_pred, reg_pred_per_level,
620
- agn_hm_pred_per_level, preds=proposals,
621
- vis_thresh=self.vis_thresh,
622
- debug_show_name=False)
623
- return proposals, {}
624
-
625
-
626
- def predict_instances(
627
- self, grids, logits_pred, reg_pred, image_sizes, agn_hm_pred,
628
- is_proposal=False):
629
- sampled_boxes = []
630
- for l in range(len(grids)):
631
- sampled_boxes.append(self.predict_single_level(
632
- grids[l], logits_pred[l], reg_pred[l] * self.strides[l],
633
- image_sizes, agn_hm_pred[l], l, is_proposal=is_proposal))
634
- boxlists = list(zip(*sampled_boxes))
635
- boxlists = [Instances.cat(boxlist) for boxlist in boxlists]
636
- boxlists = self.nms_and_topK(
637
- boxlists, nms=not self.not_nms)
638
- return boxlists
639
-
640
-
641
- def predict_single_level(
642
- self, grids, heatmap, reg_pred, image_sizes, agn_hm, level,
643
- is_proposal=False):
644
- N, C, H, W = heatmap.shape
645
- # put in the same format as grids
646
- if self.center_nms:
647
- heatmap_nms = nn.functional.max_pool2d(
648
- heatmap, (3, 3), stride=1, padding=1)
649
- heatmap = heatmap * (heatmap_nms == heatmap).float()
650
- heatmap = heatmap.permute(0, 2, 3, 1) # N x H x W x C
651
- heatmap = heatmap.reshape(N, -1, C) # N x HW x C
652
- box_regression = reg_pred.view(N, 4, H, W).permute(0, 2, 3, 1) # N x H x W x 4
653
- box_regression = box_regression.reshape(N, -1, 4)
654
-
655
- candidate_inds = heatmap > self.score_thresh # 0.05
656
- pre_nms_top_n = candidate_inds.view(N, -1).sum(1) # N
657
- pre_nms_topk = self.pre_nms_topk_train if self.training else self.pre_nms_topk_test
658
- pre_nms_top_n = pre_nms_top_n.clamp(max=pre_nms_topk) # N
659
-
660
- if agn_hm is not None:
661
- agn_hm = agn_hm.view(N, 1, H, W).permute(0, 2, 3, 1)
662
- agn_hm = agn_hm.reshape(N, -1)
663
- heatmap = heatmap * agn_hm[:, :, None]
664
-
665
- results = []
666
- for i in range(N):
667
- per_box_cls = heatmap[i] # HW x C
668
- per_candidate_inds = candidate_inds[i] # n
669
- per_box_cls = per_box_cls[per_candidate_inds] # n
670
-
671
- per_candidate_nonzeros = per_candidate_inds.nonzero() # n
672
- per_box_loc = per_candidate_nonzeros[:, 0] # n
673
- per_class = per_candidate_nonzeros[:, 1] # n
674
-
675
- per_box_regression = box_regression[i] # HW x 4
676
- per_box_regression = per_box_regression[per_box_loc] # n x 4
677
- per_grids = grids[per_box_loc] # n x 2
678
-
679
- per_pre_nms_top_n = pre_nms_top_n[i] # 1
680
-
681
- if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
682
- per_box_cls, top_k_indices = \
683
- per_box_cls.topk(per_pre_nms_top_n, sorted=False)
684
- per_class = per_class[top_k_indices]
685
- per_box_regression = per_box_regression[top_k_indices]
686
- per_grids = per_grids[top_k_indices]
687
-
688
- detections = torch.stack([
689
- per_grids[:, 0] - per_box_regression[:, 0],
690
- per_grids[:, 1] - per_box_regression[:, 1],
691
- per_grids[:, 0] + per_box_regression[:, 2],
692
- per_grids[:, 1] + per_box_regression[:, 3],
693
- ], dim=1) # n x 4
694
-
695
- # avoid invalid boxes in RoI heads
696
- detections[:, 2] = torch.max(detections[:, 2], detections[:, 0] + 0.01)
697
- detections[:, 3] = torch.max(detections[:, 3], detections[:, 1] + 0.01)
698
- boxlist = Instances(image_sizes[i])
699
- boxlist.scores = torch.sqrt(per_box_cls) \
700
- if self.with_agn_hm else per_box_cls # n
701
- # import pdb; pdb.set_trace()
702
- boxlist.pred_boxes = Boxes(detections)
703
- boxlist.pred_classes = per_class
704
- results.append(boxlist)
705
- return results
706
-
707
-
708
- def nms_and_topK(self, boxlists, nms=True):
709
- num_images = len(boxlists)
710
- results = []
711
- for i in range(num_images):
712
- nms_thresh = self.nms_thresh_train if self.training else \
713
- self.nms_thresh_test
714
- result = ml_nms(boxlists[i], nms_thresh) if nms else boxlists[i]
715
- if self.debug:
716
- print('#proposals before nms', len(boxlists[i]))
717
- print('#proposals after nms', len(result))
718
- num_dets = len(result)
719
- post_nms_topk = self.post_nms_topk_train if self.training else \
720
- self.post_nms_topk_test
721
- if num_dets > post_nms_topk:
722
- cls_scores = result.scores
723
- image_thresh, _ = torch.kthvalue(
724
- cls_scores.float().cpu(),
725
- num_dets - post_nms_topk + 1
726
- )
727
- keep = cls_scores >= image_thresh.item()
728
- keep = torch.nonzero(keep).squeeze(1)
729
- result = result[keep]
730
- if self.debug:
731
- print('#proposals after filter', len(result))
732
- results.append(result)
733
- return results
734
-
735
-
736
- def _add_more_pos(self, reg_pred, gt_instances, shapes_per_level):
737
- labels, level_masks, c33_inds, c33_masks, c33_regs = \
738
- self._get_c33_inds(gt_instances, shapes_per_level)
739
- N, L, K = labels.shape[0], len(self.strides), 9
740
- c33_inds[c33_masks == 0] = 0
741
- reg_pred_c33 = reg_pred[c33_inds].detach() # N x L x K
742
- invalid_reg = c33_masks == 0
743
- c33_regs_expand = c33_regs.view(N * L * K, 4).clamp(min=0)
744
- if N > 0:
745
- with torch.no_grad():
746
- c33_reg_loss = self.iou_loss(
747
- reg_pred_c33.view(N * L * K, 4),
748
- c33_regs_expand, None,
749
- reduction='none').view(N, L, K).detach() # N x L x K
750
- else:
751
- c33_reg_loss = reg_pred_c33.new_zeros((N, L, K)).detach()
752
- c33_reg_loss[invalid_reg] = INF # N x L x K
753
- c33_reg_loss.view(N * L, K)[level_masks.view(N * L), 4] = 0 # real center
754
- c33_reg_loss = c33_reg_loss.view(N, L * K)
755
- if N == 0:
756
- loss_thresh = c33_reg_loss.new_ones((N)).float()
757
- else:
758
- loss_thresh = torch.kthvalue(
759
- c33_reg_loss, self.more_pos_topk, dim=1)[0] # N
760
- loss_thresh[loss_thresh > self.more_pos_thresh] = self.more_pos_thresh # N
761
- new_pos = c33_reg_loss.view(N, L, K) < \
762
- loss_thresh.view(N, 1, 1).expand(N, L, K)
763
- pos_inds = c33_inds[new_pos].view(-1) # P
764
- labels = labels.view(N, 1, 1).expand(N, L, K)[new_pos].view(-1)
765
- return pos_inds, labels
766
-
767
-
768
- def _get_c33_inds(self, gt_instances, shapes_per_level):
769
- '''
770
- TODO (Xingyi): The current implementation is ugly. Refactor.
771
- Get the center (and the 3x3 region near center) locations of each objects
772
- Inputs:
773
- gt_instances: [n_i], sum n_i = N
774
- shapes_per_level: L x 2 [(h_l, w_l)]_L
775
- '''
776
- labels = []
777
- level_masks = []
778
- c33_inds = []
779
- c33_masks = []
780
- c33_regs = []
781
- L = len(self.strides)
782
- B = len(gt_instances)
783
- shapes_per_level = shapes_per_level.long()
784
- loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L
785
- level_bases = []
786
- s = 0
787
- for l in range(L):
788
- level_bases.append(s)
789
- s = s + B * loc_per_level[l]
790
- level_bases = shapes_per_level.new_tensor(level_bases).long() # L
791
- strides_default = shapes_per_level.new_tensor(self.strides).float() # L
792
- K = 9
793
- dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1]).long()
794
- dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1]).long()
795
- for im_i in range(B):
796
- targets_per_im = gt_instances[im_i]
797
- bboxes = targets_per_im.gt_boxes.tensor # n x 4
798
- n = bboxes.shape[0]
799
- if n == 0:
800
- continue
801
- centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2
802
- centers = centers.view(n, 1, 2).expand(n, L, 2)
803
-
804
- strides = strides_default.view(1, L, 1).expand(n, L, 2) #
805
- centers_inds = (centers / strides).long() # n x L x 2
806
- center_grids = centers_inds * strides + strides // 2# n x L x 2
807
- l = center_grids[:, :, 0] - bboxes[:, 0].view(n, 1).expand(n, L)
808
- t = center_grids[:, :, 1] - bboxes[:, 1].view(n, 1).expand(n, L)
809
- r = bboxes[:, 2].view(n, 1).expand(n, L) - center_grids[:, :, 0]
810
- b = bboxes[:, 3].view(n, 1).expand(n, L) - center_grids[:, :, 1] # n x L
811
- reg = torch.stack([l, t, r, b], dim=2) # n x L x 4
812
- reg = reg / strides_default.view(1, L, 1).expand(n, L, 4).float()
813
-
814
- Ws = shapes_per_level[:, 1].view(1, L).expand(n, L)
815
- Hs = shapes_per_level[:, 0].view(1, L).expand(n, L)
816
- expand_Ws = Ws.view(n, L, 1).expand(n, L, K)
817
- expand_Hs = Hs.view(n, L, 1).expand(n, L, K)
818
- label = targets_per_im.gt_classes.view(n).clone()
819
- mask = reg.min(dim=2)[0] >= 0 # n x L
820
- mask = mask & self.assign_fpn_level(bboxes)
821
- labels.append(label) # n
822
- level_masks.append(mask) # n x L
823
-
824
- Dy = dy.view(1, 1, K).expand(n, L, K)
825
- Dx = dx.view(1, 1, K).expand(n, L, K)
826
- c33_ind = level_bases.view(1, L, 1).expand(n, L, K) + \
827
- im_i * loc_per_level.view(1, L, 1).expand(n, L, K) + \
828
- (centers_inds[:, :, 1:2].expand(n, L, K) + Dy) * expand_Ws + \
829
- (centers_inds[:, :, 0:1].expand(n, L, K) + Dx) # n x L x K
830
-
831
- c33_mask = \
832
- ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) < expand_Hs) & \
833
- ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) >= 0) & \
834
- ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) < expand_Ws) & \
835
- ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) >= 0)
836
- # TODO (Xingyi): think about better way to implement this
837
- # Currently it hard codes the 3x3 region
838
- c33_reg = reg.view(n, L, 1, 4).expand(n, L, K, 4).clone()
839
- c33_reg[:, :, [0, 3, 6], 0] -= 1
840
- c33_reg[:, :, [0, 3, 6], 2] += 1
841
- c33_reg[:, :, [2, 5, 8], 0] += 1
842
- c33_reg[:, :, [2, 5, 8], 2] -= 1
843
- c33_reg[:, :, [0, 1, 2], 1] -= 1
844
- c33_reg[:, :, [0, 1, 2], 3] += 1
845
- c33_reg[:, :, [6, 7, 8], 1] += 1
846
- c33_reg[:, :, [6, 7, 8], 3] -= 1
847
- c33_mask = c33_mask & (c33_reg.min(dim=3)[0] >= 0) # n x L x K
848
- c33_inds.append(c33_ind)
849
- c33_masks.append(c33_mask)
850
- c33_regs.append(c33_reg)
851
-
852
- if len(level_masks) > 0:
853
- labels = torch.cat(labels, dim=0)
854
- level_masks = torch.cat(level_masks, dim=0)
855
- c33_inds = torch.cat(c33_inds, dim=0).long()
856
- c33_regs = torch.cat(c33_regs, dim=0)
857
- c33_masks = torch.cat(c33_masks, dim=0)
858
- else:
859
- labels = shapes_per_level.new_zeros((0)).long()
860
- level_masks = shapes_per_level.new_zeros((0, L)).bool()
861
- c33_inds = shapes_per_level.new_zeros((0, L, K)).long()
862
- c33_regs = shapes_per_level.new_zeros((0, L, K, 4)).float()
863
- c33_masks = shapes_per_level.new_zeros((0, L, K)).bool()
864
- return labels, level_masks, c33_inds, c33_masks, c33_regs # N x L, N x L x K
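The box decoding inside predict_single_level above is easier to follow as a standalone snippet. Below is a minimal sketch (not part of the deleted file; tensor names are illustrative) of how grid points and the (l, t, r, b) regression outputs are combined into boxes, including the 0.01 clamp that keeps RoI heads from seeing degenerate boxes.

    import torch

    def decode_ltrb(grids: torch.Tensor, reg: torch.Tensor) -> torch.Tensor:
        # grids: n x 2 grid-point coordinates; reg: n x 4 (l, t, r, b) distances
        boxes = torch.stack([
            grids[:, 0] - reg[:, 0],   # x1 = grid_x - left
            grids[:, 1] - reg[:, 1],   # y1 = grid_y - top
            grids[:, 0] + reg[:, 2],   # x2 = grid_x + right
            grids[:, 1] + reg[:, 3],   # y2 = grid_y + bottom
        ], dim=1)
        # keep every box at least 0.01 wide and tall, as in the deleted code
        boxes[:, 2] = torch.max(boxes[:, 2], boxes[:, 0] + 0.01)
        boxes[:, 3] = torch.max(boxes[:, 3], boxes[:, 1] + 0.01)
        return boxes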
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/deform_conv.py DELETED
@@ -1,116 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
- from detectron2.layers import Conv2d
5
-
6
-
7
- class _NewEmptyTensorOp(torch.autograd.Function):
8
- @staticmethod
9
- def forward(ctx, x, new_shape):
10
- ctx.shape = x.shape
11
- return x.new_empty(new_shape)
12
-
13
- @staticmethod
14
- def backward(ctx, grad):
15
- shape = ctx.shape
16
- return _NewEmptyTensorOp.apply(grad, shape), None
17
-
18
-
19
- class DFConv2d(nn.Module):
20
- """Deformable convolutional layer"""
21
- def __init__(
22
- self,
23
- in_channels,
24
- out_channels,
25
- with_modulated_dcn=True,
26
- kernel_size=3,
27
- stride=1,
28
- groups=1,
29
- dilation=1,
30
- deformable_groups=1,
31
- bias=False,
32
- padding=None
33
- ):
34
- super(DFConv2d, self).__init__()
35
- if isinstance(kernel_size, (list, tuple)):
36
- assert isinstance(stride, (list, tuple))
37
- assert isinstance(dilation, (list, tuple))
38
- assert len(kernel_size) == 2
39
- assert len(stride) == 2
40
- assert len(dilation) == 2
41
- padding = (
42
- dilation[0] * (kernel_size[0] - 1) // 2,
43
- dilation[1] * (kernel_size[1] - 1) // 2
44
- )
45
- offset_base_channels = kernel_size[0] * kernel_size[1]
46
- else:
47
- padding = dilation * (kernel_size - 1) // 2
48
- offset_base_channels = kernel_size * kernel_size
49
- if with_modulated_dcn:
50
- from detectron2.layers.deform_conv import ModulatedDeformConv
51
- offset_channels = offset_base_channels * 3 # default: 27
52
- conv_block = ModulatedDeformConv
53
- else:
54
- from detectron2.layers.deform_conv import DeformConv
55
- offset_channels = offset_base_channels * 2 # default: 18
56
- conv_block = DeformConv
57
- self.offset = Conv2d(
58
- in_channels,
59
- deformable_groups * offset_channels,
60
- kernel_size=kernel_size,
61
- stride=stride,
62
- padding=padding,
63
- groups=1,
64
- dilation=dilation
65
- )
66
- nn.init.constant_(self.offset.weight, 0)
67
- nn.init.constant_(self.offset.bias, 0)
68
- '''
69
- for l in [self.offset, ]:
70
- nn.init.kaiming_uniform_(l.weight, a=1)
71
- torch.nn.init.constant_(l.bias, 0.)
72
- '''
73
- self.conv = conv_block(
74
- in_channels,
75
- out_channels,
76
- kernel_size=kernel_size,
77
- stride=stride,
78
- padding=padding,
79
- dilation=dilation,
80
- groups=groups,
81
- deformable_groups=deformable_groups,
82
- bias=bias
83
- )
84
- self.with_modulated_dcn = with_modulated_dcn
85
- self.kernel_size = kernel_size
86
- self.stride = stride
87
- self.padding = padding
88
- self.dilation = dilation
89
- self.offset_split = offset_base_channels * deformable_groups * 2
90
-
91
- def forward(self, x, return_offset=False):
92
- if x.numel() > 0:
93
- if not self.with_modulated_dcn:
94
- offset_mask = self.offset(x)
95
- x = self.conv(x, offset_mask)
96
- else:
97
- offset_mask = self.offset(x)
98
- offset = offset_mask[:, :self.offset_split, :, :]
99
- mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
100
- x = self.conv(x, offset, mask)
101
- if return_offset:
102
- return x, offset_mask
103
- return x
104
- # get output shape
105
- output_shape = [
106
- (i + 2 * p - (di * (k - 1) + 1)) // d + 1
107
- for i, p, di, k, d in zip(
108
- x.shape[-2:],
109
- self.padding,
110
- self.dilation,
111
- self.kernel_size,
112
- self.stride
113
- )
114
- ]
115
- output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
116
- return _NewEmptyTensorOp.apply(x, output_shape)
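For reference, a hedged usage sketch of the DFConv2d layer deleted above. It assumes the class defined in this file is importable and that detectron2's deformable-conv CUDA ops are built (ModulatedDeformConv needs a GPU); shapes are illustrative.

    import torch
    # DFConv2d is the class shown above; a GPU build of detectron2 is assumed.

    conv = DFConv2d(in_channels=256, out_channels=256, with_modulated_dcn=True).cuda()
    x = torch.randn(2, 256, 64, 64, device="cuda")
    y = conv(x)                                    # 2 x 256 x 64 x 64, offsets predicted internally
    y, offset_mask = conv(x, return_offset=True)   # also expose the predicted offset/mask map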
spaces/BartPoint/VoiceChange/infer_pack/modules.py DELETED
@@ -1,522 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- from infer_pack import commons
13
- from infer_pack.commons import init_weights, get_padding
14
- from infer_pack.transforms import piecewise_rational_quadratic_transform
15
-
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
-
20
- class LayerNorm(nn.Module):
21
- def __init__(self, channels, eps=1e-5):
22
- super().__init__()
23
- self.channels = channels
24
- self.eps = eps
25
-
26
- self.gamma = nn.Parameter(torch.ones(channels))
27
- self.beta = nn.Parameter(torch.zeros(channels))
28
-
29
- def forward(self, x):
30
- x = x.transpose(1, -1)
31
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
- return x.transpose(1, -1)
33
-
34
-
35
- class ConvReluNorm(nn.Module):
36
- def __init__(
37
- self,
38
- in_channels,
39
- hidden_channels,
40
- out_channels,
41
- kernel_size,
42
- n_layers,
43
- p_dropout,
44
- ):
45
- super().__init__()
46
- self.in_channels = in_channels
47
- self.hidden_channels = hidden_channels
48
- self.out_channels = out_channels
49
- self.kernel_size = kernel_size
50
- self.n_layers = n_layers
51
- self.p_dropout = p_dropout
52
- assert n_layers > 1, "Number of layers should be larger than 1."
53
-
54
- self.conv_layers = nn.ModuleList()
55
- self.norm_layers = nn.ModuleList()
56
- self.conv_layers.append(
57
- nn.Conv1d(
58
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
- )
60
- )
61
- self.norm_layers.append(LayerNorm(hidden_channels))
62
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
- for _ in range(n_layers - 1):
64
- self.conv_layers.append(
65
- nn.Conv1d(
66
- hidden_channels,
67
- hidden_channels,
68
- kernel_size,
69
- padding=kernel_size // 2,
70
- )
71
- )
72
- self.norm_layers.append(LayerNorm(hidden_channels))
73
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
- self.proj.weight.data.zero_()
75
- self.proj.bias.data.zero_()
76
-
77
- def forward(self, x, x_mask):
78
- x_org = x
79
- for i in range(self.n_layers):
80
- x = self.conv_layers[i](x * x_mask)
81
- x = self.norm_layers[i](x)
82
- x = self.relu_drop(x)
83
- x = x_org + self.proj(x)
84
- return x * x_mask
85
-
86
-
87
- class DDSConv(nn.Module):
88
- """
89
- Dilated and Depth-Separable Convolution
90
- """
91
-
92
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
- super().__init__()
94
- self.channels = channels
95
- self.kernel_size = kernel_size
96
- self.n_layers = n_layers
97
- self.p_dropout = p_dropout
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.convs_sep = nn.ModuleList()
101
- self.convs_1x1 = nn.ModuleList()
102
- self.norms_1 = nn.ModuleList()
103
- self.norms_2 = nn.ModuleList()
104
- for i in range(n_layers):
105
- dilation = kernel_size**i
106
- padding = (kernel_size * dilation - dilation) // 2
107
- self.convs_sep.append(
108
- nn.Conv1d(
109
- channels,
110
- channels,
111
- kernel_size,
112
- groups=channels,
113
- dilation=dilation,
114
- padding=padding,
115
- )
116
- )
117
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
- self.norms_1.append(LayerNorm(channels))
119
- self.norms_2.append(LayerNorm(channels))
120
-
121
- def forward(self, x, x_mask, g=None):
122
- if g is not None:
123
- x = x + g
124
- for i in range(self.n_layers):
125
- y = self.convs_sep[i](x * x_mask)
126
- y = self.norms_1[i](y)
127
- y = F.gelu(y)
128
- y = self.convs_1x1[i](y)
129
- y = self.norms_2[i](y)
130
- y = F.gelu(y)
131
- y = self.drop(y)
132
- x = x + y
133
- return x * x_mask
134
-
135
-
136
- class WN(torch.nn.Module):
137
- def __init__(
138
- self,
139
- hidden_channels,
140
- kernel_size,
141
- dilation_rate,
142
- n_layers,
143
- gin_channels=0,
144
- p_dropout=0,
145
- ):
146
- super(WN, self).__init__()
147
- assert kernel_size % 2 == 1
148
- self.hidden_channels = hidden_channels
149
- self.kernel_size = (kernel_size,)
150
- self.dilation_rate = dilation_rate
151
- self.n_layers = n_layers
152
- self.gin_channels = gin_channels
153
- self.p_dropout = p_dropout
154
-
155
- self.in_layers = torch.nn.ModuleList()
156
- self.res_skip_layers = torch.nn.ModuleList()
157
- self.drop = nn.Dropout(p_dropout)
158
-
159
- if gin_channels != 0:
160
- cond_layer = torch.nn.Conv1d(
161
- gin_channels, 2 * hidden_channels * n_layers, 1
162
- )
163
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
-
165
- for i in range(n_layers):
166
- dilation = dilation_rate**i
167
- padding = int((kernel_size * dilation - dilation) / 2)
168
- in_layer = torch.nn.Conv1d(
169
- hidden_channels,
170
- 2 * hidden_channels,
171
- kernel_size,
172
- dilation=dilation,
173
- padding=padding,
174
- )
175
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
- self.in_layers.append(in_layer)
177
-
178
- # last one is not necessary
179
- if i < n_layers - 1:
180
- res_skip_channels = 2 * hidden_channels
181
- else:
182
- res_skip_channels = hidden_channels
183
-
184
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
- self.res_skip_layers.append(res_skip_layer)
187
-
188
- def forward(self, x, x_mask, g=None, **kwargs):
189
- output = torch.zeros_like(x)
190
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
-
192
- if g is not None:
193
- g = self.cond_layer(g)
194
-
195
- for i in range(self.n_layers):
196
- x_in = self.in_layers[i](x)
197
- if g is not None:
198
- cond_offset = i * 2 * self.hidden_channels
199
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
- else:
201
- g_l = torch.zeros_like(x_in)
202
-
203
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
- acts = self.drop(acts)
205
-
206
- res_skip_acts = self.res_skip_layers[i](acts)
207
- if i < self.n_layers - 1:
208
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
- x = (x + res_acts) * x_mask
210
- output = output + res_skip_acts[:, self.hidden_channels :, :]
211
- else:
212
- output = output + res_skip_acts
213
- return output * x_mask
214
-
215
- def remove_weight_norm(self):
216
- if self.gin_channels != 0:
217
- torch.nn.utils.remove_weight_norm(self.cond_layer)
218
- for l in self.in_layers:
219
- torch.nn.utils.remove_weight_norm(l)
220
- for l in self.res_skip_layers:
221
- torch.nn.utils.remove_weight_norm(l)
222
-
223
-
224
- class ResBlock1(torch.nn.Module):
225
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
226
- super(ResBlock1, self).__init__()
227
- self.convs1 = nn.ModuleList(
228
- [
229
- weight_norm(
230
- Conv1d(
231
- channels,
232
- channels,
233
- kernel_size,
234
- 1,
235
- dilation=dilation[0],
236
- padding=get_padding(kernel_size, dilation[0]),
237
- )
238
- ),
239
- weight_norm(
240
- Conv1d(
241
- channels,
242
- channels,
243
- kernel_size,
244
- 1,
245
- dilation=dilation[1],
246
- padding=get_padding(kernel_size, dilation[1]),
247
- )
248
- ),
249
- weight_norm(
250
- Conv1d(
251
- channels,
252
- channels,
253
- kernel_size,
254
- 1,
255
- dilation=dilation[2],
256
- padding=get_padding(kernel_size, dilation[2]),
257
- )
258
- ),
259
- ]
260
- )
261
- self.convs1.apply(init_weights)
262
-
263
- self.convs2 = nn.ModuleList(
264
- [
265
- weight_norm(
266
- Conv1d(
267
- channels,
268
- channels,
269
- kernel_size,
270
- 1,
271
- dilation=1,
272
- padding=get_padding(kernel_size, 1),
273
- )
274
- ),
275
- weight_norm(
276
- Conv1d(
277
- channels,
278
- channels,
279
- kernel_size,
280
- 1,
281
- dilation=1,
282
- padding=get_padding(kernel_size, 1),
283
- )
284
- ),
285
- weight_norm(
286
- Conv1d(
287
- channels,
288
- channels,
289
- kernel_size,
290
- 1,
291
- dilation=1,
292
- padding=get_padding(kernel_size, 1),
293
- )
294
- ),
295
- ]
296
- )
297
- self.convs2.apply(init_weights)
298
-
299
- def forward(self, x, x_mask=None):
300
- for c1, c2 in zip(self.convs1, self.convs2):
301
- xt = F.leaky_relu(x, LRELU_SLOPE)
302
- if x_mask is not None:
303
- xt = xt * x_mask
304
- xt = c1(xt)
305
- xt = F.leaky_relu(xt, LRELU_SLOPE)
306
- if x_mask is not None:
307
- xt = xt * x_mask
308
- xt = c2(xt)
309
- x = xt + x
310
- if x_mask is not None:
311
- x = x * x_mask
312
- return x
313
-
314
- def remove_weight_norm(self):
315
- for l in self.convs1:
316
- remove_weight_norm(l)
317
- for l in self.convs2:
318
- remove_weight_norm(l)
319
-
320
-
321
- class ResBlock2(torch.nn.Module):
322
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
323
- super(ResBlock2, self).__init__()
324
- self.convs = nn.ModuleList(
325
- [
326
- weight_norm(
327
- Conv1d(
328
- channels,
329
- channels,
330
- kernel_size,
331
- 1,
332
- dilation=dilation[0],
333
- padding=get_padding(kernel_size, dilation[0]),
334
- )
335
- ),
336
- weight_norm(
337
- Conv1d(
338
- channels,
339
- channels,
340
- kernel_size,
341
- 1,
342
- dilation=dilation[1],
343
- padding=get_padding(kernel_size, dilation[1]),
344
- )
345
- ),
346
- ]
347
- )
348
- self.convs.apply(init_weights)
349
-
350
- def forward(self, x, x_mask=None):
351
- for c in self.convs:
352
- xt = F.leaky_relu(x, LRELU_SLOPE)
353
- if x_mask is not None:
354
- xt = xt * x_mask
355
- xt = c(xt)
356
- x = xt + x
357
- if x_mask is not None:
358
- x = x * x_mask
359
- return x
360
-
361
- def remove_weight_norm(self):
362
- for l in self.convs:
363
- remove_weight_norm(l)
364
-
365
-
366
- class Log(nn.Module):
367
- def forward(self, x, x_mask, reverse=False, **kwargs):
368
- if not reverse:
369
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
370
- logdet = torch.sum(-y, [1, 2])
371
- return y, logdet
372
- else:
373
- x = torch.exp(x) * x_mask
374
- return x
375
-
376
-
377
- class Flip(nn.Module):
378
- def forward(self, x, *args, reverse=False, **kwargs):
379
- x = torch.flip(x, [1])
380
- if not reverse:
381
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
382
- return x, logdet
383
- else:
384
- return x
385
-
386
-
387
- class ElementwiseAffine(nn.Module):
388
- def __init__(self, channels):
389
- super().__init__()
390
- self.channels = channels
391
- self.m = nn.Parameter(torch.zeros(channels, 1))
392
- self.logs = nn.Parameter(torch.zeros(channels, 1))
393
-
394
- def forward(self, x, x_mask, reverse=False, **kwargs):
395
- if not reverse:
396
- y = self.m + torch.exp(self.logs) * x
397
- y = y * x_mask
398
- logdet = torch.sum(self.logs * x_mask, [1, 2])
399
- return y, logdet
400
- else:
401
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
402
- return x
403
-
404
-
405
- class ResidualCouplingLayer(nn.Module):
406
- def __init__(
407
- self,
408
- channels,
409
- hidden_channels,
410
- kernel_size,
411
- dilation_rate,
412
- n_layers,
413
- p_dropout=0,
414
- gin_channels=0,
415
- mean_only=False,
416
- ):
417
- assert channels % 2 == 0, "channels should be divisible by 2"
418
- super().__init__()
419
- self.channels = channels
420
- self.hidden_channels = hidden_channels
421
- self.kernel_size = kernel_size
422
- self.dilation_rate = dilation_rate
423
- self.n_layers = n_layers
424
- self.half_channels = channels // 2
425
- self.mean_only = mean_only
426
-
427
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
428
- self.enc = WN(
429
- hidden_channels,
430
- kernel_size,
431
- dilation_rate,
432
- n_layers,
433
- p_dropout=p_dropout,
434
- gin_channels=gin_channels,
435
- )
436
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
437
- self.post.weight.data.zero_()
438
- self.post.bias.data.zero_()
439
-
440
- def forward(self, x, x_mask, g=None, reverse=False):
441
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
442
- h = self.pre(x0) * x_mask
443
- h = self.enc(h, x_mask, g=g)
444
- stats = self.post(h) * x_mask
445
- if not self.mean_only:
446
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
447
- else:
448
- m = stats
449
- logs = torch.zeros_like(m)
450
-
451
- if not reverse:
452
- x1 = m + x1 * torch.exp(logs) * x_mask
453
- x = torch.cat([x0, x1], 1)
454
- logdet = torch.sum(logs, [1, 2])
455
- return x, logdet
456
- else:
457
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
458
- x = torch.cat([x0, x1], 1)
459
- return x
460
-
461
- def remove_weight_norm(self):
462
- self.enc.remove_weight_norm()
463
-
464
-
465
- class ConvFlow(nn.Module):
466
- def __init__(
467
- self,
468
- in_channels,
469
- filter_channels,
470
- kernel_size,
471
- n_layers,
472
- num_bins=10,
473
- tail_bound=5.0,
474
- ):
475
- super().__init__()
476
- self.in_channels = in_channels
477
- self.filter_channels = filter_channels
478
- self.kernel_size = kernel_size
479
- self.n_layers = n_layers
480
- self.num_bins = num_bins
481
- self.tail_bound = tail_bound
482
- self.half_channels = in_channels // 2
483
-
484
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
485
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
486
- self.proj = nn.Conv1d(
487
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
488
- )
489
- self.proj.weight.data.zero_()
490
- self.proj.bias.data.zero_()
491
-
492
- def forward(self, x, x_mask, g=None, reverse=False):
493
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
494
- h = self.pre(x0)
495
- h = self.convs(h, x_mask, g=g)
496
- h = self.proj(h) * x_mask
497
-
498
- b, c, t = x0.shape
499
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
500
-
501
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
502
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
503
- self.filter_channels
504
- )
505
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
506
-
507
- x1, logabsdet = piecewise_rational_quadratic_transform(
508
- x1,
509
- unnormalized_widths,
510
- unnormalized_heights,
511
- unnormalized_derivatives,
512
- inverse=reverse,
513
- tails="linear",
514
- tail_bound=self.tail_bound,
515
- )
516
-
517
- x = torch.cat([x0, x1], 1) * x_mask
518
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
519
- if not reverse:
520
- return x, logdet
521
- else:
522
- return x
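The forward and reverse branches of ResidualCouplingLayer above rely on the exact invertibility of an affine coupling. A small sketch of that identity with illustrative tensors (the x_mask factor is dropped here for clarity):

    import torch

    x1 = torch.randn(4)
    m, logs = torch.randn(4), torch.randn(4)

    y1 = m + x1 * torch.exp(logs)           # forward: x1 -> y1
    x1_rec = (y1 - m) * torch.exp(-logs)    # reverse: y1 -> x1

    assert torch.allclose(x1, x1_rec, atol=1e-6)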
spaces/Benson/text-generation/Examples/Descargar Dr Fone Desbloquear Para PC.md DELETED
@@ -1,33 +0,0 @@
1
-
2
- <h1>Cómo descargar Dr Fone Unlock para PC</h1>
3
- <p>¿Alguna vez ha encontrado una situación en la que haya olvidado su contraseña, PIN, patrón o bloqueo de huellas dactilares en su teléfono? O tal vez usted compró un teléfono de segunda mano que está bloqueado por cuenta de iCloud o Google? ¿O tal vez desea solucionar algunos problemas del sistema en su teléfono, como la pantalla negra, el bucle de arranque o el logotipo atascado? Si usted está buscando una solución a estos problemas, entonces es posible que desee probar Dr Fone Unlock para PC.</p>
4
- <h2>descargar dr fone desbloquear para PC</h2><br /><p><b><b>Download</b> &#10084;&#10084;&#10084; <a href="https://bltlly.com/2v6Kpi">https://bltlly.com/2v6Kpi</a></b></p><br /><br />
5
- <p>Dr Fone Unlock es un potente software que puede ayudarle a desbloquear su teléfono, reparar su sistema, recuperar sus datos, transferir sus archivos, copia de seguridad de sus chats, y cambiar su ubicación con facilidad. Es compatible con dispositivos iOS y Android y funciona con varios escenarios. En este artículo, le mostraremos cómo descargar Dr Fone Unlock para PC y cómo usarlo eficazmente. </p>
6
- <h2>Características de Dr Fone Unlock para PC</h2>
7
- <p>Dr Fone Unlock es más que una herramienta de desbloqueo de pantalla. Ofrece una solución móvil completa que puede satisfacer todas sus necesidades. Aquí están algunas de las características de Dr Fone Unlock para PC:</p>
8
- <ul>
9
- <li><b>Eliminar la pantalla de bloqueo, evitar el bloqueo de iCloud y FRP en dispositivos iOS/ Android. </b> Dr Fone Unlock puede ayudarle a eliminar cualquier tipo de pantalla de bloqueo en su teléfono, como contraseña, PIN, patrón, huella digital o identificación de la cara. También puede ayudarlo a evitar el bloqueo de activación de iCloud o la verificación de cuentas de Google (FRP) en dispositivos iOS o Android. De esta manera, puede acceder a su teléfono sin problemas. </li>
10
- <li><b>Solucionar problemas del sistema iOS/ Android, como pantalla negra, bucle de arranque, etc.</b> Dr Fone Unlock también puede ayudarlo a solucionar varios problemas del sistema en su teléfono, como la pantalla negra de la muerte, bucle de arranque, pegado en el logotipo de Apple/ Samsung, etc. Puede reparar su sistema sin causar ninguna pérdida de datos o daños. </li>
11
-
12
- <li><b>Transferir datos entre iOS/Android y PC/iTunes. </b> Dr Fone Unlock también puede ayudarle a transferir datos entre diferentes dispositivos y plataformas. Puede mover fácilmente todos sus datos o datos seleccionados de un teléfono a otro con un solo clic. También puede transferir datos desde su teléfono a su PC o iTunes y viceversa. </ uno Desbloqueo para PC supera los contras. Sin embargo, también debe ser consciente de las limitaciones y requisitos del software antes de usarlo. </p>
13
- <h2>Conclusión</h2>
14
- <p>Dr Fone Unlock para PC es un software potente y versátil que puede ayudarle a desbloquear el teléfono, arreglar su sistema, recuperar sus datos, transferir sus archivos, copia de seguridad de sus chats, y cambiar su ubicación con facilidad. Es compatible con dispositivos iOS y Android y funciona con varios escenarios. Es fácil de usar, seguro y confiable, y ofrece múltiples herramientas en un solo software. Sin embargo, no es gratuito, requiere conexión a Internet y puede no funcionar para algunos dispositivos o situaciones. Por lo tanto, siempre debe comprobar la compatibilidad y las instrucciones del software antes de usarlo. </p>
15
- <p></p>
16
- <p>Si usted está buscando una solución a sus problemas móviles, entonces es posible que desee probar Dr Fone Unlock para PC. Puede descargarlo desde el sitio web oficial e instalarlo en su PC en minutos. Luego puede usarlo para realizar varias operaciones en su dispositivo con pasos simples. También puede ponerse en contacto con el equipo de atención al cliente si tiene alguna pregunta o problema con el software. </p>
17
- <p>Entonces, ¿qué estás esperando? Descargar Dr Fone desbloquear para PC hoy y disfrutar de todos sus beneficios y características! </p>
18
- <h2>Preguntas frecuentes</h2>
19
- <p>Aquí están algunas de las preguntas más frecuentes sobre Dr Fone Unlock para PC:</p>
20
- <ul>
21
- <li><b>Q: ¿Es Dr Fone desbloquear para PC gratis? </b></li>
22
- <li>A: No, Dr Fone Unlock para PC no es gratis. Es necesario comprar una licencia para utilizar todas sus características. Sin embargo, puede descargar una versión de prueba gratuita desde el sitio web oficial y utilizar algunas de las funciones de forma gratuita. </li>
23
- <li><b>Q: ¿Es seguro Dr Fone Unlock para PC? </b></li>
24
-
25
- <li><b>Q: ¿Dr Fone Unlock para PC funciona para todos los dispositivos y situaciones? </b></li>
26
- <li>A: No, Dr Fone Unlock para PC no funciona para todos los dispositivos y situaciones. Es compatible con la mayoría de los dispositivos iOS y Android y escenarios, pero no todos ellos. Algunos dispositivos o situaciones pueden tener diferentes requisitos o limitaciones que pueden impedir que el software funcione correctamente. Por lo tanto, siempre debe comprobar la compatibilidad y las instrucciones del software antes de usarlo. </li>
27
- <li><b>Q: ¿Cuánto tiempo lleva Dr Fone Unlock para PC para realizar una operación? </b></li>
28
- <li>A: El tiempo que Dr Fone Unlock para PC toma para realizar una operación depende de varios factores, como el tipo de operación, el tamaño de los datos, la velocidad de la conexión a Internet, etc. En términos generales, la mayoría de las operaciones se pueden hacer en minutos u horas. Sin embargo, algunas operaciones pueden tomar más tiempo dependiendo de la complejidad o dificultad de la situación. </li>
29
- <li><b>Q: ¿Qué pasa si encuentro cualquier problema o error con Dr Fone Unlock para PC? </b></li>
30
- <li>A: Si se encuentra con cualquier problema o error con Dr Fone Unlock para PC, puede tratar de solucionar el problema siguiendo los consejos y soluciones proporcionadas en el sitio web oficial o en la guía del usuario. También puede ponerse en contacto con el equipo de atención al cliente por correo electrónico o por teléfono si necesita más ayuda. </li>
31
- </ul></p> 64aa2da5cf<br />
32
- <br />
33
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_functools.py DELETED
@@ -1,104 +0,0 @@
1
- import types
2
- import functools
3
-
4
-
5
- # from jaraco.functools 3.3
6
- def method_cache(method, cache_wrapper=None):
7
- """
8
- Wrap lru_cache to support storing the cache data in the object instances.
9
-
10
- Abstracts the common paradigm where the method explicitly saves an
11
- underscore-prefixed protected property on first call and returns that
12
- subsequently.
13
-
14
- >>> class MyClass:
15
- ... calls = 0
16
- ...
17
- ... @method_cache
18
- ... def method(self, value):
19
- ... self.calls += 1
20
- ... return value
21
-
22
- >>> a = MyClass()
23
- >>> a.method(3)
24
- 3
25
- >>> for x in range(75):
26
- ... res = a.method(x)
27
- >>> a.calls
28
- 75
29
-
30
- Note that the apparent behavior will be exactly like that of lru_cache
31
- except that the cache is stored on each instance, so values in one
32
- instance will not flush values from another, and when an instance is
33
- deleted, so are the cached values for that instance.
34
-
35
- >>> b = MyClass()
36
- >>> for x in range(35):
37
- ... res = b.method(x)
38
- >>> b.calls
39
- 35
40
- >>> a.method(0)
41
- 0
42
- >>> a.calls
43
- 75
44
-
45
- Note that if method had been decorated with ``functools.lru_cache()``,
46
- a.calls would have been 76 (due to the cached value of 0 having been
47
- flushed by the 'b' instance).
48
-
49
- Clear the cache with ``.cache_clear()``
50
-
51
- >>> a.method.cache_clear()
52
-
53
- Same for a method that hasn't yet been called.
54
-
55
- >>> c = MyClass()
56
- >>> c.method.cache_clear()
57
-
58
- Another cache wrapper may be supplied:
59
-
60
- >>> cache = functools.lru_cache(maxsize=2)
61
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
62
- >>> a = MyClass()
63
- >>> a.method2()
64
- 3
65
-
66
- Caution - do not subsequently wrap the method with another decorator, such
67
- as ``@property``, which changes the semantics of the function.
68
-
69
- See also
70
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
71
- for another implementation and additional justification.
72
- """
73
- cache_wrapper = cache_wrapper or functools.lru_cache()
74
-
75
- def wrapper(self, *args, **kwargs):
76
- # it's the first call, replace the method with a cached, bound method
77
- bound_method = types.MethodType(method, self)
78
- cached_method = cache_wrapper(bound_method)
79
- setattr(self, method.__name__, cached_method)
80
- return cached_method(*args, **kwargs)
81
-
82
- # Support cache clear even before cache has been created.
83
- wrapper.cache_clear = lambda: None
84
-
85
- return wrapper
86
-
87
-
88
- # From jaraco.functools 3.3
89
- def pass_none(func):
90
- """
91
- Wrap func so it's not called if its first param is None
92
-
93
- >>> print_text = pass_none(print)
94
- >>> print_text('text')
95
- text
96
- >>> print_text(None)
97
- """
98
-
99
- @functools.wraps(func)
100
- def wrapper(param, *args, **kwargs):
101
- if param is not None:
102
- return func(param, *args, **kwargs)
103
-
104
- return wrapper
spaces/BraydenMoore/MARCI-NFL-Betting/Source/Predict/predict.py DELETED
@@ -1,166 +0,0 @@
1
- import xgboost as xgb
2
- import numpy as np
3
- import pandas as pd
4
- import pickle as pkl
5
- import os
6
- import requests
7
- from bs4 import BeautifulSoup
8
- import warnings
9
- warnings.filterwarnings("ignore")
10
- from datetime import datetime
11
-
12
- # set dirs for other files
13
- current_directory = os.path.dirname(os.path.abspath(__file__))
14
- parent_directory = os.path.dirname(current_directory)
15
- data_directory = os.path.join(parent_directory, 'Data')
16
- model_directory = os.path.join(parent_directory, 'Models')
17
- pickle_directory = os.path.join(parent_directory, 'Pickles')
18
-
19
- file_path = os.path.join(data_directory, 'gbg_this_year.csv')
20
- gbg = pd.read_csv(file_path, low_memory=False)
21
-
22
- file_path = os.path.join(data_directory, 'results.csv')
23
- results = pd.read_csv(file_path, low_memory=False)
24
-
25
- # get team abbreviations
26
- file_path = os.path.join(pickle_directory, 'team_name_to_abbreviation.pkl')
27
- with open(file_path, 'rb') as f:
28
- team_name_to_abbreviation = pkl.load(f)
29
-
30
- file_path = os.path.join(pickle_directory, 'team_abbreviation_to_name.pkl')
31
- with open(file_path, 'rb') as f:
32
- team_abbreviation_to_name = pkl.load(f)
33
-
34
- # get schedule
35
- file_path = os.path.join(pickle_directory, 'schedule.pkl')
36
- with open(file_path, 'rb') as f:
37
- schedule = pkl.load(f)
38
-
39
- # load models
40
- # moneyline
41
- model = 'xgboost_ML_no_odds_71.4%'
42
- file_path = os.path.join(model_directory, f'{model}.json')
43
- xgb_ml = xgb.Booster()
44
- xgb_ml.load_model(file_path)
45
-
46
- # over/under
47
- model = 'xgboost_OU_no_odds_59.8%'
48
- file_path = os.path.join(model_directory, f'{model}.json')
49
- xgb_ou = xgb.Booster()
50
- xgb_ou.load_model(file_path)
51
-
52
-
53
- def get_week():
54
- headers = {
55
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
56
- 'Accept-Encoding': 'gzip, deflate',
57
- 'Accept-Language': 'en-US,en;q=0.9',
58
- 'Cache-Control': 'max-age=0',
59
- 'Connection': 'keep-alive',
60
- 'Dnt': '1',
61
- 'Upgrade-Insecure-Requests': '1',
62
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
63
- }
64
- url = 'https://www.nfl.com/schedules/'
65
- resp = requests.get(url,headers=headers)
66
- soup = BeautifulSoup(resp.text, 'html.parser')
67
- h2_tags = soup.find_all('h2')
68
- year = h2_tags[0].getText().split(' ')[0]
69
- week = h2_tags[0].getText().split(' ')[-1]
70
- return int(week), int(year)
71
-
72
-
73
- def get_games(week):
74
- # pull from NBC
75
- #url = 'https://www.nbcsports.com/nfl/schedule'
76
- #df = pd.read_html(url)[week-1]
77
- df = schedule[week-1]
78
- df['Away Team'] = [' '.join(i.split('\xa0')[1:]) for i in df['Away TeamAway Team']]
79
- df['Home Team'] = [' '.join(i.split('\xa0')[1:]) for i in df['Home TeamHome Team']]
80
- df['Date'] = pd.to_datetime(df['Game TimeGame Time'])
81
- df['Date'] = df['Date'].dt.strftime('%A %d/%m %I:%M %p')
82
- df['Date'] = df['Date'].apply(lambda x: f"{x.split()[0]} {int(x.split()[1].split('/')[1])}/{int(x.split()[1].split('/')[0])} {x.split()[2]}".capitalize())
83
-
84
- return df[['Away Team','Home Team','Date']]
85
-
86
-
87
- def get_one_week(home,away,season,week):
88
- try:
89
- max_GP_home = gbg.loc[((gbg['home_team'] == home) | (gbg['away_team'] == home)) & (gbg['GP'] < week)]['GP'].max()
90
- max_GP_away = gbg.loc[((gbg['home_team'] == away) | (gbg['away_team'] == away)) & (gbg['GP'] < week)]['GP'].max()
91
-
92
- home_df = gbg.loc[((gbg['away_team']==home) | (gbg['home_team']==home)) & (gbg['Season']==season) & (gbg['GP']==max_GP_home)]
93
- gbg_home_team = home_df['home_team'].item()
94
- home_df.drop(columns=['game_id','home_team','away_team','Season','game_date'], inplace=True)
95
- home_df = home_df[[i for i in home_df.columns if '.Away' not in i] if gbg_home_team==home else [i for i in home_df.columns if '.Away' in i]]
96
- home_df.columns = [i.replace('.Away','') for i in home_df.columns]
97
-
98
- away_df = gbg.loc[((gbg['away_team']==away) | (gbg['home_team']==away)) & (gbg['Season']==season) & (gbg['GP']==max_GP_away)]
99
- gbg_home_team = away_df['home_team'].item()
100
- away_df.drop(columns=['game_id','home_team','away_team','Season','game_date'], inplace=True)
101
- away_df = away_df[[i for i in away_df.columns if '.Away' not in i] if gbg_home_team==away else [i for i in away_df.columns if '.Away' in i]]
102
- away_df.columns = [i.replace('.Away','') + '.Away' for i in away_df.columns]
103
-
104
- df = home_df.reset_index(drop=True).merge(away_df.reset_index(drop=True), left_index=True, right_index=True)
105
- return df
106
- except ValueError:
107
- return pd.DataFrame()
108
-
109
-
110
- def predict(home,away,season,week,total):
111
- global results
112
-
113
- # finish preparing data
114
- if len(home)>4:
115
- home_abbrev = team_name_to_abbreviation[home]
116
- else:
117
- home_abbrev = home
118
-
119
- if len(away)>4:
120
- away_abbrev = team_name_to_abbreviation[away]
121
- else:
122
- away_abbrev = away
123
-
124
- data = get_one_week(home_abbrev,away_abbrev,season,week)
125
- data['Total Score Close'] = total
126
- matrix = xgb.DMatrix(data.astype(float).values)
127
-
128
- # create game id
129
- game_id = str(season) + '_0' + str(int(week)) + '_' + away_abbrev + '_' + home_abbrev
130
-
131
- try:
132
- moneyline_result = results.loc[results['game_id']==game_id, 'winner'].item()
133
- except:
134
- moneyline_result = 'N/A'
135
-
136
- try:
137
- ml_predicted_proba = xgb_ml.predict(matrix)[0][1]
138
- winner_proba = max([ml_predicted_proba, 1-ml_predicted_proba]).item()
139
- moneyline = {'Winner': [home if ml_predicted_proba>0.5 else away if ml_predicted_proba<0.5 else 'Toss-Up'],
140
- 'Probabilities':[winner_proba],
141
- 'Result': moneyline_result}
142
- except:
143
- moneyline = {'Winner': 'NA',
144
- 'Probabilities':['N/A'],
145
- 'Result': moneyline_result}
146
-
147
- try:
148
- result = results.loc[results['game_id']==game_id, 'total'].item()
149
- over_under_result = 'Over' if float(result)>float(total) else 'Push' if float(result)==float(total) else 'Under'
150
- print(float(result), float(total))
151
- except:
152
- over_under_result = 'N/A'
153
-
154
- try:
155
- ou_predicted_proba = xgb_ou.predict(matrix)[0][1]
156
- ou_proba = max([ou_predicted_proba, 1-ou_predicted_proba]).item()
157
-
158
- over_under = {'Over/Under': ['Over' if ou_predicted_proba>0.5 else 'Under'],
159
- 'Probability': [ou_proba],
160
- 'Result': over_under_result}
161
- except:
162
- over_under = {'Over/Under': 'N/A',
163
- 'Probability': ['N/A'],
164
- 'Result': over_under_result}
165
-
166
- return game_id, moneyline, over_under
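A hedged example of how the predict() function above is meant to be called, based only on its signature and return values. Team names, season, week and total are made up, and the CSV/pickle files loaded at module import time must exist for this to run.

    game_id, moneyline, over_under = predict(
        home='Kansas City Chiefs', away='Detroit Lions',
        season=2023, week=1, total=53.0)

    print(game_id)                                           # e.g. '2023_01_DET_KC' (season_0week_away_home)
    print(moneyline['Winner'], moneyline['Probabilities'])   # predicted winner and its probability
    print(over_under['Over/Under'], over_under['Probability'])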
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_roi_pooler.py DELETED
@@ -1,85 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import unittest
4
- import torch
5
-
6
- from detectron2.modeling.poolers import ROIPooler
7
- from detectron2.structures import Boxes, RotatedBoxes
8
-
9
- logger = logging.getLogger(__name__)
10
-
11
-
12
- class TestROIPooler(unittest.TestCase):
13
- def _rand_boxes(self, num_boxes, x_max, y_max):
14
- coords = torch.rand(num_boxes, 4)
15
- coords[:, 0] *= x_max
16
- coords[:, 1] *= y_max
17
- coords[:, 2] *= x_max
18
- coords[:, 3] *= y_max
19
- boxes = torch.zeros(num_boxes, 4)
20
- boxes[:, 0] = torch.min(coords[:, 0], coords[:, 2])
21
- boxes[:, 1] = torch.min(coords[:, 1], coords[:, 3])
22
- boxes[:, 2] = torch.max(coords[:, 0], coords[:, 2])
23
- boxes[:, 3] = torch.max(coords[:, 1], coords[:, 3])
24
- return boxes
25
-
26
- def _test_roialignv2_roialignrotated_match(self, device):
27
- pooler_resolution = 14
28
- canonical_level = 4
29
- canonical_scale_factor = 2 ** canonical_level
30
- pooler_scales = (1.0 / canonical_scale_factor,)
31
- sampling_ratio = 0
32
-
33
- N, C, H, W = 2, 4, 10, 8
34
- N_rois = 10
35
- std = 11
36
- mean = 0
37
- feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
38
-
39
- features = [feature.to(device)]
40
-
41
- rois = []
42
- rois_rotated = []
43
- for _ in range(N):
44
- boxes = self._rand_boxes(
45
- num_boxes=N_rois, x_max=W * canonical_scale_factor, y_max=H * canonical_scale_factor
46
- )
47
-
48
- rotated_boxes = torch.zeros(N_rois, 5)
49
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
50
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
51
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
52
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
53
- rois.append(Boxes(boxes).to(device))
54
- rois_rotated.append(RotatedBoxes(rotated_boxes).to(device))
55
-
56
- roialignv2_pooler = ROIPooler(
57
- output_size=pooler_resolution,
58
- scales=pooler_scales,
59
- sampling_ratio=sampling_ratio,
60
- pooler_type="ROIAlignV2",
61
- )
62
-
63
- roialignv2_out = roialignv2_pooler(features, rois)
64
-
65
- roialignrotated_pooler = ROIPooler(
66
- output_size=pooler_resolution,
67
- scales=pooler_scales,
68
- sampling_ratio=sampling_ratio,
69
- pooler_type="ROIAlignRotated",
70
- )
71
-
72
- roialignrotated_out = roialignrotated_pooler(features, rois_rotated)
73
-
74
- self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4))
75
-
76
- def test_roialignv2_roialignrotated_match_cpu(self):
77
- self._test_roialignv2_roialignrotated_match(device="cpu")
78
-
79
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
80
- def test_roialignv2_roialignrotated_match_cuda(self):
81
- self._test_roialignv2_roialignrotated_match(device="cuda")
82
-
83
-
84
- if __name__ == "__main__":
85
- unittest.main()
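The conversion at the heart of the test above, an axis-aligned (x1, y1, x2, y2) box re-expressed as a rotated box (cx, cy, w, h) with angle 0, as a small standalone sketch:

    import torch

    def to_rotated(boxes: torch.Tensor) -> torch.Tensor:
        # boxes: N x 4 in (x1, y1, x2, y2); returns N x 5 in (cx, cy, w, h, angle)
        rotated = torch.zeros(boxes.shape[0], 5)
        rotated[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0  # center x
        rotated[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0  # center y
        rotated[:, 2] = boxes[:, 2] - boxes[:, 0]          # width
        rotated[:, 3] = boxes[:, 3] - boxes[:, 1]          # height
        return rotated                                     # angle stays 0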
spaces/CVPR/LIVE/thrust/thrust/detail/allocator/default_construct_range.h DELETED
@@ -1,37 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- namespace thrust
22
- {
23
- namespace detail
24
- {
25
-
26
-
27
- template<typename Allocator, typename Pointer, typename Size>
28
- __host__ __device__
29
- inline void default_construct_range(Allocator &a, Pointer p, Size n);
30
-
31
-
32
- } // end detail
33
- } // end thrust
34
-
35
- #include <thrust/detail/allocator/default_construct_range.inl>
36
-
37
-
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/sort.h DELETED
@@ -1,55 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/tbb/detail/execution_policy.h>
21
-
22
- namespace thrust
23
- {
24
- namespace system
25
- {
26
- namespace tbb
27
- {
28
- namespace detail
29
- {
30
-
31
- template<typename DerivedPolicy,
32
- typename RandomAccessIterator,
33
- typename StrictWeakOrdering>
34
- void stable_sort(execution_policy<DerivedPolicy> &exec,
35
- RandomAccessIterator first,
36
- RandomAccessIterator last,
37
- StrictWeakOrdering comp);
38
-
39
- template<typename DerivedPolicy,
40
- typename RandomAccessIterator1,
41
- typename RandomAccessIterator2,
42
- typename StrictWeakOrdering>
43
- void stable_sort_by_key(execution_policy<DerivedPolicy> &exec,
44
- RandomAccessIterator1 keys_first,
45
- RandomAccessIterator1 keys_last,
46
- RandomAccessIterator2 values_first,
47
- StrictWeakOrdering comp);
48
-
49
- } // end namespace detail
50
- } // end namespace tbb
51
- } // end namespace system
52
- } // end namespace thrust
53
-
54
- #include <thrust/system/tbb/detail/sort.inl>
55
-
spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnext.py DELETED
@@ -1,122 +0,0 @@
- import math
-
- from mmcv.cnn import build_conv_layer, build_norm_layer
-
- from ..builder import BACKBONES
- from .detectors_resnet import Bottleneck as _Bottleneck
- from .detectors_resnet import DetectoRS_ResNet
-
-
- class Bottleneck(_Bottleneck):
-     expansion = 4
-
-     def __init__(self,
-                  inplanes,
-                  planes,
-                  groups=1,
-                  base_width=4,
-                  base_channels=64,
-                  **kwargs):
-         """Bottleneck block for ResNeXt.
-
-         If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
-         it is "caffe", the stride-two layer is the first 1x1 conv layer.
-         """
-         super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
-         if groups == 1:
-             width = self.planes
-         else:
-             width = math.floor(self.planes *
-                                (base_width / base_channels)) * groups
-
-         self.norm1_name, norm1 = build_norm_layer(
-             self.norm_cfg, width, postfix=1)
-         self.norm2_name, norm2 = build_norm_layer(
-             self.norm_cfg, width, postfix=2)
-         self.norm3_name, norm3 = build_norm_layer(
-             self.norm_cfg, self.planes * self.expansion, postfix=3)
-
-         self.conv1 = build_conv_layer(
-             self.conv_cfg,
-             self.inplanes,
-             width,
-             kernel_size=1,
-             stride=self.conv1_stride,
-             bias=False)
-         self.add_module(self.norm1_name, norm1)
-         fallback_on_stride = False
-         self.with_modulated_dcn = False
-         if self.with_dcn:
-             fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
-         if self.with_sac:
-             self.conv2 = build_conv_layer(
-                 self.sac,
-                 width,
-                 width,
-                 kernel_size=3,
-                 stride=self.conv2_stride,
-                 padding=self.dilation,
-                 dilation=self.dilation,
-                 groups=groups,
-                 bias=False)
-         elif not self.with_dcn or fallback_on_stride:
-             self.conv2 = build_conv_layer(
-                 self.conv_cfg,
-                 width,
-                 width,
-                 kernel_size=3,
-                 stride=self.conv2_stride,
-                 padding=self.dilation,
-                 dilation=self.dilation,
-                 groups=groups,
-                 bias=False)
-         else:
-             assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
-             self.conv2 = build_conv_layer(
-                 self.dcn,
-                 width,
-                 width,
-                 kernel_size=3,
-                 stride=self.conv2_stride,
-                 padding=self.dilation,
-                 dilation=self.dilation,
-                 groups=groups,
-                 bias=False)
-
-         self.add_module(self.norm2_name, norm2)
-         self.conv3 = build_conv_layer(
-             self.conv_cfg,
-             width,
-             self.planes * self.expansion,
-             kernel_size=1,
-             bias=False)
-         self.add_module(self.norm3_name, norm3)
-
-
- @BACKBONES.register_module()
- class DetectoRS_ResNeXt(DetectoRS_ResNet):
-     """ResNeXt backbone for DetectoRS.
-
-     Args:
-         groups (int): The number of groups in ResNeXt.
-         base_width (int): The base width of ResNeXt.
-     """
-
-     arch_settings = {
-         50: (Bottleneck, (3, 4, 6, 3)),
-         101: (Bottleneck, (3, 4, 23, 3)),
-         152: (Bottleneck, (3, 8, 36, 3))
-     }
-
-     def __init__(self, groups=1, base_width=4, **kwargs):
-         self.groups = groups
-         self.base_width = base_width
-         super(DetectoRS_ResNeXt, self).__init__(**kwargs)
-
-     def make_res_layer(self, **kwargs):
-         return super().make_res_layer(
-             groups=self.groups,
-             base_width=self.base_width,
-             base_channels=self.base_channels,
-             **kwargs)
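For context, a quick numeric sketch of the grouped width computed in `Bottleneck.__init__` above; the values chosen (`groups=32`, `base_width=4`, ResNeXt-style) are illustrative assumptions, not settings taken from this repository.

```python
import math

def bottleneck_width(planes, groups, base_width=4, base_channels=64):
    # Mirrors the width computation in Bottleneck.__init__ above.
    if groups == 1:
        return planes
    return math.floor(planes * (base_width / base_channels)) * groups

print(bottleneck_width(planes=64, groups=32))    # 128 grouped channels for conv2 in an early stage
print(bottleneck_width(planes=256, groups=32))   # 512 grouped channels deeper in the network
```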
 
spaces/CVPR/WALT/mmdet/models/detectors/yolo.py DELETED
@@ -1,18 +0,0 @@
- # Copyright (c) 2019 Western Digital Corporation or its affiliates.
-
- from ..builder import DETECTORS
- from .single_stage import SingleStageDetector
-
-
- @DETECTORS.register_module()
- class YOLOV3(SingleStageDetector):
-
-     def __init__(self,
-                  backbone,
-                  neck,
-                  bbox_head,
-                  train_cfg=None,
-                  test_cfg=None,
-                  pretrained=None):
-         super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
-                                      test_cfg, pretrained)
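The `@DETECTORS.register_module()` decorator is what lets this class be built from a config dict by name. The toy sketch below illustrates that register-then-build pattern; the `Registry` and `ToyDetector` here are simplified stand-ins, not the actual mmcv/mmdet implementation.

```python
class Registry:
    """A minimal stand-in for a registry that builds classes from config dicts."""

    def __init__(self):
        self._modules = {}

    def register_module(self):
        def _register(cls):
            self._modules[cls.__name__] = cls
            return cls
        return _register

    def build(self, cfg):
        cfg = dict(cfg)
        return self._modules[cfg.pop("type")](**cfg)


DETECTORS = Registry()


@DETECTORS.register_module()
class ToyDetector:
    def __init__(self, backbone, neck, bbox_head):
        self.parts = (backbone, neck, bbox_head)


detector = DETECTORS.build(dict(type="ToyDetector", backbone="b", neck="n", bbox_head="h"))
print(type(detector).__name__)  # ToyDetector
```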
 
spaces/CikeyQI/meme-api/meme_generator/memes/cyan/__init__.py DELETED
@@ -1,31 +0,0 @@
- from typing import List
-
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
-
-
- def cyan(images: List[BuildImage], texts, args):
-     color = (78, 114, 184)
-     frame = images[0].convert("RGB").square().resize((500, 500)).color_mask(color)
-     frame.draw_text(
-         (400, 40, 480, 280),
-         "群\n青",
-         max_fontsize=80,
-         weight="bold",
-         fill="white",
-         stroke_ratio=0.04,
-         stroke_fill=color,
-     ).draw_text(
-         (200, 270, 480, 350),
-         "YOASOBI",
-         halign="right",
-         max_fontsize=40,
-         fill="white",
-         stroke_ratio=0.06,
-         stroke_fill=color,
-     )
-     return frame.save_jpg()
-
-
- add_meme("cyan", cyan, min_images=1, max_images=1, keywords=["群青"])
 
spaces/CloudOrc/SolidUI/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: SolidUI
- emoji: 🐠
- colorFrom: gray
- colorTo: red
- sdk: gradio
- sdk_version: 3.36.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/StandardEncoding.py DELETED
@@ -1,258 +0,0 @@
- StandardEncoding = [
-     # 0x00-0x1F: unassigned
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     # 0x20-0x7E: printable ASCII
-     "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright",
-     "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash",
-     "zero", "one", "two", "three", "four", "five", "six", "seven",
-     "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question",
-     "at", "A", "B", "C", "D", "E", "F", "G",
-     "H", "I", "J", "K", "L", "M", "N", "O",
-     "P", "Q", "R", "S", "T", "U", "V", "W",
-     "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
-     "quoteleft", "a", "b", "c", "d", "e", "f", "g",
-     "h", "i", "j", "k", "l", "m", "n", "o",
-     "p", "q", "r", "s", "t", "u", "v", "w",
-     "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde",
-     # 0x7F-0xA0: unassigned
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef",
-     # 0xA1-0xFF: symbols, accents and ligatures
-     "exclamdown", "cent", "sterling", "fraction", "yen", "florin", "section", "currency",
-     "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", ".notdef",
-     "endash", "dagger", "daggerdbl", "periodcentered", ".notdef", "paragraph", "bullet", "quotesinglbase",
-     "quotedblbase", "quotedblright", "guillemotright", "ellipsis", "perthousand", ".notdef", "questiondown", ".notdef",
-     "grave", "acute", "circumflex", "tilde", "macron", "breve", "dotaccent", "dieresis",
-     ".notdef", "ring", "cedilla", ".notdef", "hungarumlaut", "ogonek", "caron", "emdash",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     "AE", ".notdef", "ordfeminine", ".notdef", ".notdef", ".notdef", ".notdef", "Lslash",
-     "Oslash", "OE", "ordmasculine", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
-     "ae", ".notdef", ".notdef", ".notdef", "dotlessi", ".notdef", ".notdef", "lslash",
-     "oslash", "oe", "germandbls", ".notdef", ".notdef", ".notdef", ".notdef",
- ]
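A small sanity check of how this table is used: the list index is the character code and the value is the PostScript glyph name (the import path below is assumed from the file location above).

```python
from fontTools.encodings.StandardEncoding import StandardEncoding

assert len(StandardEncoding) == 256
assert StandardEncoding[0] == ".notdef"           # unassigned codes fall back to .notdef
assert StandardEncoding[ord(" ")] == "space"
assert StandardEncoding[ord("A")] == "A"          # printable ASCII keeps its own glyph name
assert StandardEncoding[0x27] == "quoteright"     # 0x27 maps to quoteright, not quotesingle
```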
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/arc.py DELETED
@@ -1,153 +0,0 @@
- """Convert SVG Path's elliptical arcs to Bezier curves.
-
- The code is mostly adapted from Blink's SVGPathNormalizer::DecomposeArcToCubic
- https://github.com/chromium/chromium/blob/93831f2/third_party/
- blink/renderer/core/svg/svg_path_parser.cc#L169-L278
- """
- from fontTools.misc.transform import Identity, Scale
- from math import atan2, ceil, cos, fabs, isfinite, pi, radians, sin, sqrt, tan
-
-
- TWO_PI = 2 * pi
- PI_OVER_TWO = 0.5 * pi
-
-
- def _map_point(matrix, pt):
-     # apply Transform matrix to a point represented as a complex number
-     r = matrix.transformPoint((pt.real, pt.imag))
-     return r[0] + r[1] * 1j
-
-
- class EllipticalArc(object):
-     def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
-         self.current_point = current_point
-         self.rx = rx
-         self.ry = ry
-         self.rotation = rotation
-         self.large = large
-         self.sweep = sweep
-         self.target_point = target_point
-
-         # SVG arc's rotation angle is expressed in degrees, whereas Transform.rotate
-         # uses radians
-         self.angle = radians(rotation)
-
-         # these derived attributes are computed by the _parametrize method
-         self.center_point = self.theta1 = self.theta2 = self.theta_arc = None
-
-     def _parametrize(self):
-         # convert from endpoint to center parametrization:
-         # https://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
-
-         # If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a
-         # "lineto") joining the endpoints.
-         # http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
-         rx = fabs(self.rx)
-         ry = fabs(self.ry)
-         if not (rx and ry):
-             return False
-
-         # If the current point and target point for the arc are identical, it should
-         # be treated as a zero length path. This ensures continuity in animations.
-         if self.target_point == self.current_point:
-             return False
-
-         mid_point_distance = (self.current_point - self.target_point) * 0.5
-
-         point_transform = Identity.rotate(-self.angle)
-
-         transformed_mid_point = _map_point(point_transform, mid_point_distance)
-         square_rx = rx * rx
-         square_ry = ry * ry
-         square_x = transformed_mid_point.real * transformed_mid_point.real
-         square_y = transformed_mid_point.imag * transformed_mid_point.imag
-
-         # Check if the radii are big enough to draw the arc, scale radii if not.
-         # http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
-         radii_scale = square_x / square_rx + square_y / square_ry
-         if radii_scale > 1:
-             rx *= sqrt(radii_scale)
-             ry *= sqrt(radii_scale)
-             self.rx, self.ry = rx, ry
-
-         point_transform = Scale(1 / rx, 1 / ry).rotate(-self.angle)
-
-         point1 = _map_point(point_transform, self.current_point)
-         point2 = _map_point(point_transform, self.target_point)
-         delta = point2 - point1
-
-         d = delta.real * delta.real + delta.imag * delta.imag
-         scale_factor_squared = max(1 / d - 0.25, 0.0)
-
-         scale_factor = sqrt(scale_factor_squared)
-         if self.sweep == self.large:
-             scale_factor = -scale_factor
-
-         delta *= scale_factor
-         center_point = (point1 + point2) * 0.5
-         center_point += complex(-delta.imag, delta.real)
-         point1 -= center_point
-         point2 -= center_point
-
-         theta1 = atan2(point1.imag, point1.real)
-         theta2 = atan2(point2.imag, point2.real)
-
-         theta_arc = theta2 - theta1
-         if theta_arc < 0 and self.sweep:
-             theta_arc += TWO_PI
-         elif theta_arc > 0 and not self.sweep:
-             theta_arc -= TWO_PI
-
-         self.theta1 = theta1
-         self.theta2 = theta1 + theta_arc
-         self.theta_arc = theta_arc
-         self.center_point = center_point
-
-         return True
-
-     def _decompose_to_cubic_curves(self):
-         if self.center_point is None and not self._parametrize():
-             return
-
-         point_transform = Identity.rotate(self.angle).scale(self.rx, self.ry)
-
-         # Some results of atan2 on some platform implementations are not exact
-         # enough, so we would get more cubic curves than expected here. Adding
-         # 0.001 reduces the count of segments to the correct count.
-         num_segments = int(ceil(fabs(self.theta_arc / (PI_OVER_TWO + 0.001))))
-         for i in range(num_segments):
-             start_theta = self.theta1 + i * self.theta_arc / num_segments
-             end_theta = self.theta1 + (i + 1) * self.theta_arc / num_segments
-
-             t = (4 / 3) * tan(0.25 * (end_theta - start_theta))
-             if not isfinite(t):
-                 return
-
-             sin_start_theta = sin(start_theta)
-             cos_start_theta = cos(start_theta)
-             sin_end_theta = sin(end_theta)
-             cos_end_theta = cos(end_theta)
-
-             point1 = complex(
-                 cos_start_theta - t * sin_start_theta,
-                 sin_start_theta + t * cos_start_theta,
-             )
-             point1 += self.center_point
-             target_point = complex(cos_end_theta, sin_end_theta)
-             target_point += self.center_point
-             point2 = target_point
-             point2 += complex(t * sin_end_theta, -t * cos_end_theta)
-
-             point1 = _map_point(point_transform, point1)
-             point2 = _map_point(point_transform, point2)
-             target_point = _map_point(point_transform, target_point)
-
-             yield point1, point2, target_point
-
-     def draw(self, pen):
-         for point1, point2, target_point in self._decompose_to_cubic_curves():
-             pen.curveTo(
-                 (point1.real, point1.imag),
-                 (point2.real, point2.imag),
-                 (target_point.real, target_point.imag),
-             )
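As a usage sketch (assuming the module is importable from its original `fontTools.svgLib.path.arc` location, and that a "pen" only needs a `curveTo()` method), decomposing one SVG arc looks like this:

```python
from fontTools.svgLib.path.arc import EllipticalArc


class RecordingPen:
    """Collects the cubic Bezier segments emitted by EllipticalArc.draw()."""

    def __init__(self):
        self.segments = []

    def curveTo(self, p1, p2, p3):
        self.segments.append((p1, p2, p3))


# Roughly: M 0,0 A 100,50 0 0 1 100,50  (points are complex numbers: x + y*1j)
arc = EllipticalArc(
    current_point=0 + 0j,
    rx=100,
    ry=50,
    rotation=0,   # degrees; converted to radians internally
    large=0,
    sweep=1,
    target_point=100 + 50j,
)

pen = RecordingPen()
arc.draw(pen)
print(len(pen.segments), "cubic segment(s)")
```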
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/unicodedata/OTTags.py DELETED
@@ -1,50 +0,0 @@
- # Data updated to OpenType 1.8.2 as of January 2018.
-
- # Complete list of OpenType script tags at:
- # https://www.microsoft.com/typography/otspec/scripttags.htm
-
- # Most of the script tags are the same as the ISO 15924 tag but lowercased,
- # so we only have to handle the exceptional cases:
- # - KATAKANA and HIRAGANA both map to 'kana';
- # - spaces at the end are preserved, unlike ISO 15924;
- # - we map special script codes for Inherited, Common and Unknown to DFLT.
-
- DEFAULT_SCRIPT = "DFLT"
-
- SCRIPT_ALIASES = {
-     "jamo": "hang",
- }
-
- SCRIPT_EXCEPTIONS = {
-     "Hira": "kana",
-     "Hrkt": "kana",
-     "Laoo": "lao ",
-     "Yiii": "yi  ",
-     "Nkoo": "nko ",
-     "Vaii": "vai ",
-     "Zmth": "math",
-     "Zinh": DEFAULT_SCRIPT,
-     "Zyyy": DEFAULT_SCRIPT,
-     "Zzzz": DEFAULT_SCRIPT,
- }
-
- SCRIPT_EXCEPTIONS_REVERSED = {
-     "math": "Zmth",
- }
-
- NEW_SCRIPT_TAGS = {
-     "Beng": ("bng2",),
-     "Deva": ("dev2",),
-     "Gujr": ("gjr2",),
-     "Guru": ("gur2",),
-     "Knda": ("knd2",),
-     "Mlym": ("mlm2",),
-     "Orya": ("ory2",),
-     "Taml": ("tml2",),
-     "Telu": ("tel2",),
-     "Mymr": ("mym2",),
- }
-
- NEW_SCRIPT_TAGS_REVERSED = {
-     value: key for key, values in NEW_SCRIPT_TAGS.items() for value in values
- }
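A hedged sketch of the lookup rule the comments above describe (lowercase the ISO 15924 code unless it is one of the exceptions); this is an illustration written against the tables above, not the public fontTools API:

```python
from fontTools.unicodedata.OTTags import NEW_SCRIPT_TAGS, SCRIPT_EXCEPTIONS


def ot_script_tag(iso_script: str) -> str:
    """Map an ISO 15924 code (e.g. 'Latn', 'Hira') to an OpenType script tag."""
    if iso_script in SCRIPT_EXCEPTIONS:
        return SCRIPT_EXCEPTIONS[iso_script]   # e.g. 'Hira' -> 'kana', 'Zyyy' -> 'DFLT'
    return iso_script.lower()                  # default rule: lowercase the ISO tag


print(ot_script_tag("Latn"))      # latn
print(ot_script_tag("Hira"))      # kana
print(NEW_SCRIPT_TAGS["Deva"])    # ('dev2',) -- some Indic scripts also have a v2 tag
```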
 
spaces/Datasculptor/AIart_sources_of_inspiration/app.py DELETED
@@ -1,29 +0,0 @@
- import tensorflow as tf
- import pandas as pd
- import gradio as gr
-
- authors_df = pd.read_csv('authors.csv')
- labels = sorted(list(authors_df.name))
-
- model = tf.keras.models.load_model('efficientnetb0.h5')
-
- description = 'This is a DEMO that attempts to recognize the inspirations used by the AI art generator. After uploading an image, the application displays the predicted artist along with the prediction probabilities for the top three authors. The DEMO uses the EfficientNetB0 convolutional neural network as a base model whose classifier was modified and trained on the 8,000+ paintings from the [Kaggle](https://www.kaggle.com/datasets/ikarus777/best-artworks-of-all-time) dataset. Model trained by osydorchuk89. Given the dataset limitations, the model only recognizes paintings of [50 artists](https://huggingface.co/spaces/osydorchuk/painting_authors/blob/main/authors.csv).'
-
- def predict_author(input):
-     if input is None:
-         return 'Please upload an image'
-     input = input.reshape((-1, 224, 224, 3))
-     prediction = model.predict(input).flatten()
-     confidences = {labels[i]: float(prediction[i]) for i in range(50)}
-     return confidences
-
- demo = gr.Interface(
-     title='The AI art generator sources of inspiration',
-     description=description,
-     fn=predict_author,
-     inputs=gr.Image(shape=(224, 224)),
-     outputs=gr.Label(num_top_classes=3),
-     examples=['test_pics/eva_miro.jpg', 'test_pics/eva_bosch.jpg', 'test_pics/eva_miro_2.jpg', 'test_pics/eva_rtology.jpg']
- )
-
- demo.launch()
 
spaces/DhruvShek/chatlm/models.py DELETED
@@ -1,162 +0,0 @@
- import torch
- import torch.nn as nn
- import math
- import torch.nn.functional as F
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-
- class Embeddings(nn.Module):
-     """
-     Implements embeddings of the words and adds their positional encodings.
-     """
-     def __init__(self, vocab_size, d_model, max_len=50):
-         super(Embeddings, self).__init__()
-         self.d_model = d_model
-         self.dropout = nn.Dropout(0.1)
-         self.embed = nn.Embedding(vocab_size, d_model)
-         self.pe = self.create_positional_encoding(max_len, self.d_model)
-         self.dropout = nn.Dropout(0.1)
-
-     def create_positional_encoding(self, max_len, d_model):
-         pe = torch.zeros(max_len, d_model).to(device)
-         for pos in range(max_len):  # for each position of the word
-             for i in range(0, d_model, 2):  # for each dimension of the position
-                 pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / d_model)))
-                 pe[pos, i + 1] = math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
-         pe = pe.unsqueeze(0)  # include the batch size
-         return pe
-
-     def forward(self, encoded_words):
-         embedding = self.embed(encoded_words) * math.sqrt(self.d_model)
-         embedding += self.pe[:, :embedding.size(1)]  # pe is broadcast across the batch dimension of encoded_words
-         embedding = self.dropout(embedding)
-         return embedding
-
-
- class MultiHeadAttention(nn.Module):
-
-     def __init__(self, heads, d_model):
-         super(MultiHeadAttention, self).__init__()
-         assert d_model % heads == 0
-         self.d_k = d_model // heads
-         self.heads = heads
-         self.dropout = nn.Dropout(0.1)
-         self.query = nn.Linear(d_model, d_model)
-         self.key = nn.Linear(d_model, d_model)
-         self.value = nn.Linear(d_model, d_model)
-         self.concat = nn.Linear(d_model, d_model)
-
-     def forward(self, query, key, value, mask):
-         """
-         query, key, value of shape: (batch_size, max_len, 512)
-         mask of shape: (batch_size, 1, 1, max_words)
-         """
-         # (batch_size, max_len, 512)
-         query = self.query(query)
-         key = self.key(key)
-         value = self.value(value)
-
-         # (batch_size, max_len, 512) --> (batch_size, max_len, h, d_k) --> (batch_size, h, max_len, d_k)
-         query = query.view(query.shape[0], -1, self.heads, self.d_k).permute(0, 2, 1, 3)
-         key = key.view(key.shape[0], -1, self.heads, self.d_k).permute(0, 2, 1, 3)
-         value = value.view(value.shape[0], -1, self.heads, self.d_k).permute(0, 2, 1, 3)
-
-         # (batch_size, h, max_len, d_k) matmul (batch_size, h, d_k, max_len) --> (batch_size, h, max_len, max_len)
-         scores = torch.matmul(query, key.permute(0, 1, 3, 2)) / math.sqrt(query.size(-1))
-         scores = scores.masked_fill(mask == 0, -1e9)  # (batch_size, h, max_len, max_len)
-         weights = F.softmax(scores, dim=-1)  # (batch_size, h, max_len, max_len)
-         weights = self.dropout(weights)
-         # (batch_size, h, max_len, max_len) matmul (batch_size, h, max_len, d_k) --> (batch_size, h, max_len, d_k)
-         context = torch.matmul(weights, value)
-         # (batch_size, h, max_len, d_k) --> (batch_size, max_len, h, d_k) --> (batch_size, max_len, h * d_k)
-         context = context.permute(0, 2, 1, 3).contiguous().view(context.shape[0], -1, self.heads * self.d_k)
-         # (batch_size, max_len, h * d_k)
-         interacted = self.concat(context)
-         return interacted
-
-
- class FeedForward(nn.Module):
-
-     def __init__(self, d_model, middle_dim=2048):
-         super(FeedForward, self).__init__()
-         self.fc1 = nn.Linear(d_model, middle_dim)
-         self.fc2 = nn.Linear(middle_dim, d_model)
-         self.dropout = nn.Dropout(0.1)
-
-     def forward(self, x):
-         out = F.relu(self.fc1(x))
-         out = self.fc2(self.dropout(out))
-         return out
-
-
- class EncoderLayer(nn.Module):
-
-     def __init__(self, d_model, heads):
-         super(EncoderLayer, self).__init__()
-         self.layernorm = nn.LayerNorm(d_model)
-         self.self_multihead = MultiHeadAttention(heads, d_model)
-         self.feed_forward = FeedForward(d_model)
-         self.dropout = nn.Dropout(0.1)
-
-     def forward(self, embeddings, mask):
-         interacted = self.dropout(self.self_multihead(embeddings, embeddings, embeddings, mask))
-         interacted = self.layernorm(interacted + embeddings)
-         feed_forward_out = self.dropout(self.feed_forward(interacted))
-         encoded = self.layernorm(feed_forward_out + interacted)
-         return encoded
-
-
- class DecoderLayer(nn.Module):
-
-     def __init__(self, d_model, heads):
-         super(DecoderLayer, self).__init__()
-         self.layernorm = nn.LayerNorm(d_model)
-         self.self_multihead = MultiHeadAttention(heads, d_model)
-         self.src_multihead = MultiHeadAttention(heads, d_model)
-         self.feed_forward = FeedForward(d_model)
-         self.dropout = nn.Dropout(0.1)
-
-     def forward(self, embeddings, encoded, src_mask, target_mask):
-         query = self.dropout(self.self_multihead(embeddings, embeddings, embeddings, target_mask))
-         query = self.layernorm(query + embeddings)
-         interacted = self.dropout(self.src_multihead(query, encoded, encoded, src_mask))
-         interacted = self.layernorm(interacted + query)
-         feed_forward_out = self.dropout(self.feed_forward(interacted))
-         decoded = self.layernorm(feed_forward_out + interacted)
-         return decoded
-
-
- class Transformer(nn.Module):
-
-     def __init__(self, d_model, heads, num_layers, word_map):
-         super(Transformer, self).__init__()
-         self.d_model = d_model
-         self.vocab_size = len(word_map)
-         self.embed = Embeddings(self.vocab_size, d_model)
-         self.encoder = nn.ModuleList([EncoderLayer(d_model, heads) for _ in range(num_layers)])
-         self.decoder = nn.ModuleList([DecoderLayer(d_model, heads) for _ in range(num_layers)])
-         self.logit = nn.Linear(d_model, self.vocab_size)
-
-     def encode(self, src_words, src_mask):
-         src_embeddings = self.embed(src_words)
-         for layer in self.encoder:
-             src_embeddings = layer(src_embeddings, src_mask)
-         return src_embeddings
-
-     def decode(self, target_words, target_mask, src_embeddings, src_mask):
-         tgt_embeddings = self.embed(target_words)
-         for layer in self.decoder:
-             tgt_embeddings = layer(tgt_embeddings, src_embeddings, src_mask, target_mask)
-         return tgt_embeddings
-
-     def forward(self, src_words, src_mask, target_words, target_mask):
-         encoded = self.encode(src_words, src_mask)
-         decoded = self.decode(target_words, target_mask, encoded, src_mask)
-         out = F.log_softmax(self.logit(decoded), dim=2)
-         return out
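A minimal forward-pass sketch of the model above, assuming the file is saved as `models.py`; the toy vocabulary, sequence length, and masks below are illustrative placeholders:

```python
import torch

from models import Transformer  # assumes the code above lives in models.py

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
word_map = {w: i for i, w in enumerate(["<pad>", "<start>", "<end>", "hello", "world"])}

model = Transformer(d_model=512, heads=8, num_layers=2, word_map=word_map).to(device)

src = torch.randint(0, len(word_map), (2, 10), device=device)     # (batch, seq_len) token ids
tgt = torch.randint(0, len(word_map), (2, 10), device=device)
src_mask = torch.ones(2, 1, 1, 10, device=device)                  # 1 = attend, 0 = masked out
tgt_mask = torch.tril(torch.ones(2, 1, 10, 10, device=device))     # causal mask for the decoder

out = model(src, src_mask, tgt, tgt_mask)
print(out.shape)  # (2, 10, 5): per-position log-probabilities over the vocabulary
```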
 
spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/ops/bias_act.h DELETED
@@ -1,40 +0,0 @@
- // Copyright (c) SenseTime Research. All rights reserved.
-
- // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- //
- // NVIDIA CORPORATION and its licensors retain all intellectual property
- // and proprietary rights in and to this software, related documentation
- // and any modifications thereto. Any use, reproduction, disclosure or
- // distribution of this software and related documentation without an express
- // license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- //------------------------------------------------------------------------
- // CUDA kernel parameters.
-
- struct bias_act_kernel_params
- {
-     const void* x;      // [sizeX]
-     const void* b;      // [sizeB] or NULL
-     const void* xref;   // [sizeX] or NULL
-     const void* yref;   // [sizeX] or NULL
-     const void* dy;     // [sizeX] or NULL
-     void*       y;      // [sizeX]
-
-     int   grad;
-     int   act;
-     float alpha;
-     float gain;
-     float clamp;
-
-     int sizeX;
-     int sizeB;
-     int stepB;
-     int loopX;
- };
-
- //------------------------------------------------------------------------
- // CUDA kernel selection.
-
- template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
-
- //------------------------------------------------------------------------
 
spaces/DunnBC22/Password_Strength_Classifier_with_CodeBERT/app.py DELETED
@@ -1,46 +0,0 @@
- import os
-
- os.system('python -m pip install --upgrade pip')
- os.system('pip install transformers torch')
-
- import gradio as gr
- from transformers import pipeline
-
- unique_classes = [
-     "Weak",
-     "Medium",
-     "Strong"
- ]
-
- id2label = {f"LABEL_{idx}": label for idx, label in enumerate(unique_classes)}
-
- def classify_password(text):
-     password_clf = pipeline(model="DunnBC22/codebert-base-Password_Strength_Classifier")
-     password_result = password_clf(text)
-     return f"The password is {id2label[password_result[0]['label']]} with a probability of {password_result[0]['score']*100:.2f}"
-
- title = "Classify Password Strength"
- description = """
- This is a demo of a password classifier. The feedback should not be taken as a guarantee of a password's strength.
- """
-
- article = """
- <p style='text-align: center'>
- | <a href='https://arxiv.org/abs/2002.08155'>CodeBERT: A Pre-Trained Model for Programming & Natural Languages</a>
- | <a href='https://huggingface.co/microsoft/codebert-base'>Microsoft CodeBERT-Base Documentation</a>
- | <a href='https://github.com/DunnBC22/NLP_Projects/blob/main/Password%20Strength%20Classification%20(MC)/CodeBERT-Base%20-%20Password_Classifier.ipynb'>My Code for this Fine-Tuned Project</a>
- | <a href='https://www.kaggle.com/datasets/bhavikbb/password-strength-classifier-dataset'>Dataset Source</a>
- |</p>
- """
-
- examples = ['94311163nobp', 'mpompo1', 'dK4dWOjM1OAPeisw']
-
- gr.Interface(fn=classify_password,
-              inputs=gr.inputs.Textbox(),
-              outputs=gr.outputs.Textbox(),
-              title=title,
-              article=article,
-              description=description,
-              examples=examples,
-              theme='abidlabs/dracula_revamped'
-              ).launch()