parquet-converter committed
Commit 6ee9d1c · 1 Parent(s): 2224ffa

Update parquet files (step 25 of 296)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete changeset.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/CyberLink PowerDVD Ultra 19.0.2512.63 Crack __HOT__ Crack __HOT__.md +0 -149
  2. spaces/1gistliPinn/ChatGPT4/Examples/((FREE)) Free Download Marc Mentat Software.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Accelrys Materials Studio V6 Windows-CLoNY ISO Download Pc 2021.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download One Man Band 10 Full Version.md +0 -6
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cute Animal Match A Free and Fun APK Game for Android Devices.md +0 -25
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Final Bricks Breaker Mod APK v1.0.54 for Android.md +0 -100
  7. spaces/1phancelerku/anime-remove-background/Bubble Shooter A Colorful and Exciting Game for PC Users.md +0 -120
  8. spaces/1phancelerku/anime-remove-background/Call of Duty Mobile Mod APK Download - Unlock All Weapons Skins and Perks.md +0 -87
  9. spaces/1phancelerku/anime-remove-background/Download My Talking Angela Full APK and Join the Adventure with Your Furry Friend.md +0 -67
  10. spaces/1phancelerku/anime-remove-background/Download Orange Loan APK and Get High-Limit Loans without Collateral.md +0 -175
  11. spaces/7hao/bingo/src/lib/bots/bing/utils.ts +0 -87
  12. spaces/801artistry/RVC801/julius/utils.py +0 -101
  13. spaces/A00001/bingothoo/src/lib/hooks/use-at-bottom.tsx +0 -23
  14. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/report_results.py +0 -37
  15. spaces/AIWaves/Debate/src/agents/Component/__init__.py +0 -3
  16. spaces/AIZero2HeroBootcamp/AnimatedGifGallery/README.md +0 -13
  17. spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/app.py +0 -37
  18. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/matchers.js +0 -1
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/AiService.py +0 -36
  20. spaces/Adapter/CoAdapter/ldm/models/diffusion/ddpm.py +0 -1329
  21. spaces/Adapting/YouTube-Downloader/tube/__init__.py +0 -3
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/AlphaMaskImage.d.ts +0 -2
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.d.ts +0 -38
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/Factory.js +0 -13
  25. spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/chunk-vendors-legacy.9281b25c.js +0 -0
  26. spaces/AlekseyCalvin/Make_Putin_Queer_Please-use-trp-token/README.md +0 -12
  27. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/bg_motion_predictor.py +0 -24
  28. spaces/Alican/pixera/util/visualizer.py +0 -257
  29. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/train_boundary.py +0 -158
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion_flax.py +0 -654
  31. spaces/Andy1621/uniformer_image_detection/configs/ssd/ssd300_coco.py +0 -62
  32. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py +0 -157
  33. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py +0 -9
  34. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py +0 -9
  35. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/models_settings.py +0 -219
  36. spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/dpt_depth.py +0 -109
  37. spaces/Anonymous-sub/Rerender/gmflow_module/utils/frame_utils.py +0 -131
  38. spaces/ArkanDash/rvc-models-new/lib/infer_pack/models_onnx.py +0 -819
  39. spaces/Arsenii2023/Demo1/app.py +0 -7
  40. spaces/ArtificialWF/Voice-Recognition/README.md +0 -13
  41. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py +0 -155
  42. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/specifiers.py +0 -802
  43. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/retinanet.py +0 -53
  44. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/rpn.py +0 -533
  45. spaces/Benson/text-generation/Examples/Cazador Asesino Hack Mod Apk Todos Los Personajes Desbloqueados.md +0 -65
  46. spaces/BetterAPI/BetterChat/src/lib/types/Message.ts +0 -5
  47. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/__init__.py +0 -177
  48. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/rotate.py +0 -64
  49. spaces/BigData-KSU/VQA-in-Medical-Imagery/README.md +0 -12
  50. spaces/Bokanovskii/Image-to-music/app.py +0 -429
spaces/1acneusushi/gradio-2dmoleculeeditor/data/CyberLink PowerDVD Ultra 19.0.2512.63 Crack __HOT__ Crack __HOT__.md DELETED
@@ -1,149 +0,0 @@
-
- <h1>CyberLink PowerDVD Ultra 19.0.2512.63 Crack: The Ultimate Media Player for Windows</h1>
- <h2>Introduction</h2>
- <p>If you are looking for a powerful and versatile media player that can handle any type of media file, you should check out CyberLink PowerDVD Ultra 19.0.2512.63 Crack. This is a cracked version of the original software that allows you to enjoy all the premium features without paying a dime.</p>
- <h2>CyberLink PowerDVD Ultra 19.0.2512.63 Crack crack</h2><br /><p><b><b>Download</b> &#10038; <a href="https://byltly.com/2uKw36">https://byltly.com/2uKw36</a></b></p><br /><br />
- <h3>What is CyberLink PowerDVD Ultra 19.0.2512.63 Crack?</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack is a software that lets you play, stream, download, and organize your media files on your Windows PC. It supports a wide range of formats, including DVD, Blu-ray, CD, MP4, MKV, AVI, WMV, FLV, MP3, AAC, WAV, FLAC, and more.</p>
- <h3>Why do you need CyberLink PowerDVD Ultra 19.0.2512.63 Crack?</h3>
- <p>You need CyberLink PowerDVD Ultra 19.0.2512.63 Crack because it offers you many benefits that other media players don't have, such as:</p>
- <ul>
- <li>It delivers stunning video quality with TrueTheater enhancements that optimize brightness, color, contrast, and sharpness.</li>
- <li>It supports the latest technologies such as 4K resolution, high dynamic range (HDR), and 360-degree videos that immerse you in the action.</li>
- <li>It enhances your audio experience with TrueTheater Sound that boosts volume, clarity, bass, and surround sound.</li>
- <li>It lets you stream and cast your media files to any device such as smart TVs, game consoles, Chromecast, Apple TV, Roku, and more.</li>
- <li>It allows you to download and watch your favorite online videos offline with its built-in YouTube downloader.</li>
- <li>It helps you organize and manage your media library with its intuitive interface and smart features such as face recognition, scene search, and auto-tagging.</li>
- </ul>
- <h2>Features of CyberLink PowerDVD Ultra 19.0.2512.63 Crack</h2>
- <p>In this section, we will go over some of the most impressive features of CyberLink PowerDVD Ultra 19.0.2512.63 Crack in more detail.</p>
- <h3>Playback of any media format</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack can play any media format you throw at it without any hassle or compatibility issues. Whether it's a DVD disc, a Blu-ray disc, a CD disc, or a digital file on your hard drive or cloud storage, it can handle it with ease.</p>
- <p>CyberLink PowerDVD Ultra 19 full version with crack<br />
- How to activate CyberLink PowerDVD Ultra 19 for free<br />
- CyberLink PowerDVD Ultra 19.0.2512.63 Crack download link<br />
- CyberLink PowerDVD Ultra 19 serial key generator<br />
- CyberLink PowerDVD Ultra 19 patch and keygen<br />
- CyberLink PowerDVD Ultra 19 license code and activation<br />
- CyberLink PowerDVD Ultra 19 cracked software for Windows<br />
- CyberLink PowerDVD Ultra 19 latest update and crack<br />
- CyberLink PowerDVD Ultra 19 offline installer and crack<br />
- CyberLink PowerDVD Ultra 19 registration and crack<br />
- CyberLink PowerDVD Ultra 19 torrent file and crack<br />
- CyberLink PowerDVD Ultra 19 crack only<br />
- CyberLink PowerDVD Ultra 19 crack fix<br />
- CyberLink PowerDVD Ultra 19 crack instructions<br />
- CyberLink PowerDVD Ultra 19 crack review<br />
- CyberLink PowerDVD Ultra 19 features and crack<br />
- CyberLink PowerDVD Ultra 19 system requirements and crack<br />
- CyberLink PowerDVD Ultra 19 installation guide and crack<br />
- CyberLink PowerDVD Ultra 19 troubleshooting and crack<br />
- CyberLink PowerDVD Ultra 19 alternatives and crack<br />
- CyberLink PowerDVD Ultra 19 comparison and crack<br />
- CyberLink PowerDVD Ultra 19 benefits and crack<br />
- CyberLink PowerDVD Ultra 19 disadvantages and crack<br />
- CyberLink PowerDVD Ultra 19 tips and tricks and crack<br />
- CyberLink PowerDVD Ultra 19 best settings and crack<br />
- CyberLink PowerDVD Ultra 19 support and crack<br />
- CyberLink PowerDVD Ultra 19 feedback and crack<br />
- CyberLink PowerDVD Ultra 19 testimonials and crack<br />
- CyberLink PowerDVD Ultra 19 FAQs and crack<br />
- CyberLink PowerDVD Ultra 19 forum and crack<br />
- CyberLink PowerDVD Ultra 19 blog and crack<br />
- CyberLink PowerDVD Ultra 19 video tutorial and crack<br />
- CyberLink PowerDVD Ultra 19 demo and crack<br />
- CyberLink PowerDVD Ultra 19 free trial and crack<br />
- CyberLink PowerDVD Ultra 19 discount and crack<br />
- CyberLink PowerDVD Ultra 19 coupon code and crack<br />
- CyberLink PowerDVD Ultra 19 deal and crack<br />
- CyberLink PowerDVD Ultra 19 offer and crack<br />
- CyberLink PowerDVD Ultra 19 bundle and crack<br />
- CyberLink PowerDVD Ultra 19 upgrade and crack<br />
- CyberLink PowerDVD Ultra 19 refund policy and crack<br />
- CyberLink PowerDVD Ultra 19 warranty and crack<br />
- CyberLink PowerDVD Ultra 19 customer service and crack<br />
- CyberLink PowerDVD Ultra 19 contact information and crack<br />
- CyberLink PowerDVD Ultra 19 privacy policy and crack<br />
- CyberLink PowerDVD Ultra 19 terms of service and crack<br />
- CyberLink PowerDVD Ultra 19 disclaimer and crack<br />
- CyberLink PowerDVD Ultra 19 affiliate program and crack<br />
- CyberLink PowerDVD Ultra 19 partner program and crack</p>
- <p>You can also play ISO files directly without mounting them or extracting them first.</p>
- <h3>Support for 4K, HDR, and 360-degree videos</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack supports the latest video technologies that deliver stunning visuals and immersive experiences.</p>
- <p>You can watch 4K videos that have four times more pixels than Full HD videos for sharper and clearer images.</p>
- <p>You can watch HDR videos that have a wider range of colors and contrast for more realistic and lifelike scenes.</p>
- <p>You can watch 360-degree videos that let you explore every angle of the video with your mouse or keyboard.</p>
- <h3>Enhanced audio quality with TrueTheater Sound</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack enhances your audio quality with TrueTheater Sound that applies various sound effects to your media files.</p>
- <p>You can boost the volume level without distortion or clipping with Volume Booster.</p>
- <p>You can improve the clarity and detail of dialogues and vocals with Dialogue Enhancer.</p>
- <p>You can enhance the bass and depth of low-frequency sounds with Bass Enhancer.</p>
- <p>You can create a surround sound effect with Virtual Surround that simulates a multi-channel speaker system.</p>
- <h3>Stream and cast media to any device</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack lets you stream and cast your media files to any device on your network or online.</p>
- <p>You can stream your media files to smart TVs, game consoles, Chromecast devices, Apple TV devices, Roku devices, and more using DLNA or Miracast protocols.</p>
- <p>You can also cast your media files to any device using CyberLink's cloud service that lets you access your files from anywhere.</p>
- <h3>Download and watch videos offline</h3>
- <p>CyberLink PowerDVD Ultra 19.0.2512.63 Crack allows you to download and watch online videos offline with its built-in YouTube downloader.</p>
- <p>You can download videos from YouTube in various resolutions and formats such as MP4, WebM, FLV, and more.</p>
- <p>You can also download entire playlists or channels with one click.</p>
- <p>You can then watch your downloaded videos offline using CyberLink PowerDVD Ultra 19 or transfer them to other devices for later viewing.</p>
- <h3>Organize and manage your media library</h3>
- <p>CyberLink PowerDVD Ultra 19 helps you organize and manage your media library with its intuitive interface and smart features.</p>
- <p>You can browse your media files by folders, albums, artists, genres, or ratings.</p>
- <p>You can also use face recognition to sort your photos by people, scene search to find specific moments in your videos, and auto-tagging to add metadata to your files automatically.</p>
- <h2>How to install and activate CyberLink PowerDVD Ultra 19 Crack</h2>
- <p>In this section, we will show you how to install and activate CyberLink PowerDVD Ultra 19 Crack on your Windows PC.</p>
- <h3>Download the setup file and crack file from the link below</h3>
- <p>The first step is to download the setup file and crack file from the link below:</p>
- <table>
- <tr><td><b>Setup File</b></td><td><b>Crack File</b></td></tr>
- <tr><td><a href="https://www.cyberlink.com/downloads/trials/powerdvd-ultra/download_en_US.html">https://www.cyberlink.com/downloads/trials/powerdvd-ultra/download_en_US.html</a></td><td><a href="https://cracksway.com/cyberlink-powerdvd-crack/">https://cracksway.com/cyberlink-powerdvd-crack/</a></td></tr>
- </table>
- <p>Save them in a folder on your PC where you can easily find them later.</p>
- <h3>Install the setup file and run the program</h3>
- <p>The next step is to install the setup file by following these steps:</p>
- <ol>
- <li>Double-click on the setup file to launch the installation wizard.</li>
- <li>Accept the license agreement and click Next.</li>
- <li>Select the destination folder where you want to install the program and click Next.</li>
- <li>Select the components you want to install and click Next.</li>
- <li>Select whether you want to create shortcuts on your desktop or start menu and click Next.</li>
- <li>Click Install to begin the installation process.</li>
- <li>Wait for the installation to complete and click Finish.</li>
- <li>Run the program from the shortcut on your desktop or start menu.</li>
- </ol>
- <h3>Copy the crack file and paste it into the installation folder</h3>
- <h3>Copy the crack file and paste it into the installation folder</h3>
- <p>The final step is to copy the crack file and paste it into the installation folder by following these steps:</p>
- <ol>
- <li>Right-click on the crack file and select Copy.</li>
- <li>Go to the installation folder where you installed the program. The default location is C:\Program Files (x86)\CyberLink\PowerDVD19.</li>
- <li>Right-click on an empty space and select Paste.</li>
- <li>Click Yes to replace the existing file.</li>
- <li>Close the folder and run the program again.</li>
- </ol>
- <p>Congratulations! You have successfully installed and activated CyberLink PowerDVD Ultra 19.0.2512.63 Crack on your PC.</p>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you what CyberLink PowerDVD Ultra 19.0.2512.63 Crack is, why you need it, what features it offers, and how to install and activate it on your PC.</p>
- <p>We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.</p>
- <p>If you want to download and try CyberLink PowerDVD Ultra 19.0.2512.63 Crack for yourself, you can use the link below to get it for free.</p>
- <p>Thank you for reading and happy watching!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about CyberLink PowerDVD Ultra 19.0.2512.63 Crack:</p>
- <ul>
- <li><b>Is CyberLink PowerDVD Ultra 19.0.2512.63 Crack safe to use?</b></li>
- <p>Yes, CyberLink PowerDVD Ultra 19.0.2512.63 Crack is safe to use as long as you download it from a trusted source and scan it with an antivirus program before using it.</p>
- <li><b>Does CyberLink PowerDVD Ultra 19.0.2512.63 Crack work on Windows 10?</b></li>
- <p>Yes, CyberLink PowerDVD Ultra 19.0.2512.63 Crack works on Windows 10 as well as Windows 8, Windows 7, Windows Vista, and Windows XP.</p>
- <li><b>Can I use CyberLink PowerDVD Ultra 19.0.2512.63 Crack on multiple PCs?</b></li>
- <p>Yes, you can use CyberLink PowerDVD Ultra 19.0.2512.63 Crack on multiple PCs as long as you have the crack file and the setup file on each PC.</p>
- <li><b>Can I update CyberLink PowerDVD Ultra 19.0.2512.63 Crack to the latest version?</b></li>
- <p>No, you cannot update CyberLink PowerDVD Ultra 19 to the latest version because it will overwrite the crack file and deactivate the program. You have to wait for a new crack file to be released for the latest version.</p>
- <li><b>What are some alternatives to CyberLink PowerDVD Ultra 19 Crack?</b></li>
- <p>Some alternatives to CyberLink PowerDVD Ultra 19 Crack are VLC Media Player, KMPlayer, GOM Player, PotPlayer, and Media Player Classic.</p>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/((FREE)) Free Download Marc Mentat Software.md DELETED
@@ -1,6 +0,0 @@
-
- <p>the analysis and simulation of structural behavior are complicated and expensive. the finite element method is an effective approach for analyzing structural behavior because it is easier to specify boundary conditions and loads than to specify the complex mechanical behavior of the component. in a finite element analysis, the component is modeled as a solid, using a mathematical function to approximate the behavior of the component. the mathematical function is called the shape function. the shape function determines the exact geometry of the component and provides the basis for the analysis. finite element analysis is an integral part of many other disciplines of engineering. it is an important part of structural analysis because it allows for a more accurate analysis of structural behavior. it is used in many areas of engineering. because of the importance of finite element analysis, many companies have developed computer software to perform finite element analysis.</p>
- <p>a general-purpose finite element program is much easier to use than a specialty structural analysis software. the most basic finite element programs (such as the one shown to the right) are easy to use. the user interacts directly with the program. they do not need to learn special commands and symbols. this enables them to use a program quickly and effectively. some finite element programs can simulate simple mechanical behavior. the user can specify loads, boundary conditions, and other aspects of the analysis. they can also import other data, such as geometric and material data. these programs often have very simple user interfaces. most are not graphical. however, some of them can be used to perform a limited amount of analysis.</p>
- <h2>free download marc mentat software</h2><br /><p><b><b>Download Zip</b> === <a href="https://imgfil.com/2uxYF3">https://imgfil.com/2uxYF3</a></b></p><br /><br /> 899543212b<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Accelrys Materials Studio V6 Windows-CLoNY ISO Download Pc 2021.md DELETED
@@ -1,6 +0,0 @@
- <h2>Accelrys Materials Studio v6 Windows-CLoNY ISO download pc</h2><br /><p><b><b>DOWNLOAD</b> &#10001; <a href="https://imgfil.com/2uxXdu">https://imgfil.com/2uxXdu</a></b></p><br /><br />
- <br />
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Download One Man Band 10 Full Version.md DELETED
@@ -1,6 +0,0 @@
- <h2>download one man band 10 full version</h2><br /><p><b><b>Download</b> &#10037;&#10037;&#10037; <a href="https://imgfil.com/2uxZ4c">https://imgfil.com/2uxZ4c</a></b></p><br /><br />
- <br />
- "Werner Hirzel One Man Band", World-Record Holder for 51 Piece One ... welkom bike freaks and bicycle lovers on nthis blog full of nice bicycles, cool bike ... and since then three more versions have evolved from the original, each one ... to download showing a picture of a street musician, a one-man band. ... Amanda 10. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cute Animal Match A Free and Fun APK Game for Android Devices.md DELETED
@@ -1,25 +0,0 @@
-
- <table>
- <tr>
- <td>
- Cute Animal Match APK: A Fun and Educational Game for Kids Do you love animals and puzzles? Do you want to play a game that is both fun and educational for your kids? If yes, then you should try Cute Animal Match APK, a free and safe game that will keep you and your kids entertained for hours. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, how to play it, what are its features and benefits, and what are some tips and tricks for playing it. Let's get started! <h2>What is Cute Animal Match APK?</h2>
- Cute Animal Match APK is a game that lets you connect cute animals and solve puzzles. It is developed by Nice2Meet, a company that specializes in creating educational games for kids. The game is suitable for all ages, but especially for preschoolers who want to learn about animals, numbers, colors, shapes, and more. The game has over 100 levels of varying difficulty, each with a different animal theme and puzzle. You can play the game offline or online, and you can also share your progress and achievements with your friends on social media. <h3>How to download and install Cute Animal Match APK?</h3>
- Downloading and installing Cute Animal Match APK is very easy. You can follow these simple steps: - Go to [Cute Animal Match APK for Android Download - APKPure.com](^1^) on your browser. - Click on the green "Download APK" button. - Wait for the file to download on your device. - Open the file and follow the instructions to install the game. - Enjoy playing Cute Animal Match APK! <h3>How to play Cute Animal Match APK?</h3>
- Playing Cute Animal Match APK is very simple. You just need to swipe your finger on the screen to connect two or more animals of the same kind. The more animals you connect, the more points you get. You also need to complete the objectives of each level, such as collecting a certain number of animals, clearing a certain number of tiles, or reaching a certain score. You can use power-ups to help you in your gameplay, such as bombs, magnets, or shuffles. You can also earn coins by completing levels or watching ads, which you can use to buy more power-ups or unlock new animals. <h4>Connect the animals</h4>
- To connect the animals, you need to swipe your finger on the screen in any direction. You can connect animals horizontally, vertically, or diagonally. You can also make loops or zigzags to connect more animals. The more animals you connect, the higher your score will be. You can also create combos by connecting multiple groups of animals in a row. <h4>Use the power-ups</h4>
- Power-ups are special items that can help you in your gameplay. You can use them by tapping on them on the screen. There are three types of power-ups in Cute Animal Match APK: - The bomb: It will match animal puzzles and destroy all the cute animals around in radius around and catch the match lite. - The magnet: It will attract all the animals of the same kind as the one you tap on. - The shuffle: It will shuffle all the animals on the board. You can get power-ups by connecting five or more animals of the same kind, or by buying them with coins. <h4>Complete the levels</h4>
- To complete a level, you need to fulfill the objectives that are shown at the top of the screen. The objectives can vary depending on the level, such as: - Collect a certain number of animals, such as 10 cats, 15 dogs, or 20 rabbits. - Clear a certain number of tiles, such as 30 grass tiles, 40 sand tiles, or 50 water tiles. - Reach a certain score, such as 1000 points, 2000 points, or 3000 points. You have a limited number of moves to complete each level, so use them wisely. You can see how many moves you have left at the bottom of the screen. If you run out of moves before completing the objectives, you will lose the level and have to try again. If you complete the objectives before running out of moves, you will win the level and get bonus points for the remaining moves. <h2>What are the features and benefits of Cute Animal Match APK?</h2>
- Cute Animal Match APK is not just a fun game, but also a beneficial one. Here are some of the features and benefits of playing this game: <h3>Cute and colorful graphics</h3>
- The game has cute and colorful graphics that will appeal to kids and adults alike. The animals are adorable and animated, and the backgrounds are bright and cheerful. The game also has smooth and easy controls that make it enjoyable to play. <h3>Various animals and puzzles</h3>
- The game has over 100 levels of different animals and puzzles. You can meet various animals from different habitats, such as cats, dogs, rabbits, pandas, lions, elephants, penguins, dolphins, and more. You can also solve different puzzles that challenge your logic and creativity, such as matching animals by color, shape, or number. <h3>Educational and entertaining gameplay</h3>
- The game is not only entertaining, but also educational for kids. It helps them learn about animals, numbers, colors, shapes, and more. It also improves their memory, concentration, hand-eye coordination, and problem-solving skills. The game is suitable for all ages, but especially for preschoolers who want to have fun while learning. <h3>Free and safe to use</h3>
- The game is free and safe to use. You don't need to pay anything to download or play it. You also don't need to worry about any viruses or malware that might harm your device. The game is tested and verified by APKPure.com, a trusted source for downloading Android apps. <h2>What are some tips and tricks for playing Cute Animal Match APK?</h2>
- If you want to play Cute Animal Match APK like a pro, here are some tips and tricks that you can use: <h3>Plan your moves ahead</h3>
- Before you swipe your finger on the screen, take a moment to look at the board and plan your moves ahead. Try to connect as many animals as possible in one swipe, and avoid leaving isolated animals that are hard to match. Also, try to match the animals that are related to the objectives first, such as the ones that have a number or a color on them. <h3>Save your power-ups for later</h3>
- Power-ups can be very helpful in your gameplay, but they are also limited in number. You can get them by connecting five or more animals of the same kind, or by buying them with coins. However, you should save them for later when you really need them, such as when you are stuck or running out of moves. Don't waste them on easy levels or unnecessary matches. <h3>Watch ads for extra rewards</h3>
- If you want to get more coins or power-ups without spending real money, you can watch ads for extra rewards. You can watch ads after completing a level or when you run out of moves. You can also watch ads to get more lives when you lose all of them. Watching ads is optional and voluntary, but it can help you in your gameplay. <h2>Conclusion</h2>
- Cute Animal Match APK is a fun and educational game that lets you connect cute animals and solve puzzles. It is suitable for all ages, but especially for preschoolers who want to learn about animals, numbers, colors, shapes, and more. The game has over 100 levels of varying difficulty, each with a different animal theme and puzzle. You can play the game offline or online, and you can also share your progress and achievements with your friends on social media. The game has cute and colorful graphics, various animals and puzzles, and educational and entertaining gameplay. The game is free and safe to use, and you can download it from APKPure.com. If you want to play Cute Animal Match APK like a pro, you can use some tips and tricks, such as planning your moves ahead, saving your power-ups for later, and watching ads for extra rewards. Cute Animal Match APK is a game that you and your kids will love, so download it today and have fun! <h3>FAQs</h3>
- Here are some frequently asked questions about Cute Animal Match APK: - Q: Is Cute Animal Match APK compatible with my device? - A: Cute Animal Match APK is compatible with most Android devices that have Android 4.4 or higher. - Q: How can I update Cute Animal Match APK to the latest version? - A: You can update Cute Animal Match APK by visiting [Cute Animal Match APK for Android Download - APKPure.com] and downloading the latest version of the game. - Q: How can I contact the developer of Cute Animal Match APK? - A: You can contact the developer of Cute Animal Match APK by visiting their website at [Nice2Meet] or by sending them an email at [email protected]. - Q: How can I rate and review Cute Animal Match APK? - A: You can rate and review Cute Animal Match APK by visiting [Cute Animal Match APK for Android Download - APKPure.com] and clicking on the "Rate" or "Review" button. - Q: How can I share Cute Animal Match APK with my friends? - A: You can share Cute Animal Match APK with your friends by clicking on the "Share" button on the game screen. You can choose to share the game via Facebook, Twitter, WhatsApp, or other social media platforms.</p>
- <h2>cute animal match apk</h2><br /><p><b><b>Download File</b> ->->->-> <a href="https://urlin.us/2uSZI7">https://urlin.us/2uSZI7</a></b></p><br /><br /> 197e85843d<br />
- <br />
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Final Bricks Breaker Mod APK v1.0.54 for Android.md DELETED
@@ -1,100 +0,0 @@
-
- <h1>Final Bricks Breaker Mod APK: A Fun and Challenging Arcade Game</h1>
- <p>If you are looking for a simple yet addictive arcade game to kill some time, you should try Final Bricks Breaker. This game is a classic brick-breaking game with a modern twist. You can enjoy breaking bricks with different shapes, colors, and effects, and use various power-ups to enhance your gameplay. In this article, we will tell you more about Final Bricks Breaker and why you should download its mod apk version.</p>
- <h2>final bricks breaker mod apk</h2><br /><p><b><b>DOWNLOAD</b> >> <a href="https://urlin.us/2uT0Fz">https://urlin.us/2uT0Fz</a></b></p><br /><br />
- <h2>What is Final Bricks Breaker?</h2>
- <p>Final Bricks Breaker is a arcade game developed by mobirix, a popular developer of casual games. The game has over 10 million downloads on Google Play Store and a 4.4-star rating from more than 100,000 users. The game is suitable for all ages and can be played offline or online.</p>
- <h3>The gameplay of Final Bricks Breaker</h3>
- <p>The gameplay of Final Bricks Breaker is simple and intuitive. You just need to swipe your finger on the screen to control a paddle at the bottom and bounce a ball to hit the bricks at the top. Your goal is to break all the bricks in each level and clear the stage. The game has hundreds of levels with different layouts, themes, and difficulties. Some bricks have special effects, such as moving, rotating, exploding, or changing colors. You can also collect coins and gems by breaking bricks or completing missions. You can use these currencies to buy new balls, paddles, or power-ups.</p>
- <h3>The features of Final Bricks Breaker</h3>
- <p>Final Bricks Breaker has many features that make it fun and enjoyable to play. Some of these features are:</p>
- <ul>
- <li>Various modes: You can choose from different modes, such as Classic, Stage, Multiplayer, or Challenge mode. Each mode has its own rules and objectives.</li>
- <li>Power-ups: You can use power-ups to help you break bricks faster or easier. Some power-ups include fireball, laser, magnet, bomb, or extra life.</li>
- <li>Achievements and leaderboards: You can unlock achievements by completing certain tasks or reaching milestones. You can also compete with other players around the world on the leaderboards.</li>
- <li>Customization: You can customize your ball and paddle with different colors, shapes, and designs. You can also change the background and sound effects of the game.</li>
- </ul>
- <h2>Why download Final Bricks Breaker Mod APK?</h2>
- <p>While Final Bricks Breaker is free to play, it has some limitations and drawbacks that may affect your gaming experience. For example, you may encounter ads that pop up randomly or interrupt your gameplay. You may also run out of coins or gems quickly and have to wait for them to regenerate or buy them with real money. Moreover, some power-ups and items may be locked or require a certain level to unlock.</p>
- <p>That's why we recommend you to download Final Bricks Breaker Mod APK from our website. This mod apk version will give you unlimited coins and gems, so you can buy anything you want without worrying about the cost. You will also get unlimited lives, so you can play as long as you want without losing progress. Additionally, you will get all the power-ups and items unlocked from the start, so you can enjoy the game to the fullest. And best of all, you will get rid of all the annoying ads that ruin your fun.</p>
- <p>final bricks breaker mod apk download<br />
- final bricks breaker mod apk unlimited money<br />
- final bricks breaker mod apk latest version<br />
- final bricks breaker mod apk free<br />
- final bricks breaker mod apk android<br />
- final bricks breaker mod apk hack<br />
- final bricks breaker mod apk offline<br />
- final bricks breaker mod apk no ads<br />
- final bricks breaker mod apk 1.0.54<br />
- final bricks breaker mod apk happymod<br />
- final bricks breaker mod apk 2023<br />
- final bricks breaker mod apk unlimited gems<br />
- final bricks breaker mod apk revdl<br />
- final bricks breaker mod apk rexdl<br />
- final bricks breaker mod apk for pc<br />
- final bricks breaker mod apk online<br />
- final bricks breaker mod apk premium<br />
- final bricks breaker mod apk pro<br />
- final bricks breaker mod apk full version<br />
- final bricks breaker mod apk unlocked<br />
- final bricks breaker mod apk cheats<br />
- final bricks breaker mod apk gameplay<br />
- final bricks breaker mod apk review<br />
- final bricks breaker mod apk features<br />
- final bricks breaker mod apk tips and tricks<br />
- final bricks breaker mod apk best settings<br />
- final bricks breaker mod apk how to play<br />
- final bricks breaker mod apk guide<br />
- final bricks breaker mod apk tutorial<br />
- final bricks breaker mod apk walkthrough<br />
- final bricks breaker mod apk levels<br />
- final bricks breaker mod apk stages<br />
- final bricks breaker mod apk missions<br />
- final bricks breaker mod apk challenges<br />
- final bricks breaker mod apk achievements<br />
- final bricks breaker mod apk rewards<br />
- final bricks breaker mod apk skins<br />
- final bricks breaker mod apk balls<br />
- final bricks breaker mod apk power-ups<br />
- final bricks breaker mod apk boosters<br />
- final bricks breaker mod apk modes<br />
- final bricks breaker mod apk genres<br />
- final bricks breaker mod apk themes<br />
- final bricks breaker mod apk graphics<br />
- final bricks breaker mod apk sounds<br />
- final bricks breaker mod apk music<br />
- final bricks breaker mod apk updates<br />
- final bricks breaker mod apk bugs and fixes<br />
- final bricks breaker mod apk ratings and reviews</p>
- <h3>The benefits of Final Bricks Breaker Mod APK</h3>
- <p>Here are some of the benefits of downloading Final Bricks Breaker Mod APK:</p>
- <ul>
- <li>You will save time and money by not having to watch ads or buy coins or gems.</li>
- <li>You will have more fun and challenge by using different power-ups and items.</li>
- <li>You will have more options and variety by customizing your ball and paddle.</li <h3>How to download and install Final Bricks Breaker Mod APK</h3>
- <p>Downloading and installing Final Bricks Breaker Mod APK is very easy and fast. You just need to follow these simple steps:</p>
- <ol>
- <li>Click on the download button below to get the mod apk file.</li>
- <li>Allow your device to install apps from unknown sources by going to Settings > Security > Unknown Sources.</li>
- <li>Locate the downloaded file in your file manager and tap on it to install it.</li>
- <li>Launch the game and enjoy breaking bricks with unlimited coins, gems, lives, power-ups, and items.</li>
- </ol>
- <p>Note: If you have the original version of Final Bricks Breaker installed on your device, you need to uninstall it first before installing the mod apk version.</p>
- <h2>Conclusion</h2>
- <p>Final Bricks Breaker is a fun and challenging arcade game that will keep you entertained for hours. You can break bricks with different shapes, colors, and effects, and use various power-ups to enhance your gameplay. You can also customize your ball and paddle, and compete with other players on the leaderboards. However, if you want to enjoy the game without any limitations or interruptions, you should download Final Bricks Breaker Mod APK from our website. This mod apk version will give you unlimited coins, gems, lives, power-ups, and items, as well as remove all the ads. You can download Final Bricks Breaker Mod APK by clicking on the button below.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Final Bricks Breaker Mod APK:</p>
- <ul>
- <li><b>Is Final Bricks Breaker Mod APK safe to download and use?</b><br>
- Yes, Final Bricks Breaker Mod APK is safe to download and use. It does not contain any viruses or malware that can harm your device or data. It is also compatible with most Android devices.</li>
- <li><b>Do I need to root my device to use Final Bricks Breaker Mod APK?</b><br>
- No, you do not need to root your device to use Final Bricks Breaker Mod APK. It works fine on both rooted and non-rooted devices.</li>
- <li><b>Will I get banned from the game if I use Final Bricks Breaker Mod APK?</b><br>
- No, you will not get banned from the game if you use Final Bricks Breaker Mod APK. The mod apk version is undetectable by the game servers and does not affect your account or progress.</li>
- <li><b>Can I play online with other players if I use Final Bricks Breaker Mod APK?</b><br>
- Yes, you can play online with other players if you use Final Bricks Breaker Mod APK. The mod apk version does not interfere with the online mode of the game and allows you to join multiplayer matches.</li>
- <li><b>Can I update Final Bricks Breaker Mod APK when a new version of the game is released?</b><br>
- Yes, you can update Final Bricks Breaker Mod APK when a new version of the game is released. However, you may need to download the latest mod apk file from our website and install it again on your device.</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Bubble Shooter A Colorful and Exciting Game for PC Users.md DELETED
@@ -1,120 +0,0 @@
-
- <h1>Download Bubble Shooter for PC Full Version Free</h1>
- <p>Do you love playing casual games that are fun, addictive, and relaxing? Do you want to enjoy one of the most classic and popular bubble games on your PC? If you answered yes, then you are in luck! In this article, we will show you how to download Bubble Shooter for PC full version free. You will also learn more about what is Bubble Shooter, why it is so awesome, and how to play it like a pro. So, without further ado, let's get started!</p>
- <h2>What is Bubble Shooter?</h2>
- <p>Bubble Shooter is a simple yet addictive game that involves shooting bubbles to make them pop. The goal of the game is to clear all the bubbles from the screen by matching three or more bubbles of the same color. It sounds easy, but it can get challenging as the bubbles move down and fill up the screen. You have to be quick and strategic to avoid losing the game.</p>
- <h2>download bubble shooter for pc full version free</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNMre">https://jinyurl.com/2uNMre</a></b></p><br /><br />
- <h3>The history and popularity of Bubble Shooter</h3>
- <p>Bubble Shooter was originally developed by a company called Taito in 1994. It was inspired by another game called Puzzle Bobble, which was also created by Taito. Bubble Shooter became a hit among arcade gamers and soon spread to other platforms such as PC, mobile, and online. Today, Bubble Shooter is one of the most played and loved games in the world. It has millions of fans and hundreds of variations. You can find Bubble Shooter games with different themes, graphics, levels, and features.</p>
- <h3>The gameplay and features of Bubble Shooter</h3>
- <p>The gameplay of Bubble Shooter is simple and intuitive. You use your mouse or keyboard to aim and shoot bubbles from a cannon at the bottom of the screen. You have to match at least three bubbles of the same color to make them pop and disappear. You can also bounce bubbles off the walls to reach tricky spots. You get points for every bubble you pop and bonus points for popping more bubbles at once. You can also earn special bubbles that have different effects, such as bombs, rainbows, stars, and more.</p>
- <p>download bubble shooter classic for pc free<br />
- bubble shooter game pc free download windows 10<br />
- bubble shooter puzzle game for pc download<br />
- how to download bubble shooter on pc without emulator<br />
- bubble shooter deluxe pc game free download<br />
- download bubble shooter (free) from microsoft store<br />
- bubble shooter pc game online free no download<br />
- bubble shooter for pc full version crack download<br />
- download and play bubble shooter on pc & mac (emulator)<br />
- bubble shooter for windows 10 pc free download<br />
- bubble shooter offline game for pc free download<br />
- bubble shooter 3d game download for pc<br />
- bubble shooter app download for pc<br />
- bubble shooter adventure game for pc free download<br />
- bubble shooter apk download for pc windows 7<br />
- best bubble shooter games for pc free download<br />
- bubble shooter blast game download for pc<br />
- bubble shooter candy game free download for pc<br />
- classic bubble shooter game free download for pc full version<br />
- download bubble shooter game for pc windows 8.1<br />
- easy bubble shooter game free download for pc<br />
- extreme bubble shooter game download for pc<br />
- fun and addictive bubble shooter game free download for pc<br />
- galaxy bubble shooter game free download for pc<br />
- happy dragon bubble shooter game for pc download<br />
- install bubble shooter game on pc<br />
- jungle bubble shooter game free download for pc<br />
- korean superconducting tokamak advanced research (kstar) facility (korea institute of fusion energy) - a version of the classic bubble shooter game!<br />
- latest version of bubble shooter game for pc free download<br />
- magic forest bubble shooter game for pc free download<br />
- new bubble shooter game 2023 free download for pc<br />
- original bubble shooter game free download for pc<br />
- panda pop - bubble shooter game free download for pc<br />
- play the classic arcade puzzle bobble (bubble shooter) on your windows 10 device!<br />
- rainbow island - the story of bubble bobble 2 - a sequel to the classic bubble shooter game!<br />
- rescue pet - a cute and colorful bubble shooter game with adorable animals!<br />
- space bubbles - a futuristic and challenging version of the classic bubble shooter game!<br />
- super bubbles - a fun and addictive twist on the classic bubble shooter game!<br />
- talking tom - a popular character who loves to play the classic bubble shooter game!<br />
- ultimate bubble trouble - a fast-paced and exciting variation of the classic bubble shooter game!<br />
- vintage bubbles - a retro-style and nostalgic version of the classic bubble shooter game!<br />
- witch cat pop - a magical and spooky version of the classic bubble shooter game!<br />
- xmas bubbles - a festive and cheerful version of the classic bubble shooter game!<br />
- yummy candy - a sweet and delicious version of the classic bubble shooter game!<br />
- zuma deluxe - a legendary and addictive marble popper (bubble shooter) game!</p>
- <p>The features of Bubble Shooter vary depending on the version you play, but some of the common ones are:</p>
- <ul>
- <li>Multiple levels with increasing difficulty and variety</li>
- <li>Colorful graphics and animations</li>
- <li>Fun sound effects and music</li>
- <li>Leaderboards and achievements</li>
- <li>Options to customize your game settings</li>
- <li>Offline mode and no internet connection required</li>
- </ul>
- <h3>The benefits of playing Bubble Shooter</h3>
- <p>Besides being fun and entertaining, playing Bubble Shooter can also have some benefits for your brain and mood. Here are some of them:</p>
- <ul>
- <li>It improves your concentration and focus</li>
- <li>It enhances your memory and cognitive skills</li>
- <li>It stimulates your creativity and problem-solving abilities</li>
- <li>It reduces your stress and anxiety levels</li>
- <li>It boosts your mood and happiness</li>
- </ul>
- <h2>How to download Bubble Shooter for PC full version free?</h2>
- <p>Now that you know what is Bubble Shooter and why it is so amazing, you might be wondering how to download it for PC full version free. Well, there are several ways to do that, but we will show you the easiest and safest one. Follow these steps:</p>
- <h3>The requirements and steps to download Bubble Shooter for PC</h3>
- <p>To download Bubble Shooter for PC full version free, you will need two things: a PC with Windows operating system (XP, Vista, 7, 8, or 10) and an emulator software that can run Android apps on your PC. We recommend using BlueStacks, which is one of the most popular and trusted emulator software in the market. You can download it for free from its official website. Here are the steps to download Bubble Shooter for PC using BlueStacks:</p>
- <ol>
- <li>Download and install BlueStacks on your PC from its official website. Follow the instructions on the screen to complete the installation process.</li>
- <li>Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.</li>
- <li>Go to the search bar on the top right corner of the BlueStacks home screen and type "Bubble Shooter". You will see a list of results with different versions of Bubble Shooter games.</li>
- <li>Select the one that you like and click on the "Install" button. This will download and install the game on your PC through BlueStacks.</li>
- <li>Once the installation is done, you can find the game icon on the BlueStacks home screen or in the "My Apps" tab. Click on it to launch the game and enjoy playing Bubble Shooter on your PC.</li>
- </ol>
- <h3>The best websites and sources to download Bubble Shooter for PC</h3>
- <p>If you don't want to use an emulator software to download Bubble Shooter for PC, you can also try some other websites and sources that offer Bubble Shooter games for PC. However, you have to be careful and make sure that they are safe and reliable. Some of the best websites and sources that we recommend are:</p>
- <ul>
- <li>Bubble Shooter.net: This is the official website of Bubble Shooter, where you can play the original version of the game online or download it for PC. The website also offers other bubble games, such as Bubble Spinner, Bubble Hit, and more.</li>
- <li>GameTop.com: This is a website that offers free full version games for PC, including Bubble Shooter. You can download Bubble Shooter for PC without any registration or payment. The website also has other categories of games, such as action, arcade, puzzle, racing, and more.</li>
- <li>Softonic.com: This is a website that provides software and games for various platforms, including PC, mobile, and online. You can download Bubble Shooter for PC from this website for free. The website also has reviews, ratings, and screenshots of the games.</li>
- </ul>
- <h3>The tips and tricks to enjoy Bubble Shooter on PC</h3>
- <p>Playing Bubble Shooter on PC can be more fun and satisfying if you know some tips and tricks to improve your skills and score. Here are some of them:</p>
- <ul>
- <li>Aim carefully and try to hit as many bubbles as possible with one shot. This will give you more points and clear the screen faster.</li>
- <li>Use the walls to bounce your bubbles and reach difficult areas. This will help you pop more bubbles and create combos.</li>
- <li>Look for special bubbles that have different effects, such as bombs, rainbows, stars, and more. They can help you pop more bubbles at once or change their colors.</li>
- <li>Plan ahead and try to create clusters of bubbles of the same color. This will make it easier to pop them later.</li>
- <li>Don't let the bubbles reach the bottom of the screen or you will lose the game. Keep an eye on the bubble meter at the bottom left corner of the screen to see how many bubbles you have left.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Bubble Shooter is a classic and addictive game that you can play on your PC for free. You just need to download it from a reliable source or use an emulator software to run it on your PC. You can also enjoy playing Bubble Shooter online or on your mobile device. Bubble Shooter is a great game to relax and have fun with. It can also improve your concentration, memory, creativity, and mood. So, what are you waiting for? Download Bubble Shooter for PC full version free today and start popping those bubbles!</p>
- <h3>Call to action and invitation to share feedback</h3>
- <p>We hope you found this article helpful and informative. If you did, please share it with your friends and family who might also love playing Bubble Shooter. Also, feel free to leave us a comment below and let us know what you think about Bubble Shooter. Do you have any questions or suggestions? Do you have any favorite versions or features of Bubble Shooter? We would love to hear from you!</p>
- <h4>FAQs</h4>
- <p>Here are some frequently asked questions about Bubble Shooter:</p>
- <ol>
- <li>What is the highest score possible in Bubble Shooter?</li>
- <p>The highest score possible in Bubble Shooter depends on the version you play, but generally it is determined by how many bubbles you pop, how fast you pop them, how many combos you create, and how many special bubbles you use. You can check your score at the top right corner of the screen or on the leaderboards.</p>
- <li>How many levels are there in Bubble Shooter?</li>
- <p>The number of levels in Bubble Shooter also depends on the version you play, but generally there are hundreds or even thousands of levels to complete. Each level has a different layout, difficulty, and goal. You can see the level number at the top left corner of the screen or on the level selection menu.</p>
- <li>How can I save my progress in Bubble Shooter?</li>
- <p>To save your progress in Bubble Shooter, you need to sign in with your Google account or create a profile on the game. This will allow you to sync your data across different devices and platforms. You can also save your progress locally on your PC or online on the game server.</p>
- <li>Is Bubble Shooter safe to download and play?</li>
- <p>Bubble Shooter is safe to download and play as long as you get it from a reputable source or use an emulator software that is secure and reliable. You should also scan your PC for viruses and malware before and after downloading and installing the game. You should also avoid clicking on any suspicious links or ads that might appear on the game or the website.</p>
- <li>Can I play Bubble Shooter with my friends?</li>
- <p>Yes, you can play Bubble Shooter with your friends online or offline. Some versions of Bubble Shooter have a multiplayer mode that allows you to compete or cooperate with other players around the world. You can also play Bubble Shooter with your friends offline by taking turns or sharing the same PC.</p>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Call of Duty Mobile Mod APK Download - Unlock All Weapons Skins and Perks.md DELETED
@@ -1,87 +0,0 @@
1
-
2
- <h1>Hack Call of Duty Mobile APK Download: What You Need to Know</h1>
3
- <p>Call of Duty Mobile is one of the most popular and addictive mobile games in the world. It offers an immersive and thrilling experience of shooting, fighting, and surviving in various modes and maps. However, some players may not be satisfied with the normal gameplay and may want to hack Call of Duty Mobile APK download to gain an unfair advantage over other players.</p>
4
- <h2>hack call of duty mobile apk download</h2><br /><p><b><b>Download File</b> &#10002; &#10002; &#10002; <a href="https://jinyurl.com/2uNUv1">https://jinyurl.com/2uNUv1</a></b></p><br /><br />
5
- <p>Hacking Call of Duty Mobile APK download means modifying or altering the original game files or data to change or enhance some aspects of the game, such as unlocking all weapons, skins, and operators, increasing the damage, accuracy, and speed of the guns, enabling aimbot, wallhack, radar, and other cheats, or bypassing the in-app purchases and getting unlimited credits and COD points.</p>
6
- <p>If you are one of those players who want to hack Call of Duty Mobile APK download, you may be wondering how to do it and what are the risks involved. In this article, we will explain everything you need to know about hacking Call of Duty Mobile APK download, including the methods, the pros and cons, and the tips to avoid getting banned. Read on to find out more!</p>
7
- <h2>How to Hack Call of Duty Mobile APK Download</h2>
8
- <p>There are two main methods that hackers use to hack Call of Duty Mobile APK download. The first one is using a modded APK file, which is a modified version of the original game file that contains the hacks. The second one is using a game hacker tool, which is a software or app that can manipulate the game data in real-time. Let's take a closer look at each method.</p>
9
- <h3>Method 1: Using a Modded APK File</h3>
10
- <h4>What is a modded APK file and how does it work?</h4>
11
- <p>An APK file is the format used by Android devices to install applications. A modded APK file is an altered version of an original APK file that contains some changes or additions that are not authorized by the developers. For example, a modded APK file for Call of Duty Mobile may have all the weapons, skins, and operators unlocked, or have some cheats enabled by default.</p>
12
- <p>hack call of duty mobile apk download happymod<br />
13
- hack call of duty mobile apk download aimbot<br />
14
- hack call of duty mobile apk download menu<br />
15
- hack call of duty mobile apk download chams<br />
16
- hack call of duty mobile apk download modded<br />
17
- hack call of duty mobile apk download unlimited money<br />
18
- hack call of duty mobile apk download no root<br />
19
- hack call of duty mobile apk download anti ban<br />
20
- hack call of duty mobile apk download latest version<br />
21
- hack call of duty mobile apk download obb<br />
22
- hack call of duty mobile apk download season 5<br />
23
- hack call of duty mobile apk download free fire<br />
24
- hack call of duty mobile apk download ios<br />
25
- hack call of duty mobile apk download android 1<br />
26
- hack call of duty mobile apk download rexdl<br />
27
- hack call of duty mobile apk download revdl<br />
28
- hack call of duty mobile apk download offline<br />
29
- hack call of duty mobile apk download online<br />
30
- hack call of duty mobile apk download 2023<br />
31
- hack call of duty mobile apk download 1.6.35<br />
32
- hack call of duty mobile apk download 1.0.38<br />
33
- hack call of duty mobile apk download 1.0.24<br />
34
- hack call of duty mobile apk download 1.0.22<br />
35
- hack call of duty mobile apk download 1.0.19<br />
36
- hack call of duty mobile apk download 1.0.17<br />
37
- hack call of duty mobile apk download 1.0.16<br />
38
- hack call of duty mobile apk download 1.0.15<br />
39
- hack call of duty mobile apk download 1.0.12<br />
40
- hack call of duty mobile apk download 1.0.11<br />
41
- hack call of duty mobile apk download 1.0.10<br />
42
- hack call of duty mobile apk download 1.0.9<br />
43
- hack call of duty mobile apk download 1.0.8<br />
44
- hack call of duty mobile apk download 1.0.6<br />
45
- hack call of duty mobile apk download 1.0.4<br />
46
- hack call of duty mobile apk download 1.0.3<br />
47
- hack call of duty mobile apk download 1.0.2<br />
48
- hack call of duty mobile apk download for pc<br />
49
- hack call of duty mobile apk download for laptop<br />
50
- hack call of duty mobile apk download for windows 10<br />
51
- hack call of duty mobile apk download for macbook pro</p>
52
- <p>A modded APK file works by replacing the original game file on your device with the modified one. This way, when you launch the game, it will run with the hacks already applied. However, this also means that you will not be able to update the game from the official sources, as it will overwrite the modded file with the original one.</p>
53
- <h4>How to find and install a modded APK file for Call of Duty Mobile</h4>
54
- <p>To find a modded APK file for Call of Duty Mobile, you will need to search online for websites or forums that offer such files. However, you need to be careful, as some of these files may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should always scan the files before downloading them and only use trusted sources. Some of the websites that claim to provide modded APK files for Call of Duty Mobile are:</p>
- <ul>
- <li>Hackcodm.com</li>
- <li>Codmobilehack.club</li>
- <li>Codmobilecheat.com</li>
- </ul>
- <p>To install a modded APK file for Call of Duty Mobile, you will need to follow these steps:</p>
- <ol>
- <li>Uninstall the original game from your device if you have it installed.</li>
- <li>Enable the option to install apps from unknown sources in your device settings. This will allow you to install the modded APK file without any restrictions.</li>
- <li>Download the modded APK file from the website of your choice and save it on your device storage.</li>
- <li>Locate the modded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete.</li>
- <li>Launch the game and enjoy the hacks!</li>
- </ol>
- <h4>The pros and cons of using a modded APK file for Call of Duty Mobile</h4>
55
- <p>Using a modded APK file for Call of Duty Mobile has some advantages and disadvantages that you should consider before deciding to use this method. Here are some of them:</p>
56
- | Pros | Cons |
- | --- | --- |
- | You can access all the features and content of the game without spending any money or time. | You may expose your device and data to security risks by downloading and installing unverified files. |
- | You can have an edge over other players by using cheats such as aimbot, wallhack, radar, etc. | You may get detected and banned by the anti-cheat system of the game, which can result in losing your account and progress. |
- | You can customize the game according to your preferences by choosing the mods that suit your playstyle. | You may not be able to update the game or play online with other players who have the original version of the game. |
- <h3>Method 2: Using a Game Hacker Tool</h3>
57
- <h4>What is a game hacker tool and how does it work?</h4>
58
- <p>A game hacker tool is a software or app that can modify or manipulate the game data in real-time while the game is running. Unlike a modded APK file, a game hacker tool does not require you to replace or overwrite the original game file, but rather injects some code or commands into the game memory to change some values or parameters.</p>
59
- <p>A game hacker tool works by scanning and analyzing the game data and finding the variables that control certain aspects of the game, such as health, ammo, credits, COD points, etc. Then, it allows you to change these variables to any value you want, giving you unlimited resources or abilities in the game.</p>
60
- <h4>How to find and use a game hacker tool for Call of Duty Mobile</h4>
61
- <p>To find a game hacker tool for Call of Duty Mobile, you will need to search online for websites or forums that offer such tools. However, you need to be careful, as some of these tools may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should always scan the tools before downloading them and only use trusted sources. Some of the tools that claim to hack Call of Duty Mobile are:</p>
- <ul>
- <li>Game Guardian</li>
- <li>Cheat Engine</li>
- <li>Lucky Patcher</li>
- </ul>
- <p>To use a game hacker tool for Call of Duty Mobile, you will need to follow these steps:</p>
- <ol>
- <li>Install the game hacker tool on your device from the website of your choice.</li>
- <li>Launch the game hacker tool and grant it root access or permission to modify other apps in your device settings.</li>
- <li>Launch Call of Duty Mobile and minimize it by pressing the home button.</li>
- <li>Open the game hacker tool again and select Call of Duty Mobile from the list of running apps.</li>
- <li>Search for the value or parameter that you want to change in the game data using the search function of the tool. For example, if you want to change your credits, enter your current amount of credits in the search box and tap on search.</li>
- <li>The tool will show you all the results that match your search value. Select one or more results that you think are related to your credits and change them to any value you want by tapping on them and entering a new value.</li>
- <li>Go back to Call of Duty Mobile and check if your credits have changed accordingly. If not, you may need to repeat the steps with a different result or value until you find the right one.</li>
- <li>Enjoy the hacks and repeat the process for any other value or parameter that you want to change in the game.</li>
- </ol>
- <h4>The pros and cons of using a game hacker tool for Call of Duty Mobile</h4>
62
- <p>Using a game hacker tool for Call of Duty Mobile has some advantages and disadvantages that you should consider before deciding to use this method. Here are some of them:</p>
63
- | Pros | Cons |
- | --- | --- |
- | You can change any value or parameter in the game data to your liking, giving you unlimited possibilities and customization. | You may expose your device and data to security risks by installing and running unverified tools. |
- | You can use the tool on any version of the game, as long as it is compatible with your device and operating system. | You may get detected and banned by the anti-cheat system of the game, which can result in losing your account and progress. |
- | You can use the tool on other games as well, as long as they have similar data structures and formats. | You may encounter errors, crashes, or glitches in the game due to the changes in the game data. |
- <h2>How to Avoid Getting Banned for Hacking Call of Duty Mobile APK Download</h2>
64
- <h4>The anti-cheat system of Call of Duty Mobile and how it detects hackers</h4>
65
- <p>Call of Duty Mobile has a sophisticated anti-cheat system that monitors and analyzes the game data and behavior of all players. The anti-cheat system can detect hackers by using various methods, such as:</p>
- <ul>
- <li>Checking for any modifications or alterations in the game files or data.</li>
- <li>Comparing the game data and performance of each player with the expected or normal values.</li>
- <li>Detecting any abnormal or suspicious actions or movements of each player in the game.</li>
- <li>Receiving reports or complaints from other players who witness or encounter hackers in the game.</li>
- </ul>
- <p>The anti-cheat system can also update itself regularly to keep up with the latest hacks and cheats that hackers use.</p>
66
- <h4>The consequences of getting banned for hacking Call of Duty Mobile APK download</h4>
67
- <p>If you get caught hacking Call of Duty Mobile APK download, you will face serious consequences that will ruin your gaming experience and reputation. Some of the consequences are:</p>
- <ul>
- <li>You will receive a warning message or notification from the game developers or moderators.</li>
- <li>You will be temporarily suspended or banned from playing the game for a certain period of time, depending on the severity and frequency of your offense.</li>
- <li>You will be permanently banned from playing the game, which means you will lose your account and all your progress and achievements in the game.</li>
- <li>You will be blacklisted from playing any other games developed by Activision or Tencent, which are the publishers of Call of Duty Mobile.</li>
- <li>You will be reported to the authorities or legal entities for violating the terms of service and user agreement of the game.</li>
- </ul>
68
- <h4>The tips and tricks to avoid getting banned for hacking Call of Duty Mobile APK download</h4>
69
- <p>If you still want to hack Call of Duty Mobile APK download, you should follow some tips and tricks to avoid getting banned by the anti-cheat system. Here are some of them:</p>
- <ul>
- <li>Use only trusted and verified sources for downloading modded APK files or game hacker tools. Scan them before installing them on your device.</li>
- <li>Use only updated and compatible versions of modded APK files or game hacker tools that match your device and operating system specifications.</li>
- <li>Use only subtle and discreet hacks that do not affect the game balance or fairness too much, such as increasing your health or ammo slightly, rather than enabling aimbot or wallhack that are obvious and noticeable.</li>
- <li>Use hacks only occasionally and sparingly, rather than constantly and excessively, to avoid raising suspicion or attracting attention from other players or moderators.</li>
- <li>Do not brag or boast about your hacks in public chat rooms or social media platforms, as this may invite reports or complaints from other players who may report you to the anti-cheat system.</li>
- <li>Do not use hacks in ranked matches or tournaments, as this may result in disqualification or banishment from the game.</li>
- </ul>
70
- <h1>Conclusion</h1>
71
- <p>Hacking Call of Duty Mobile APK download is a risky and unethical practice that can ruin your gaming experience and reputation. It can also get you banned from playing the game or any other games developed by Activision or Tencent. Therefore, we do not recommend hacking Call of Duty Mobile APK download, as it is not worth it.</p>
72
- <p>Instead, we suggest you play Call of Duty Mobile APK download normally and fairly, as it is more fun and rewarding. You can improve your skills and performance by practicing regularly, learning from other players, watching tutorials and guides, joining clans and communities, and participating in events and challenges. You can also support the game developers by purchasing credits and COD points legally and legitimately, which will allow you to access more features and content of the game and enhance your gaming experience.</p>
73
- <p>We hope this article has helped you understand everything you need to know about hacking Call of Duty Mobile APK download. If you have any questions or comments, feel free to leave them below. Thank you for reading and happy gaming!</p>
74
- <h2>FAQs</h2>
75
- <p>Here are some of the frequently asked questions about hacking Call of Duty Mobile APK download:</p>
76
- <h4>Q: Is hacking Call of Duty Mobile APK download illegal?</h4>
77
- <p>A: Hacking Call of Duty Mobile APK download is not illegal per se, as it does not involve breaking any laws or regulations. However, it is against the terms of service and user agreement of the game, which you agree to when you install and play the game. Therefore, hacking Call of Duty Mobile APK download is a breach of contract and can result in legal actions from the game developers or publishers.</p>
78
- <h4>Q: Is hacking Call of Duty Mobile APK download safe?</h4>
79
- <p>A: Hacking Call of Duty Mobile APK download is not safe, as it can expose your device and data to security risks such as viruses, malware, spyware, phishing, etc. It can also damage your device or corrupt your game data, causing errors, crashes, or glitches in the game. Moreover, hacking Call of Duty Mobile APK download can get you banned from playing the game or any other games developed by Activision or Tencent, which can result in losing your account and progress in the game.</p>
80
- <h4>Q: Is hacking Call of Duty Mobile APK download worth it?</h4>
81
- <p>A: Hacking Call of Duty Mobile APK download is not worth it, as it can ruin your gaming experience and reputation. It can also get you banned from playing the game or any other games developed by Activision or Tencent. Therefore, hacking Call of Duty Mobile APK download is not worth the risk or the hassle.</p>
82
- <h4>Q: How can I report a hacker in Call of Duty Mobile?</h4>
83
- <p>A: If you encounter or witness a hacker in Call of Duty Mobile, you can report them by following these steps:</p>
- <ol>
- <li>Tap on the player's name or profile icon in the game lobby or match results screen.</li>
- <li>Tap on the report button (the exclamation mark icon) at the bottom right corner of the screen.</li>
- <li>Select the reason for reporting the player, such as cheating, abusive chat, inappropriate name, etc.</li>
- <li>Tap on the submit button to send your report to the game moderators.</li>
- </ol>
- <p>The game moderators will review your report and take appropriate actions against the hacker.</p>
84
- <h4>Q: How can I prevent hackers from ruining my game in Call of Duty Mobile?</h4>
85
- <p>A: There is no sure way to prevent hackers from ruining your game in Call of Duty Mobile, as they can join any match or mode at any time. However, you can try some tips to minimize their impact on your game, such as:</p>
- <ul>
- <li>Playing with your friends or clan members who are trustworthy and fair.</li>
- <li>Playing in private matches or custom rooms that require passwords or invitations to join.</li>
- <li>Playing in ranked matches or tournaments that have stricter rules and regulations for cheating.</li>
- <li>Reporting any hacker that you encounter or witness in the game to the game moderators.</li>
- </ul>
- <p>By following these tips, you can reduce the chances of meeting hackers in Call of Duty Mobile and enjoy the game more.</p>
spaces/1phancelerku/anime-remove-background/Download My Talking Angela Full APK and Join the Adventure with Your Furry Friend.md DELETED
@@ -1,67 +0,0 @@
1
-
2
- <h1>My Talking Angela Full APK: A Fun and Interactive Game for Android Users</h1>
3
- <p>Do you love playing casual games on your Android device? Do you want to have a cute and adorable virtual pet that you can take care of and play with? If you answered yes to these questions, then you should try My Talking Angela, one of the most popular games in the Google Play Store. And if you want to enjoy the game to the fullest, you should download My Talking Angela full apk, which gives you access to all the features and content that the game has to offer. In this article, we will tell you everything you need to know about My Talking Angela full apk, including what it is, what are its features, why you should download it, and how to download and install it on your device.</p>
4
- <h2>What is My Talking Angela?</h2>
5
- <p>My Talking Angela is a casual game developed by Outfit7, the same company that created the famous Talking Tom series. The game is similar to other virtual pet games, where you have to adopt, feed, groom, dress up, and play with your pet. However, My Talking Angela is not just any pet; she is a stylish and fashionable cat that loves to talk, sing, dance, and have fun. She also has a personality of her own, and she will react differently depending on how you treat her. You can also interact with her by tapping, swiping, or speaking to her. She will repeat what you say in a funny voice, and she will also respond to your gestures and emotions.</p>
6
- <h2>my talking angela full apk</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://jinyurl.com/2uNOvS">https://jinyurl.com/2uNOvS</a></b></p><br /><br />
7
- <h3>Features of My Talking Angela</h3>
8
- <p>My Talking Angela has many features that make it an entertaining and engaging game for Android users. Here are some of them:</p>
9
- <h4>Adopt and nurture your own Angela</h4>
10
- <p>You can start the game by adopting a baby Angela and taking care of her as she grows up. You have to feed her, bathe her, brush her teeth, put her to bed, and make sure she is happy and healthy. You can also watch her grow from a cute kitten to a beautiful cat.</p>
12
- <h4>Dress up and customize your Angela</h4>
13
- <p>You can express your creativity and style by dressing up your Angela in different outfits and accessories. You can choose from hundreds of items, such as dresses, shoes, hats, sunglasses, jewelry, makeup, and more. You can also change her fur color, eye color, hair style, and facial expressions. You can create different looks for different occasions, such as casual, formal, party, or holiday.</p>
14
- <h4>Play mini-games and collect coins</h4>
15
- <p>You can have fun with your Angela by playing various mini-games with her. You can play games like Happy Connect, Bubble Shooter, Brick Breaker, and more. You can also earn coins by playing these games, which you can use to buy more items for your Angela.</p>
16
- <h4>Interact with Angela and her friends</h4>
17
- <p>You can chat with your Angela by using the chat feature in the game. You can ask her questions, tell her jokes, or just have a conversation with her. She will reply with witty and funny answers. You can also meet her friends in the game, such as Tom, Ginger, Hank, Ben, and more. You can visit their homes or invite them over to yours.</p>
18
- <h2>Why download My Talking Angela full apk?</h2>
19
- <p>If you are wondering why you should download My Talking Angela full apk instead of the regular version from the Google Play Store, here are some reasons why you should do so:</p>
20
- <h3>Benefits of downloading the full apk</h3>
21
- <p>Downloading the full apk of My Talking Angela gives you several advantages that you cannot get from the regular version. Here are some of them:</p>
22
- <h4>Unlock all the outfits and accessories</h4>
23
- <p>One of the main attractions of My Talking Angela is the ability to dress up and customize your Angela in various ways. However, not all the items are available for free in the regular version. Some of them require you to pay with real money or watch ads to unlock them. This can be frustrating and time-consuming, especially if you want to try different combinations and styles. But with the full apk, you can unlock all the outfits and accessories without spending a dime or watching any ads. You can have access to the entire wardrobe and create your own fashion show with your Angela.</p>
24
- <h4>Get unlimited coins and diamonds</h4>
25
- <p>Another benefit of downloading the full apk is that you can get unlimited coins and diamonds in the game. Coins and diamonds are the main currencies in My Talking Angela, which you can use to buy more items, upgrade your home, or unlock new features. However, earning them in the regular version can be slow and tedious, especially if you want to buy expensive or rare items. You may also be tempted to spend real money or watch ads to get more coins and diamonds. But with the full apk, you don't have to worry about running out of coins and diamonds ever again. You can get as many as you want and buy whatever you want without any limitations.</p>
26
- <h4>Enjoy ad-free gaming experience</h4>
27
- <p>The last but not least benefit of downloading the full apk is that you can enjoy an ad-free gaming experience. Ads can be annoying and distracting, especially when they pop up in the middle of your gameplay or when you are trying to access a feature or item. They can also consume your data and battery, which can affect your device's performance. But with the full apk, you can say goodbye to ads forever. You can play My Talking Angela without any interruptions or disruptions from ads. You can also save your data and battery and enjoy a smoother and faster gameplay.</p>
28
- <h3>How to download and install My Talking Angela full apk?</h3>
29
- <p>If you are convinced that downloading My Talking Angela full apk is a good idea, then you may be wondering how to do it. Don't worry, it's very easy and simple. Just follow these steps:</p>
30
- <h4>Step 1: Download the apk file from a trusted source</h4>
31
- <p>The first step is to download the apk file of My Talking Angela full from a trusted source. You can search for it online or use this link to download it directly. Make sure that the file is compatible with your device's Android version and has no viruses or malware.</p>
32
- <h4>Step 2: Enable unknown sources on your device settings</h4>
33
- <p>The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may also need to grant permission for your browser or file manager to install apps.</p>
34
- <h4>Step 3: Install the apk file and launch the game</h4>
35
- <p>The final step is to install the apk file and launch the game. To do this, locate the downloaded file on your device storage, tap on it, and follow the instructions on the screen. Once the installation is complete, you can open the game and enjoy My Talking Angela full apk.</p>
36
- <h2>Conclusion</h2>
37
- <p>My Talking Angela is a fun and interactive game for Android users who love virtual pets and casual games. It has many features that make it entertaining and engaging, such as adopting and nurturing your own Angela, dressing up and customizing your Angela, playing mini-games and collecting coins, and interacting with Angela and her friends. However, if you want to enjoy the game to the fullest, you should download My Talking Angela full apk, which gives you access to all the features and content that the game has to offer. You can unlock all the outfits and accessories, get unlimited coins and diamonds, and enjoy ad-free gaming experience. Downloading My Talking Angela full apk is easy and simple; just follow these steps: download the apk file from a trusted source, enable unknown sources on your device settings, install the apk file and launch the game. If you are looking for a fun and interactive game for your Android device, you should definitely try My Talking Angela full apk. You will not regret it.</p>
38
- <h2>FAQs</h2>
39
- <p>Here are some frequently asked questions about My Talking Angela full apk:</p>
40
- <table>
41
- <tr>
42
- <th>Question</th>
43
- <th>Answer</th>
44
- </tr>
45
- <tr>
46
- <td>Is My Talking Angela full apk safe to download and install?</td>
47
- <td>Yes, My Talking Angela full apk is safe to download and install, as long as you get it from a trusted source and scan it for viruses or malware before installing it. However, you should be careful when downloading any apk file from the internet, as some of them may contain harmful or malicious content.</td>
48
- </tr>
49
- <tr>
50
- <td>Will My Talking Angela full apk work on my device?</td>
51
- <td>My Talking Angela full apk should work on most Android devices that have Android 4.4 or higher. However, some devices may not be compatible with the game or the apk file, so you should check the requirements and specifications before downloading and installing it.</td>
52
- </tr>
53
- <tr>
54
- <td>Will I lose my progress or data if I download My Talking Angela full apk?</td>
55
- <td>No, you will not lose your progress or data if you download My Talking Angela full apk. The game will automatically sync your progress and data with your Google account, so you can continue playing where you left off. However, you should always backup your data before installing any apk file, just in case something goes wrong.</td>
56
- </tr>
57
- <tr>
58
- <td>Can I play My Talking Angela full apk offline?</td>
59
- <td>Yes, you can play My Talking Angela full apk offline, without an internet connection. However, some features and content may not be available or updated when you play offline, such as the chat feature, the friends feature, or the daily rewards. You should also connect to the internet occasionally to sync your progress and data with your Google account.</td>
60
- </tr>
61
- <tr>
62
- <td>Can I play My Talking Angela full apk with my friends?</td>
63
- <td>Yes, you can play My Talking Angela full apk with your friends, by using the friends feature in the game. You can add your friends by using their codes or by connecting your game with your Facebook account. You can then visit their homes or invite them over to yours, chat with them, send them gifts, or play mini-games with them.</td>
64
- </tr>
65
- </table>
spaces/1phancelerku/anime-remove-background/Download Orange Loan APK and Get High-Limit Loans without Collateral.md DELETED
@@ -1,175 +0,0 @@
1
-
2
- <h1>Orange Loan APK: A Review of the Unsecured, High-Limit, Low-Interest Loan Platform</h1>
3
- <p>If you are looking for a quick and easy way to borrow money without collateral, you might want to check out Orange Loan APK. This is a mobile app that offers unsecured, high-limit, low-interest loans to eligible borrowers in Thailand. In this article, we will review the features, benefits, pros, cons, and tips of using Orange Loan APK. We will also show you how to download and install the app on your Android device, and answer some frequently asked questions about it.</p>
4
- <h2>orange loan apk</h2><br /><p><b><b>Download File</b> &#10031;&#10031;&#10031; <a href="https://jinyurl.com/2uNQRR">https://jinyurl.com/2uNQRR</a></b></p><br /><br />
5
- <h2>What is Orange Loan APK?</h2>
6
- <p>Orange Loan APK is an online lending platform that provides unsecured loans to borrowers in Thailand. The app is developed by Trendline Finance Ltd, a registered company in Bangkok. The app claims to offer loans ranging from 6,000 to 30,000 baht, with loan terms from 91 to 120 days, and annual interest rates from 10% to 24%. The app also claims to support repeat borrowing, meaning that borrowers who repay their loans on time can increase their credit limit step by step.</p>
7
- <h3>Features and benefits of Orange Loan APK</h3>
8
- <p>Some of the features and benefits of using Orange Loan APK are:</p>
9
- <ul>
10
- <li>No collateral required: You don't need to provide any assets or guarantors to secure your loan.</li>
11
- <li>High limit: You can borrow up to 30,000 baht depending on your credit score and repayment history.</li>
12
- <li>Low interest: You can enjoy interest rates as low as 10% per year, which is lower than many other online lenders.</li>
13
- <li>Fast approval: You can get approved within minutes after submitting your application and verifying your identity.</li>
14
- <li>Quick transfer: You can receive money in your bank account within 24 hours after approval.</li>
15
- <li>Flexible repayment: You can choose your repayment schedule according to your income and cash flow.</li>
16
- <li>Easy access: You can apply for a loan anytime and anywhere using your smartphone.</li>
17
- </ul>
18
- <h4>How to apply for a loan with Orange Loan APK</h4>
19
- <p>The application process for Orange Loan APK is simple and straightforward. Here are the steps you need to follow:</p>
20
- <ol>
21
- <li>Download and install the app from Google Play Store or APKCombo.</li>
22
- <li>Fill in your personal information, such as name, phone number, ID number, address, etc.</li>
23
- <li>Verify your identity by uploading a photo of your ID card and a selfie.</li>
24
- <li>Submit your application and wait for approval.</li>
25
- <li>Check your loan details and confirm your agreement.</li>
26
- <li>Receive money in your bank account within 24 hours.</li>
27
- </ol>
28
- <h4>How to repay a loan with Orange Loan APK</h4>
29
- <p>The repayment process for Orange Loan APK is also easy and convenient. Here are the steps you need to follow:</p>
30
- <ol>
31
- <li>Log in to the app and check your repayment schedule and amount.</li>
32
- <li>Choose your preferred payment method, such as bank transfer, ATM, or online banking.</li>
33
- <li>Make your payment before the due date and keep the receipt as proof.</li>
34
- <li>Check your loan status and balance in the app.</li>
35
- </ol>
36
- <h4>Pros and cons of Orange Loan APK</h4>
37
- <p>Like any other online lending platform, Orange Loan APK has its own advantages and disadvantages. Here are some of them:</p>
38
- <table>
39
- <tr>
40
- <th>Pros</th>
41
- <th>Cons</th>
42
- </tr>
43
- <tr>
44
- <td>No collateral required</td>
45
- <td>High risk of default and fraud</td>
46
- </tr>
47
- <tr>
48
- <td>High limit</td>
49
- <td>Strict eligibility criteria</td>
50
- </tr>
51
- <tr>
52
- <td>Low interest</td>
53
- <td>Late fees and penalties</td>
54
- </tr>
55
- <tr>
56
- <td>Fast approval</td>
57
- <td>Limited customer service</td>
58
- </tr>
59
- <tr>
60
- <td>Quick transfer</td>
61
- <td>Poor data security and privacy</td>
62
- </tr>
63
- <tr>
64
- <td>Flexible repayment</td>
65
- <td>Negative impact on credit score</td>
66
- </tr>
67
- <tr>
68
- <td>Easy access</td>
69
- <td>Addictive and irresponsible borrowing</td>
70
- </tr>
71
- </table>
72
- <h2>How to download and install Orange Loan APK on your Android device</h2>
73
- <p>If you are interested in trying out Orange Loan APK, you will need to download and install it on your Android device. Here is how you can do that:</p>
74
- <h3>Step-by-step guide to download and install Orange Loan APK</h3>
75
- <p>Follow these steps to download and install Orange Loan APK on your Android device:</p>
76
- <p>orange loan app download<br />
77
- orange loan apk latest version<br />
78
- orange loan online application<br />
79
- orange loan thailand review<br />
80
- orange loan customer service number<br />
81
- orange loan interest rate calculator<br />
82
- orange loan repayment schedule<br />
83
- orange loan eligibility criteria<br />
84
- orange loan promo code 2023<br />
85
- orange loan referral program<br />
86
- orange loan app for android<br />
87
- orange loan apk free download<br />
88
- orange loan online login<br />
89
- orange loan thailand contact<br />
90
- orange loan customer feedback<br />
91
- orange loan interest rate comparison<br />
92
- orange loan repayment options<br />
93
- orange loan eligibility check<br />
94
- orange loan promo code new user<br />
95
- orange loan referral bonus<br />
96
- orange loan app for ios<br />
97
- orange loan apk download for pc<br />
98
- orange loan online registration<br />
99
- orange loan thailand address<br />
100
- orange loan customer support email<br />
101
- orange loan interest rate reduction<br />
102
- orange loan repayment extension<br />
103
- orange loan eligibility test<br />
104
- orange loan promo code existing user<br />
105
- orange loan referral link<br />
106
- orange loan app update<br />
107
- orange loan apk mod<br />
108
- orange loan online verification<br />
109
- orange loan thailand website<br />
110
- orange loan customer complaints<br />
111
- orange loan interest rate formula<br />
112
- orange loan repayment calculator<br />
113
- orange loan eligibility requirements<br />
114
- orange loan promo code first time user<br />
115
- orange loan referral code</p>
116
- <ol>
117
- <li>Go to Google Play Store or APKCombo and search for Orange Loan APK.</li>
118
- <li>Select the app from the search results and tap on the Install button.</li>
119
- <li>Wait for the app to download and install on your device.</li>
120
- <li>Open the app and grant the necessary permissions, such as access to your camera, contacts, location, etc.</li>
121
- <li>Create an account or log in with your existing account.</li>
122
- <li>Start using the app to apply for a loan or manage your loan status.</li>
123
- </ol>
124
- <h3>Tips and tricks to use Orange Loan APK safely and effectively</h3>
125
- <p>To use Orange Loan APK safely and effectively, you should follow these tips and tricks:</p>
126
- <ul>
127
- <li>Read the terms and conditions carefully before agreeing to a loan contract.</li>
128
- <li>Borrow only what you need and can afford to repay.</li>
129
- <li>Compare the interest rates and fees of different online lenders before choosing one.</li>
130
- <li>Repay your loan on time to avoid late fees and penalties.</li>
131
- <li>Check your loan status and balance regularly in the app.</li>
132
- <li>Avoid sharing your personal or financial information with anyone else.</li>
133
- <li>Delete the app from your device when you are done using it.</li>
- </ul>
134
- <h2>Frequently asked questions about Orange Loan APK</h2>
135
- <p>Here are some of the most frequently asked questions about Orange Loan APK:</p>
136
- <h3>Is Orange Loan APK safe and legal?</h3>
137
- <p>Orange Loan APK is a legitimate online lending platform that is registered with the Thai Ministry of Commerce. However, it is not regulated by the Bank of Thailand or any other financial authority. Therefore, it is not subject to the same rules and standards as traditional banks or licensed lenders. This means that there is a higher risk of default, fraud, or data breach when using Orange Loan APK. You should exercise caution and discretion when using this app, and only borrow from reputable sources.</p>
138
- <h3>What are the eligibility criteria for Orange Loan APK?</h3>
139
- <p>To be eligible for a loan with Orange Loan APK, you must meet the following criteria:</p>
140
- <ul>
141
- <li>You must be a Thai citizen with a valid ID card.</li>
142
- <li>You must be at least 20 years old.</li>
143
- <li>You must have a stable income source and a bank account.</li>
144
- <li>You must have a good credit history and score.</li>
- </ul>
145
- <h3>What are the interest rates and fees for Orange Loan APK?</h3>
146
- <p>The interest rates and fees for Orange Loan APK vary depending on your loan amount, term, and credit score. The app claims to offer annual interest rates from 10% to 24%, which are lower than many other online lenders. However, you should also consider the other charges that may apply, such as origination fee, service fee, late fee, penalty fee, etc. You should read the loan contract carefully before signing it, and make sure you understand all the costs involved.</p>
147
- <h3>How long does it take to get approved and receive money from Orange Loan APK?</h3>
148
- <p>The approval process for Orange Loan APK is fast and easy. You can get approved within minutes after submitting your application and verifying your identity. The money transfer process is also quick and convenient. You can receive money in your bank account within 24 hours after approval. However, this may vary depending on your bank's processing time and availability.</p>
- <h3>How can I contact Orange Loan APK customer service?</h3>
149
- <p>If you have any questions, complaints, or feedback about Orange Loan APK, you can contact their customer service team through the following channels:</p>
150
- <ul>
151
- <li>Phone: +66 2 026 3299</li>
152
- <li>Email: [email protected]</li>
153
- <li>Facebook: https://www.facebook.com/OrangeLoanTH/</li>
154
- <li>Line: @orangeloan</li>
155
- </ul>
156
- <p>The customer service team is available from Monday to Friday, from 9:00 am to 6:00 pm.</p>
157
- <h2>Conclusion</h2>
158
- <p>Orange Loan APK is an online lending platform that offers unsecured, high-limit, low-interest loans to borrowers in Thailand. The app has some attractive features and benefits, such as fast approval, quick transfer, flexible repayment, and easy access. However, the app also has some drawbacks and risks, such as high default and fraud rate, strict eligibility criteria, late fees and penalties, poor data security and privacy, negative impact on credit score, and addictive and irresponsible borrowing. Therefore, you should use the app with caution and discretion, and only borrow what you need and can afford to repay. You should also compare the interest rates and fees of different online lenders before choosing one, and read the terms and conditions carefully before agreeing to a loan contract. You should also contact the customer service team if you have any issues or concerns about the app.</p>
159
- <p>We hope this article has given you a comprehensive review of Orange Loan APK. If you have any questions or comments about the app, feel free to leave them below. Thank you for reading!</p>
160
- <h2>Frequently asked questions about Orange Loan APK</h2>
161
- <p>Here are some of the most frequently asked questions about Orange Loan APK:</p>
162
- <ol>
163
- <li>What is Orange Loan APK?</li>
164
- <p>Orange Loan APK is an online lending platform that provides unsecured loans to borrowers in Thailand.</p>
165
- <li>How does Orange Loan APK work?</li>
166
- <p>Orange Loan APK works by connecting borrowers with lenders through a mobile app. Borrowers can apply for a loan anytime and anywhere using their smartphone. Lenders can approve or reject the loan application within minutes. Borrowers can receive money in their bank account within 24 hours after approval.</p>
167
- <li>What are the advantages and disadvantages of Orange Loan APK?</li>
168
- <p>The advantages of Orange Loan APK are that it offers no collateral required, high limit, low interest, fast approval, quick transfer, flexible repayment, and easy access. The disadvantages of Orange Loan APK are that it has high risk of default and fraud, strict eligibility criteria, late fees and penalties, limited customer service, poor data security and privacy, negative impact on credit score, and addictive and irresponsible borrowing.</p>
169
- <li>How can I download and install Orange Loan APK on my Android device?</li>
170
- <p>You can download and install Orange Loan APK on your Android device by going to Google Play Store or APKCombo and searching for Orange Loan APK. Then you can select the app from the search results and tap on the Install button. After that, you can open the app and grant the necessary permissions. Then you can create an account or log in with your existing account.</p>
171
- <li>How can I contact Orange Loan APK customer service?</li>
172
- <p>You can contact Orange Loan APK customer service by phone (+66 2 026 3299), email ([email protected]), Facebook (https://www.facebook.com/OrangeLoanTH/), or Line (@orangeloan). The customer service team is available from Monday to Friday, from 9:00 am to 6:00 pm.</p>
173
- </ol>
spaces/7hao/bingo/src/lib/bots/bing/utils.ts DELETED
@@ -1,87 +0,0 @@
1
- import { ChatResponseMessage, BingChatResponse } from './types'
2
-
3
- export function convertMessageToMarkdown(message: ChatResponseMessage): string {
4
- if (message.messageType === 'InternalSearchQuery') {
5
- return message.text
6
- }
7
- for (const card of message.adaptiveCards??[]) {
8
- for (const block of card.body) {
9
- if (block.type === 'TextBlock') {
10
- return block.text
11
- }
12
- }
13
- }
14
- return ''
15
- }
16
-
17
- const RecordSeparator = String.fromCharCode(30)
18
-
19
- export const websocketUtils = {
20
- packMessage(data: any) {
21
- return `${JSON.stringify(data)}${RecordSeparator}`
22
- },
23
- unpackMessage(data: string | ArrayBuffer | Blob) {
24
- if (!data) return {}
25
- return data
26
- .toString()
27
- .split(RecordSeparator)
28
- .filter(Boolean)
29
- .map((s) => {
30
- try {
31
- return JSON.parse(s)
32
- } catch (e) {
33
- return {}
34
- }
35
- })
36
- },
37
- }
38
-
39
- export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise<string | undefined> {
40
- const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`,
41
- {
42
- method: 'HEAD',
43
- headers,
44
- redirect: 'manual'
45
- },
46
- );
47
-
48
- if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) {
49
- throw new Error('Request failed: please check that the cookie is still valid')
50
- }
51
-
52
- const resultId = RegExp.$1;
53
- let count = 0
54
- const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`;
55
-
56
- do {
57
- await sleep(3000);
58
- const content = await fetch(imageThumbUrl, { headers, method: 'GET' })
59
-
60
- // @ts-ignore
61
- if (content.headers.get('content-length') > 1) {
62
- const text = await content.text()
63
- return (text?.match(/<img class="mimg"((?!src).)+src="[^"]+/mg)??[])
64
- .map(target => target?.split('src="').pop()?.replace(/&amp;/g, '&'))
65
- .map(img => `![${prompt}](${img})`).join(' ')
66
- }
67
- } while (count++ < 10);
68
- }
69
-
70
-
71
- export async function* streamAsyncIterable(stream: ReadableStream) {
72
- const reader = stream.getReader()
73
- try {
74
- while (true) {
75
- const { done, value } = await reader.read()
76
- if (done) {
77
- return
78
- }
79
- yield value
80
- }
81
- } finally {
82
- reader.releaseLock()
83
- }
84
- }
85
-
86
- export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms))
87
-
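Editorial note: `packMessage`/`unpackMessage` above implement the chat WebSocket framing, in which every JSON payload is terminated by the ASCII record separator (character 30, 0x1E). The following Python sketch of the same framing is illustrative only; the function names are ours, not part of the source:

```python
import json

RS = "\x1e"  # ASCII 30, the record separator appended after every message

def pack_message(data: dict) -> str:
    # Serialize one message and terminate it with the record separator.
    return json.dumps(data) + RS

def unpack_message(raw: str) -> list:
    # Split a raw frame on the separator and parse each non-empty chunk,
    # falling back to an empty dict on malformed JSON, as the TS code does.
    messages = []
    for chunk in filter(None, raw.split(RS)):
        try:
            messages.append(json.loads(chunk))
        except json.JSONDecodeError:
            messages.append({})
    return messages
```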
spaces/801artistry/RVC801/julius/utils.py DELETED
@@ -1,101 +0,0 @@
1
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
2
- # Author: adefossez, 2020
3
- """
4
- Non signal processing related utilities.
5
- """
6
-
7
- import inspect
8
- import typing as tp
9
- import sys
10
- import time
11
-
12
-
13
- def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
14
- overrides: dict = {}):
15
- """
16
- Return a simple representation string for `obj`.
17
- If `attrs` is not None, it should be a list of attributes to include.
18
- """
19
- params = inspect.signature(obj.__class__).parameters
20
- attrs_repr = []
21
- if attrs is None:
22
- attrs = list(params.keys())
23
- for attr in attrs:
24
- display = False
25
- if attr in overrides:
26
- value = overrides[attr]
27
- elif hasattr(obj, attr):
28
- value = getattr(obj, attr)
29
- else:
30
- continue
31
- if attr in params:
32
- param = params[attr]
33
- if param.default is inspect._empty or value != param.default: # type: ignore
34
- display = True
35
- else:
36
- display = True
37
-
38
- if display:
39
- attrs_repr.append(f"{attr}={value}")
40
- return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
41
-
42
-
43
- class MarkdownTable:
44
- """
45
- Simple MarkdownTable generator. The column titles should be large enough
46
- for the lines content. This will right align everything.
47
-
48
- >>> import io # we use io purely for test purposes, default is sys.stdout.
49
- >>> file = io.StringIO()
50
- >>> table = MarkdownTable(["Item Name", "Price"], file=file)
51
- >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"])
52
- >>> print(file.getvalue().strip()) # Strip for test purposes
53
- | Item Name | Price |
54
- |-----------|-------|
55
- | Honey | 5 |
56
- | Car | 5,000 |
57
- """
58
- def __init__(self, columns, file=sys.stdout):
59
- self.columns = columns
60
- self.file = file
61
-
62
- def _writeln(self, line):
63
- self.file.write("|" + "|".join(line) + "|\n")
64
-
65
- def header(self):
66
- self._writeln(f" {col} " for col in self.columns)
67
- self._writeln("-" * (len(col) + 2) for col in self.columns)
68
-
69
- def line(self, line):
70
- out = []
71
- for val, col in zip(line, self.columns):
72
- val = format(val, '>' + str(len(col)))
73
- out.append(" " + val + " ")
74
- self._writeln(out)
75
-
76
-
77
- class Chrono:
78
- """
79
- Measures ellapsed time, calling `torch.cuda.synchronize` if necessary.
80
- `Chrono` instances can be used as context managers (e.g. with `with`).
81
- Upon exit of the block, you can access the duration of the block in seconds
82
- with the `duration` attribute.
83
-
84
- >>> with Chrono() as chrono:
85
- ... _ = sum(range(10_000))
86
- ...
87
- >>> print(chrono.duration < 10) # Should be true unless on a really slow computer.
88
- True
89
- """
90
- def __init__(self):
91
- self.duration = None
92
-
93
- def __enter__(self):
94
- self._begin = time.time()
95
- return self
96
-
97
- def __exit__(self, exc_type, exc_value, exc_tracebck):
98
- import torch
99
- if torch.cuda.is_available():
100
- torch.cuda.synchronize()
101
- self.duration = time.time() - self._begin
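Editorial note: a minimal sketch of how `simple_repr` behaves, using a hypothetical class (`LowPass` is illustrative, not part of this file). Attributes that still equal their constructor defaults are omitted from the output:

```python
class LowPass:
    def __init__(self, cutoff: float = 0.25, zeros: int = 8):
        self.cutoff = cutoff
        self.zeros = zeros

    def __repr__(self):
        return simple_repr(self)

print(LowPass(cutoff=0.1))  # LowPass(cutoff=0.1) -- `zeros` is omitted, it equals its default
```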
spaces/A00001/bingothoo/src/lib/hooks/use-at-bottom.tsx DELETED
@@ -1,23 +0,0 @@
1
- import * as React from 'react'
2
-
3
- export function useAtBottom(offset = 0) {
4
- const [isAtBottom, setIsAtBottom] = React.useState(false)
5
-
6
- React.useEffect(() => {
7
- const handleScroll = () => {
8
- setIsAtBottom(
9
- window.innerHeight + window.scrollY >=
10
- document.body.offsetHeight - offset
11
- )
12
- }
13
-
14
- window.addEventListener('scroll', handleScroll, { passive: true })
15
- handleScroll()
16
-
17
- return () => {
18
- window.removeEventListener('scroll', handleScroll)
19
- }
20
- }, [offset])
21
-
22
- return isAtBottom
23
- }
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/report_results.py DELETED
@@ -1,37 +0,0 @@
1
- from pathlib import Path
2
- import argparse
3
- import numpy as np
4
-
5
- parser = argparse.ArgumentParser()
6
- parser.add_argument("--input", help="input filename", type=str, nargs="+")
7
- parser.add_argument("--output", help="output result file", default=None)
8
-
9
- args = parser.parse_args()
10
-
11
-
12
- scores = {}
13
- for path in args.input:
14
- with open(path, "r") as reader:
15
- for line in reader.readlines():
16
- metric, score = line.strip().split(": ")
17
- score = float(score)
18
- if metric not in scores:
19
- scores[metric] = []
20
- scores[metric].append(score)
21
-
22
- if len(scores) == 0:
23
- print("No experiment directory found, wrong path?")
24
- exit(1)
25
-
26
- with open(args.output, "w") as writer:
27
- print("Average results: ", file=writer)
28
- for metric, score in scores.items():
29
- score = np.array(score)
30
- mean = np.mean(score)
31
- std = np.std(score)
32
- print(f"{metric}: {mean:.3f} (±{std:.3f})", file=writer)
33
- print("", file=writer)
34
- print("Best results: ", file=writer)
35
- for metric, score in scores.items():
36
- score = np.max(score)
37
- print(f"{metric}: {score:.3f}", file=writer)
spaces/AIWaves/Debate/src/agents/Component/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .ExtraComponent import *
2
- from .PromptComponent import *
3
- from .ToolComponent import *
spaces/AIZero2HeroBootcamp/AnimatedGifGallery/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: AnimatedGifGallery
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: streamlit
7
- sdk_version: 1.21.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AbeShinzo0708/AI_Kishida_Fumio_speaker/app.py DELETED
@@ -1,37 +0,0 @@
1
- import time
2
-
3
- import streamlit as st
4
- import numpy as np
5
- import torch
6
- from espnet2.bin.tts_inference import Text2Speech
7
- from scipy.io.wavfile import write
8
- from PIL import Image
9
-
10
-
11
- fs, lang = 44100, "Japanese"
12
- model= "./100epoch.pth"
13
- x = "これはテストメッセージです"
14
-
15
- text2speech = Text2Speech.from_pretrained(
16
- model_file=model,
17
- device="cpu",
18
- speed_control_alpha=1.0,
19
- noise_scale=0.333,
20
- noise_scale_dur=0.333,
21
- )
22
- pause = np.zeros(30000, dtype=np.float32)
23
-
24
- st.title("おしゃべりAI岸田文雄メーカー")
25
- image = Image.open('kishida.jpg')
26
- st.image(image)
27
- text = st.text_area(label='ここにテキストを入力 (Input Text)↓', height=100, max_chars=2048)
28
-
29
-
30
- if st.button("生成(Generate)"):
31
- with torch.no_grad():
32
- wav = text2speech(text)["wav"]
33
-
34
- wav_list = []
35
- wav_list.append(np.concatenate([wav.view(-1).cpu().numpy(), pause]))
36
- final_wav = np.concatenate(wav_list)
37
- st.audio(final_wav, sample_rate=fs)
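Editorial note: a minimal sketch of the same ESPnet2 synthesis path without the Streamlit UI, reusing only calls that already appear above; the output filename is ours:

```python
import torch
from espnet2.bin.tts_inference import Text2Speech
from scipy.io.wavfile import write

text2speech = Text2Speech.from_pretrained(model_file="./100epoch.pth", device="cpu")
with torch.no_grad():
    wav = text2speech("これはテストメッセージです")["wav"]
# 44.1 kHz float32 mono, matching the fs constant used by the app
write("output.wav", 44100, wav.view(-1).cpu().numpy())
```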
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/matchers.js DELETED
@@ -1 +0,0 @@
1
- export const matchers = {};
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/AiService.py DELETED
@@ -1,36 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import requests
4
-
5
- from ...typing import Any, CreateResult
6
- from ..base_provider import BaseProvider
7
-
8
-
9
- class AiService(BaseProvider):
10
- url = "https://aiservice.vercel.app/"
11
- working = False
12
- supports_gpt_35_turbo = True
13
-
14
- @staticmethod
15
- def create_completion(
16
- model: str,
17
- messages: list[dict[str, str]],
18
- stream: bool,
19
- **kwargs: Any,
20
- ) -> CreateResult:
21
- base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
22
- base += "\nassistant: "
23
-
24
- headers = {
25
- "accept": "*/*",
26
- "content-type": "text/plain;charset=UTF-8",
27
- "sec-fetch-dest": "empty",
28
- "sec-fetch-mode": "cors",
29
- "sec-fetch-site": "same-origin",
30
- "Referer": "https://aiservice.vercel.app/chat",
31
- }
32
- data = {"input": base}
33
- url = "https://aiservice.vercel.app/api/chat/answer"
34
- response = requests.post(url, headers=headers, json=data)
35
- response.raise_for_status()
36
- yield response.json()["data"]
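Editorial note: `create_completion` is a generator that yields the JSON `data` field of the response. A hypothetical direct call (the provider is flagged `working = False`, so this is illustrative only):

```python
for chunk in AiService.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
):
    print(chunk)
```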
spaces/Adapter/CoAdapter/ldm/models/diffusion/ddpm.py DELETED
@@ -1,1329 +0,0 @@
1
- """
2
- wild mixture of
3
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
- https://github.com/CompVis/taming-transformers
6
- -- merci
7
- """
8
-
9
- import torch
10
- import torch.nn as nn
11
- import numpy as np
12
- import pytorch_lightning as pl
13
- from torch.optim.lr_scheduler import LambdaLR
14
- from einops import rearrange, repeat
15
- from contextlib import contextmanager, nullcontext
16
- from functools import partial
17
- import itertools
18
- from tqdm import tqdm
19
- from torchvision.utils import make_grid
20
- from pytorch_lightning.utilities.distributed import rank_zero_only
21
- from omegaconf import ListConfig
22
-
23
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
24
- from ldm.modules.ema import LitEma
25
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
26
- from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
27
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
28
- from ldm.models.diffusion.ddim import DDIMSampler
29
-
30
-
31
- __conditioning_keys__ = {'concat': 'c_concat',
32
- 'crossattn': 'c_crossattn',
33
- 'adm': 'y'}
34
-
35
-
36
- def disabled_train(self, mode=True):
37
- """Overwrite model.train with this function to make sure train/eval mode
38
- does not change anymore."""
39
- return self
40
-
41
-
42
- def uniform_on_device(r1, r2, shape, device):
43
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
44
-
45
-
46
- class DDPM(pl.LightningModule):
-     # classic DDPM with Gaussian diffusion, in image space
-     def __init__(self,
-                  unet_config,
-                  timesteps=1000,
-                  beta_schedule="linear",
-                  loss_type="l2",
-                  ckpt_path=None,
-                  ignore_keys=[],
-                  load_only_unet=False,
-                  monitor="val/loss",
-                  use_ema=True,
-                  first_stage_key="image",
-                  image_size=256,
-                  channels=3,
-                  log_every_t=100,
-                  clip_denoised=True,
-                  linear_start=1e-4,
-                  linear_end=2e-2,
-                  cosine_s=8e-3,
-                  given_betas=None,
-                  original_elbo_weight=0.,
-                  v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
-                  l_simple_weight=1.,
-                  conditioning_key=None,
-                  parameterization="eps",  # all assuming fixed variance schedules
-                  scheduler_config=None,
-                  use_positional_encodings=False,
-                  learn_logvar=False,
-                  logvar_init=0.,
-                  make_it_fit=False,
-                  ucg_training=None,
-                  reset_ema=False,
-                  reset_num_ema_updates=False,
-                  ):
-         super().__init__()
-         assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
-         self.parameterization = parameterization
-         print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
-         self.cond_stage_model = None
-         self.clip_denoised = clip_denoised
-         self.log_every_t = log_every_t
-         self.first_stage_key = first_stage_key
-         self.image_size = image_size  # try conv?
-         self.channels = channels
-         self.use_positional_encodings = use_positional_encodings
-         self.model = DiffusionWrapper(unet_config, conditioning_key)
-         count_params(self.model, verbose=True)
-         self.use_ema = use_ema
-         if self.use_ema:
-             self.model_ema = LitEma(self.model)
-             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
-         self.use_scheduler = scheduler_config is not None
-         if self.use_scheduler:
-             self.scheduler_config = scheduler_config
-
-         self.v_posterior = v_posterior
-         self.original_elbo_weight = original_elbo_weight
-         self.l_simple_weight = l_simple_weight
-
-         if monitor is not None:
-             self.monitor = monitor
-         self.make_it_fit = make_it_fit
-         if reset_ema:
-             assert exists(ckpt_path)
-         if ckpt_path is not None:
-             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
-             if reset_ema:
-                 assert self.use_ema
-                 print("Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
-                 self.model_ema = LitEma(self.model)
-         if reset_num_ema_updates:
-             print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
-             assert self.use_ema
-             self.model_ema.reset_num_updates()
-
-         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
-                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
-         self.loss_type = loss_type
-
-         self.learn_logvar = learn_logvar
-         self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
-         if self.learn_logvar:
-             self.logvar = nn.Parameter(self.logvar, requires_grad=True)
-
-         self.ucg_training = ucg_training or dict()
-         if self.ucg_training:
-             self.ucg_prng = np.random.RandomState()
-     def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
-                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-         if exists(given_betas):
-             betas = given_betas
-         else:
-             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                        cosine_s=cosine_s)
-         alphas = 1. - betas
-         alphas_cumprod = np.cumprod(alphas, axis=0)
-         alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-         timesteps, = betas.shape
-         self.num_timesteps = int(timesteps)
-         self.linear_start = linear_start
-         self.linear_end = linear_end
-         assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
-         to_torch = partial(torch.tensor, dtype=torch.float32)
-
-         self.register_buffer('betas', to_torch(betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-         # calculations for posterior q(x_{t-1} | x_t, x_0)
-         posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
-                 1. - alphas_cumprod) + self.v_posterior * betas
-         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
-         self.register_buffer('posterior_variance', to_torch(posterior_variance))
-         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
-         self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
-         self.register_buffer('posterior_mean_coef1', to_torch(
-             betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
-         self.register_buffer('posterior_mean_coef2', to_torch(
-             (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
-         if self.parameterization == "eps":
-             lvlb_weights = self.betas ** 2 / (
-                     2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
-         elif self.parameterization == "x0":
-             lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
-         elif self.parameterization == "v":
-             lvlb_weights = torch.ones_like(self.betas ** 2 / (
-                     2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
-         else:
-             raise NotImplementedError("mu not supported")
-         lvlb_weights[0] = lvlb_weights[1]
-         self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
-         assert not torch.isnan(self.lvlb_weights).any()
-
-     @contextmanager
-     def ema_scope(self, context=None):
-         if self.use_ema:
-             self.model_ema.store(self.model.parameters())
-             self.model_ema.copy_to(self.model)
-             if context is not None:
-                 print(f"{context}: Switched to EMA weights")
-         try:
-             yield None
-         finally:
-             if self.use_ema:
-                 self.model_ema.restore(self.model.parameters())
-                 if context is not None:
-                     print(f"{context}: Restored training weights")
-
-     @torch.no_grad()
-     def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
-         sd = torch.load(path, map_location="cpu")
-         if "state_dict" in list(sd.keys()):
-             sd = sd["state_dict"]
-         keys = list(sd.keys())
-         for k in keys:
-             for ik in ignore_keys:
-                 if k.startswith(ik):
-                     print("Deleting key {} from state_dict.".format(k))
-                     del sd[k]
-         if self.make_it_fit:
-             n_params = len([name for name, _ in
-                             itertools.chain(self.named_parameters(),
-                                             self.named_buffers())])
-             for name, param in tqdm(
-                     itertools.chain(self.named_parameters(),
-                                     self.named_buffers()),
-                     desc="Fitting old weights to new weights",
-                     total=n_params
-             ):
-                 if name not in sd:
-                     continue
-                 old_shape = sd[name].shape
-                 new_shape = param.shape
-                 assert len(old_shape) == len(new_shape)
-                 if len(new_shape) > 2:
-                     # we only modify first two axes
-                     assert new_shape[2:] == old_shape[2:]
-                 # assumes first axis corresponds to output dim
-                 if not new_shape == old_shape:
-                     new_param = param.clone()
-                     old_param = sd[name]
-                     if len(new_shape) == 1:
-                         for i in range(new_param.shape[0]):
-                             new_param[i] = old_param[i % old_shape[0]]
-                     elif len(new_shape) >= 2:
-                         for i in range(new_param.shape[0]):
-                             for j in range(new_param.shape[1]):
-                                 new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
-
-                         n_used_old = torch.ones(old_shape[1])
-                         for j in range(new_param.shape[1]):
-                             n_used_old[j % old_shape[1]] += 1
-                         n_used_new = torch.zeros(new_shape[1])
-                         for j in range(new_param.shape[1]):
-                             n_used_new[j] = n_used_old[j % old_shape[1]]
-
-                         n_used_new = n_used_new[None, :]
-                         while len(n_used_new.shape) < len(new_shape):
-                             n_used_new = n_used_new.unsqueeze(-1)
-                         new_param /= n_used_new
-
-                     sd[name] = new_param
-
-         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-             sd, strict=False)
-         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-         if len(missing) > 0:
-             print(f"Missing Keys:\n {missing}")
-         if len(unexpected) > 0:
-             print(f"\nUnexpected Keys:\n {unexpected}")
-
-     def q_mean_variance(self, x_start, t):
-         """
-         Get the distribution q(x_t | x_0).
-         :param x_start: the [N x C x ...] tensor of noiseless inputs.
-         :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
-         :return: A tuple (mean, variance, log_variance), all of x_start's shape.
-         """
-         mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
-         variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
-         log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
-         return mean, variance, log_variance
-
-     def predict_start_from_noise(self, x_t, t, noise):
-         return (
-                 extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
-                 extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
-         )
-
-     def predict_start_from_z_and_v(self, x_t, t, v):
-         # x_0 = sqrt(alphas_cumprod) * z_t - sqrt(1 - alphas_cumprod) * v
-         return (
-                 extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
-                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
-         )
-
-     def predict_eps_from_z_and_v(self, x_t, t, v):
-         return (
-                 extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
-                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
-         )
-
-     def q_posterior(self, x_start, x_t, t):
-         posterior_mean = (
-                 extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
-                 extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
-         )
-         posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
-         posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
-         return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
-     def p_mean_variance(self, x, t, clip_denoised: bool):
-         model_out = self.model(x, t)
-         if self.parameterization == "eps":
-             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-         elif self.parameterization == "x0":
-             x_recon = model_out
-         if clip_denoised:
-             x_recon.clamp_(-1., 1.)
-
-         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
-         return model_mean, posterior_variance, posterior_log_variance
-
-     @torch.no_grad()
-     def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
-         b, *_, device = *x.shape, x.device
-         model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
-         noise = noise_like(x.shape, device, repeat_noise)
-         # no noise when t == 0
-         nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-         return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
-     @torch.no_grad()
-     def p_sample_loop(self, shape, return_intermediates=False):
-         device = self.betas.device
-         b = shape[0]
-         img = torch.randn(shape, device=device)
-         intermediates = [img]
-         for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
-             img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
-                                 clip_denoised=self.clip_denoised)
-             if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
-                 intermediates.append(img)
-         if return_intermediates:
-             return img, intermediates
-         return img
-
-     @torch.no_grad()
-     def sample(self, batch_size=16, return_intermediates=False):
-         image_size = self.image_size
-         channels = self.channels
-         return self.p_sample_loop((batch_size, channels, image_size, image_size),
-                                   return_intermediates=return_intermediates)
-
-     def q_sample(self, x_start, t, noise=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
-     def get_v(self, x, noise, t):
-         return (
-                 extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
-                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
-         )
-
-     def get_loss(self, pred, target, mean=True):
-         if self.loss_type == 'l1':
-             loss = (target - pred).abs()
-             if mean:
-                 loss = loss.mean()
-         elif self.loss_type == 'l2':
-             if mean:
-                 loss = torch.nn.functional.mse_loss(target, pred)
-             else:
-                 loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
-         else:
-             raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
-
-         return loss
-
-     def p_losses(self, x_start, t, noise=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-         model_out = self.model(x_noisy, t)
-
-         loss_dict = {}
-         if self.parameterization == "eps":
-             target = noise
-         elif self.parameterization == "x0":
-             target = x_start
-         elif self.parameterization == "v":
-             target = self.get_v(x_start, noise, t)
-         else:
-             raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
-
-         loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
-
-         log_prefix = 'train' if self.training else 'val'
-
-         loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
-         loss_simple = loss.mean() * self.l_simple_weight
-
-         loss_vlb = (self.lvlb_weights[t] * loss).mean()
-         loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
-
-         loss = loss_simple + self.original_elbo_weight * loss_vlb
-
-         loss_dict.update({f'{log_prefix}/loss': loss})
-
-         return loss, loss_dict
-
-     def forward(self, x, *args, **kwargs):
-         t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
-         return self.p_losses(x, t, *args, **kwargs)
-
-     def get_input(self, batch, k):
-         x = batch[k]
-         return x
-
-     def shared_step(self, batch):
-         x = self.get_input(batch, self.first_stage_key)
-         loss, loss_dict = self(x)
-         return loss, loss_dict
-
-     def training_step(self, batch, batch_idx):
-         loss, loss_dict = self.shared_step(batch)
-
-         self.log_dict(loss_dict, prog_bar=True,
-                       logger=True, on_step=True, on_epoch=True)
-
-         self.log("global_step", self.global_step,
-                  prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
-         if self.use_scheduler:
-             lr = self.optimizers().param_groups[0]['lr']
-             self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
-         return loss
-
-     @torch.no_grad()
-     def validation_step(self, batch, batch_idx):
-         _, loss_dict_no_ema = self.shared_step(batch)
-         with self.ema_scope():
-             _, loss_dict_ema = self.shared_step(batch)
-             loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
-         self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-         self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-
-     def on_train_batch_end(self, *args, **kwargs):
-         if self.use_ema:
-             self.model_ema(self.model)
-
-     def _get_rows_from_list(self, samples):
-         n_imgs_per_row = len(samples)
-         denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
-         denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-         denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
-         return denoise_grid
-
-     @torch.no_grad()
-     def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
-         log = dict()
-         x = self.get_input(batch, self.first_stage_key)
-         N = min(x.shape[0], N)
-         n_row = min(x.shape[0], n_row)
-         x = x.to(self.device)[:N]
-         log["inputs"] = x
-
-         # get diffusion row
-         diffusion_row = list()
-         x_start = x[:n_row]
-
-         for t in range(self.num_timesteps):
-             if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                 t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                 t = t.to(self.device).long()
-                 noise = torch.randn_like(x_start)
-                 x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-                 diffusion_row.append(x_noisy)
-
-         log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
-
-         if sample:
-             # get denoise row
-             with self.ema_scope("Plotting"):
-                 samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
-
-             log["samples"] = samples
-             log["denoise_row"] = self._get_rows_from_list(denoise_row)
-
-         if return_keys:
-             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
-                 return log
-             else:
-                 return {key: log[key] for key in return_keys}
-         return log
-
-     def configure_optimizers(self):
-         lr = self.learning_rate
-         params = list(self.model.parameters())
-         if self.learn_logvar:
-             params = params + [self.logvar]
-         opt = torch.optim.AdamW(params, lr=lr)
-         return opt
-
-
- class LatentDiffusion(DDPM):
-     """main class"""
-
-     def __init__(self,
-                  first_stage_config,
-                  cond_stage_config,
-                  num_timesteps_cond=None,
-                  cond_stage_key="image",
-                  cond_stage_trainable=False,
-                  concat_mode=True,
-                  cond_stage_forward=None,
-                  conditioning_key=None,
-                  scale_factor=1.0,
-                  scale_by_std=False,
-                  *args, **kwargs):
-         self.num_timesteps_cond = default(num_timesteps_cond, 1)
-         self.scale_by_std = scale_by_std
-         assert self.num_timesteps_cond <= kwargs['timesteps']
-         # for backwards compatibility after implementation of DiffusionWrapper
-         if conditioning_key is None:
-             conditioning_key = 'concat' if concat_mode else 'crossattn'
-         if cond_stage_config == '__is_unconditional__':
-             conditioning_key = None
-         ckpt_path = kwargs.pop("ckpt_path", None)
-         reset_ema = kwargs.pop("reset_ema", False)
-         reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
-         ignore_keys = kwargs.pop("ignore_keys", [])
-         super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
-         self.concat_mode = concat_mode
-         self.cond_stage_trainable = cond_stage_trainable
-         self.cond_stage_key = cond_stage_key
-         try:
-             self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-         except Exception:
-             self.num_downs = 0
-         if not scale_by_std:
-             self.scale_factor = scale_factor
-         else:
-             self.register_buffer('scale_factor', torch.tensor(scale_factor))
-         self.instantiate_first_stage(first_stage_config)
-         self.instantiate_cond_stage(cond_stage_config)
-         self.cond_stage_forward = cond_stage_forward
-         self.clip_denoised = False
-         self.bbox_tokenizer = None
-
-         self.restarted_from_ckpt = False
-         if ckpt_path is not None:
-             self.init_from_ckpt(ckpt_path, ignore_keys)
-             self.restarted_from_ckpt = True
-             if reset_ema:
-                 assert self.use_ema
-                 print("Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
-                 self.model_ema = LitEma(self.model)
-         if reset_num_ema_updates:
-             print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
-             assert self.use_ema
-             self.model_ema.reset_num_updates()
-
-     def make_cond_schedule(self):
-         self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
-         ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
-         self.cond_ids[:self.num_timesteps_cond] = ids
-
-     def register_schedule(self,
-                           given_betas=None, beta_schedule="linear", timesteps=1000,
-                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-         super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
-         self.shorten_cond_schedule = self.num_timesteps_cond > 1
-         if self.shorten_cond_schedule:
-             self.make_cond_schedule()
-
-     def instantiate_first_stage(self, config):
-         model = instantiate_from_config(config)
-         self.first_stage_model = model.eval()
-         self.first_stage_model.train = disabled_train
-         for param in self.first_stage_model.parameters():
-             param.requires_grad = False
-
-     def instantiate_cond_stage(self, config):
-         if not self.cond_stage_trainable:
-             if config == "__is_first_stage__":
-                 print("Using first stage also as cond stage.")
-                 self.cond_stage_model = self.first_stage_model
-             elif config == "__is_unconditional__":
-                 print(f"Training {self.__class__.__name__} as an unconditional model.")
-                 self.cond_stage_model = None
-             else:
-                 model = instantiate_from_config(config)
-                 self.cond_stage_model = model.eval()
-                 self.cond_stage_model.train = disabled_train
-                 for param in self.cond_stage_model.parameters():
-                     param.requires_grad = False
-         else:
-             assert config != '__is_first_stage__'
-             assert config != '__is_unconditional__'
-             model = instantiate_from_config(config)
-             self.cond_stage_model = model
-
-     def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
-         denoise_row = []
-         for zd in tqdm(samples, desc=desc):
-             denoise_row.append(self.decode_first_stage(zd.to(self.device),
-                                                        force_not_quantize=force_no_decoder_quantization))
-         n_imgs_per_row = len(denoise_row)
-         denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
-         denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
-         denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-         denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
-         return denoise_grid
-
-     def get_first_stage_encoding(self, encoder_posterior):
-         if isinstance(encoder_posterior, DiagonalGaussianDistribution):
-             z = encoder_posterior.sample()
-         elif isinstance(encoder_posterior, torch.Tensor):
-             z = encoder_posterior
-         else:
-             raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
-         return self.scale_factor * z
-
-     def get_learned_conditioning(self, c):
-         if self.cond_stage_forward is None:
-             if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
-                 c = self.cond_stage_model.encode(c)
-                 if isinstance(c, DiagonalGaussianDistribution):
-                     c = c.mode()
-             else:
-                 c = self.cond_stage_model(c)
-         else:
-             assert hasattr(self.cond_stage_model, self.cond_stage_forward)
-             c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
-         return c
-
-     def meshgrid(self, h, w):
-         y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
-         x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
-         arr = torch.cat([y, x], dim=-1)
-         return arr
-
-     def delta_border(self, h, w):
-         """
-         :param h: height
-         :param w: width
-         :return: normalized distance to image border,
-          with min distance = 0 at border and max dist = 0.5 at image center
-         """
-         lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
-         arr = self.meshgrid(h, w) / lower_right_corner
-         dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
-         dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
-         edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
-         return edge_dist
-
-     def get_weighting(self, h, w, Ly, Lx, device):
-         weighting = self.delta_border(h, w)
-         weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
-                                self.split_input_params["clip_max_weight"])
-         weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
-         if self.split_input_params["tie_braker"]:
-             L_weighting = self.delta_border(Ly, Lx)
-             L_weighting = torch.clip(L_weighting,
-                                      self.split_input_params["clip_min_tie_weight"],
-                                      self.split_input_params["clip_max_tie_weight"])
-
-             L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
-             weighting = weighting * L_weighting
-         return weighting
-
-
684
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
685
- """
686
- :param x: img of size (bs, c, h, w)
687
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
688
- """
689
- bs, nc, h, w = x.shape
690
-
691
- # number of crops in image
692
- Ly = (h - kernel_size[0]) // stride[0] + 1
693
- Lx = (w - kernel_size[1]) // stride[1] + 1
694
-
695
- if uf == 1 and df == 1:
696
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
697
- unfold = torch.nn.Unfold(**fold_params)
698
-
699
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
700
-
701
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
702
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
703
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
704
-
705
- elif uf > 1 and df == 1:
706
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
707
- unfold = torch.nn.Unfold(**fold_params)
708
-
709
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
710
- dilation=1, padding=0,
711
- stride=(stride[0] * uf, stride[1] * uf))
712
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
713
-
714
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
715
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
716
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
717
-
718
- elif df > 1 and uf == 1:
719
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
720
- unfold = torch.nn.Unfold(**fold_params)
721
-
722
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
723
- dilation=1, padding=0,
724
- stride=(stride[0] // df, stride[1] // df))
725
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
726
-
727
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
728
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
729
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
730
-
731
- else:
732
- raise NotImplementedError
733
-
734
- return fold, unfold, normalization, weighting
735
-
736
-     @torch.no_grad()
-     def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
-                   cond_key=None, return_original_cond=False, bs=None):
-         x = super().get_input(batch, k)
-         if bs is not None:
-             x = x[:bs]
-         x = x.to(self.device)
-         encoder_posterior = self.encode_first_stage(x)
-         z = self.get_first_stage_encoding(encoder_posterior).detach()
-
-         if self.model.conditioning_key is not None:
-             if cond_key is None:
-                 cond_key = self.cond_stage_key
-             if cond_key != self.first_stage_key:
-                 if cond_key in ['caption', 'coordinates_bbox', "txt"]:
-                     xc = batch[cond_key]
-                 elif cond_key in ['class_label', 'cls']:
-                     xc = batch
-                 else:
-                     xc = super().get_input(batch, cond_key).to(self.device)
-             else:
-                 xc = x
-             if not self.cond_stage_trainable or force_c_encode:
-                 if isinstance(xc, dict) or isinstance(xc, list):
-                     c = self.get_learned_conditioning(xc)
-                 else:
-                     c = self.get_learned_conditioning(xc.to(self.device))
-             else:
-                 c = xc
-             if bs is not None:
-                 c = c[:bs]
-
-             if self.use_positional_encodings:
-                 pos_x, pos_y = self.compute_latent_shifts(batch)
-                 ckey = __conditioning_keys__[self.model.conditioning_key]
-                 c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
-         else:
-             c = None
-             xc = None
-             if self.use_positional_encodings:
-                 pos_x, pos_y = self.compute_latent_shifts(batch)
-                 c = {'pos_x': pos_x, 'pos_y': pos_y}
-         out = [z, c]
-         if return_first_stage_outputs:
-             xrec = self.decode_first_stage(z)
-             out.extend([x, xrec])
-         if return_original_cond:
-             out.append(xc)
-         return out
-
-     @torch.no_grad()
-     def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
-         if predict_cids:
-             if z.dim() == 4:
-                 z = torch.argmax(z.exp(), dim=1).long()
-             z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
-             z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
-         z = 1. / self.scale_factor * z
-         return self.first_stage_model.decode(z)
-
-     @torch.no_grad()
-     def encode_first_stage(self, x):
-         return self.first_stage_model.encode(x)
-
-     def shared_step(self, batch, **kwargs):
-         x, c = self.get_input(batch, self.first_stage_key)
-         loss = self(x, c, **kwargs)
-         return loss
-
-     def get_time_with_schedule(self, scheduler, bs):
-         if scheduler == 'linear':
-             t = torch.randint(0, self.num_timesteps, (bs,), device=self.device).long()
-         elif scheduler == 'cosine':
-             t = torch.rand((bs,), device=self.device)
-             t = torch.cos(torch.pi / 2. * t) * self.num_timesteps
-             t = t.long()
-         elif scheduler == 'cubic':
-             t = torch.rand((bs,), device=self.device)
-             t = (1 - t ** 3) * self.num_timesteps
-             t = t.long()
-         else:
-             raise NotImplementedError
-         t = torch.clamp(t, min=0, max=self.num_timesteps - 1)
-         return t
-
-     def forward(self, x, c, *args, **kwargs):
-         if 't' not in kwargs:
-             t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
-         else:
-             t = kwargs.pop('t')
-
-         return self.p_losses(x, c, t, *args, **kwargs)
-
-     def apply_model(self, x_noisy, t, cond, return_ids=False, **kwargs):
-         if isinstance(cond, dict):
-             # hybrid case, cond is expected to be a dict
-             pass
-         else:
-             if not isinstance(cond, list):
-                 cond = [cond]
-             key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
-             cond = {key: cond}
-
-         x_recon = self.model(x_noisy, t, **cond, **kwargs)
-
-         if isinstance(x_recon, tuple) and not return_ids:
-             return x_recon[0]
-         else:
-             return x_recon
-
-     def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
-         return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
-                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
-     def _prior_bpd(self, x_start):
-         """
-         Get the prior KL term for the variational lower-bound, measured in
-         bits-per-dim.
-         This term can't be optimized, as it only depends on the encoder.
-         :param x_start: the [N x C x ...] tensor of inputs.
-         :return: a batch of [N] KL values (in bits), one per batch element.
-         """
-         batch_size = x_start.shape[0]
-         t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
-         qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
-         kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
-         return mean_flat(kl_prior) / np.log(2.0)
-
-     def p_losses(self, x_start, cond, t, noise=None, **kwargs):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-         model_output = self.apply_model(x_noisy, t, cond, **kwargs)
-
-         loss_dict = {}
-         prefix = 'train' if self.training else 'val'
-
-         if self.parameterization == "x0":
-             target = x_start
-         elif self.parameterization == "eps":
-             target = noise
-         elif self.parameterization == "v":
-             target = self.get_v(x_start, noise, t)
-         else:
-             raise NotImplementedError()
-
-         loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
-         loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
-         logvar_t = self.logvar[t].to(self.device)
-         loss = loss_simple / torch.exp(logvar_t) + logvar_t
-         if self.learn_logvar:
-             loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
-             loss_dict.update({'logvar': self.logvar.data.mean()})
-
-         loss = self.l_simple_weight * loss.mean()
-
-         loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
-         loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
-         loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
-         loss += (self.original_elbo_weight * loss_vlb)
-         loss_dict.update({f'{prefix}/loss': loss})
-
-         return loss, loss_dict
-
-     def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
-                         return_x0=False, score_corrector=None, corrector_kwargs=None):
-         t_in = t
-         model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
-         if score_corrector is not None:
-             assert self.parameterization == "eps"
-             model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
-         if return_codebook_ids:
-             model_out, logits = model_out
-
-         if self.parameterization == "eps":
-             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-         elif self.parameterization == "x0":
-             x_recon = model_out
-         else:
-             raise NotImplementedError()
-
-         if clip_denoised:
-             x_recon.clamp_(-1., 1.)
-         if quantize_denoised:
-             x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
-         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
-         if return_codebook_ids:
-             return model_mean, posterior_variance, posterior_log_variance, logits
-         elif return_x0:
-             return model_mean, posterior_variance, posterior_log_variance, x_recon
-         else:
-             return model_mean, posterior_variance, posterior_log_variance
-
-     @torch.no_grad()
-     def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
-                  return_codebook_ids=False, quantize_denoised=False, return_x0=False,
-                  temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
-         b, *_, device = *x.shape, x.device
-         outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
-                                        return_codebook_ids=return_codebook_ids,
-                                        quantize_denoised=quantize_denoised,
-                                        return_x0=return_x0,
-                                        score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
-         if return_codebook_ids:
-             raise DeprecationWarning("Support dropped.")
-         elif return_x0:
-             model_mean, _, model_log_variance, x0 = outputs
-         else:
-             model_mean, _, model_log_variance = outputs
-
-         noise = noise_like(x.shape, device, repeat_noise) * temperature
-         if noise_dropout > 0.:
-             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-         # no noise when t == 0
-         nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
-         if return_x0:
-             return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
-         else:
-             return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
-     @torch.no_grad()
-     def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
-                               img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
-                               score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
-                               log_every_t=None):
-         if not log_every_t:
-             log_every_t = self.log_every_t
-         timesteps = self.num_timesteps
-         if batch_size is not None:
-             b = batch_size
-             shape = [batch_size] + list(shape)
-         else:
-             b = batch_size = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=self.device)
-         else:
-             img = x_T
-         intermediates = []
-         if cond is not None:
-             if isinstance(cond, dict):
-                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                         list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
-             else:
-                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
-         if start_T is not None:
-             timesteps = min(timesteps, start_T)
-         iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
-                         total=timesteps) if verbose else reversed(range(0, timesteps))
-         if type(temperature) == float:
-             temperature = [temperature] * timesteps
-
-         for i in iterator:
-             ts = torch.full((b,), i, device=self.device, dtype=torch.long)
-             if self.shorten_cond_schedule:
-                 assert self.model.conditioning_key != 'hybrid'
-                 tc = self.cond_ids[ts].to(cond.device)
-                 cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-             img, x0_partial = self.p_sample(img, cond, ts,
-                                             clip_denoised=self.clip_denoised,
-                                             quantize_denoised=quantize_denoised, return_x0=True,
-                                             temperature=temperature[i], noise_dropout=noise_dropout,
-                                             score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
-             if mask is not None:
-                 assert x0 is not None
-                 img_orig = self.q_sample(x0, ts)
-                 img = img_orig * mask + (1. - mask) * img
-
-             if i % log_every_t == 0 or i == timesteps - 1:
-                 intermediates.append(x0_partial)
-             if callback:
-                 callback(i)
-             if img_callback:
-                 img_callback(img, i)
-         return img, intermediates
-
-     @torch.no_grad()
-     def p_sample_loop(self, cond, shape, return_intermediates=False,
-                       x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
-                       mask=None, x0=None, img_callback=None, start_T=None,
-                       log_every_t=None):
-
-         if not log_every_t:
-             log_every_t = self.log_every_t
-         device = self.betas.device
-         b = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=device)
-         else:
-             img = x_T
-
-         intermediates = [img]
-         if timesteps is None:
-             timesteps = self.num_timesteps
-
-         if start_T is not None:
-             timesteps = min(timesteps, start_T)
-         iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
-             range(0, timesteps))
-
-         if mask is not None:
-             assert x0 is not None
-             assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
-
-         for i in iterator:
-             ts = torch.full((b,), i, device=device, dtype=torch.long)
-             if self.shorten_cond_schedule:
-                 assert self.model.conditioning_key != 'hybrid'
-                 tc = self.cond_ids[ts].to(cond.device)
-                 cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-             img = self.p_sample(img, cond, ts,
-                                 clip_denoised=self.clip_denoised,
-                                 quantize_denoised=quantize_denoised)
-             if mask is not None:
-                 img_orig = self.q_sample(x0, ts)
-                 img = img_orig * mask + (1. - mask) * img
-
-             if i % log_every_t == 0 or i == timesteps - 1:
-                 intermediates.append(img)
-             if callback:
-                 callback(i)
-             if img_callback:
-                 img_callback(img, i)
-
-         if return_intermediates:
-             return img, intermediates
-         return img
-
-     @torch.no_grad()
-     def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
-                verbose=True, timesteps=None, quantize_denoised=False,
-                mask=None, x0=None, shape=None, **kwargs):
-         if shape is None:
-             shape = (batch_size, self.channels, self.image_size, self.image_size)
-         if cond is not None:
-             if isinstance(cond, dict):
-                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                         list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
-             else:
-                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-         return self.p_sample_loop(cond,
-                                   shape,
-                                   return_intermediates=return_intermediates, x_T=x_T,
-                                   verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
-                                   mask=mask, x0=x0)
-
-     @torch.no_grad()
-     def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
-         if ddim:
-             ddim_sampler = DDIMSampler(self)
-             shape = (self.channels, self.image_size, self.image_size)
-             samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
-                                                          shape, cond, verbose=False, **kwargs)
-
-         else:
-             samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
-                                                  return_intermediates=True, **kwargs)
-
-         return samples, intermediates
-
-     @torch.no_grad()
-     def get_unconditional_conditioning(self, batch_size, null_label=None):
-         if null_label is not None:
-             xc = null_label
-             if isinstance(xc, ListConfig):
-                 xc = list(xc)
-             if isinstance(xc, dict) or isinstance(xc, list):
-                 c = self.get_learned_conditioning(xc)
-             else:
-                 if hasattr(xc, "to"):
-                     xc = xc.to(self.device)
-                 c = self.get_learned_conditioning(xc)
-         else:
-             if self.cond_stage_key in ["class_label", "cls"]:
-                 xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
-                 return self.get_learned_conditioning(xc)
-             else:
-                 raise NotImplementedError("todo")
-         if isinstance(c, list):  # in case the encoder gives us a list
-             for i in range(len(c)):
-                 c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
-         else:
-             c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
-         return c
-
-     @torch.no_grad()
-     def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
-                    quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
-                    plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
-                    use_ema_scope=True,
-                    **kwargs):
-         ema_scope = self.ema_scope if use_ema_scope else nullcontext
-         use_ddim = ddim_steps is not None
-
-         log = dict()
-         z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
-                                            return_first_stage_outputs=True,
-                                            force_c_encode=True,
-                                            return_original_cond=True,
-                                            bs=N)
-         N = min(x.shape[0], N)
-         n_row = min(x.shape[0], n_row)
-         log["inputs"] = x
-         log["reconstruction"] = xrec
-         if self.model.conditioning_key is not None:
-             if hasattr(self.cond_stage_model, "decode"):
-                 xc = self.cond_stage_model.decode(c)
-                 log["conditioning"] = xc
-             elif self.cond_stage_key in ["caption", "txt"]:
-                 xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
-                 log["conditioning"] = xc
-             elif self.cond_stage_key in ['class_label', "cls"]:
-                 try:
-                     xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
-                     log['conditioning'] = xc
-                 except KeyError:
-                     # probably no "human_label" in batch
-                     pass
-             elif isimage(xc):
-                 log["conditioning"] = xc
-             if ismap(xc):
-                 log["original_conditioning"] = self.to_rgb(xc)
-
-         if plot_diffusion_rows:
-             # get diffusion row
-             diffusion_row = list()
-             z_start = z[:n_row]
-             for t in range(self.num_timesteps):
-                 if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                     t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                     t = t.to(self.device).long()
-                     noise = torch.randn_like(z_start)
-                     z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                     diffusion_row.append(self.decode_first_stage(z_noisy))
-
-             diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-             diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-             diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-             diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-             log["diffusion_row"] = diffusion_grid
-
-         if sample:
-             # get denoise row
-             with ema_scope("Sampling"):
-                 samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                          ddim_steps=ddim_steps, eta=ddim_eta)
-             x_samples = self.decode_first_stage(samples)
-             log["samples"] = x_samples
-             if plot_denoise_rows:
-                 denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                 log["denoise_row"] = denoise_grid
-
-             if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
-                     self.first_stage_model, IdentityFirstStage):
-                 # also display when quantizing x0 while sampling
-                 with ema_scope("Plotting Quantized Denoised"):
-                     samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                              ddim_steps=ddim_steps, eta=ddim_eta,
-                                                              quantize_denoised=True)
-                 x_samples = self.decode_first_stage(samples.to(self.device))
-                 log["samples_x0_quantized"] = x_samples
-
-         if unconditional_guidance_scale > 1.0:
-             uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
-             if self.model.conditioning_key == "crossattn-adm":
-                 uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
-             with ema_scope("Sampling with classifier-free guidance"):
-                 samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                  ddim_steps=ddim_steps, eta=ddim_eta,
-                                                  unconditional_guidance_scale=unconditional_guidance_scale,
-                                                  unconditional_conditioning=uc,
-                                                  )
-             x_samples_cfg = self.decode_first_stage(samples_cfg)
-             log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
-         if inpaint:
-             # make a simple center square
-             b, h, w = z.shape[0], z.shape[2], z.shape[3]
-             mask = torch.ones(N, h, w).to(self.device)
-             # zeros will be filled in
-             mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
-             mask = mask[:, None, ...]
-             with ema_scope("Plotting Inpaint"):
-                 samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                              ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-             x_samples = self.decode_first_stage(samples.to(self.device))
-             log["samples_inpainting"] = x_samples
-             log["mask"] = mask
-
-             # outpaint
-             mask = 1. - mask
-             with ema_scope("Plotting Outpaint"):
-                 samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                              ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-             x_samples = self.decode_first_stage(samples.to(self.device))
-             log["samples_outpainting"] = x_samples
-
-         if plot_progressive_rows:
-             with ema_scope("Plotting Progressives"):
-                 img, progressives = self.progressive_denoising(c,
-                                                                shape=(self.channels, self.image_size, self.image_size),
-                                                                batch_size=N)
-             prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
-             log["progressive_row"] = prog_row
-
-         if return_keys:
-             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
-                 return log
-             else:
-                 return {key: log[key] for key in return_keys}
-         return log
-
-     def configure_optimizers(self):
-         lr = self.learning_rate
-         params = list(self.model.parameters())
-         if self.cond_stage_trainable:
-             print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
-             params = params + list(self.cond_stage_model.parameters())
-         if self.learn_logvar:
-             print('Diffusion model optimizing logvar')
-             params.append(self.logvar)
-         opt = torch.optim.AdamW(params, lr=lr)
-         if self.use_scheduler:
-             assert 'target' in self.scheduler_config
-             scheduler = instantiate_from_config(self.scheduler_config)
-
-             print("Setting up LambdaLR scheduler...")
-             scheduler = [
-                 {
-                     'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
-                     'interval': 'step',
-                     'frequency': 1
-                 }]
-             return [opt], scheduler
-         return opt
-
-     @torch.no_grad()
-     def to_rgb(self, x):
-         x = x.float()
-         if not hasattr(self, "colorize"):
-             self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
-         x = nn.functional.conv2d(x, weight=self.colorize)
-         x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
-         return x
-
-
- class DiffusionWrapper(pl.LightningModule):
-     def __init__(self, diff_model_config, conditioning_key):
-         super().__init__()
-         self.diffusion_model = instantiate_from_config(diff_model_config)
-         self.conditioning_key = conditioning_key
-         assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
-
-     def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None, **kwargs):
-         if self.conditioning_key is None:
-             out = self.diffusion_model(x, t, **kwargs)
-         elif self.conditioning_key == 'concat':
-             xc = torch.cat([x] + c_concat, dim=1)
-             out = self.diffusion_model(xc, t, **kwargs)
-         elif self.conditioning_key == 'crossattn':
-             cc = torch.cat(c_crossattn, 1)
-             out = self.diffusion_model(x, t, context=cc, **kwargs)
-         elif self.conditioning_key == 'hybrid':
-             xc = torch.cat([x] + c_concat, dim=1)
-             cc = torch.cat(c_crossattn, 1)
-             out = self.diffusion_model(xc, t, context=cc, **kwargs)
-         elif self.conditioning_key == 'hybrid-adm':
-             assert c_adm is not None
-             xc = torch.cat([x] + c_concat, dim=1)
-             cc = torch.cat(c_crossattn, 1)
-             out = self.diffusion_model(xc, t, context=cc, y=c_adm, **kwargs)
-         elif self.conditioning_key == 'crossattn-adm':
-             assert c_adm is not None
-             cc = torch.cat(c_crossattn, 1)
-             out = self.diffusion_model(x, t, context=cc, y=c_adm, **kwargs)
-         elif self.conditioning_key == 'adm':
-             cc = c_crossattn[0]
-             out = self.diffusion_model(x, t, y=cc, **kwargs)
-         else:
-             raise NotImplementedError()
-
-         return out
spaces/Adapting/YouTube-Downloader/tube/__init__.py DELETED
@@ -1,3 +0,0 @@
- from .download import download_yt
- from .utils import clear_cache
- from .var import OUTPUT_DIR
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/AlphaMaskImage.d.ts DELETED
@@ -1,2 +0,0 @@
- import AlphaMaskImage from '../../../plugins/alphamaskimage';
- export default AlphaMaskImage;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/ColorInputBase.d.ts DELETED
@@ -1,38 +0,0 @@
- import Sizer from '../../sizer/Sizer';
- import RoundRectangle from '../../roundrectangle/RoundRectangle';
- import CanvasInput from '../../canvasinput/CanvasInput';
-
- export default ColorInputBase;
-
- declare namespace ColorInputBase {
-     interface ISwatchConfig extends RoundRectangle.IConfig {
-         size?: number,
-     }
-
-     interface IConfig extends Sizer.IConfig {
-         background?: Phaser.GameObjects.GameObject,
-
-         swatch?: Phaser.GameObjects.GameObject | ISwatchConfig,
-         swatchSize?: number,
-         squareExpandSwatch?: boolean,
-
-         inputText?: CanvasInput.IConfig,
-
-         valuechangeCallback: (newValue: number, oldValue: number, colorPicker: ColorInputBase) => void,
-
-         value?: number | string
-     }
- }
-
- declare class ColorInputBase extends Sizer {
-     constructor(
-         scene: Phaser.Scene,
-         config?: ColorInputBase.IConfig
-     );
-
-     setValue(value: number): this;
-     value: number;
-
-     setColor(color: number): this;
-     color: number;
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/Factory.js DELETED
@@ -1,13 +0,0 @@
- import SimpleDropDownList from './SimpleDropDownList.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('simpleDropDownList', function (config, creators) {
-     var gameObject = new SimpleDropDownList(this.scene, config, creators);
-     this.scene.add.existing(gameObject);
-     return gameObject;
- });
-
- SetValue(window, 'RexPlugins.UI.SimpleDropDownList', SimpleDropDownList);
-
- export default SimpleDropDownList;
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/js/chunk-vendors-legacy.9281b25c.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AlekseyCalvin/Make_Putin_Queer_Please-use-trp-token/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Make Putin Queer Please-use-trp-token
- emoji: 🐨
- colorFrom: red
- colorTo: green
- sdk: gradio
- sdk_version: 3.13.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/bg_motion_predictor.py DELETED
@@ -1,24 +0,0 @@
- from torch import nn
- import torch
- from torchvision import models
-
- class BGMotionPredictor(nn.Module):
-     """
-     Module for background estimation; returns a single transformation, parametrized as a 3x3 matrix whose third row is [0 0 1].
-     """
-
-     def __init__(self):
-         super(BGMotionPredictor, self).__init__()
-         self.bg_encoder = models.resnet18(pretrained=False)
-         self.bg_encoder.conv1 = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
-         num_features = self.bg_encoder.fc.in_features
-         self.bg_encoder.fc = nn.Linear(num_features, 6)
-         self.bg_encoder.fc.weight.data.zero_()
-         self.bg_encoder.fc.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
-
-     def forward(self, source_image, driving_image):
-         bs = source_image.shape[0]
-         out = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).type(source_image.type())
-         prediction = self.bg_encoder(torch.cat([source_image, driving_image], dim=1))
-         out[:, :2, :] = prediction.view(bs, 2, 3)
-         return out
spaces/Alican/pixera/util/visualizer.py DELETED
@@ -1,257 +0,0 @@
- import numpy as np
- import os
- import sys
- import ntpath
- import time
- from . import util, html
- from subprocess import Popen, PIPE
-
-
- try:
-     import wandb
- except ImportError:
-     print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
-
- if sys.version_info[0] == 2:
-     VisdomExceptionBase = Exception
- else:
-     VisdomExceptionBase = ConnectionError
-
-
- def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256, use_wandb=False):
-     """Save images to the disk.
-
-     Parameters:
-         webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
-         visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
-         image_path (str) -- the string is used to create image paths
-         aspect_ratio (float) -- the aspect ratio of saved images
-         width (int) -- the images will be resized to width x width
-
-     This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
-     """
-     image_dir = webpage.get_image_dir()
-     short_path = ntpath.basename(image_path[0])
-     name = os.path.splitext(short_path)[0]
-
-     webpage.add_header(name)
-     ims, txts, links = [], [], []
-     ims_dict = {}
-     for label, im_data in visuals.items():
-         im = util.tensor2im(im_data)
-         image_name = '%s_%s.png' % (name, label)
-         save_path = os.path.join(image_dir, image_name)
-         util.save_image(im, save_path, aspect_ratio=aspect_ratio)
-         ims.append(image_name)
-         txts.append(label)
-         links.append(image_name)
-         if use_wandb:
-             ims_dict[label] = wandb.Image(im)
-     webpage.add_images(ims, txts, links, width=width)
-     if use_wandb:
-         wandb.log(ims_dict)
-
-
- class Visualizer():
-     """This class includes several functions that can display/save images and print/save logging information.
-
-     It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
-     """
-
-     def __init__(self, opt):
-         """Initialize the Visualizer class
-
-         Parameters:
-             opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
-         Step 1: Cache the training/test options
-         Step 2: connect to a visdom server
-         Step 3: create an HTML object for saving HTML filters
-         Step 4: create a logging file to store training losses
-         """
-         self.opt = opt  # cache the option
-         self.display_id = opt.display_id
-         self.use_html = opt.isTrain and not opt.no_html
-         self.win_size = opt.display_winsize
-         self.name = opt.name
-         self.port = opt.display_port
-         self.saved = False
-         self.use_wandb = opt.use_wandb
-         self.wandb_project_name = opt.wandb_project_name
-         self.current_epoch = 0
-         self.ncols = opt.display_ncols
-
-         if self.display_id > 0:  # connect to a visdom server given <display_port> and <display_server>
-             import visdom
-             self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
-             if not self.vis.check_connection():
-                 self.create_visdom_connections()
-
-         if self.use_wandb:
-             self.wandb_run = wandb.init(project=self.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
-             self.wandb_run._label(repo='CycleGAN-and-pix2pix')
-
-         if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
-             self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
-             self.img_dir = os.path.join(self.web_dir, 'images')
-             print('create web directory %s...' % self.web_dir)
-             util.mkdirs([self.web_dir, self.img_dir])
-         # create a logging file to store training losses
-         self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
-         with open(self.log_name, "a") as log_file:
-             now = time.strftime("%c")
-             log_file.write('================ Training Loss (%s) ================\n' % now)
-
-     def reset(self):
-         """Reset the self.saved status"""
-         self.saved = False
-
-     def create_visdom_connections(self):
-         """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
-         cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
-         print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
-         print('Command: %s' % cmd)
-         Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
-
-     def display_current_results(self, visuals, epoch, save_result):
-         """Display current results on visdom; save current results to an HTML file.
-
-         Parameters:
-             visuals (OrderedDict) - - dictionary of images to display or save
-             epoch (int) - - the current epoch
-             save_result (bool) - - if save the current results to an HTML file
-         """
-         if self.display_id > 0:  # show images in the browser using visdom
-             ncols = self.ncols
-             if ncols > 0:  # show all the images in one visdom panel
-                 ncols = min(ncols, len(visuals))
-                 h, w = next(iter(visuals.values())).shape[:2]
-                 table_css = """<style>
-                         table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
-                         table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
-                         </style>""" % (w, h)  # create a table css
-                 # create a table of images.
-                 title = self.name
-                 label_html = ''
-                 label_html_row = ''
-                 images = []
-                 idx = 0
-                 for label, image in visuals.items():
-                     image_numpy = util.tensor2im(image)
-                     label_html_row += '<td>%s</td>' % label
-                     images.append(image_numpy.transpose([2, 0, 1]))
-                     idx += 1
-                     if idx % ncols == 0:
-                         label_html += '<tr>%s</tr>' % label_html_row
-                         label_html_row = ''
-                 white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
-                 while idx % ncols != 0:
-                     images.append(white_image)
-                     label_html_row += '<td></td>'
-                     idx += 1
-                 if label_html_row != '':
-                     label_html += '<tr>%s</tr>' % label_html_row
-                 try:
-                     self.vis.images(images, nrow=ncols, win=self.display_id + 1,
-                                     padding=2, opts=dict(title=title + ' images'))
-                     label_html = '<table>%s</table>' % label_html
-                     self.vis.text(table_css + label_html, win=self.display_id + 2,
-                                   opts=dict(title=title + ' labels'))
-                 except VisdomExceptionBase:
-                     self.create_visdom_connections()
-
-             else:  # show each image in a separate visdom panel;
-                 idx = 1
-                 try:
-                     for label, image in visuals.items():
-                         image_numpy = util.tensor2im(image)
-                         self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
-                                        win=self.display_id + idx)
-                         idx += 1
-                 except VisdomExceptionBase:
-                     self.create_visdom_connections()
-
-         if self.use_wandb:
-             columns = [key for key, _ in visuals.items()]
-             columns.insert(0, 'epoch')
-             result_table = wandb.Table(columns=columns)
-             table_row = [epoch]
-             ims_dict = {}
-             for label, image in visuals.items():
-                 image_numpy = util.tensor2im(image)
-                 wandb_image = wandb.Image(image_numpy)
-                 table_row.append(wandb_image)
-                 ims_dict[label] = wandb_image
-             self.wandb_run.log(ims_dict)
-             if epoch != self.current_epoch:
-                 self.current_epoch = epoch
-                 result_table.add_data(*table_row)
-                 self.wandb_run.log({"Result": result_table})
-
-         if self.use_html and (save_result or not self.saved):  # save images to an HTML file if they haven't been saved.
-             self.saved = True
-             # save images to the disk
-             for label, image in visuals.items():
-                 image_numpy = util.tensor2im(image)
-                 img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
-                 util.save_image(image_numpy, img_path)
-
-             # update website
-             webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
-             for n in range(epoch, 0, -1):
-                 webpage.add_header('epoch [%d]' % n)
-                 ims, txts, links = [], [], []
-
-                 for label, image_numpy in visuals.items():
-                     image_numpy = util.tensor2im(image_numpy)
-                     img_path = 'epoch%.3d_%s.png' % (n, label)
-                     ims.append(img_path)
-                     txts.append(label)
-                     links.append(img_path)
-                 webpage.add_images(ims, txts, links, width=self.win_size)
-             webpage.save()
-
-     def plot_current_losses(self, epoch, counter_ratio, losses):
-         """display the current losses on visdom display: dictionary of error labels and values
-
-         Parameters:
-             epoch (int) -- current epoch
-             counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
-             losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
-         """
-         if not hasattr(self, 'plot_data'):
-             self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
-         self.plot_data['X'].append(epoch + counter_ratio)
-         self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
-         try:
-             self.vis.line(
-                 X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
-                 Y=np.array(self.plot_data['Y']),
-                 opts={
-                     'title': self.name + ' loss over time',
-                     'legend': self.plot_data['legend'],
-                     'xlabel': 'epoch',
-                     'ylabel': 'loss'},
-                 win=self.display_id)
-         except VisdomExceptionBase:
-             self.create_visdom_connections()
-         if self.use_wandb:
-             self.wandb_run.log(losses)
-
-     # losses: same format as |losses| of plot_current_losses
-     def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
-         """print current losses on console; also save the losses to the disk
-
-         Parameters:
-             epoch (int) -- current epoch
-             iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
-             losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
-             t_comp (float) -- computational time per data point (normalized by batch_size)
-             t_data (float) -- data loading time per data point (normalized by batch_size)
-         """
-         message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
-         for k, v in losses.items():
-             message += '%s: %.3f ' % (k, v)
-
-         print(message)  # print the message
-         with open(self.log_name, "a") as log_file:
-             log_file.write('%s\n' % message)  # save the message
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/train_boundary.py DELETED
@@ -1,158 +0,0 @@
-
- import numpy as np
- from sklearn import svm
-
-
-
-
-
- def train_boundary(latent_codes,
-                    scores,
-                    chosen_num_or_ratio=0.02,
-                    split_ratio=0.7,
-                    invalid_value=None,
-                    logger=None,
-                    logger_name='train_boundary'):
-     """Trains boundary in latent space with offline predicted attribute scores.
-
-     Given a collection of latent codes and the attribute scores predicted from the
-     corresponding images, this function will train a linear SVM by treating it as
-     a bi-classification problem. Basically, the samples with highest attribute
-     scores are treated as positive samples, while those with lowest scores as
-     negative. For now, the latent code can ONLY be with 1 dimension.
-
-     NOTE: The returned boundary is with shape (1, latent_space_dim), and also
-     normalized with unit norm.
-
-     Args:
-         latent_codes: Input latent codes as training data.
-         scores: Input attribute scores used to generate training labels.
-         chosen_num_or_ratio: How many samples will be chosen as positive (negative)
-             samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *
-             latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,
-             0.5 * latent_codes_num)` will be used. (default: 0.02)
-         split_ratio: Ratio to split training and validation sets. (default: 0.7)
-         invalid_value: This field is used to filter out data. (default: None)
-         logger: Logger for recording log messages. If set as `None`, a default
-             logger, which prints messages from all levels to screen, will be created.
-             (default: None)
-
-     Returns:
-         A decision boundary with type `numpy.ndarray`.
-
-     Raises:
-         ValueError: If the input `latent_codes` or `scores` are with invalid format.
-     """
-     # if not logger:
-     #     logger = setup_logger(work_dir='', logger_name=logger_name)
-
-     if (not isinstance(latent_codes, np.ndarray) or
-             not len(latent_codes.shape) == 2):
-         raise ValueError(f'Input `latent_codes` should be with type '
-                          f'`numpy.ndarray`, and shape [num_samples, '
-                          f'latent_space_dim]!')
-     num_samples = latent_codes.shape[0]
-     latent_space_dim = latent_codes.shape[1]
-     if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or
-             not scores.shape[0] == num_samples or not scores.shape[1] == 1):
-         raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '
-                          f'shape [num_samples, 1], where `num_samples` should be '
-                          f'exactly same as that of input `latent_codes`!')
-     if chosen_num_or_ratio <= 0:
-         raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '
-                          f'but {chosen_num_or_ratio} received!')
-
-     # logger.info(f'Filtering training data.')
-     print('Filtering training data.')
-     if invalid_value is not None:
-         latent_codes = latent_codes[scores[:, 0] != invalid_value]
-         scores = scores[scores[:, 0] != invalid_value]
-
-     # logger.info(f'Sorting scores to get positive and negative samples.')
-     print('Sorting scores to get positive and negative samples.')
-
-     sorted_idx = np.argsort(scores, axis=0)[::-1, 0]
-     latent_codes = latent_codes[sorted_idx]
-     scores = scores[sorted_idx]
-     num_samples = latent_codes.shape[0]
-     if 0 < chosen_num_or_ratio <= 1:
-         chosen_num = int(num_samples * chosen_num_or_ratio)
-     else:
-         chosen_num = int(chosen_num_or_ratio)
-     chosen_num = min(chosen_num, num_samples // 2)
-
-     # logger.info(f'Splitting training and validation sets:')
-     print('Splitting training and validation sets:')
-
-     train_num = int(chosen_num * split_ratio)
-     val_num = chosen_num - train_num
-     # Positive samples.
-     positive_idx = np.arange(chosen_num)
-     np.random.shuffle(positive_idx)
-     positive_train = latent_codes[:chosen_num][positive_idx[:train_num]]
-     positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]
-     # Negative samples.
-     negative_idx = np.arange(chosen_num)
-     np.random.shuffle(negative_idx)
-     negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]
-     negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]
-     # Training set.
-     train_data = np.concatenate([positive_train, negative_train], axis=0)
-     train_label = np.concatenate([np.ones(train_num, dtype=np.int),
-                                   np.zeros(train_num, dtype=np.int)], axis=0)
-     # logger.info(f'  Training: {train_num} positive, {train_num} negative.')
-     print(f'  Training: {train_num} positive, {train_num} negative.')
-     # Validation set.
-     val_data = np.concatenate([positive_val, negative_val], axis=0)
-     val_label = np.concatenate([np.ones(val_num, dtype=np.int),
-                                 np.zeros(val_num, dtype=np.int)], axis=0)
-     # logger.info(f'  Validation: {val_num} positive, {val_num} negative.')
-     print(f'  Validation: {val_num} positive, {val_num} negative.')
-
-     # Remaining set.
-     remaining_num = num_samples - chosen_num * 2
-     remaining_data = latent_codes[chosen_num:-chosen_num]
-     remaining_scores = scores[chosen_num:-chosen_num]
-     decision_value = (scores[0] + scores[-1]) / 2
-     remaining_label = np.ones(remaining_num, dtype=np.int)
-     remaining_label[remaining_scores.ravel() < decision_value] = 0
-     remaining_positive_num = np.sum(remaining_label == 1)
-     remaining_negative_num = np.sum(remaining_label == 0)
-     # logger.info(f'  Remaining: {remaining_positive_num} positive, '
-     #             f'{remaining_negative_num} negative.')
-     print(f'  Remaining: {remaining_positive_num} positive, '
-           f'{remaining_negative_num} negative.')
-     # logger.info(f'Training boundary.')
-     print(f'Training boundary.')
-
-     clf = svm.SVC(kernel='linear')
-     classifier = clf.fit(train_data, train_label)
-     # logger.info(f'Finish training.')
-     print(f'Finish training.')
-
-
-     if val_num:
-         val_prediction = classifier.predict(val_data)
-         correct_num = np.sum(val_label == val_prediction)
-         # logger.info(f'Accuracy for validation set: '
-         #             f'{correct_num} / {val_num * 2} = '
-         #             f'{correct_num / (val_num * 2):.6f}')
-         print(f'Accuracy for validation set: '
-               f'{correct_num} / {val_num * 2} = '
-               f'{correct_num / (val_num * 2):.6f}')
-         vacc = correct_num / len(val_label)
-     '''
-     if remaining_num:
-         remaining_prediction = classifier.predict(remaining_data)
-         correct_num = np.sum(remaining_label == remaining_prediction)
-         logger.info(f'Accuracy for remaining set: '
-                     f'{correct_num} / {remaining_num} = '
-                     f'{correct_num / remaining_num:.6f}')
-     '''
-     a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)
-     return a / np.linalg.norm(a), vacc
-
-
-
-
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion_flax.py DELETED
@@ -1,654 +0,0 @@
- import argparse
- import logging
- import math
- import os
- import random
- from pathlib import Path
-
- import jax
- import jax.numpy as jnp
- import numpy as np
- import optax
- import PIL
- import torch
- import torch.utils.checkpoint
- import transformers
- from flax import jax_utils
- from flax.training import train_state
- from flax.training.common_utils import shard
- from huggingface_hub import create_repo, upload_folder
-
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
- from packaging import version
- from PIL import Image
- from torch.utils.data import Dataset
- from torchvision import transforms
- from tqdm.auto import tqdm
- from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
-
- from diffusers import (
-     FlaxAutoencoderKL,
-     FlaxDDPMScheduler,
-     FlaxPNDMScheduler,
-     FlaxStableDiffusionPipeline,
-     FlaxUNet2DConditionModel,
- )
- from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
- from diffusers.utils import check_min_version
-
-
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
-     PIL_INTERPOLATION = {
-         "linear": PIL.Image.Resampling.BILINEAR,
-         "bilinear": PIL.Image.Resampling.BILINEAR,
-         "bicubic": PIL.Image.Resampling.BICUBIC,
-         "lanczos": PIL.Image.Resampling.LANCZOS,
-         "nearest": PIL.Image.Resampling.NEAREST,
-     }
- else:
-     PIL_INTERPOLATION = {
-         "linear": PIL.Image.LINEAR,
-         "bilinear": PIL.Image.BILINEAR,
-         "bicubic": PIL.Image.BICUBIC,
-         "lanczos": PIL.Image.LANCZOS,
-         "nearest": PIL.Image.NEAREST,
-     }
- # ------------------------------------------------------------------------------
-
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
- check_min_version("0.14.0.dev0")
-
- logger = logging.getLogger(__name__)
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(description="Simple example of a training script.")
-     parser.add_argument(
-         "--pretrained_model_name_or_path",
-         type=str,
-         default=None,
-         required=True,
-         help="Path to pretrained model or model identifier from huggingface.co/models.",
-     )
-     parser.add_argument(
-         "--tokenizer_name",
-         type=str,
-         default=None,
-         help="Pretrained tokenizer name or path if not the same as model_name",
-     )
-     parser.add_argument(
-         "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
-     )
-     parser.add_argument(
-         "--placeholder_token",
-         type=str,
-         default=None,
-         required=True,
-         help="A token to use as a placeholder for the concept.",
-     )
-     parser.add_argument(
-         "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
-     )
-     parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
-     parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
-     parser.add_argument(
-         "--output_dir",
-         type=str,
-         default="text-inversion-model",
-         help="The output directory where the model predictions and checkpoints will be written.",
-     )
-     parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
-     parser.add_argument(
-         "--resolution",
-         type=int,
-         default=512,
-         help=(
-             "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-             " resolution"
-         ),
-     )
-     parser.add_argument(
-         "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
-     )
-     parser.add_argument(
-         "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
-     )
-     parser.add_argument("--num_train_epochs", type=int, default=100)
-     parser.add_argument(
-         "--max_train_steps",
-         type=int,
-         default=5000,
-         help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-     )
-     parser.add_argument(
-         "--learning_rate",
-         type=float,
-         default=1e-4,
-         help="Initial learning rate (after the potential warmup period) to use.",
-     )
-     parser.add_argument(
-         "--scale_lr",
-         action="store_true",
-         default=True,
-         help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-     )
-     parser.add_argument(
-         "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
-     )
-     parser.add_argument(
-         "--lr_scheduler",
-         type=str,
-         default="constant",
-         help=(
-             'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-             ' "constant", "constant_with_warmup"]'
-         ),
-     )
-     parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-     parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-     parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-     parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-     parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-     parser.add_argument(
-         "--use_auth_token",
-         action="store_true",
-         help=(
-             "Will use the token generated when running `huggingface-cli login` (necessary to use this script with"
-             " private models)."
-         ),
-     )
-     parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-     parser.add_argument(
-         "--hub_model_id",
-         type=str,
-         default=None,
-         help="The name of the repository to keep in sync with the local `output_dir`.",
-     )
-     parser.add_argument(
-         "--logging_dir",
-         type=str,
-         default="logs",
-         help=(
-             "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-             " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-         ),
-     )
-     parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
-     args = parser.parse_args()
-     env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-     if env_local_rank != -1 and env_local_rank != args.local_rank:
-         args.local_rank = env_local_rank
-
-     if args.train_data_dir is None:
-         raise ValueError("You must specify a train data directory.")
-
-     return args
-
-
- imagenet_templates_small = [
-     "a photo of a {}",
-     "a rendering of a {}",
-     "a cropped photo of the {}",
-     "the photo of a {}",
-     "a photo of a clean {}",
-     "a photo of a dirty {}",
-     "a dark photo of the {}",
-     "a photo of my {}",
-     "a photo of the cool {}",
-     "a close-up photo of a {}",
-     "a bright photo of the {}",
-     "a cropped photo of a {}",
-     "a photo of the {}",
-     "a good photo of the {}",
-     "a photo of one {}",
-     "a close-up photo of the {}",
-     "a rendition of the {}",
-     "a photo of the clean {}",
-     "a rendition of a {}",
-     "a photo of a nice {}",
-     "a good photo of a {}",
-     "a photo of the nice {}",
-     "a photo of the small {}",
-     "a photo of the weird {}",
-     "a photo of the large {}",
-     "a photo of a cool {}",
-     "a photo of a small {}",
- ]
-
- imagenet_style_templates_small = [
-     "a painting in the style of {}",
-     "a rendering in the style of {}",
-     "a cropped painting in the style of {}",
-     "the painting in the style of {}",
-     "a clean painting in the style of {}",
-     "a dirty painting in the style of {}",
-     "a dark painting in the style of {}",
-     "a picture in the style of {}",
-     "a cool painting in the style of {}",
-     "a close-up painting in the style of {}",
-     "a bright painting in the style of {}",
-     "a cropped painting in the style of {}",
-     "a good painting in the style of {}",
-     "a close-up painting in the style of {}",
-     "a rendition in the style of {}",
-     "a nice painting in the style of {}",
-     "a small painting in the style of {}",
-     "a weird painting in the style of {}",
-     "a large painting in the style of {}",
- ]
-
-
- class TextualInversionDataset(Dataset):
-     def __init__(
-         self,
-         data_root,
-         tokenizer,
-         learnable_property="object",  # [object, style]
-         size=512,
-         repeats=100,
-         interpolation="bicubic",
-         flip_p=0.5,
-         set="train",
-         placeholder_token="*",
-         center_crop=False,
-     ):
-         self.data_root = data_root
-         self.tokenizer = tokenizer
-         self.learnable_property = learnable_property
-         self.size = size
-         self.placeholder_token = placeholder_token
-         self.center_crop = center_crop
-         self.flip_p = flip_p
-
-         self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
-
-         self.num_images = len(self.image_paths)
-         self._length = self.num_images
-
-         if set == "train":
-             self._length = self.num_images * repeats
-
-         self.interpolation = {
-             "linear": PIL_INTERPOLATION["linear"],
-             "bilinear": PIL_INTERPOLATION["bilinear"],
-             "bicubic": PIL_INTERPOLATION["bicubic"],
-             "lanczos": PIL_INTERPOLATION["lanczos"],
-         }[interpolation]
-
-         self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
-         self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
-
-     def __len__(self):
-         return self._length
-
-     def __getitem__(self, i):
-         example = {}
-         image = Image.open(self.image_paths[i % self.num_images])
-
-         if not image.mode == "RGB":
-             image = image.convert("RGB")
-
-         placeholder_string = self.placeholder_token
-         text = random.choice(self.templates).format(placeholder_string)
-
-         example["input_ids"] = self.tokenizer(
-             text,
-             padding="max_length",
-             truncation=True,
-             max_length=self.tokenizer.model_max_length,
-             return_tensors="pt",
-         ).input_ids[0]
-
-         # default to score-sde preprocessing
-         img = np.array(image).astype(np.uint8)
-
-         if self.center_crop:
-             crop = min(img.shape[0], img.shape[1])
-             (
-                 h,
-                 w,
-             ) = (
-                 img.shape[0],
-                 img.shape[1],
-             )
-             img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
-
-         image = Image.fromarray(img)
-         image = image.resize((self.size, self.size), resample=self.interpolation)
-
-         image = self.flip_transform(image)
-         image = np.array(image).astype(np.uint8)
-         image = (image / 127.5 - 1.0).astype(np.float32)
-
-         example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
-         return example
-
-
- def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
-     if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
-         return
-     model.config.vocab_size = new_num_tokens
-
-     params = model.params
-     old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
-     old_num_tokens, emb_dim = old_embeddings.shape
-
-     initializer = jax.nn.initializers.normal()
-
-     new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
-     new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
-     new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
-     params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings
-
-     model.params = params
-     return model
-
-
- def get_params_to_save(params):
-     return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
-
-
- def main():
-     args = parse_args()
-
-     if args.seed is not None:
-         set_seed(args.seed)
-
-     if jax.process_index() == 0:
-         if args.output_dir is not None:
-             os.makedirs(args.output_dir, exist_ok=True)
-
-         if args.push_to_hub:
-             repo_id = create_repo(
-                 repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
-             ).repo_id
-
-     # Make one log on every process with the configuration for debugging.
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         level=logging.INFO,
-     )
-     # Setup logging, we only want one process per machine to log things on the screen.
-     logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
-     if jax.process_index() == 0:
-         transformers.utils.logging.set_verbosity_info()
-     else:
-         transformers.utils.logging.set_verbosity_error()
-
-     # Load the tokenizer and add the placeholder token as an additional special token
-     if args.tokenizer_name:
-         tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
-     elif args.pretrained_model_name_or_path:
-         tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
-     # Add the placeholder token in tokenizer
-     num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
-     if num_added_tokens == 0:
-         raise ValueError(
-             f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
-             " `placeholder_token` that is not already in the tokenizer."
-         )
-
-     # Convert the initializer_token, placeholder_token to ids
-     token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
-     # Check if initializer_token is a single token or a sequence of tokens
-     if len(token_ids) > 1:
-         raise ValueError("The initializer token must be a single token.")
-
-     initializer_token_id = token_ids[0]
-     placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
-
-     # Load models and create wrapper for stable diffusion
-     text_encoder = FlaxCLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-     vae, vae_params = FlaxAutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
-     unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-
-     # Create sampling rng
-     rng = jax.random.PRNGKey(args.seed)
-     rng, _ = jax.random.split(rng)
-     # Resize the token embeddings as we are adding new special tokens to the tokenizer
-     text_encoder = resize_token_embeddings(
-         text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng
-     )
-     original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"]
-
-     train_dataset = TextualInversionDataset(
-         data_root=args.train_data_dir,
-         tokenizer=tokenizer,
-         size=args.resolution,
-         placeholder_token=args.placeholder_token,
-         repeats=args.repeats,
-         learnable_property=args.learnable_property,
-         center_crop=args.center_crop,
-         set="train",
-     )
-
-     def collate_fn(examples):
-         pixel_values = torch.stack([example["pixel_values"] for example in examples])
-         input_ids = torch.stack([example["input_ids"] for example in examples])
-
-         batch = {"pixel_values": pixel_values, "input_ids": input_ids}
-         batch = {k: v.numpy() for k, v in batch.items()}
-
-         return batch
-
-     total_train_batch_size = args.train_batch_size * jax.local_device_count()
-     train_dataloader = torch.utils.data.DataLoader(
-         train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
-     )
-
-     # Optimization
-     if args.scale_lr:
-         args.learning_rate = args.learning_rate * total_train_batch_size
-
-     constant_scheduler = optax.constant_schedule(args.learning_rate)
-
-     optimizer = optax.adamw(
-         learning_rate=constant_scheduler,
-         b1=args.adam_beta1,
-         b2=args.adam_beta2,
-         eps=args.adam_epsilon,
-         weight_decay=args.adam_weight_decay,
-     )
-
-     def create_mask(params, label_fn):
-         def _map(params, mask, label_fn):
-             for k in params:
-                 if label_fn(k):
-                     mask[k] = "token_embedding"
-                 else:
-                     if isinstance(params[k], dict):
-                         mask[k] = {}
-                         _map(params[k], mask[k], label_fn)
-                     else:
-                         mask[k] = "zero"
-
-         mask = {}
-         _map(params, mask, label_fn)
-         return mask
-
-     def zero_grads():
-         # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491
-         def init_fn(_):
-             return ()
-
-         def update_fn(updates, state, params=None):
-             return jax.tree_util.tree_map(jnp.zeros_like, updates), ()
-
-         return optax.GradientTransformation(init_fn, update_fn)
-
-     # Zero out gradients of layers other than the token embedding layer
-     tx = optax.multi_transform(
-         {"token_embedding": optimizer, "zero": zero_grads()},
-         create_mask(text_encoder.params, lambda s: s == "token_embedding"),
-     )
-
-     state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx)
-
-     noise_scheduler = FlaxDDPMScheduler(
-         beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
-     )
-     noise_scheduler_state = noise_scheduler.create_state()
-
-     # Initialize our training
-     train_rngs = jax.random.split(rng, jax.local_device_count())
-
-     # Define gradient train step fn
-     def train_step(state, vae_params, unet_params, batch, train_rng):
-         dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
-
-         def compute_loss(params):
-             vae_outputs = vae.apply(
-                 {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
-             )
-             latents = vae_outputs.latent_dist.sample(sample_rng)
-             # (NHWC) -> (NCHW)
-             latents = jnp.transpose(latents, (0, 3, 1, 2))
-             latents = latents * vae.config.scaling_factor
-
-             noise_rng, timestep_rng = jax.random.split(sample_rng)
-             noise = jax.random.normal(noise_rng, latents.shape)
-             bsz = latents.shape[0]
-             timesteps = jax.random.randint(
-                 timestep_rng,
-                 (bsz,),
-                 0,
-                 noise_scheduler.config.num_train_timesteps,
-             )
-             noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
-             encoder_hidden_states = state.apply_fn(
-                 batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True
-             )[0]
-             # Predict the noise residual and compute loss
-             model_pred = unet.apply(
-                 {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False
-             ).sample
-
-             # Get the target for loss depending on the prediction type
-             if noise_scheduler.config.prediction_type == "epsilon":
-                 target = noise
-             elif noise_scheduler.config.prediction_type == "v_prediction":
-                 target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
-             else:
-                 raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-             loss = (target - model_pred) ** 2
-             loss = loss.mean()
-
-             return loss
-
-         grad_fn = jax.value_and_grad(compute_loss)
-         loss, grad = grad_fn(state.params)
-         grad = jax.lax.pmean(grad, "batch")
-         new_state = state.apply_gradients(grads=grad)
-
-         # Keep the token embeddings fixed except the newly added embeddings for the concept,
-         # as we only want to optimize the concept embeddings
-         token_embeds = original_token_embeds.at[placeholder_token_id].set(
-             new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id]
-         )
-         new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds
-
-         metrics = {"loss": loss}
-         metrics = jax.lax.pmean(metrics, axis_name="batch")
-         return new_state, metrics, new_train_rng
-
-     # Create parallel version of the train and eval step
-     p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
-
-     # Replicate the train state on each device
-     state = jax_utils.replicate(state)
-     vae_params = jax_utils.replicate(vae_params)
-     unet_params = jax_utils.replicate(unet_params)
-
-     # Train!
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader))
-
-     # Scheduler and math around the number of training steps.
-     if args.max_train_steps is None:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-
-     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-     logger.info("***** Running training *****")
-     logger.info(f"  Num examples = {len(train_dataset)}")
-     logger.info(f"  Num Epochs = {args.num_train_epochs}")
-     logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-     logger.info(f"  Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
-     logger.info(f"  Total optimization steps = {args.max_train_steps}")
-
-     global_step = 0
-
-     epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0)
-     for epoch in epochs:
-         # ======================== Training ================================
-
-         train_metrics = []
-
-         steps_per_epoch = len(train_dataset) // total_train_batch_size
-         train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
-         # train
-         for batch in train_dataloader:
-             batch = shard(batch)
-             state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
-             train_metrics.append(train_metric)
-
-             train_step_progress_bar.update(1)
-             global_step += 1
-
-             if global_step >= args.max_train_steps:
-                 break
-
-         train_metric = jax_utils.unreplicate(train_metric)
-
-         train_step_progress_bar.close()
-         epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
-
-     # Create the pipeline using the trained modules and save it.
-     if jax.process_index() == 0:
-         scheduler = FlaxPNDMScheduler(
-             beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
-         )
-         safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
-             "CompVis/stable-diffusion-safety-checker", from_pt=True
-         )
-         pipeline = FlaxStableDiffusionPipeline(
-             text_encoder=text_encoder,
-             vae=vae,
-             unet=unet,
-             tokenizer=tokenizer,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
-         )
-
-         pipeline.save_pretrained(
-             args.output_dir,
-             params={
-                 "text_encoder": get_params_to_save(state.params),
-                 "vae": get_params_to_save(vae_params),
-                 "unet": get_params_to_save(unet_params),
-                 "safety_checker": safety_checker.params,
-             },
-         )
-
-         # Also save the newly trained embeddings
-         learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
-             placeholder_token_id
-         ]
-         learned_embeds_dict = {args.placeholder_token: learned_embeds}
-         jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)
-
-         if args.push_to_hub:
-             upload_folder(
-                 repo_id=repo_id,
-                 folder_path=args.output_dir,
-                 commit_message="End of training",
-                 ignore_patterns=["step_*", "epoch_*"],
-             )
-
-
- if __name__ == "__main__":
-     main()
spaces/Andy1621/uniformer_image_detection/configs/ssd/ssd300_coco.py DELETED
@@ -1,62 +0,0 @@
- _base_ = [
-     '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
-     '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
- ]
- # dataset settings
- dataset_type = 'CocoDataset'
- data_root = 'data/coco/'
- img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile', to_float32=True),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='PhotoMetricDistortion',
-         brightness_delta=32,
-         contrast_range=(0.5, 1.5),
-         saturation_range=(0.5, 1.5),
-         hue_delta=18),
-     dict(
-         type='Expand',
-         mean=img_norm_cfg['mean'],
-         to_rgb=img_norm_cfg['to_rgb'],
-         ratio_range=(1, 4)),
-     dict(
-         type='MinIoURandomCrop',
-         min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
-         min_crop_size=0.3),
-     dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(300, 300),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=False),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     samples_per_gpu=8,
-     workers_per_gpu=3,
-     train=dict(
-         _delete_=True,
-         type='RepeatDataset',
-         times=5,
-         dataset=dict(
-             type=dataset_type,
-             ann_file=data_root + 'annotations/instances_train2017.json',
-             img_prefix=data_root + 'train2017/',
-             pipeline=train_pipeline)),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- # optimizer
- optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
- optimizer_config = dict(_delete_=True)
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py DELETED
@@ -1,157 +0,0 @@
- import numpy as np
- import torch
-
- from ..builder import BBOX_SAMPLERS
- from .random_sampler import RandomSampler
-
-
- @BBOX_SAMPLERS.register_module()
- class IoUBalancedNegSampler(RandomSampler):
-     """IoU Balanced Sampling.
-
-     arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
-
-     Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
-     are sampled from proposals whose IoU are lower than `floor_thr` randomly.
-     The others are sampled from proposals whose IoU are higher than
-     `floor_thr`. These proposals are sampled from some bins evenly, which are
-     split by `num_bins` via IoU evenly.
-
-     Args:
-         num (int): number of proposals.
-         pos_fraction (float): fraction of positive proposals.
-         floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
-             set to -1 if all using IoU balanced sampling.
-         floor_fraction (float): sampling fraction of proposals under floor_thr.
-         num_bins (int): number of bins in IoU balanced sampling.
-     """
-
-     def __init__(self,
-                  num,
-                  pos_fraction,
-                  floor_thr=-1,
-                  floor_fraction=0,
-                  num_bins=3,
-                  **kwargs):
-         super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
-                                                     **kwargs)
-         assert floor_thr >= 0 or floor_thr == -1
-         assert 0 <= floor_fraction <= 1
-         assert num_bins >= 1
-
-         self.floor_thr = floor_thr
-         self.floor_fraction = floor_fraction
-         self.num_bins = num_bins
-
-     def sample_via_interval(self, max_overlaps, full_set, num_expected):
-         """Sample according to the iou interval.
-
-         Args:
-             max_overlaps (torch.Tensor): IoU between bounding boxes and ground
-                 truth boxes.
-             full_set (set(int)): A full set of indices of boxes.
-             num_expected (int): Number of expected samples.
-
-         Returns:
-             np.ndarray: Indices of samples
-         """
-         max_iou = max_overlaps.max()
-         iou_interval = (max_iou - self.floor_thr) / self.num_bins
-         per_num_expected = int(num_expected / self.num_bins)
-
-         sampled_inds = []
-         for i in range(self.num_bins):
-             start_iou = self.floor_thr + i * iou_interval
-             end_iou = self.floor_thr + (i + 1) * iou_interval
-             tmp_set = set(
-                 np.where(
-                     np.logical_and(max_overlaps >= start_iou,
-                                    max_overlaps < end_iou))[0])
-             tmp_inds = list(tmp_set & full_set)
-             if len(tmp_inds) > per_num_expected:
-                 tmp_sampled_set = self.random_choice(tmp_inds,
-                                                      per_num_expected)
-             else:
-                 tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
-             sampled_inds.append(tmp_sampled_set)
-
-         sampled_inds = np.concatenate(sampled_inds)
-         if len(sampled_inds) < num_expected:
-             num_extra = num_expected - len(sampled_inds)
-             extra_inds = np.array(list(full_set - set(sampled_inds)))
-             if len(extra_inds) > num_extra:
-                 extra_inds = self.random_choice(extra_inds, num_extra)
-             sampled_inds = np.concatenate([sampled_inds, extra_inds])
-
-         return sampled_inds
-
-     def _sample_neg(self, assign_result, num_expected, **kwargs):
-         """Sample negative boxes.
-
-         Args:
-             assign_result (:obj:`AssignResult`): The assigned results of boxes.
-             num_expected (int): The number of expected negative samples
-
-         Returns:
-             Tensor or ndarray: sampled indices.
-         """
-         neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
-         if neg_inds.numel() != 0:
-             neg_inds = neg_inds.squeeze(1)
-         if len(neg_inds) <= num_expected:
-             return neg_inds
-         else:
-             max_overlaps = assign_result.max_overlaps.cpu().numpy()
-             # balance sampling for negative samples
-             neg_set = set(neg_inds.cpu().numpy())
-
-             if self.floor_thr > 0:
-                 floor_set = set(
-                     np.where(
-                         np.logical_and(max_overlaps >= 0,
-                                        max_overlaps < self.floor_thr))[0])
-                 iou_sampling_set = set(
-                     np.where(max_overlaps >= self.floor_thr)[0])
-             elif self.floor_thr == 0:
-                 floor_set = set(np.where(max_overlaps == 0)[0])
-                 iou_sampling_set = set(
-                     np.where(max_overlaps > self.floor_thr)[0])
-             else:
-                 floor_set = set()
-                 iou_sampling_set = set(
-                     np.where(max_overlaps > self.floor_thr)[0])
-                 # for sampling interval calculation
-                 self.floor_thr = 0
-
-             floor_neg_inds = list(floor_set & neg_set)
-             iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
-             num_expected_iou_sampling = int(num_expected *
-                                             (1 - self.floor_fraction))
-             if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
-                 if self.num_bins >= 2:
-                     iou_sampled_inds = self.sample_via_interval(
-                         max_overlaps, set(iou_sampling_neg_inds),
-                         num_expected_iou_sampling)
-                 else:
-                     iou_sampled_inds = self.random_choice(
-                         iou_sampling_neg_inds, num_expected_iou_sampling)
-             else:
-                 iou_sampled_inds = np.array(
-                     iou_sampling_neg_inds, dtype=np.int)
-             num_expected_floor = num_expected - len(iou_sampled_inds)
-             if len(floor_neg_inds) > num_expected_floor:
-                 sampled_floor_inds = self.random_choice(
-                     floor_neg_inds, num_expected_floor)
-             else:
-                 sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
-             sampled_inds = np.concatenate(
-                 (sampled_floor_inds, iou_sampled_inds))
-             if len(sampled_inds) < num_expected:
-                 num_extra = num_expected - len(sampled_inds)
-                 extra_inds = np.array(list(neg_set - set(sampled_inds)))
-                 if len(extra_inds) > num_extra:
-                     extra_inds = self.random_choice(extra_inds, num_extra)
-                 sampled_inds = np.concatenate((sampled_inds, extra_inds))
-             sampled_inds = torch.from_numpy(sampled_inds).long().to(
-                 assign_result.gt_inds.device)
-             return sampled_inds
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
- _base_ = [
-     '../_base_/models/danet_r50-d8.py',
-     '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
-     '../_base_/schedules/schedule_40k.py'
- ]
- model = dict(
-     decode_head=dict(align_corners=True),
-     auxiliary_head=dict(align_corners=True),
-     test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py DELETED
@@ -1,9 +0,0 @@
- _base_ = './ocrnet_hr18_512x512_40k_voc12aug.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w18_small',
-     backbone=dict(
-         extra=dict(
-             stage1=dict(num_blocks=(2, )),
-             stage2=dict(num_blocks=(2, 2)),
-             stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-             stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/models_settings.py DELETED
@@ -1,219 +0,0 @@
-import json
-import re
-from pathlib import Path
-
-import yaml
-
-from modules import loaders, metadata_gguf, shared, ui
-
-
-def get_fallback_settings():
-    return {
-        'wbits': 'None',
-        'groupsize': 'None',
-        'desc_act': False,
-        'model_type': 'None',
-        'max_seq_len': 2048,
-        'n_ctx': 2048,
-        'rope_freq_base': 0,
-        'compress_pos_emb': 1,
-        'truncation_length': shared.settings['truncation_length'],
-        'skip_special_tokens': shared.settings['skip_special_tokens'],
-        'custom_stopping_strings': shared.settings['custom_stopping_strings'],
-    }
-
-
-def get_model_metadata(model):
-    model_settings = {}
-
-    # Get settings from models/config.yaml and models/config-user.yaml
-    settings = shared.model_config
-    for pat in settings:
-        if re.match(pat.lower(), model.lower()):
-            for k in settings[pat]:
-                model_settings[k] = settings[pat][k]
-
-    if 'loader' not in model_settings:
-        loader = infer_loader(model, model_settings)
-        if 'wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0:
-            loader = 'AutoGPTQ'
-
-        model_settings['loader'] = loader
-
-    # Read GGUF metadata
-    if model_settings['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
-        path = Path(f'{shared.args.model_dir}/{model}')
-        if path.is_file():
-            model_file = path
-        else:
-            model_file = list(path.glob('*.gguf'))[0]
-
-        metadata = metadata_gguf.load_metadata(model_file)
-        if 'llama.context_length' in metadata:
-            model_settings['n_ctx'] = metadata['llama.context_length']
-        if 'llama.rope.scale_linear' in metadata:
-            model_settings['compress_pos_emb'] = metadata['llama.rope.scale_linear']
-        if 'llama.rope.freq_base' in metadata:
-            model_settings['rope_freq_base'] = metadata['llama.rope.freq_base']
-
-    else:
-        # Read transformers metadata
-        path = Path(f'{shared.args.model_dir}/{model}/config.json')
-        if path.exists():
-            metadata = json.loads(open(path, 'r').read())
-            if 'max_position_embeddings' in metadata:
-                model_settings['truncation_length'] = metadata['max_position_embeddings']
-                model_settings['max_seq_len'] = metadata['max_position_embeddings']
-
-            if 'rope_theta' in metadata:
-                model_settings['rope_freq_base'] = metadata['rope_theta']
-
-            if 'rope_scaling' in metadata and type(metadata['rope_scaling']) is dict and all(key in metadata['rope_scaling'] for key in ('type', 'factor')):
-                if metadata['rope_scaling']['type'] == 'linear':
-                    model_settings['compress_pos_emb'] = metadata['rope_scaling']['factor']
-
-            if 'quantization_config' in metadata:
-                if 'bits' in metadata['quantization_config']:
-                    model_settings['wbits'] = metadata['quantization_config']['bits']
-                if 'group_size' in metadata['quantization_config']:
-                    model_settings['groupsize'] = metadata['quantization_config']['group_size']
-                if 'desc_act' in metadata['quantization_config']:
-                    model_settings['desc_act'] = metadata['quantization_config']['desc_act']
-
-    # Read AutoGPTQ metadata
-    path = Path(f'{shared.args.model_dir}/{model}/quantize_config.json')
-    if path.exists():
-        metadata = json.loads(open(path, 'r').read())
-        if 'bits' in metadata:
-            model_settings['wbits'] = metadata['bits']
-        if 'group_size' in metadata:
-            model_settings['groupsize'] = metadata['group_size']
-        if 'desc_act' in metadata:
-            model_settings['desc_act'] = metadata['desc_act']
-
-    # Apply user settings from models/config-user.yaml
-    settings = shared.user_config
-    for pat in settings:
-        if re.match(pat.lower(), model.lower()):
-            for k in settings[pat]:
-                model_settings[k] = settings[pat][k]
-
-    return model_settings
-
-
-def infer_loader(model_name, model_settings):
-    path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
-    if not path_to_model.exists():
-        loader = None
-    elif (path_to_model / 'quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
-        loader = 'AutoGPTQ'
-    elif (path_to_model / 'quant_config.json').exists() or re.match(r'.*-awq', model_name.lower()):
-        loader = 'AutoAWQ'
-    elif len(list(path_to_model.glob('*.gguf'))) > 0:
-        loader = 'llama.cpp'
-    elif re.match(r'.*\.gguf', model_name.lower()):
-        loader = 'llama.cpp'
-    elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
-        loader = 'RWKV'
-    elif re.match(r'.*exl2', model_name.lower()):
-        loader = 'ExLlamav2_HF'
-    else:
-        loader = 'Transformers'
-
-    return loader
-
-
-# UI: update the command-line arguments based on the interface values
-def update_model_parameters(state, initial=False):
-    elements = ui.list_model_elements()  # the names of the parameters
-    gpu_memories = []
-
-    for i, element in enumerate(elements):
-        if element not in state:
-            continue
-
-        value = state[element]
-        if element.startswith('gpu_memory'):
-            gpu_memories.append(value)
-            continue
-
-        if initial and element in shared.provided_arguments:
-            continue
-
-        # Setting null defaults
-        if element in ['wbits', 'groupsize', 'model_type'] and value == 'None':
-            value = vars(shared.args_defaults)[element]
-        elif element in ['cpu_memory'] and value == 0:
-            value = vars(shared.args_defaults)[element]
-
-        # Making some simple conversions
-        if element in ['wbits', 'groupsize', 'pre_layer']:
-            value = int(value)
-        elif element == 'cpu_memory' and value is not None:
-            value = f"{value}MiB"
-
-        if element in ['pre_layer']:
-            value = [value] if value > 0 else None
-
-        setattr(shared.args, element, value)
-
-    found_positive = False
-    for i in gpu_memories:
-        if i > 0:
-            found_positive = True
-            break
-
-    if not (initial and vars(shared.args)['gpu_memory'] != vars(shared.args_defaults)['gpu_memory']):
-        if found_positive:
-            shared.args.gpu_memory = [f"{i}MiB" for i in gpu_memories]
-        else:
-            shared.args.gpu_memory = None
-
-
-# UI: update the state variable with the model settings
-def apply_model_settings_to_state(model, state):
-    model_settings = get_model_metadata(model)
-    if 'loader' in model_settings:
-        loader = model_settings.pop('loader')
-
-        # If the user is using an alternative loader for the same model type, let them keep using it
-        if not (loader == 'AutoGPTQ' and state['loader'] in ['GPTQ-for-LLaMa', 'ExLlama', 'ExLlama_HF', 'ExLlamav2', 'ExLlamav2_HF']) and not (loader == 'llama.cpp' and state['loader'] in ['llamacpp_HF', 'ctransformers']):
-            state['loader'] = loader
-
-    for k in model_settings:
-        if k in state:
-            if k in ['wbits', 'groupsize']:
-                state[k] = str(model_settings[k])
-            else:
-                state[k] = model_settings[k]
-
-    return state
-
-
-# Save the settings for this model to models/config-user.yaml
-def save_model_settings(model, state):
-    if model == 'None':
-        yield ("Not saving the settings because no model is loaded.")
-        return
-
-    with Path(f'{shared.args.model_dir}/config-user.yaml') as p:
-        if p.exists():
-            user_config = yaml.safe_load(open(p, 'r').read())
-        else:
-            user_config = {}
-
-        model_regex = model + '$'  # For exact matches
-        if model_regex not in user_config:
-            user_config[model_regex] = {}
-
-        for k in ui.list_model_elements():
-            if k == 'loader' or k in loaders.loaders_and_params[state['loader']]:
-                user_config[model_regex][k] = state[k]
-
-        shared.user_config = user_config
-
-        output = yaml.dump(user_config, sort_keys=False)
-        with open(p, 'w') as f:
-            f.write(output)
-
-        yield (f"Settings for {model} saved to {p}")
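The core of get_model_metadata() above is matching the model name against regex keys from config.yaml and config-user.yaml, with later matches overriding earlier ones. A small self-contained sketch of that lookup; the config entries and model name here are made up for illustration:

import re

model_config = {
    '.*': {'wbits': 'None', 'groupsize': 'None'},
    '.*llama': {'truncation_length': 4096},
    '.*llama-65b-gptq-3bit': {'wbits': 3},
}

def collect_settings(model_name, config):
    # Mirrors the loops in get_model_metadata(): every matching pattern
    # contributes its keys, and later patterns win on conflicts.
    settings = {}
    for pattern, values in config.items():
        if re.match(pattern.lower(), model_name.lower()):
            settings.update(values)
    return settings

print(collect_settings('LLaMA-65B-GPTQ-3bit', model_config))
# {'wbits': 3, 'groupsize': 'None', 'truncation_length': 4096}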
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/dpt_depth.py DELETED
@@ -1,109 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .base_model import BaseModel
-from .blocks import (
-    FeatureFusionBlock,
-    FeatureFusionBlock_custom,
-    Interpolate,
-    _make_encoder,
-    forward_vit,
-)
-
-
-def _make_fusion_block(features, use_bn):
-    return FeatureFusionBlock_custom(
-        features,
-        nn.ReLU(False),
-        deconv=False,
-        bn=use_bn,
-        expand=False,
-        align_corners=True,
-    )
-
-
-class DPT(BaseModel):
-    def __init__(
-        self,
-        head,
-        features=256,
-        backbone="vitb_rn50_384",
-        readout="project",
-        channels_last=False,
-        use_bn=False,
-    ):
-
-        super(DPT, self).__init__()
-
-        self.channels_last = channels_last
-
-        hooks = {
-            "vitb_rn50_384": [0, 1, 8, 11],
-            "vitb16_384": [2, 5, 8, 11],
-            "vitl16_384": [5, 11, 17, 23],
-        }
-
-        # Instantiate backbone and reassemble blocks
-        self.pretrained, self.scratch = _make_encoder(
-            backbone,
-            features,
-            False,  # Set to true if you want to train from scratch, uses ImageNet weights
-            groups=1,
-            expand=False,
-            exportable=False,
-            hooks=hooks[backbone],
-            use_readout=readout,
-        )
-
-        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
-
-        self.scratch.output_conv = head
-
-
-    def forward(self, x):
-        if self.channels_last == True:
-            x.contiguous(memory_format=torch.channels_last)
-
-        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
-
-        layer_1_rn = self.scratch.layer1_rn(layer_1)
-        layer_2_rn = self.scratch.layer2_rn(layer_2)
-        layer_3_rn = self.scratch.layer3_rn(layer_3)
-        layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-        path_4 = self.scratch.refinenet4(layer_4_rn)
-        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-        out = self.scratch.output_conv(path_1)
-
-        return out
-
-
-class DPTDepthModel(DPT):
-    def __init__(self, path=None, non_negative=True, **kwargs):
-        features = kwargs["features"] if "features" in kwargs else 256
-
-        head = nn.Sequential(
-            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
-            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
-            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
-            nn.ReLU(True),
-            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-            nn.ReLU(True) if non_negative else nn.Identity(),
-            nn.Identity(),
-        )
-
-        super().__init__(head, **kwargs)
-
-        if path is not None:
-            self.load(path)
-
-    def forward(self, x):
-        return super().forward(x).squeeze(dim=1)
-
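The monocular-depth head deleted above is a plain torch.nn stack, so its shape behavior is easy to check in isolation. A minimal sketch using nn.Upsample in place of the repo's custom Interpolate wrapper (an assumption; the real module wraps F.interpolate in much the same way):

import torch
import torch.nn as nn

features = 256
head = nn.Sequential(
    nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
    nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),  # stand-in for Interpolate
    nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
    nn.ReLU(True),
    nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
    nn.ReLU(True),  # non_negative=True clamps depth predictions at zero
)

x = torch.randn(1, features, 24, 24)  # a fused feature map like path_1
print(head(x).shape)  # torch.Size([1, 1, 48, 48]): one depth channel at 2x resolution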
 
spaces/Anonymous-sub/Rerender/gmflow_module/utils/frame_utils.py DELETED
@@ -1,131 +0,0 @@
-import numpy as np
-from PIL import Image
-from os.path import *
-import re
-import cv2
-
-TAG_CHAR = np.array([202021.25], np.float32)
-
-
-def readFlow(fn):
-    """ Read .flo file in Middlebury format"""
-    # Code adapted from:
-    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
-
-    # WARNING: this will work on little-endian architectures (eg Intel x86) only!
-    # print 'fn = %s'%(fn)
-    with open(fn, 'rb') as f:
-        magic = np.fromfile(f, np.float32, count=1)
-        if 202021.25 != magic:
-            print('Magic number incorrect. Invalid .flo file')
-            return None
-        else:
-            w = np.fromfile(f, np.int32, count=1)
-            h = np.fromfile(f, np.int32, count=1)
-            # print 'Reading %d x %d flo file\n' % (w, h)
-            data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
-            # Reshape data into 3D array (columns, rows, bands)
-            # The reshape here is for visualization, the original code is (w,h,2)
-            return np.resize(data, (int(h), int(w), 2))
-
-
-def readPFM(file):
-    file = open(file, 'rb')
-
-    color = None
-    width = None
-    height = None
-    scale = None
-    endian = None
-
-    header = file.readline().rstrip()
-    if header == b'PF':
-        color = True
-    elif header == b'Pf':
-        color = False
-    else:
-        raise Exception('Not a PFM file.')
-
-    dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
-    if dim_match:
-        width, height = map(int, dim_match.groups())
-    else:
-        raise Exception('Malformed PFM header.')
-
-    scale = float(file.readline().rstrip())
-    if scale < 0:  # little-endian
-        endian = '<'
-        scale = -scale
-    else:
-        endian = '>'  # big-endian
-
-    data = np.fromfile(file, endian + 'f')
-    shape = (height, width, 3) if color else (height, width)
-
-    data = np.reshape(data, shape)
-    data = np.flipud(data)
-    return data
-
-
-def writeFlow(filename, uv, v=None):
-    """ Write optical flow to file.
-
-    If v is None, uv is assumed to contain both u and v channels,
-    stacked in depth.
-    Original code by Deqing Sun, adapted from Daniel Scharstein.
-    """
-    nBands = 2
-
-    if v is None:
-        assert (uv.ndim == 3)
-        assert (uv.shape[2] == 2)
-        u = uv[:, :, 0]
-        v = uv[:, :, 1]
-    else:
-        u = uv
-
-    assert (u.shape == v.shape)
-    height, width = u.shape
-    f = open(filename, 'wb')
-    # write the header
-    f.write(TAG_CHAR)
-    np.array(width).astype(np.int32).tofile(f)
-    np.array(height).astype(np.int32).tofile(f)
-    # arrange into matrix form
-    tmp = np.zeros((height, width * nBands))
-    tmp[:, np.arange(width) * 2] = u
-    tmp[:, np.arange(width) * 2 + 1] = v
-    tmp.astype(np.float32).tofile(f)
-    f.close()
-
-
-def readFlowKITTI(filename):
-    flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
-    flow = flow[:, :, ::-1].astype(np.float32)
-    flow, valid = flow[:, :, :2], flow[:, :, 2]
-    flow = (flow - 2 ** 15) / 64.0
-    return flow, valid
-
-
-def writeFlowKITTI(filename, uv):
-    uv = 64.0 * uv + 2 ** 15
-    valid = np.ones([uv.shape[0], uv.shape[1], 1])
-    uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
-    cv2.imwrite(filename, uv[..., ::-1])
-
-
-def read_gen(file_name, pil=False):
-    ext = splitext(file_name)[-1]
-    if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
-        return Image.open(file_name)
-    elif ext == '.bin' or ext == '.raw':
-        return np.load(file_name)
-    elif ext == '.flo':
-        return readFlow(file_name).astype(np.float32)
-    elif ext == '.pfm':
-        flow = readPFM(file_name).astype(np.float32)
-        if len(flow.shape) == 2:
-            return flow
-        else:
-            return flow[:, :, :-1]
-    return []
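The .flo format written and read above is just a 4-byte magic tag (202021.25), two int32 dimensions, and interleaved u/v float32 values, so it round-trips exactly. Assuming readFlow and writeFlow are importable from this module, a quick check looks like:

import numpy as np

flow = np.random.randn(8, 12, 2).astype(np.float32)  # (height, width, u/v channels)
writeFlow('sample.flo', flow)                         # header, then u/v interleaved per pixel
restored = readFlow('sample.flo')
assert restored.shape == (8, 12, 2)
assert np.allclose(flow, restored)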
 
spaces/ArkanDash/rvc-models-new/lib/infer_pack/models_onnx.py DELETED
@@ -1,819 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from lib.infer_pack import modules
-from lib.infer_pack import attentions
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from lib.infer_pack.commons import init_weights
-import numpy as np
-from lib.infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
-    def __init__(
-        self,
-        out_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        f0=True,
-    ):
-        super().__init__()
-        self.out_channels = out_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.emb_phone = nn.Linear(256, hidden_channels)
-        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
-        if f0 == True:
-            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
-        self.encoder = attentions.Encoder(
-            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
-        )
-        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-    def forward(self, phone, pitch, lengths):
-        if pitch == None:
-            x = self.emb_phone(phone)
-        else:
-            x = self.emb_phone(phone) + self.emb_pitch(pitch)
-        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
-        x = self.lrelu(x)
-        x = torch.transpose(x, 1, -1)  # [b, h, t]
-        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
-            x.dtype
-        )
-        x = self.encoder(x * x_mask, x_mask)
-        stats = self.proj(x) * x_mask
-
-        m, logs = torch.split(stats, self.out_channels, dim=1)
-        return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
-    def __init__(
-        self,
-        out_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        f0=True,
-    ):
-        super().__init__()
-        self.out_channels = out_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.emb_phone = nn.Linear(768, hidden_channels)
-        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
-        if f0 == True:
-            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
-        self.encoder = attentions.Encoder(
-            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
-        )
-        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-    def forward(self, phone, pitch, lengths):
-        if pitch == None:
-            x = self.emb_phone(phone)
-        else:
-            x = self.emb_phone(phone) + self.emb_pitch(pitch)
-        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
-        x = self.lrelu(x)
-        x = torch.transpose(x, 1, -1)  # [b, h, t]
-        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
-            x.dtype
-        )
-        x = self.encoder(x * x_mask, x_mask)
-        stats = self.proj(x) * x_mask
-
-        m, logs = torch.split(stats, self.out_channels, dim=1)
-        return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
-    def __init__(
-        self,
-        channels,
-        hidden_channels,
-        kernel_size,
-        dilation_rate,
-        n_layers,
-        n_flows=4,
-        gin_channels=0,
-    ):
-        super().__init__()
-        self.channels = channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.n_flows = n_flows
-        self.gin_channels = gin_channels
-
-        self.flows = nn.ModuleList()
-        for i in range(n_flows):
-            self.flows.append(
-                modules.ResidualCouplingLayer(
-                    channels,
-                    hidden_channels,
-                    kernel_size,
-                    dilation_rate,
-                    n_layers,
-                    gin_channels=gin_channels,
-                    mean_only=True,
-                )
-            )
-            self.flows.append(modules.Flip())
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        if not reverse:
-            for flow in self.flows:
-                x, _ = flow(x, x_mask, g=g, reverse=reverse)
-        else:
-            for flow in reversed(self.flows):
-                x = flow(x, x_mask, g=g, reverse=reverse)
-        return x
-
-    def remove_weight_norm(self):
-        for i in range(self.n_flows):
-            self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
-    def __init__(
-        self,
-        in_channels,
-        out_channels,
-        hidden_channels,
-        kernel_size,
-        dilation_rate,
-        n_layers,
-        gin_channels=0,
-    ):
-        super().__init__()
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.gin_channels = gin_channels
-
-        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
-        self.enc = modules.WN(
-            hidden_channels,
-            kernel_size,
-            dilation_rate,
-            n_layers,
-            gin_channels=gin_channels,
-        )
-        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-    def forward(self, x, x_lengths, g=None):
-        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
-            x.dtype
-        )
-        x = self.pre(x) * x_mask
-        x = self.enc(x, x_mask, g=g)
-        stats = self.proj(x) * x_mask
-        m, logs = torch.split(stats, self.out_channels, dim=1)
-        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-        return z, m, logs, x_mask
-
-    def remove_weight_norm(self):
-        self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
-    def __init__(
-        self,
-        initial_channel,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        gin_channels=0,
-    ):
-        super(Generator, self).__init__()
-        self.num_kernels = len(resblock_kernel_sizes)
-        self.num_upsamples = len(upsample_rates)
-        self.conv_pre = Conv1d(
-            initial_channel, upsample_initial_channel, 7, 1, padding=3
-        )
-        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
-        self.ups = nn.ModuleList()
-        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-            self.ups.append(
-                weight_norm(
-                    ConvTranspose1d(
-                        upsample_initial_channel // (2**i),
-                        upsample_initial_channel // (2 ** (i + 1)),
-                        k,
-                        u,
-                        padding=(k - u) // 2,
-                    )
-                )
-            )
-
-        self.resblocks = nn.ModuleList()
-        for i in range(len(self.ups)):
-            ch = upsample_initial_channel // (2 ** (i + 1))
-            for j, (k, d) in enumerate(
-                zip(resblock_kernel_sizes, resblock_dilation_sizes)
-            ):
-                self.resblocks.append(resblock(ch, k, d))
-
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-        self.ups.apply(init_weights)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-    def forward(self, x, g=None):
-        x = self.conv_pre(x)
-        if g is not None:
-            x = x + self.cond(g)
-
-        for i in range(self.num_upsamples):
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            x = self.ups[i](x)
-            xs = None
-            for j in range(self.num_kernels):
-                if xs is None:
-                    xs = self.resblocks[i * self.num_kernels + j](x)
-                else:
-                    xs += self.resblocks[i * self.num_kernels + j](x)
-            x = xs / self.num_kernels
-        x = F.leaky_relu(x)
-        x = self.conv_post(x)
-        x = torch.tanh(x)
-
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.ups:
-            remove_weight_norm(l)
-        for l in self.resblocks:
-            l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
-    """Definition of sine generator
-    SineGen(samp_rate, harmonic_num = 0,
-            sine_amp = 0.1, noise_std = 0.003,
-            voiced_threshold = 0,
-            flag_for_pulse=False)
-    samp_rate: sampling rate in Hz
-    harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of sine-waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: this SineGen is used inside PulseGen (default False)
-    Note: when flag_for_pulse is True, the first time step of a voiced
-    segment is always sin(np.pi) or cos(0)
-    """
-
-    def __init__(
-        self,
-        samp_rate,
-        harmonic_num=0,
-        sine_amp=0.1,
-        noise_std=0.003,
-        voiced_threshold=0,
-        flag_for_pulse=False,
-    ):
-        super(SineGen, self).__init__()
-        self.sine_amp = sine_amp
-        self.noise_std = noise_std
-        self.harmonic_num = harmonic_num
-        self.dim = self.harmonic_num + 1
-        self.sampling_rate = samp_rate
-        self.voiced_threshold = voiced_threshold
-
-    def _f02uv(self, f0):
-        # generate uv signal
-        uv = torch.ones_like(f0)
-        uv = uv * (f0 > self.voiced_threshold)
-        return uv
-
-    def forward(self, f0, upp):
-        """sine_tensor, uv = forward(f0)
-        input F0: tensor(batchsize=1, length, dim=1)
-        f0 for unvoiced steps should be 0
-        output sine_tensor: tensor(batchsize=1, length, dim)
-        output uv: tensor(batchsize=1, length, 1)
-        """
-        with torch.no_grad():
-            f0 = f0[:, None].transpose(1, 2)
-            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
-            # fundamental component
-            f0_buf[:, :, 0] = f0[:, :, 0]
-            for idx in np.arange(self.harmonic_num):
-                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
-                    idx + 2
-                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  # the %1 here means the products over n_har cannot be optimized away in post-processing
-            rand_ini = torch.rand(
-                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
-            )
-            rand_ini[:, 0] = 0
-            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # taking %1 here would keep the cumsum below from being optimized
-            tmp_over_one *= upp
-            tmp_over_one = F.interpolate(
-                tmp_over_one.transpose(2, 1),
-                scale_factor=upp,
-                mode="linear",
-                align_corners=True,
-            ).transpose(2, 1)
-            rad_values = F.interpolate(
-                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
-            ).transpose(
-                2, 1
-            )
-            tmp_over_one %= 1
-            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
-            cumsum_shift = torch.zeros_like(rad_values)
-            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
-            sine_waves = torch.sin(
-                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
-            )
-            sine_waves = sine_waves * self.sine_amp
-            uv = self._f02uv(f0)
-            uv = F.interpolate(
-                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
-            ).transpose(2, 1)
-            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
-            noise = noise_amp * torch.randn_like(sine_waves)
-            sine_waves = sine_waves * uv + noise
-        return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
-    """SourceModule for hn-nsf
-    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
-                 add_noise_std=0.003, voiced_threshod=0)
-    sampling_rate: sampling_rate in Hz
-    harmonic_num: number of harmonic above F0 (default: 0)
-    sine_amp: amplitude of sine source signal (default: 0.1)
-    add_noise_std: std of additive Gaussian noise (default: 0.003)
-        note that amplitude of noise in unvoiced is decided
-        by sine_amp
-    voiced_threshold: threshold to set U/V given F0 (default: 0)
-    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
-    F0_sampled (batchsize, length, 1)
-    Sine_source (batchsize, length, 1)
-    noise_source (batchsize, length 1)
-    uv (batchsize, length, 1)
-    """
-
-    def __init__(
-        self,
-        sampling_rate,
-        harmonic_num=0,
-        sine_amp=0.1,
-        add_noise_std=0.003,
-        voiced_threshod=0,
-        is_half=True,
-    ):
-        super(SourceModuleHnNSF, self).__init__()
-
-        self.sine_amp = sine_amp
-        self.noise_std = add_noise_std
-        self.is_half = is_half
-        # to produce sine waveforms
-        self.l_sin_gen = SineGen(
-            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
-        )
-
-        # to merge source harmonics into a single excitation
-        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
-        self.l_tanh = torch.nn.Tanh()
-
-    def forward(self, x, upp=None):
-        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
-        if self.is_half:
-            sine_wavs = sine_wavs.half()
-        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
-        return sine_merge, None, None  # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
-    def __init__(
-        self,
-        initial_channel,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        gin_channels,
-        sr,
-        is_half=False,
-    ):
-        super(GeneratorNSF, self).__init__()
-        self.num_kernels = len(resblock_kernel_sizes)
-        self.num_upsamples = len(upsample_rates)
-
-        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
-        self.m_source = SourceModuleHnNSF(
-            sampling_rate=sr, harmonic_num=0, is_half=is_half
-        )
-        self.noise_convs = nn.ModuleList()
-        self.conv_pre = Conv1d(
-            initial_channel, upsample_initial_channel, 7, 1, padding=3
-        )
-        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
-        self.ups = nn.ModuleList()
-        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-            c_cur = upsample_initial_channel // (2 ** (i + 1))
-            self.ups.append(
-                weight_norm(
-                    ConvTranspose1d(
-                        upsample_initial_channel // (2**i),
-                        upsample_initial_channel // (2 ** (i + 1)),
-                        k,
-                        u,
-                        padding=(k - u) // 2,
-                    )
-                )
-            )
-            if i + 1 < len(upsample_rates):
-                stride_f0 = np.prod(upsample_rates[i + 1 :])
-                self.noise_convs.append(
-                    Conv1d(
-                        1,
-                        c_cur,
-                        kernel_size=stride_f0 * 2,
-                        stride=stride_f0,
-                        padding=stride_f0 // 2,
-                    )
-                )
-            else:
-                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
-        self.resblocks = nn.ModuleList()
-        for i in range(len(self.ups)):
-            ch = upsample_initial_channel // (2 ** (i + 1))
-            for j, (k, d) in enumerate(
-                zip(resblock_kernel_sizes, resblock_dilation_sizes)
-            ):
-                self.resblocks.append(resblock(ch, k, d))
-
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-        self.ups.apply(init_weights)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-        self.upp = np.prod(upsample_rates)
-
-    def forward(self, x, f0, g=None):
-        har_source, noi_source, uv = self.m_source(f0, self.upp)
-        har_source = har_source.transpose(1, 2)
-        x = self.conv_pre(x)
-        if g is not None:
-            x = x + self.cond(g)
-
-        for i in range(self.num_upsamples):
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            x = self.ups[i](x)
-            x_source = self.noise_convs[i](har_source)
-            x = x + x_source
-            xs = None
-            for j in range(self.num_kernels):
-                if xs is None:
-                    xs = self.resblocks[i * self.num_kernels + j](x)
-                else:
-                    xs += self.resblocks[i * self.num_kernels + j](x)
-            x = xs / self.num_kernels
-        x = F.leaky_relu(x)
-        x = self.conv_post(x)
-        x = torch.tanh(x)
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.ups:
-            remove_weight_norm(l)
-        for l in self.resblocks:
-            l.remove_weight_norm()
-
-
-sr2sr = {
-    "32k": 32000,
-    "40k": 40000,
-    "48k": 48000,
-}
-
-
-class SynthesizerTrnMsNSFsidM(nn.Module):
-    def __init__(
-        self,
-        spec_channels,
-        segment_size,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        spk_embed_dim,
-        gin_channels,
-        sr,
-        version,
-        **kwargs
-    ):
-        super().__init__()
-        if type(sr) == type("strr"):
-            sr = sr2sr[sr]
-        self.spec_channels = spec_channels
-        self.inter_channels = inter_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.resblock = resblock
-        self.resblock_kernel_sizes = resblock_kernel_sizes
-        self.resblock_dilation_sizes = resblock_dilation_sizes
-        self.upsample_rates = upsample_rates
-        self.upsample_initial_channel = upsample_initial_channel
-        self.upsample_kernel_sizes = upsample_kernel_sizes
-        self.segment_size = segment_size
-        self.gin_channels = gin_channels
-        # self.hop_length = hop_length#
-        self.spk_embed_dim = spk_embed_dim
-        if version == "v1":
-            self.enc_p = TextEncoder256(
-                inter_channels,
-                hidden_channels,
-                filter_channels,
-                n_heads,
-                n_layers,
-                kernel_size,
-                p_dropout,
-            )
-        else:
-            self.enc_p = TextEncoder768(
-                inter_channels,
-                hidden_channels,
-                filter_channels,
-                n_heads,
-                n_layers,
-                kernel_size,
-                p_dropout,
-            )
-        self.dec = GeneratorNSF(
-            inter_channels,
-            resblock,
-            resblock_kernel_sizes,
-            resblock_dilation_sizes,
-            upsample_rates,
-            upsample_initial_channel,
-            upsample_kernel_sizes,
-            gin_channels=gin_channels,
-            sr=sr,
-            is_half=kwargs["is_half"],
-        )
-        self.enc_q = PosteriorEncoder(
-            spec_channels,
-            inter_channels,
-            hidden_channels,
-            5,
-            1,
-            16,
-            gin_channels=gin_channels,
-        )
-        self.flow = ResidualCouplingBlock(
-            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
-        )
-        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
-        self.speaker_map = None
-        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
-    def remove_weight_norm(self):
-        self.dec.remove_weight_norm()
-        self.flow.remove_weight_norm()
-        self.enc_q.remove_weight_norm()
-
-    def construct_spkmixmap(self, n_speaker):
-        self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
-        for i in range(n_speaker):
-            self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
-        self.speaker_map = self.speaker_map.unsqueeze(0)
-
-    def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
-        if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
-            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
-            g = g * self.speaker_map  # [N, S, B, 1, H]
-            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
-            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
-        else:
-            g = g.unsqueeze(0)
-            g = self.emb_g(g).transpose(1, 2)
-
-        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
-        z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
-        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
-        return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(MultiPeriodDiscriminator, self).__init__()
-        periods = [2, 3, 5, 7, 11, 17]
-        # periods = [3, 5, 7, 11, 17, 23, 37]
-
-        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-        discs = discs + [
-            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
-        ]
-        self.discriminators = nn.ModuleList(discs)
-
-    def forward(self, y, y_hat):
-        y_d_rs = []  #
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            # for j in range(len(fmap_r)):
-            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
-            y_d_rs.append(y_d_r)
-            y_d_gs.append(y_d_g)
-            fmap_rs.append(fmap_r)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(MultiPeriodDiscriminatorV2, self).__init__()
-        # periods = [2, 3, 5, 7, 11, 17]
-        periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
-        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-        discs = discs + [
-            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
-        ]
-        self.discriminators = nn.ModuleList(discs)
-
-    def forward(self, y, y_hat):
-        y_d_rs = []  #
-        y_d_gs = []
-        fmap_rs = []
-        fmap_gs = []
-        for i, d in enumerate(self.discriminators):
-            y_d_r, fmap_r = d(y)
-            y_d_g, fmap_g = d(y_hat)
-            # for j in range(len(fmap_r)):
-            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
-            y_d_rs.append(y_d_r)
-            y_d_gs.append(y_d_g)
-            fmap_rs.append(fmap_r)
-            fmap_gs.append(fmap_g)
-
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
-    def __init__(self, use_spectral_norm=False):
-        super(DiscriminatorS, self).__init__()
-        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-        self.convs = nn.ModuleList(
-            [
-                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-            ]
-        )
-        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-    def forward(self, x):
-        fmap = []
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
-    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-        super(DiscriminatorP, self).__init__()
-        self.period = period
-        self.use_spectral_norm = use_spectral_norm
-        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-        self.convs = nn.ModuleList(
-            [
-                norm_f(
-                    Conv2d(
-                        1,
-                        32,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(kernel_size, 1), 0),
-                    )
-                ),
-                norm_f(
-                    Conv2d(
-                        32,
-                        128,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(kernel_size, 1), 0),
-                    )
-                ),
-                norm_f(
-                    Conv2d(
-                        128,
-                        512,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(kernel_size, 1), 0),
-                    )
-                ),
-                norm_f(
-                    Conv2d(
-                        512,
-                        1024,
-                        (kernel_size, 1),
-                        (stride, 1),
-                        padding=(get_padding(kernel_size, 1), 0),
-                    )
-                ),
-                norm_f(
-                    Conv2d(
-                        1024,
-                        1024,
-                        (kernel_size, 1),
-                        1,
-                        padding=(get_padding(kernel_size, 1), 0),
-                    )
-                ),
-            ]
-        )
-        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-    def forward(self, x):
-        fmap = []
-
-        # 1d to 2d
-        b, c, t = x.shape
-        if t % self.period != 0:  # pad first
-            n_pad = self.period - (t % self.period)
-            x = F.pad(x, (0, n_pad), "reflect")
-            t = t + n_pad
-        x = x.view(b, c, t // self.period, self.period)
-
-        for l in self.convs:
-            x = l(x)
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            fmap.append(x)
-        x = self.conv_post(x)
-        fmap.append(x)
-        x = torch.flatten(x, 1, -1)
-
-        return x, fmap
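DiscriminatorP's forward pass above hinges on one trick: reflect-pad the waveform so its length is divisible by the period, then fold time into a (time/period, period) 2D grid for the Conv2d stack. That fold in isolation, runnable with plain torch:

import torch
import torch.nn.functional as F

period = 5
x = torch.randn(1, 1, 23)  # (batch, channels, samples)
b, c, t = x.shape
if t % period != 0:  # pad first, exactly as DiscriminatorP.forward does
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)
print(x.shape)  # torch.Size([1, 1, 5, 5])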
 
spaces/Arsenii2023/Demo1/app.py DELETED
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
 
spaces/ArtificialWF/Voice-Recognition/README.md DELETED
@@ -1,13 +0,0 @@
----
-title: Voice to Text
-emoji: 🌖
-colorFrom: gray
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py DELETED
@@ -1,155 +0,0 @@
-"""Utilities to lazily create and visit candidates found.
-
-Creating and visiting a candidate is a *very* costly operation. It involves
-fetching, extracting, potentially building modules from source, and verifying
-distribution metadata. It is therefore crucial for performance to keep
-everything here lazy all the way down, so we only touch candidates that we
-absolutely need, and not "download the world" when we only need one version of
-something.
-"""
-
-import functools
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple
-
-from pip._vendor.packaging.version import _BaseVersion
-
-from .base import Candidate
-
-IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]]
-
-if TYPE_CHECKING:
-    SequenceCandidate = Sequence[Candidate]
-else:
-    # For compatibility: Python before 3.9 does not support using [] on the
-    # Sequence class.
-    #
-    # >>> from collections.abc import Sequence
-    # >>> Sequence[str]
-    # Traceback (most recent call last):
-    #   File "<stdin>", line 1, in <module>
-    # TypeError: 'ABCMeta' object is not subscriptable
-    #
-    # TODO: Remove this block after dropping Python 3.8 support.
-    SequenceCandidate = Sequence
-
-
-def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]:
-    """Iterator for ``FoundCandidates``.
-
-    This iterator is used when the package is not already installed. Candidates
-    from index come later in their normal ordering.
-    """
-    versions_found: Set[_BaseVersion] = set()
-    for version, func in infos:
-        if version in versions_found:
-            continue
-        candidate = func()
-        if candidate is None:
-            continue
-        yield candidate
-        versions_found.add(version)
-
-
-def _iter_built_with_prepended(
-    installed: Candidate, infos: Iterator[IndexCandidateInfo]
-) -> Iterator[Candidate]:
-    """Iterator for ``FoundCandidates``.
-
-    This iterator is used when the resolver prefers the already-installed
-    candidate and NOT to upgrade. The installed candidate is therefore
-    always yielded first, and candidates from index come later in their
-    normal ordering, except skipped when the version is already installed.
-    """
-    yield installed
-    versions_found: Set[_BaseVersion] = {installed.version}
-    for version, func in infos:
-        if version in versions_found:
-            continue
-        candidate = func()
-        if candidate is None:
-            continue
-        yield candidate
-        versions_found.add(version)
-
-
-def _iter_built_with_inserted(
-    installed: Candidate, infos: Iterator[IndexCandidateInfo]
-) -> Iterator[Candidate]:
-    """Iterator for ``FoundCandidates``.
-
-    This iterator is used when the resolver prefers to upgrade an
-    already-installed package. Candidates from index are returned in their
-    normal ordering, except replaced when the version is already installed.
-
-    The implementation iterates through and yields other candidates, inserting
-    the installed candidate exactly once before we start yielding older or
-    equivalent candidates, or after all other candidates if they are all newer.
-    """
-    versions_found: Set[_BaseVersion] = set()
-    for version, func in infos:
-        if version in versions_found:
-            continue
-        # If the installed candidate is better, yield it first.
-        if installed.version >= version:
-            yield installed
-            versions_found.add(installed.version)
-        candidate = func()
-        if candidate is None:
-            continue
-        yield candidate
-        versions_found.add(version)
-
-    # If the installed candidate is older than all other candidates.
-    if installed.version not in versions_found:
-        yield installed
-
-
-class FoundCandidates(SequenceCandidate):
-    """A lazy sequence to provide candidates to the resolver.
-
-    The intended usage is to return this from `find_matches()` so the resolver
-    can iterate through the sequence multiple times, but only access the index
-    page when remote packages are actually needed. This improves performance
-    when suitable candidates are already installed on disk.
-    """
-
-    def __init__(
-        self,
-        get_infos: Callable[[], Iterator[IndexCandidateInfo]],
-        installed: Optional[Candidate],
-        prefers_installed: bool,
-        incompatible_ids: Set[int],
-    ):
-        self._get_infos = get_infos
-        self._installed = installed
-        self._prefers_installed = prefers_installed
-        self._incompatible_ids = incompatible_ids
-
-    def __getitem__(self, index: Any) -> Any:
-        # Implemented to satisfy the ABC check. This is not needed by the
-        # resolver, and should not be used by the provider either (for
-        # performance reasons).
-        raise NotImplementedError("don't do this")
-
-    def __iter__(self) -> Iterator[Candidate]:
-        infos = self._get_infos()
-        if not self._installed:
-            iterator = _iter_built(infos)
-        elif self._prefers_installed:
-            iterator = _iter_built_with_prepended(self._installed, infos)
-        else:
-            iterator = _iter_built_with_inserted(self._installed, infos)
-        return (c for c in iterator if id(c) not in self._incompatible_ids)
-
-    def __len__(self) -> int:
-        # Implemented to satisfy the ABC check. This is not needed by the
-        # resolver, and should not be used by the provider either (for
-        # performance reasons).
-        raise NotImplementedError("don't do this")
-
-    @functools.lru_cache(maxsize=1)
-    def __bool__(self) -> bool:
-        if self._prefers_installed and self._installed:
-            return True
-        return any(self)
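The three generator helpers above differ only in where the installed candidate is spliced into the index ordering. A toy walk-through of the "prepended" case, with ints standing in for versions and strings for candidates instead of pip's real types:

from typing import Callable, Iterator, Optional, Tuple

def infos() -> Iterator[Tuple[int, Callable[[], Optional[str]]]]:
    # Index candidates, newest first; building one stays deferred behind a callable.
    yield 2, lambda: "pkg-2.0-from-index"
    yield 1, lambda: "pkg-1.0-from-index"

def iter_with_prepended(installed_version: int, installed: str, infos):
    yield installed  # the installed candidate always comes first
    seen = {installed_version}
    for version, build in infos:
        if version in seen:
            continue  # skip the version that is already installed
        candidate = build()
        if candidate is None:
            continue
        yield candidate
        seen.add(version)

print(list(iter_with_prepended(1, "pkg-1.0-installed", infos())))
# ['pkg-1.0-installed', 'pkg-2.0-from-index']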
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/specifiers.py DELETED
@@ -1,802 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
-    Callable,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
-    """
-    An invalid specifier was found, users should refer to PEP 440.
-    """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
-    @abc.abstractmethod
-    def __str__(self) -> str:
-        """
-        Returns the str representation of this Specifier like object. This
-        should be representative of the Specifier itself.
-        """
-
-    @abc.abstractmethod
-    def __hash__(self) -> int:
-        """
-        Returns a hash value for this Specifier like object.
-        """
-
-    @abc.abstractmethod
-    def __eq__(self, other: object) -> bool:
-        """
-        Returns a boolean representing whether or not the two Specifier like
-        objects are equal.
-        """
-
-    @abc.abstractproperty
-    def prereleases(self) -> Optional[bool]:
-        """
-        Returns whether or not pre-releases as a whole are allowed by this
-        specifier.
-        """
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        """
-        Sets whether or not pre-releases as a whole are allowed by this
-        specifier.
-        """
-
-    @abc.abstractmethod
-    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
-        """
-        Determines if the given item is contained within this specifier.
-        """
-
-    @abc.abstractmethod
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-        """
-        Takes an iterable of items and filters them so that only items which
-        are contained within this specifier are allowed in it.
-        """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
-    _operators: Dict[str, str] = {}
-    _regex: Pattern[str]
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        match = self._regex.search(spec)
-        if not match:
-            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
-        self._spec: Tuple[str, str] = (
-            match.group("operator").strip(),
-            match.group("version").strip(),
-        )
-
-        # Store whether or not this Specifier should accept prereleases
-        self._prereleases = prereleases
-
-    def __repr__(self) -> str:
-        pre = (
-            f", prereleases={self.prereleases!r}"
-            if self._prereleases is not None
-            else ""
-        )
-
-        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
-
-    def __str__(self) -> str:
-        return "{}{}".format(*self._spec)
-
-    @property
-    def _canonical_spec(self) -> Tuple[str, str]:
-        return self._spec[0], canonicalize_version(self._spec[1])
-
-    def __hash__(self) -> int:
-        return hash(self._canonical_spec)
-
-    def __eq__(self, other: object) -> bool:
-        if isinstance(other, str):
-            try:
-                other = self.__class__(str(other))
-            except InvalidSpecifier:
-                return NotImplemented
-        elif not isinstance(other, self.__class__):
-            return NotImplemented
-
-        return self._canonical_spec == other._canonical_spec
-
-    def _get_operator(self, op: str) -> CallableOperator:
-        operator_callable: CallableOperator = getattr(
-            self, f"_compare_{self._operators[op]}"
-        )
-        return operator_callable
-
-    def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
-        if not isinstance(version, (LegacyVersion, Version)):
-            version = parse(version)
-        return version
-
-    @property
-    def operator(self) -> str:
-        return self._spec[0]
-
-    @property
-    def version(self) -> str:
-        return self._spec[1]
-
-    @property
-    def prereleases(self) -> Optional[bool]:
-        return self._prereleases
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
-
-    def __contains__(self, item: str) -> bool:
-        return self.contains(item)
-
-    def contains(
-        self, item: UnparsedVersion, prereleases: Optional[bool] = None
-    ) -> bool:
-
-        # Determine if prereleases are to be allowed or not.
-        if prereleases is None:
-            prereleases = self.prereleases
-
-        # Normalize item to a Version or LegacyVersion, this allows us to have
-        # a shortcut for ``"2.0" in Specifier(">=2")
-        normalized_item = self._coerce_version(item)
-
-        # Determine if we should be supporting prereleases in this specifier
-        # or not, if we do not support prereleases than we can short circuit
-        # logic if this version is a prereleases.
-        if normalized_item.is_prerelease and not prereleases:
-            return False
-
-        # Actually do the comparison to determine if this item is contained
-        # within this Specifier or not.
-        operator_callable: CallableOperator = self._get_operator(self.operator)
-        return operator_callable(normalized_item, self.version)
-
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-
-        yielded = False
-        found_prereleases = []
-
-        kw = {"prereleases": prereleases if prereleases is not None else True}
-
-        # Attempt to iterate over all the values in the iterable and if any of
-        # them match, yield them.
-        for version in iterable:
-            parsed_version = self._coerce_version(version)
-
-            if self.contains(parsed_version, **kw):
-                # If our version is a prerelease, and we were not set to allow
-                # prereleases, then we'll store it for later in case nothing
-                # else matches this specifier.
-                if parsed_version.is_prerelease and not (
-                    prereleases or self.prereleases
-                ):
-                    found_prereleases.append(version)
-                # Either this is not a prerelease, or we should have been
-                # accepting prereleases from the beginning.
-                else:
-                    yielded = True
-                    yield version
-
-        # Now that we've iterated over everything, determine if we've yielded
-        # any values, and if we have not and we have any prereleases stored up
-        # then we will go ahead and yield the prereleases.
-        if not yielded and found_prereleases:
-            for version in found_prereleases:
-                yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(==|!=|<=|>=|<|>))
-        \s*
-        (?P<version>
-            [^,;\s)]* # Since this is a "legacy" specifier, and the version
-                      # string can be just about anything, we match everything
-                      # except for whitespace, a semi-colon for marker support,
-                      # a closing paren since versions can be enclosed in
-                      # them, and a comma since it's a version separator.
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-    }
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        super().__init__(spec, prereleases)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
-        if not isinstance(version, LegacyVersion):
-            version = LegacyVersion(str(version))
-        return version
-
-    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective == self._coerce_version(spec)
-
-    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective != self._coerce_version(spec)
-
-    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective <= self._coerce_version(spec)
-
-    def _compare_greater_than_equal(
-        self, prospective: LegacyVersion, spec: str
-    ) -> bool:
-        return prospective >= self._coerce_version(spec)
-
-    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective < self._coerce_version(spec)
-
-    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
-    fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
-    @functools.wraps(fn)
-    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
-        if not isinstance(prospective, Version):
-            return False
-        return fn(self, prospective, spec)
-
-    return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
-        (?P<version>
-            (?:
-                # The identity operators allow for an escape hatch that will
-                # do an exact string match of the version you wish to install.
-                # This will not be parsed by PEP 440 and we cannot determine
-                # any semantic meaning from it. This operator is discouraged
-                # but included entirely as an escape hatch.
-                (?<====)  # Only match for the identity operator
-                \s*
-                [^\s]*    # We just match everything, except for whitespace
-                          # since we are only testing for strict identity.
-            )
-            |
-            (?:
-                # The (non)equality operators allow for wild card and local
-                # versions to be specified so we have to define these two
-                # operators separately to enable that.
-                (?<===|!=)  # Only match for equals and not equals
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-
-                # You cannot use a wild card and a dev or local version
-                # together so group them with a | and make them optional.
-                (?:
-                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
-                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
-                    |
-                    \.\*  # Wild card syntax of .*
-                )?
-            )
-            |
-            (?:
-                # The compatible operator requires at least two digits in the
-                # release segment.
-                (?<=~=)  # Only match for the compatible operator
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-            |
-            (?:
-                # All other operators only allow a sub set of what the
-                # (non)equality operators do. Specifically they do not allow
-                # local versions to be specified nor do they allow the prefix
-                # matching wild cards.
-                (?<!==|!=|~=)  # We have special cases for these
-                               # operators so we want to make sure they
-                               # don't match here.
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "~=": "compatible",
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
401
- ">=": "greater_than_equal",
402
- "<": "less_than",
403
- ">": "greater_than",
404
- "===": "arbitrary",
405
- }
406
-
407
- @_require_version_compare
408
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
409
-
410
- # Compatible releases have an equivalent combination of >= and ==. That
411
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
412
- # implement this in terms of the other specifiers instead of
413
- # implementing it ourselves. The only thing we need to do is construct
414
- # the other specifiers.
415
-
416
- # We want everything but the last item in the version, but we want to
417
- # ignore suffix segments.
418
- prefix = ".".join(
419
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
420
- )
421
-
422
- # Add the prefix notation to the end of our string
423
- prefix += ".*"
424
-
425
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
426
- prospective, prefix
427
- )
428
-
429
- @_require_version_compare
430
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
431
-
432
- # We need special logic to handle prefix matching
433
- if spec.endswith(".*"):
434
- # In the case of prefix matching we want to ignore local segment.
435
- prospective = Version(prospective.public)
436
- # Split the spec out by dots, and pretend that there is an implicit
437
- # dot in between a release segment and a pre-release segment.
438
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
439
-
440
- # Split the prospective version out by dots, and pretend that there
441
- # is an implicit dot in between a release segment and a pre-release
442
- # segment.
443
- split_prospective = _version_split(str(prospective))
444
-
445
- # Shorten the prospective version to be the same length as the spec
446
- # so that we can determine if the specifier is a prefix of the
447
- # prospective version or not.
448
- shortened_prospective = split_prospective[: len(split_spec)]
449
-
450
- # Pad out our two sides with zeros so that they both equal the same
451
- # length.
452
- padded_spec, padded_prospective = _pad_version(
453
- split_spec, shortened_prospective
454
- )
455
-
456
- return padded_prospective == padded_spec
457
- else:
458
- # Convert our spec string into a Version
459
- spec_version = Version(spec)
460
-
461
- # If the specifier does not have a local segment, then we want to
462
- # act as if the prospective version also does not have a local
463
- # segment.
464
- if not spec_version.local:
465
- prospective = Version(prospective.public)
466
-
467
- return prospective == spec_version
468
-
469
- @_require_version_compare
470
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
471
- return not self._compare_equal(prospective, spec)
472
-
473
- @_require_version_compare
474
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
475
-
476
- # NB: Local version identifiers are NOT permitted in the version
477
- # specifier, so local version labels can be universally removed from
478
- # the prospective version.
479
- return Version(prospective.public) <= Version(spec)
480
-
481
- @_require_version_compare
482
- def _compare_greater_than_equal(
483
- self, prospective: ParsedVersion, spec: str
484
- ) -> bool:
485
-
486
- # NB: Local version identifiers are NOT permitted in the version
487
- # specifier, so local version labels can be universally removed from
488
- # the prospective version.
489
- return Version(prospective.public) >= Version(spec)
490
-
491
- @_require_version_compare
492
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
493
-
494
- # Convert our spec to a Version instance, since we'll want to work with
495
- # it as a version.
496
- spec = Version(spec_str)
497
-
498
- # Check to see if the prospective version is less than the spec
499
- # version. If it's not we can short circuit and just return False now
500
- # instead of doing extra unneeded work.
501
- if not prospective < spec:
502
- return False
503
-
504
- # This special case is here so that, unless the specifier itself
505
- # includes is a pre-release version, that we do not accept pre-release
506
- # versions for the version mentioned in the specifier (e.g. <3.1 should
507
- # not match 3.1.dev0, but should match 3.0.dev0).
508
- if not spec.is_prerelease and prospective.is_prerelease:
509
- if Version(prospective.base_version) == Version(spec.base_version):
510
- return False
511
-
512
- # If we've gotten to here, it means that prospective version is both
513
- # less than the spec version *and* it's not a pre-release of the same
514
- # version in the spec.
515
- return True
516
-
517
- @_require_version_compare
518
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
519
-
520
- # Convert our spec to a Version instance, since we'll want to work with
521
- # it as a version.
522
- spec = Version(spec_str)
523
-
524
- # Check to see if the prospective version is greater than the spec
525
- # version. If it's not we can short circuit and just return False now
526
- # instead of doing extra unneeded work.
527
- if not prospective > spec:
528
- return False
529
-
530
- # This special case is here so that, unless the specifier itself
531
- # includes is a post-release version, that we do not accept
532
- # post-release versions for the version mentioned in the specifier
533
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
534
- if not spec.is_postrelease and prospective.is_postrelease:
535
- if Version(prospective.base_version) == Version(spec.base_version):
536
- return False
537
-
538
- # Ensure that we do not allow a local version of the version mentioned
539
- # in the specifier, which is technically greater than, to match.
540
- if prospective.local is not None:
541
- if Version(prospective.base_version) == Version(spec.base_version):
542
- return False
543
-
544
- # If we've gotten to here, it means that prospective version is both
545
- # greater than the spec version *and* it's not a pre-release of the
546
- # same version in the spec.
547
- return True
548
-
549
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
550
- return str(prospective).lower() == str(spec).lower()
551
-
552
- @property
553
- def prereleases(self) -> bool:
554
-
555
- # If there is an explicit prereleases set for this, then we'll just
556
- # blindly use that.
557
- if self._prereleases is not None:
558
- return self._prereleases
559
-
560
- # Look at all of our specifiers and determine if they are inclusive
561
- # operators, and if they are if they are including an explicit
562
- # prerelease.
563
- operator, version = self._spec
564
- if operator in ["==", ">=", "<=", "~=", "==="]:
565
- # The == specifier can include a trailing .*, if it does we
566
- # want to remove before parsing.
567
- if operator == "==" and version.endswith(".*"):
568
- version = version[:-2]
569
-
570
- # Parse the version, and if it is a pre-release than this
571
- # specifier allows pre-releases.
572
- if parse(version).is_prerelease:
573
- return True
574
-
575
- return False
576
-
577
- @prereleases.setter
578
- def prereleases(self, value: bool) -> None:
579
- self._prereleases = value
580
-
581
-
582
- _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
583
-
584
-
585
- def _version_split(version: str) -> List[str]:
586
- result: List[str] = []
587
- for item in version.split("."):
588
- match = _prefix_regex.search(item)
589
- if match:
590
- result.extend(match.groups())
591
- else:
592
- result.append(item)
593
- return result
594
-
595
-
596
- def _is_not_suffix(segment: str) -> bool:
597
- return not any(
598
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
599
- )
600
-
601
-
602
- def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
603
- left_split, right_split = [], []
604
-
605
- # Get the release segment of our versions
606
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
607
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
608
-
609
- # Get the rest of our versions
610
- left_split.append(left[len(left_split[0]) :])
611
- right_split.append(right[len(right_split[0]) :])
612
-
613
- # Insert our padding
614
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
615
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
616
-
617
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
618
-
619
-
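A quick illustration of what the two private helpers above compute, with invented inputs (a sketch assuming packaging < 22, where these private names still exist; the import path shown is the public packaging distribution rather than pip's vendored copy):

    # Illustrative sketch only -- these helpers are private and the inputs are made up.
    from packaging.specifiers import _pad_version, _version_split  # assumed import path

    print(_version_split("2.1rc1"))
    # ['2', '1', 'rc1'] -- a trailing pre-release tag becomes its own segment
    print(_pad_version(["2", "1"], ["2", "1", "0", "post1"]))
    # (['2', '1', '0'], ['2', '1', '0', 'post1']) -- the shorter release segment is zero-padded
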
- class SpecifierSet(BaseSpecifier):
-     def __init__(
-         self, specifiers: str = "", prereleases: Optional[bool] = None
-     ) -> None:
-
-         # Split on , to break each individual specifier into its own item, and
-         # strip each item to remove leading/trailing whitespace.
-         split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
-         # Parse each individual specifier, attempting first to make it a
-         # Specifier and falling back to a LegacySpecifier.
-         parsed: Set[_IndividualSpecifier] = set()
-         for specifier in split_specifiers:
-             try:
-                 parsed.add(Specifier(specifier))
-             except InvalidSpecifier:
-                 parsed.add(LegacySpecifier(specifier))
-
-         # Turn our parsed specifiers into a frozen set and save them for later.
-         self._specs = frozenset(parsed)
-
-         # Store our prereleases value so we can use it later to determine if
-         # we accept prereleases or not.
-         self._prereleases = prereleases
-
-     def __repr__(self) -> str:
-         pre = (
-             f", prereleases={self.prereleases!r}"
-             if self._prereleases is not None
-             else ""
-         )
-
-         return f"<SpecifierSet({str(self)!r}{pre})>"
-
-     def __str__(self) -> str:
-         return ",".join(sorted(str(s) for s in self._specs))
-
-     def __hash__(self) -> int:
-         return hash(self._specs)
-
-     def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
-         if isinstance(other, str):
-             other = SpecifierSet(other)
-         elif not isinstance(other, SpecifierSet):
-             return NotImplemented
-
-         specifier = SpecifierSet()
-         specifier._specs = frozenset(self._specs | other._specs)
-
-         if self._prereleases is None and other._prereleases is not None:
-             specifier._prereleases = other._prereleases
-         elif self._prereleases is not None and other._prereleases is None:
-             specifier._prereleases = self._prereleases
-         elif self._prereleases == other._prereleases:
-             specifier._prereleases = self._prereleases
-         else:
-             raise ValueError(
-                 "Cannot combine SpecifierSets with True and False prerelease "
-                 "overrides."
-             )
-
-         return specifier
-
-     def __eq__(self, other: object) -> bool:
-         if isinstance(other, (str, _IndividualSpecifier)):
-             other = SpecifierSet(str(other))
-         elif not isinstance(other, SpecifierSet):
-             return NotImplemented
-
-         return self._specs == other._specs
-
-     def __len__(self) -> int:
-         return len(self._specs)
-
-     def __iter__(self) -> Iterator[_IndividualSpecifier]:
-         return iter(self._specs)
-
-     @property
-     def prereleases(self) -> Optional[bool]:
-
-         # If we have been given an explicit prerelease modifier, then we'll
-         # pass that through here.
-         if self._prereleases is not None:
-             return self._prereleases
-
-         # If we don't have any specifiers, and we don't have a forced value,
-         # then we'll just return None since we don't know if this should have
-         # pre-releases or not.
-         if not self._specs:
-             return None
-
-         # Otherwise we'll see if any of the given specifiers accept
-         # prereleases. If any of them do, we'll return True, otherwise False.
-         return any(s.prereleases for s in self._specs)
-
-     @prereleases.setter
-     def prereleases(self, value: bool) -> None:
-         self._prereleases = value
-
-     def __contains__(self, item: UnparsedVersion) -> bool:
-         return self.contains(item)
-
-     def contains(
-         self, item: UnparsedVersion, prereleases: Optional[bool] = None
-     ) -> bool:
-
-         # Ensure that our item is a Version or LegacyVersion instance.
-         if not isinstance(item, (LegacyVersion, Version)):
-             item = parse(item)
-
-         # Determine if we're forcing a prerelease or not. If we're not forcing
-         # one for this particular filter call, then we'll use whatever the
-         # SpecifierSet thinks for whether or not we should support prereleases.
-         if prereleases is None:
-             prereleases = self.prereleases
-
-         # We can determine if we're going to allow pre-releases by looking to
-         # see if any of the underlying items supports them. If none of them do
-         # and this item is a pre-release then we do not allow it and we can
-         # short circuit that here.
-         # Note: This means that 1.0.dev1 would not be contained in something
-         # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
-         if not prereleases and item.is_prerelease:
-             return False
-
-         # We simply dispatch to the underlying specs here to make sure that the
-         # given version is contained within all of them.
-         # Note: This use of all() here means that an empty set of specifiers
-         # will always return True. This is an explicit design decision.
-         return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
-     def filter(
-         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-     ) -> Iterable[VersionTypeVar]:
-
-         # Determine if we're forcing a prerelease or not. If we're not forcing
-         # one for this particular filter call, then we'll use whatever the
-         # SpecifierSet thinks for whether or not we should support prereleases.
-         if prereleases is None:
-             prereleases = self.prereleases
-
-         # If we have any specifiers, then we want to wrap our iterable in the
-         # filter method for each one; this will act as a logical AND amongst
-         # each specifier.
-         if self._specs:
-             for spec in self._specs:
-                 iterable = spec.filter(iterable, prereleases=bool(prereleases))
-             return iterable
-         # If we do not have any specifiers, then we need to have a rough filter
-         # which will filter out any pre-releases, unless there are no final
-         # releases, and which will filter out LegacyVersion in general.
-         else:
-             filtered: List[VersionTypeVar] = []
-             found_prereleases: List[VersionTypeVar] = []
-
-             item: UnparsedVersion
-             parsed_version: Union[Version, LegacyVersion]
-
-             for item in iterable:
-                 # Ensure that we have some kind of Version class for this item.
-                 if not isinstance(item, (LegacyVersion, Version)):
-                     parsed_version = parse(item)
-                 else:
-                     parsed_version = item
-
-                 # Filter out any item which is parsed as a LegacyVersion.
-                 if isinstance(parsed_version, LegacyVersion):
-                     continue
-
-                 # Store any item which is a pre-release for later, unless we've
-                 # already found a final version or we are accepting prereleases.
-                 if parsed_version.is_prerelease and not prereleases:
-                     if not filtered:
-                         found_prereleases.append(item)
-                 else:
-                     filtered.append(item)
-
-             # If we've found no items except for pre-releases, then we'll go
-             # ahead and use the pre-releases.
-             if not filtered and found_prereleases and prereleases is None:
-                 return found_prereleases
-
-             return filtered
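For context, a minimal usage sketch of the `SpecifierSet` API defined above; the version strings are invented, but this is standard packaging behavior:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,<2.0")
    print("1.4" in spec)   # True
    print("2.1" in spec)   # False
    print(list(spec.filter(["0.9", "1.2", "1.5rc1", "2.0"])))
    # ['1.2'] -- the pre-release is dropped because no specifier opts into pre-releases
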
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/retinanet.py DELETED
@@ -1,53 +0,0 @@
- # -*- coding: utf-8 -*-
-
- from detectron2.config import LazyCall as L
- from detectron2.layers import ShapeSpec
- from detectron2.modeling.meta_arch import RetinaNet
- from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
- from detectron2.modeling.backbone.fpn import LastLevelP6P7
- from detectron2.modeling.backbone import BasicStem, FPN, ResNet
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.modeling.matcher import Matcher
- from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
-
- model = L(RetinaNet)(
-     backbone=L(FPN)(
-         bottom_up=L(ResNet)(
-             stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
-             stages=L(ResNet.make_default_stages)(
-                 depth=50,
-                 stride_in_1x1=True,
-                 norm="FrozenBN",
-             ),
-             out_features=["res3", "res4", "res5"],
-         ),
-         in_features=["res3", "res4", "res5"],
-         out_channels=256,
-         top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"),
-     ),
-     head=L(RetinaNetHead)(
-         # Shape for each input feature map
-         input_shape=[ShapeSpec(channels=256)] * 5,
-         num_classes="${..num_classes}",
-         conv_dims=[256, 256, 256, 256],
-         prior_prob=0.01,
-         num_anchors=9,
-     ),
-     anchor_generator=L(DefaultAnchorGenerator)(
-         sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]],
-         aspect_ratios=[0.5, 1.0, 2.0],
-         strides=[8, 16, 32, 64, 128],
-         offset=0.0,
-     ),
-     box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
-     anchor_matcher=L(Matcher)(
-         thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True
-     ),
-     num_classes=80,
-     head_in_features=["p3", "p4", "p5", "p6", "p7"],
-     focal_loss_alpha=0.25,
-     focal_loss_gamma=2.0,
-     pixel_mean=[103.530, 116.280, 123.675],
-     pixel_std=[1.0, 1.0, 1.0],
-     input_format="BGR",
- )
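For reference, a lazy config like the one above is typically materialized with detectron2's `instantiate`; a minimal sketch, assuming a standard detectron2 install (the config path below is hypothetical):

    from detectron2.config import LazyConfig, instantiate

    cfg = LazyConfig.load("configs/common/models/retinanet.py")  # hypothetical path
    model = instantiate(cfg.model)  # recursively builds RetinaNet from the L(...) nodes
    print(type(model).__name__)     # RetinaNet
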
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/rpn.py DELETED
@@ -1,533 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from typing import Dict, List, Optional, Tuple, Union
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from detectron2.config import configurable
- from detectron2.layers import Conv2d, ShapeSpec, cat
- from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
- from detectron2.utils.events import get_event_storage
- from detectron2.utils.memory import retry_if_cuda_oom
- from detectron2.utils.registry import Registry
-
- from ..anchor_generator import build_anchor_generator
- from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
- from ..matcher import Matcher
- from ..sampling import subsample_labels
- from .build import PROPOSAL_GENERATOR_REGISTRY
- from .proposal_utils import find_top_rpn_proposals
-
- RPN_HEAD_REGISTRY = Registry("RPN_HEAD")
- RPN_HEAD_REGISTRY.__doc__ = """
- Registry for RPN heads, which take feature maps and perform
- objectness classification and bounding box regression for anchors.
-
- The registered object will be called with `obj(cfg, input_shape)`.
- The call should return a `nn.Module` object.
- """
-
-
- """
- Shape shorthand in this module:
-
-     N: number of images in the minibatch
-     L: number of feature maps per image on which RPN is run
-     A: number of cell anchors (must be the same for all feature maps)
-     Hi, Wi: height and width of the i-th feature map
-     B: size of the box parameterization
-
- Naming convention:
-
-     objectness: refers to the binary classification of an anchor as object vs. not object.
-
-     deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
-     transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes.
-
-     pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
-         sigmoid(pred_objectness_logits) to estimate P(object).
-
-     gt_labels: ground-truth binary classification labels for objectness
-
-     pred_anchor_deltas: predicted box2box transform deltas
-
-     gt_anchor_deltas: ground-truth box2box transform deltas
- """
-
-
- def build_rpn_head(cfg, input_shape):
-     """
-     Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
-     """
-     name = cfg.MODEL.RPN.HEAD_NAME
-     return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape)
-
-
- @RPN_HEAD_REGISTRY.register()
- class StandardRPNHead(nn.Module):
-     """
-     Standard RPN classification and regression heads described in :paper:`Faster R-CNN`.
-     Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts
-     objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas
-     specifying how to deform each anchor into an object proposal.
-     """
-
-     @configurable
-     def __init__(
-         self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,)
-     ):
-         """
-         NOTE: this interface is experimental.
-
-         Args:
-             in_channels (int): number of input feature channels. When using multiple
-                 input features, they must have the same number of channels.
-             num_anchors (int): number of anchors to predict for *each spatial position*
-                 on the feature map. The total number of anchors for each
-                 feature map will be `num_anchors * H * W`.
-             box_dim (int): dimension of a box, which is also the number of box regression
-                 predictions to make for each anchor. An axis aligned box has
-                 box_dim=4, while a rotated box has box_dim=5.
-             conv_dims (list[int]): a list of integers representing the output channels
-                 of N conv layers. Set it to -1 to use the same number of output channels
-                 as input channels.
-         """
-         super().__init__()
-         cur_channels = in_channels
-         # Keeping the old variable names and structure for backwards compatibility.
-         # Otherwise the old checkpoints will fail to load.
-         if len(conv_dims) == 1:
-             out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0]
-             # 3x3 conv for the hidden representation
-             self.conv = self._get_rpn_conv(cur_channels, out_channels)
-             cur_channels = out_channels
-         else:
-             self.conv = nn.Sequential()
-             for k, conv_dim in enumerate(conv_dims):
-                 out_channels = cur_channels if conv_dim == -1 else conv_dim
-                 if out_channels <= 0:
-                     raise ValueError(
-                         f"Conv output channels should be greater than 0. Got {out_channels}"
-                     )
-                 conv = self._get_rpn_conv(cur_channels, out_channels)
-                 self.conv.add_module(f"conv{k}", conv)
-                 cur_channels = out_channels
-         # 1x1 conv for predicting objectness logits
-         self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1)
-         # 1x1 conv for predicting box2box transform deltas
-         self.anchor_deltas = nn.Conv2d(cur_channels, num_anchors * box_dim, kernel_size=1, stride=1)
-
-         # Keeping the order of weights initialization same for backwards compatibility.
-         for layer in self.modules():
-             if isinstance(layer, nn.Conv2d):
-                 nn.init.normal_(layer.weight, std=0.01)
-                 nn.init.constant_(layer.bias, 0)
-
-     def _get_rpn_conv(self, in_channels, out_channels):
-         return Conv2d(
-             in_channels,
-             out_channels,
-             kernel_size=3,
-             stride=1,
-             padding=1,
-             activation=nn.ReLU(),
-         )
-
-     @classmethod
-     def from_config(cls, cfg, input_shape):
-         # Standard RPN is shared across levels:
-         in_channels = [s.channels for s in input_shape]
-         assert len(set(in_channels)) == 1, "Each level must have the same channel!"
-         in_channels = in_channels[0]
-
-         # RPNHead should take the same input as anchor generator
-         # NOTE: it assumes that creating an anchor generator does not have unwanted side effect.
-         anchor_generator = build_anchor_generator(cfg, input_shape)
-         num_anchors = anchor_generator.num_anchors
-         box_dim = anchor_generator.box_dim
-         assert (
-             len(set(num_anchors)) == 1
-         ), "Each level must have the same number of anchors per spatial position"
-         return {
-             "in_channels": in_channels,
-             "num_anchors": num_anchors[0],
-             "box_dim": box_dim,
-             "conv_dims": cfg.MODEL.RPN.CONV_DIMS,
-         }
-
-     def forward(self, features: List[torch.Tensor]):
-         """
-         Args:
-             features (list[Tensor]): list of feature maps
-
-         Returns:
-             list[Tensor]: A list of L elements.
-                 Element i is a tensor of shape (N, A, Hi, Wi) representing
-                 the predicted objectness logits for all anchors. A is the number of cell anchors.
-             list[Tensor]: A list of L elements. Element i is a tensor of shape
-                 (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors
-                 to proposals.
-         """
-         pred_objectness_logits = []
-         pred_anchor_deltas = []
-         for x in features:
-             t = self.conv(x)
-             pred_objectness_logits.append(self.objectness_logits(t))
-             pred_anchor_deltas.append(self.anchor_deltas(t))
-         return pred_objectness_logits, pred_anchor_deltas
-
-
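A small sketch of the head's per-level output shapes on dummy inputs (assumes torch and detectron2 are installed; the batch/anchor/spatial sizes are arbitrary):

    import torch
    from detectron2.modeling.proposal_generator.rpn import StandardRPNHead

    head = StandardRPNHead(in_channels=256, num_anchors=9)  # box_dim defaults to 4
    feats = [torch.randn(2, 256, 32, 32), torch.randn(2, 256, 16, 16)]
    logits, deltas = head(feats)
    print([tuple(t.shape) for t in logits])  # [(2, 9, 32, 32), (2, 9, 16, 16)]
    print([tuple(t.shape) for t in deltas])  # [(2, 36, 32, 32), (2, 36, 16, 16)]
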
- @PROPOSAL_GENERATOR_REGISTRY.register()
- class RPN(nn.Module):
-     """
-     Region Proposal Network, introduced by :paper:`Faster R-CNN`.
-     """
-
-     @configurable
-     def __init__(
-         self,
-         *,
-         in_features: List[str],
-         head: nn.Module,
-         anchor_generator: nn.Module,
-         anchor_matcher: Matcher,
-         box2box_transform: Box2BoxTransform,
-         batch_size_per_image: int,
-         positive_fraction: float,
-         pre_nms_topk: Tuple[float, float],
-         post_nms_topk: Tuple[float, float],
-         nms_thresh: float = 0.7,
-         min_box_size: float = 0.0,
-         anchor_boundary_thresh: float = -1.0,
-         loss_weight: Union[float, Dict[str, float]] = 1.0,
-         box_reg_loss_type: str = "smooth_l1",
-         smooth_l1_beta: float = 0.0,
-     ):
-         """
-         NOTE: this interface is experimental.
-
-         Args:
-             in_features (list[str]): list of names of input features to use
-             head (nn.Module): a module that predicts logits and regression deltas
-                 for each level from a list of per-level features
-             anchor_generator (nn.Module): a module that creates anchors from a
-                 list of features. Usually an instance of :class:`AnchorGenerator`
-             anchor_matcher (Matcher): label the anchors by matching them with ground truth.
-             box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to
-                 instance boxes
-             batch_size_per_image (int): number of anchors per image to sample for training
-             positive_fraction (float): fraction of foreground anchors to sample for training
-             pre_nms_topk (tuple[float]): (train, test) that represents the
-                 number of top k proposals to select before NMS, in
-                 training and testing.
-             post_nms_topk (tuple[float]): (train, test) that represents the
-                 number of top k proposals to select after NMS, in
-                 training and testing.
-             nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals
-             min_box_size (float): remove proposal boxes with any side smaller than this threshold,
-                 in the unit of input image pixels
-             anchor_boundary_thresh (float): legacy option
-             loss_weight (float|dict): weights to use for losses. Can be single float for weighting
-                 all rpn losses together, or a dict of individual weightings. Valid dict keys are:
-                     "loss_rpn_cls" - applied to classification loss
-                     "loss_rpn_loc" - applied to box regression loss
-             box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou".
-             smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
-                 use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
-         """
-         super().__init__()
-         self.in_features = in_features
-         self.rpn_head = head
-         self.anchor_generator = anchor_generator
-         self.anchor_matcher = anchor_matcher
-         self.box2box_transform = box2box_transform
-         self.batch_size_per_image = batch_size_per_image
-         self.positive_fraction = positive_fraction
-         # Map from self.training state to train/test settings
-         self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]}
-         self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]}
-         self.nms_thresh = nms_thresh
-         self.min_box_size = float(min_box_size)
-         self.anchor_boundary_thresh = anchor_boundary_thresh
-         if isinstance(loss_weight, float):
-             loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight}
-         self.loss_weight = loss_weight
-         self.box_reg_loss_type = box_reg_loss_type
-         self.smooth_l1_beta = smooth_l1_beta
-
-     @classmethod
-     def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
-         in_features = cfg.MODEL.RPN.IN_FEATURES
-         ret = {
-             "in_features": in_features,
-             "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE,
-             "nms_thresh": cfg.MODEL.RPN.NMS_THRESH,
-             "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE,
-             "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION,
-             "loss_weight": {
-                 "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT,
-                 "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT,
-             },
-             "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH,
-             "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS),
-             "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE,
-             "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA,
-         }
-
-         ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST)
-         ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST)
-
-         ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features])
-         ret["anchor_matcher"] = Matcher(
-             cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
-         )
-         ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features])
-         return ret
-
-     def _subsample_labels(self, label):
-         """
-         Randomly sample a subset of positive and negative examples, and overwrite
-         the label vector to the ignore value (-1) for all elements that are not
-         included in the sample.
-
-         Args:
-             label (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
-         """
-         pos_idx, neg_idx = subsample_labels(
-             label, self.batch_size_per_image, self.positive_fraction, 0
-         )
-         # Fill with the ignore label (-1), then set positive and negative labels
-         label.fill_(-1)
-         label.scatter_(0, pos_idx, 1)
-         label.scatter_(0, neg_idx, 0)
-         return label
-
-     @torch.jit.unused
-     @torch.no_grad()
-     def label_and_sample_anchors(
-         self, anchors: List[Boxes], gt_instances: List[Instances]
-     ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
-         """
-         Args:
-             anchors (list[Boxes]): anchors for each feature map.
-             gt_instances: the ground-truth instances for each image.
-
-         Returns:
-             list[Tensor]:
-                 List of #img tensors. i-th element is a vector of labels whose length is
-                 the total number of anchors across all feature maps R = sum(Hi * Wi * A).
-                 Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative
-                 class; 1 = positive class.
-             list[Tensor]:
-                 i-th element is a Rx4 tensor. The values are the matched gt boxes for each
-                 anchor. Values are undefined for those anchors not labeled as 1.
-         """
-         anchors = Boxes.cat(anchors)
-
-         gt_boxes = [x.gt_boxes for x in gt_instances]
-         image_sizes = [x.image_size for x in gt_instances]
-         del gt_instances
-
-         gt_labels = []
-         matched_gt_boxes = []
-         for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes):
-             """
-             image_size_i: (h, w) for the i-th image
-             gt_boxes_i: ground-truth boxes for i-th image
-             """
-
-             match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
-             matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
-             # Matching is memory-expensive and may result in CPU tensors. But the result is small
-             gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
-             del match_quality_matrix
-
-             if self.anchor_boundary_thresh >= 0:
-                 # Discard anchors that go out of the boundaries of the image
-                 # NOTE: This is legacy functionality that is turned off by default in Detectron2
-                 anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh)
-                 gt_labels_i[~anchors_inside_image] = -1
-
-             # A vector of labels (-1, 0, 1) for each anchor
-             gt_labels_i = self._subsample_labels(gt_labels_i)
-
-             if len(gt_boxes_i) == 0:
-                 # These values won't be used anyway since the anchor is labeled as background
-                 matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
-             else:
-                 # TODO wasted indexing computation for ignored boxes
-                 matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
-
-             gt_labels.append(gt_labels_i)  # N,AHW
-             matched_gt_boxes.append(matched_gt_boxes_i)
-         return gt_labels, matched_gt_boxes
-
-     @torch.jit.unused
-     def losses(
-         self,
-         anchors: List[Boxes],
-         pred_objectness_logits: List[torch.Tensor],
-         gt_labels: List[torch.Tensor],
-         pred_anchor_deltas: List[torch.Tensor],
-         gt_boxes: List[torch.Tensor],
-     ) -> Dict[str, torch.Tensor]:
-         """
-         Return the losses from a set of RPN predictions and their associated ground-truth.
-
-         Args:
-             anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each
-                 has shape (Hi*Wi*A, B), where B is box dimension (4 or 5).
-             pred_objectness_logits (list[Tensor]): A list of L elements.
-                 Element i is a tensor of shape (N, Hi*Wi*A) representing
-                 the predicted objectness logits for all anchors.
-             gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
-             pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
-                 (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors
-                 to proposals.
-             gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
-
-         Returns:
-             dict[loss name -> loss value]: A dict mapping from loss name to loss value.
-                 Loss names are: `loss_rpn_cls` for objectness classification and
-                 `loss_rpn_loc` for proposal localization.
-         """
-         num_images = len(gt_labels)
-         gt_labels = torch.stack(gt_labels)  # (N, sum(Hi*Wi*Ai))
-
-         # Log the number of positive/negative anchors per-image that's used in training
-         pos_mask = gt_labels == 1
-         num_pos_anchors = pos_mask.sum().item()
-         num_neg_anchors = (gt_labels == 0).sum().item()
-         storage = get_event_storage()
-         storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images)
-         storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images)
-
-         localization_loss = _dense_box_regression_loss(
-             anchors,
-             self.box2box_transform,
-             pred_anchor_deltas,
-             gt_boxes,
-             pos_mask,
-             box_reg_loss_type=self.box_reg_loss_type,
-             smooth_l1_beta=self.smooth_l1_beta,
-         )
-
-         valid_mask = gt_labels >= 0
-         objectness_loss = F.binary_cross_entropy_with_logits(
-             cat(pred_objectness_logits, dim=1)[valid_mask],
-             gt_labels[valid_mask].to(torch.float32),
-             reduction="sum",
-         )
-         normalizer = self.batch_size_per_image * num_images
-         losses = {
-             "loss_rpn_cls": objectness_loss / normalizer,
-             # The original Faster R-CNN paper uses a slightly different normalizer
-             # for loc loss. But it doesn't matter in practice
-             "loss_rpn_loc": localization_loss / normalizer,
-         }
-         losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
-         return losses
-
-     def forward(
-         self,
-         images: ImageList,
-         features: Dict[str, torch.Tensor],
-         gt_instances: Optional[List[Instances]] = None,
-     ):
-         """
-         Args:
-             images (ImageList): input images of length `N`
-             features (dict[str, Tensor]): input data as a mapping from feature
-                 map name to tensor. Axis 0 represents the number of images `N` in
-                 the input data; axes 1-3 are channels, height, and width, which may
-                 vary between feature maps (e.g., if a feature pyramid is used).
-             gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
-                 Each `Instances` stores ground-truth instances for the corresponding image.
-
-         Returns:
-             proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits"
-             loss: dict[Tensor] or None
-         """
-         features = [features[f] for f in self.in_features]
-         anchors = self.anchor_generator(features)
-
-         pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
-         # Transpose the Hi*Wi*A dimension to the middle:
-         pred_objectness_logits = [
-             # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
-             score.permute(0, 2, 3, 1).flatten(1)
-             for score in pred_objectness_logits
-         ]
-         pred_anchor_deltas = [
-             # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
-             x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1])
-             .permute(0, 3, 4, 1, 2)
-             .flatten(1, -2)
-             for x in pred_anchor_deltas
-         ]
-
-         if self.training:
-             assert gt_instances is not None, "RPN requires gt_instances in training!"
-             gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
-             losses = self.losses(
-                 anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes
-             )
-         else:
-             losses = {}
-         proposals = self.predict_proposals(
-             anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes
-         )
-         return proposals, losses
-
-     def predict_proposals(
-         self,
-         anchors: List[Boxes],
-         pred_objectness_logits: List[torch.Tensor],
-         pred_anchor_deltas: List[torch.Tensor],
-         image_sizes: List[Tuple[int, int]],
-     ):
-         """
-         Decode all the predicted box regression deltas to proposals. Find the top proposals
-         by applying NMS and removing boxes that are too small.
-
-         Returns:
-             proposals (list[Instances]): list of N Instances. The i-th Instances
-                 stores post_nms_topk object proposals for image i, sorted by their
-                 objectness score in descending order.
-         """
-         # The proposals are treated as fixed for joint training with roi heads.
-         # This approach ignores the derivative w.r.t. the proposal boxes' coordinates that
-         # are also network responses.
-         with torch.no_grad():
-             pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
-             return find_top_rpn_proposals(
-                 pred_proposals,
-                 pred_objectness_logits,
-                 image_sizes,
-                 self.nms_thresh,
-                 self.pre_nms_topk[self.training],
-                 self.post_nms_topk[self.training],
-                 self.min_box_size,
-                 self.training,
-             )
-
-     def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):
-         """
-         Transform anchors into proposals by applying the predicted anchor deltas.
-
-         Returns:
-             proposals (list[Tensor]): A list of L tensors. Tensor i has shape
-                 (N, Hi*Wi*A, B)
-         """
-         N = pred_anchor_deltas[0].shape[0]
-         proposals = []
-         # For each feature map
-         for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas):
-             B = anchors_i.tensor.size(1)
-             pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B)
-             # Expand anchors to shape (N*Hi*Wi*A, B)
-             anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B)
-             proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
-             # Append feature map proposals with shape (N, Hi*Wi*A, B)
-             proposals.append(proposals_i.view(N, -1, B))
-         return proposals
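To make the (N, A*B, Hi, Wi) -> (N, Hi*Wi*A, B) transposition in `RPN.forward` above concrete, here is the same reshape on a dummy tensor (torch only; the sizes are arbitrary):

    import torch

    N, A, B, H, W = 2, 3, 4, 8, 8
    x = torch.randn(N, A * B, H, W)  # raw head output for one feature level
    y = x.view(N, A, B, H, W).permute(0, 3, 4, 1, 2).flatten(1, -2)
    print(tuple(y.shape))            # (2, 192, 4), i.e. (N, H*W*A, B)
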
spaces/Benson/text-generation/Examples/Cazador Asesino Hack Mod Apk Todos Los Personajes Desbloqueados.md DELETED
@@ -1,65 +0,0 @@
-
- <h1>Hunter Assassin Hack Mod APK: All Characters Unlocked</h1>
- <p>If you are looking for a fun, addictive game that challenges your reflexes and stealth skills, you may want to try Hunter Assassin. The game is a hit with millions of players who enjoy sneaking around and eliminating enemies with a knife. But what if you want to unlock all the characters and enjoy the game without limitations? That is where Hunter Assassin Hack Mod APK comes in. In this article, we will tell you everything you need to know about this mod apk, how to download and install it, and some tips and tricks for playing Hunter Assassin.</p>
- <h2>What is Hunter Assassin?</h2>
- <p>Hunter Assassin is a game developed by Ruby Game Studio, the same creators of popular games such as Gym Flip and Idle Digging Tycoon. The game is available for Android and iOS devices and has more than 100 million downloads on the Google Play Store. It has a simple but engaging premise: you are an assassin who has to infiltrate a base full of armed guards and eliminate them one by one. Sounds easy, right? Well, not quite. The guards have guns and can shoot you from a distance, while you only have a knife and your agility. You have to use the shadows, avoid the spotlights, and plan your moves carefully to avoid being detected and killed. </p>
- <h2>hunter assassin hack mod apk all characters unlocked</h2><br /><p><b><b>Download Zip</b> &#10026; <a href="https://bltlly.com/2v6K7z">https://bltlly.com/2v6K7z</a></b></p><br /><br />
- <h3>Gameplay and features</h3>
- <p>The gameplay of Hunter Assassin is straightforward: tap the screen to move your character and attack the guards. You have to be fast and precise, since the guards will react to any noise or movement. You also have to watch out for traps, such as mines and lasers, which can hurt you. The game has hundreds of levels, each with a different layout and number of enemies. The difficulty increases as you progress, and you will face more challenges and obstacles. </p>
-
- <h3>How to unlock characters</h3>
- <p>As mentioned before, there are two ways to unlock characters in Hunter Assassin: gems and keys. Gems are the game's main currency, and you can use them to buy random characters from the shop. The price of each character varies depending on its rarity, from 500 gems for common ones up to 1000 gems for legendary ones. You can also use gems to upgrade your characters and boost their stats. </p>
- <p>Keys are another way to unlock characters, but they are harder to get. Keys are used to open chests that contain random characters or gems. You can earn keys by completing certain levels or achievements, or by watching ads. You need 36 keys to open a chest, which means you have to play a lot of levels or watch a lot of ads to collect enough keys. </p>
- <h2>What is Hunter Assassin Hack Mod APK?</h2>
- <p>If you do not want to spend hours playing levels or watching ads to unlock characters, there is another option: Hunter Assassin Hack Mod APK. This is a modified version of the original game that gives you unlimited gems and all characters unlocked from the start. That way, you can enjoy the game without any restrictions or limitations. </p>
- <h3>Benefits of using the mod apk</h3>
- <p>There are many benefits to using Hunter Assassin Hack Mod APK, such as:</p>
- <ul>
- <li>You can access all the characters without spending any gems or keys. </li>
- <li>You can upgrade your characters to the maximum level without spending any gems. </li>
- <li>You can play any level without worrying about your health or the enemies. </li>
- <li>You can enjoy the game without ads or interruptions. </li>
- <li>You can have more fun and excitement with the game. </li>
- </ul>
- <h3>How to download and install the mod apk</h3>
- <p>Downloading and installing Hunter Assassin Hack Mod APK is very quick and simple. You just have to follow these steps:</p>
- <p></p>
- <ol>
- <li>Click this link to download the mod apk file: [Hunter Assassin Hack Mod APK Download]. </li>
-
- <li>Locate the downloaded file in your device's file manager and tap it to install it. </li>
- <li>Launch the game and enjoy! </li>
- </ol>
- <h2>Tips and tricks for playing Hunter Assassin</h2>
- <p>Now that you have Hunter Assassin Hack Mod APK, you can play the game more easily and have more fun. However, that does not mean you need no skill or strategy to play it. Here are some tips and tricks that can help you master the game and become a professional assassin:</p>
- <h3>Use stealth and speed</h3>
- <p>The key to playing Hunter Assassin is being stealthy and fast. You have to avoid being seen or heard by the guards, since they will shoot you on sight. You also have to be quick and decisive, since the guards will react to any movement or noise. You can use shadows, walls, boxes, and other objects to hide and sneak around. You can also use the map to see where the guards are and plan your moves accordingly. Remember, timing is everything in this game. </p>
- <h3>Upgrade your characters</h3>
- <p>Even though you have all the characters unlocked, you still need to upgrade them to improve their performance. Each character has three stats: speed, health, and skill. Speed determines how fast your character moves and attacks. Health determines how much damage your character can take before dying. Skill determines how effective your character's special ability is. You can upgrade these stats by spending gems, which you can earn by playing levels or opening chests. Upgrading your characters will make them more powerful and versatile, and will help you complete levels faster and more easily. </p>
- <h3>Collect gems and keys</h3>
-
- <h2>Conclusion</h2>
- <p>Hunter Assassin is a fun and addictive game that tests your reflexes and stealth skills. You have to infiltrate a base full of armed guards and eliminate them one by one with a knife. You can unlock different characters with unique abilities and stats, and upgrade them to make them more powerful. You can also use Hunter Assassin Hack Mod APK to get unlimited gems and all characters unlocked from the start, which will make the game more enjoyable and exciting. </p>
- <h3>Summary of the main points</h3>
- <p>In this article, we have covered:</p>
- <ul>
- <li>What Hunter Assassin is and how to play it. </li>
- <li>What Hunter Assassin Hack Mod APK is and how to download and install it. </li>
- <li>Tips and tricks for playing Hunter Assassin.</li>
- </ul>
- <h3>Call to action</h3>
- <p>If you are ready to become a hunter assassin, download Hunter Assassin Hack Mod APK now and start playing! You will love this game if you like stealth, action, and a challenge. Do not forget to share this article with friends who might also enjoy the game. Happy hunting! </p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions about Hunter Assassin Hack Mod APK:</p>
- <h4>Q: Is Hunter Assassin Hack Mod APK safe to use? </h4>
- <p>A: Yes, Hunter Assassin Hack Mod APK is safe to use. It does not contain any viruses or malware that could harm your device or data. However, you should always download it from a trusted source like this one, since some websites may offer fake or harmful files. </p>
- <h4>Q: Do I need to root or jailbreak my device to use Hunter Assassin Hack Mod APK? </h4>
- <p>A: No, you do not need to root or jailbreak your device to use Hunter Assassin Hack Mod APK. It works on both rooted and non-rooted devices, as well as on Android and iOS devices. </p>
- <h4>Q: Will I get banned from the game if I use Hunter Assassin Hack Mod APK? </h4>
-
- <h4>Q: How can I update Hunter Assassin Hack Mod APK? </h4>
- <p>A: Hunter Assassin Hack Mod APK is updated regularly to match the latest version of the original game. Whenever there is a new update, you can download it from this website and install it over the existing one. You do not need to uninstall or reinstall the game; just overwrite the old file with the new one. </p>
- <h4>Q: Can I play Hunter Assassin Hack Mod APK offline? </h4>
- <p>A: Yes, you can play Hunter Assassin Hack Mod APK offline. The game does not require an Internet connection to run, and you can enjoy all the features of the mod apk without any problems. However, you may need an Internet connection to access some online features, such as leaderboards or achievements. </p> 64aa2da5cf<br />
- <br />
- <br />
spaces/BetterAPI/BetterChat/src/lib/types/Message.ts DELETED
@@ -1,5 +0,0 @@
- export interface Message {
-     from: "user" | "assistant";
-     id: ReturnType<typeof crypto.randomUUID>;
-     content: string;
- }
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/__init__.py DELETED
@@ -1,177 +0,0 @@
- """Rich text and beautiful formatting in the terminal."""
-
- import os
- from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
-
- from ._extension import load_ipython_extension  # noqa: F401
-
- __all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"]
-
- if TYPE_CHECKING:
-     from .console import Console
-
- # Global console used by alternative print
- _console: Optional["Console"] = None
-
- try:
-     _IMPORT_CWD = os.path.abspath(os.getcwd())
- except FileNotFoundError:
-     # Can happen if the cwd has been deleted
-     _IMPORT_CWD = ""
-
-
- def get_console() -> "Console":
-     """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
-     and hasn't been explicitly given one.
-
-     Returns:
-         Console: A console instance.
-     """
-     global _console
-     if _console is None:
-         from .console import Console
-
-         _console = Console()
-
-     return _console
-
-
- def reconfigure(*args: Any, **kwargs: Any) -> None:
-     """Reconfigures the global console by replacing it with another.
-
-     Args:
-         *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`.
-         **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`.
-     """
-     from pip._vendor.rich.console import Console
-
-     new_console = Console(*args, **kwargs)
-     _console = get_console()
-     _console.__dict__ = new_console.__dict__
-
-
- def print(
-     *objects: Any,
-     sep: str = " ",
-     end: str = "\n",
-     file: Optional[IO[str]] = None,
-     flush: bool = False,
- ) -> None:
-     r"""Print object(s) supplied via positional arguments.
-     This function has an identical signature to the built-in print.
-     For more advanced features, see the :class:`~rich.console.Console` class.
-
-     Args:
-         sep (str, optional): Separator between printed objects. Defaults to " ".
-         end (str, optional): Character to write at end of output. Defaults to "\\n".
-         file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
-         flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
-
-     """
-     from .console import Console
-
-     write_console = get_console() if file is None else Console(file=file)
-     return write_console.print(*objects, sep=sep, end=end)
-
-
- def print_json(
-     json: Optional[str] = None,
-     *,
-     data: Any = None,
-     indent: Union[None, int, str] = 2,
-     highlight: bool = True,
-     skip_keys: bool = False,
-     ensure_ascii: bool = False,
-     check_circular: bool = True,
-     allow_nan: bool = True,
-     default: Optional[Callable[[Any], Any]] = None,
-     sort_keys: bool = False,
- ) -> None:
-     """Pretty prints JSON. Output will be valid JSON.
-
-     Args:
-         json (str): A string containing JSON.
-         data (Any): If json is not supplied, then encode this data.
-         indent (int, optional): Number of spaces to indent. Defaults to 2.
-         highlight (bool, optional): Enable highlighting of output: Defaults to True.
-         skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
-         ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
-         check_circular (bool, optional): Check for circular references. Defaults to True.
-         allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
-         default (Callable, optional): A callable that converts values that can not be encoded
-             in to something that can be JSON encoded. Defaults to None.
-         sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
-     """
-
-     get_console().print_json(
-         json,
-         data=data,
-         indent=indent,
-         highlight=highlight,
-         skip_keys=skip_keys,
-         ensure_ascii=ensure_ascii,
-         check_circular=check_circular,
-         allow_nan=allow_nan,
-         default=default,
-         sort_keys=sort_keys,
-     )
-
-
- def inspect(
-     obj: Any,
-     *,
-     console: Optional["Console"] = None,
-     title: Optional[str] = None,
-     help: bool = False,
-     methods: bool = False,
-     docs: bool = True,
-     private: bool = False,
-     dunder: bool = False,
-     sort: bool = True,
-     all: bool = False,
-     value: bool = True,
- ) -> None:
-     """Inspect any Python object.
-
-     * inspect(<OBJECT>) to see summarized info.
-     * inspect(<OBJECT>, methods=True) to see methods.
-     * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
-     * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
-     * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
-     * inspect(<OBJECT>, all=True) to see all attributes.
-
-     Args:
-         obj (Any): An object to inspect.
-         title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
-         help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
-         methods (bool, optional): Enable inspection of callables. Defaults to False.
-         docs (bool, optional): Also render doc strings. Defaults to True.
-         private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
-         dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
-         sort (bool, optional): Sort attributes alphabetically. Defaults to True.
-         all (bool, optional): Show all attributes. Defaults to False.
-         value (bool, optional): Pretty print value. Defaults to True.
-     """
-     _console = console or get_console()
-     from pip._vendor.rich._inspect import Inspect
-
-     # Special case for inspect(inspect)
-     is_inspect = obj is inspect
-
-     _inspect = Inspect(
-         obj,
-         title=title,
-         help=is_inspect or help,
-         methods=is_inspect or methods,
-         docs=is_inspect or docs,
-         private=private,
-         dunder=dunder,
-         sort=sort,
-         all=all,
-         value=value,
-     )
-     _console.print(_inspect)
-
-
- if __name__ == "__main__":  # pragma: no cover
-     print("Hello, **World**")
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/rotate.py DELETED
@@ -1,64 +0,0 @@
- from distutils.util import convert_path
- from distutils import log
- from distutils.errors import DistutilsOptionError
- import os
- import shutil
-
- from setuptools import Command
-
-
- class rotate(Command):
-     """Delete older distributions"""
-
-     description = "delete older distributions, keeping N newest files"
-     user_options = [
-         ('match=', 'm', "patterns to match (required)"),
-         ('dist-dir=', 'd', "directory where the distributions are"),
-         ('keep=', 'k', "number of matching distributions to keep"),
-     ]
-
-     boolean_options = []
-
-     def initialize_options(self):
-         self.match = None
-         self.dist_dir = None
-         self.keep = None
-
-     def finalize_options(self):
-         if self.match is None:
-             raise DistutilsOptionError(
-                 "Must specify one or more (comma-separated) match patterns "
-                 "(e.g. '.zip' or '.egg')"
-             )
-         if self.keep is None:
-             raise DistutilsOptionError("Must specify number of files to keep")
-         try:
-             self.keep = int(self.keep)
-         except ValueError as e:
-             raise DistutilsOptionError("--keep must be an integer") from e
-         if isinstance(self.match, str):
-             self.match = [
-                 convert_path(p.strip()) for p in self.match.split(',')
-             ]
-         self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
-     def run(self):
-         self.run_command("egg_info")
-         from glob import glob
-
-         for pattern in self.match:
-             pattern = self.distribution.get_name() + '*' + pattern
-             files = glob(os.path.join(self.dist_dir, pattern))
-             files = [(os.path.getmtime(f), f) for f in files]
-             files.sort()
-             files.reverse()
-
-             log.info("%d file(s) matching %s", len(files), pattern)
-             files = files[self.keep:]
-             for (t, f) in files:
-                 log.info("Deleting %s", f)
-                 if not self.dry_run:
-                     if os.path.isdir(f):
-                         shutil.rmtree(f)
-                     else:
-                         os.unlink(f)
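The command is normally invoked as `python setup.py rotate --match=.egg --keep=2`. A standalone sketch of the same keep-N-newest logic (the directory and pattern below are illustrative, not from the repo):

import os
import shutil
from glob import glob

def rotate_dists(dist_dir, pattern, keep):
    # Sort matching files newest-first by mtime, then delete everything
    # past the first `keep`, mirroring rotate.run() above.
    files = sorted(glob(os.path.join(dist_dir, pattern)),
                   key=os.path.getmtime, reverse=True)
    for path in files[keep:]:
        shutil.rmtree(path) if os.path.isdir(path) else os.unlink(path)

rotate_dists("dist", "mypkg*.egg", keep=2)  # "mypkg" is a placeholder name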
spaces/BigData-KSU/VQA-in-Medical-Imagery/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Visual Question Answering in Medical Imagery
- emoji: 🧑‍⚕️
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 3.15.0
- app_file: MED_VQA_Huggyface_Gradio.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bokanovskii/Image-to-music/app.py DELETED
@@ -1,429 +0,0 @@
- import gradio as gr
- import spotipy
- from spotipy import oauth2
-
- from transformers import ViTForImageClassification, ViTImageProcessor
- import torch
- from torch.nn import functional as F
- from torchvision.io import read_image
-
- import tensorflow as tf
-
- from fastapi import FastAPI
- from starlette.middleware.sessions import SessionMiddleware
- from starlette.requests import Request
- import uvicorn
- from fastapi.responses import HTMLResponse
- from fastapi.responses import RedirectResponse
-
- import numpy as np
- import base64
- import requests  # for requests.exceptions.ReadTimeout in the upload retry below
- from io import BytesIO
- from PIL import Image
- import time
-
- import shred_model
-
- # Xception fine tuned from pretrained imagenet weights for identifying Sraddha
- SRADDHA_MODEL_PATH = "shred_model"
- SHRED_MODEL = tf.keras.models.load_model(SRADDHA_MODEL_PATH)
-
- SPOTIPY_TOKEN = None  # Set in the homepage function
-
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- print("Grabbing model")
- mood_model = ViTForImageClassification.from_pretrained("jayanta/google-vit-base-patch16-224-cartoon-emotion-detection")
- mood_model.eval()
- mood_model.to(device)
- print("Grabbing feature extractor")
- mood_feature_extractor = ViTImageProcessor.from_pretrained("jayanta/google-vit-base-patch16-224-cartoon-emotion-detection")
-
- def main(img, playlist_length, privacy, gen_mode, genre_choice, request: gr.Request):
-     if img is None:
-         return None
-     print("Getting image inference from transformer")
-     mood_dict = get_image_mood_dict_from_transformer(img)
-     print("Getting Sraddha Found Boolean from model")
-     sraddha_found = get_sraddha(img)
-     print("Building playlist")
-     playlist = get_playlist(mood_dict, img, playlist_length, privacy, gen_mode, genre_choice, request)
-     if playlist is None:
-         playlist = "Spotipy account token not set"
-
-     ret = playlist
-     if sraddha_found:
-         valentines_jokes = ["Why shouldn't you trust a pastry chef on Valentine's Day? Because he will dessert you.",
-                             "What do you give your Valentine in France? A big quiche.",
-                             "What did the tortoise say on Valentine's Day? I turt-ally love you.",
-                             "How did the squirrel get his Valentine's attention? He acted like a nut.",
-                             "What do you call sweets that can keep a beat? Candy rappers.",
-                             "What did the paper clip say to the magnet? I find you very attractive.",
-                             "What did the calculator say to the pencil? You can count on me."]
-         joke = valentines_jokes[np.random.randint(0, len(valentines_jokes))]  # high bound is exclusive
-         sraddha_msg = """Sraddha, you are the love of my life and seeing you always lifts my spirits. Hopefully these tunes and a joke can do the same for you.
-         <p>
-         </p>""" + \
-             f"<p>{joke}</p><p></p>" + \
-             """- With Love, Scoob"""
-         return gr.update(value=ret, visible=True), gr.update(value=sraddha_msg, visible=True)
-     return gr.update(value=ret, visible=True), gr.update(visible=False)
-
- def get_image_mood_dict_from_transformer(img):
-     img = read_image(img)
-     encoding = mood_feature_extractor(images=img, return_tensors="pt")
-     pixel_values = encoding['pixel_values'].to(device)
-
-     print('Running mood prediction')
-     outputs = mood_model(pixel_values)
-
-     logits = outputs.logits
-     probabilities = F.softmax(logits, dim=-1).detach().numpy()[0]
-     mood_dict = dict(zip(mood_model.config.id2label.values(), probabilities))
-     return mood_dict
-
- def get_sraddha(img):
-     fixed_img = shred_model.prepare_image(img)
-     prob = SHRED_MODEL.predict(fixed_img)[0]
-     if prob >= .5:
-         return True
-
- def compute_mood(mood_dict):
-     print(mood_dict)
-     return mood_dict['happy'] + mood_dict['angry'] * .5 + mood_dict['sad'] * .1
-
- def get_playlist(mood_dict, img, playlist_length, privacy, gen_mode, genre_choice, request: gr.Request):
-     token = request.request.session.get('token')
-     genre_map = {'Rock': ['alt-rock', 'alternative', 'indie', 'r-n-b', 'rock'], 'Hip-hop': ['hip-hop'], 'Party': ['house', 'pop', 'party'], 'Mellow': ['blues', 'jazz', 'happy'], 'Indian': ['idm', 'indian'], 'Pop': ['pop', 'new-age'], 'Study': ['study', 'classical', 'jazz', 'happy', 'chill'], 'Romance': ['romance', 'happy', 'pop']}
-
-     if token:
-         mood = compute_mood(mood_dict)
-         if gen_mode == "By a Chosen Genre":
-             playlist_name = "Mood " + str(round(mood * 100, 1)) + f": {genre_choice}"
-         else:
-             playlist_name = "Mood " + str(round(mood * 100, 1)) + f": {gen_mode}"
-         sp = spotipy.Spotify(token)
-
-         if gen_mode == 'Recently Played':
-             top_tracks_uri = set([x['track']['uri'] for x in sp.current_user_recently_played(limit=50)['items']])
-             # I honestly don't know if this errors for people with not enough saved tracks
-             # Shouldn't be a problem for Sraddha
-             first_few = [x['track']['uri'] for x in sp.current_user_saved_tracks(limit=50)['items']]
-             top_tracks_uri.update(first_few)
-             top_tracks_uri.update([x['track']['uri'] for x in sp.current_user_saved_tracks(limit=50, offset=50)['items']])
-             top_tracks_uri.update([x['track']['uri'] for x in sp.current_user_saved_tracks(limit=50, offset=100)['items']])
-             top_tracks_uri.update([x['track']['uri'] for x in sp.current_user_saved_tracks(limit=50, offset=150)['items']])
-             top_tracks_uri.update([x['uri'] for x in sp.recommendations(seed_tracks=first_few[:5], limit=50)['tracks']])
-             top_tracks_uri.update([x['uri'] for x in sp.recommendations(seed_tracks=first_few[5:10], limit=50)['tracks']])
-             top_tracks_uri = list(top_tracks_uri)
-         elif gen_mode == 'By a Chosen Genre':
-             genres = genre_map[genre_choice]
-             final_track_list = [x['uri'] for x in sp.recommendations(
-                 seed_genres=genres, limit=playlist_length, max_valence=mood+.15,
-                 min_valence=mood-.15, min_danceability=mood/1.75, max_danceability=mood*8,
-                 min_energy=mood/2)['tracks']]
-         else:
-             top_artists_uri = aggregate_favorite_artists(sp)
-             top_tracks_uri = aggregate_top_tracks(sp, top_artists_uri)
-
-         if gen_mode != 'By a Chosen Genre':
-             final_track_list = filter_tracks(sp, top_tracks_uri, mood, playlist_length)
-
-         # If no tracks fit the filter: generate some results anyways
-         if len(final_track_list) != playlist_length:
-             diff = playlist_length - len(final_track_list)
-             print(f'Filling playlist with {diff} more songs (filter too big)')
-             seed = [x['track']['uri'] for x in sp.current_user_recently_played(limit=5)['items']]
-             final_track_list += [x['uri'] for x in sp.recommendations(
-                 seed_tracks=seed, limit=diff,
-                 min_valence=mood-.3, min_energy=mood/3)['tracks']]
-
-         iframe_embedding = create_playlist(sp, img, final_track_list, playlist_name,
-                                            privacy)
-         return iframe_embedding
-     return None
-
- def create_playlist(sp, img, tracks, playlist_name, privacy):
-     privacy = privacy == "Public"
-     user_id = sp.current_user()['id']
-     playlist_description = "This playlist was created using the img-to-music application built by the best boyfriend there ever was and ever will be"
-     playlist_data = sp.user_playlist_create(user_id, playlist_name, public=privacy,
-                                             description=playlist_description)
-     playlist_id = playlist_data['id']
-     if len(tracks) == 0:
-         return """No tracks could be generated from this image"""
-     sp.user_playlist_add_tracks(user_id, playlist_id, tracks)
-
-     def upload_img():
-         with Image.open(img) as im_file:
-             im_file.thumbnail((300, 300))
-             buffered = BytesIO()
-             im_file.save(buffered, format="JPEG")
-             img_str = base64.b64encode(buffered.getvalue())
-             sp.playlist_upload_cover_image(playlist_id, img_str)
-     try:
-         upload_img()
-     except spotipy.exceptions.SpotifyException as e:
-         print(f"SpotifyException on image upload: {e}")
-         print("Retrying")
-         time.sleep(5)
-         try:
-             upload_img()
-         except Exception as e:
-             print(e)
-     except requests.exceptions.ReadTimeout as e:
-         print(f"Image upload request timeout: {e}")
-         print("Retrying...")
-         time.sleep(5)
-         try:
-             upload_img()
-         except Exception as e:
-             print(e)
-     time.sleep(3)
-     iframe_embedding = f"""<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/{playlist_id}" width="100%" height="352" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture" loading="lazy"></iframe>"""
-     return iframe_embedding
-
- def aggregate_favorite_artists(sp):
-     top_artists_name = set()
-     top_artists_uri = []
-
-     ranges = ['short_term', 'medium_term', 'long_term']
-     for r in ranges:
-         top_artists_all_data = sp.current_user_top_artists(limit=50, time_range=r)
-         top_artists_data = top_artists_all_data['items']
-         for artist_data in top_artists_data:
-             if artist_data["name"] not in top_artists_name:
-                 top_artists_name.add(artist_data['name'])
-                 top_artists_uri.append(artist_data['uri'])
-
-     followed_artists_all_data = sp.current_user_followed_artists(limit=50)
-     followed_artists_data = followed_artists_all_data['artists']
-     for artist_data in followed_artists_data['items']:
-         if artist_data["name"] not in top_artists_name:
-             top_artists_name.add(artist_data['name'])
-             top_artists_uri.append(artist_data['uri'])
-
-     # attempt to guarantee 200 artists
-     i = 0
-     while len(top_artists_uri) < 200:
-         related_artists_all_data = sp.artist_related_artists(top_artists_uri[i])
-         i += 1
-         related_artists_data = related_artists_all_data['artists']
-         for artist_data in related_artists_data:
-             if artist_data["name"] not in top_artists_name:
-                 top_artists_name.add(artist_data['name'])
-                 top_artists_uri.append(artist_data['uri'])
-         if i == len(top_artists_uri):
-             # could build in a deeper artist recommendation finder here
-             # would do this if it was going to production but Sraddha follows lots of artists
-             break
-
-     return top_artists_uri
-
- def aggregate_top_tracks(sp, top_artists_uri):
-     top_tracks_uri = []
-     for artist in top_artists_uri:
-         top_tracks_all_data = sp.artist_top_tracks(artist)
-         top_tracks_data = top_tracks_all_data['tracks']
-         for track_data in top_tracks_data:
-             top_tracks_uri.append(track_data['uri'])
-     return top_tracks_uri
-
- def filter_tracks(sp, top_tracks_uri, mood, playlist_length):
-     selected_tracks_uri = []
-
-     np.random.shuffle(top_tracks_uri)
-     # Batch network requests
-     BATCH_SIZE = 100
-     i = 0
-     all_track_data = []
-     while i + BATCH_SIZE < len(top_tracks_uri):
-         all_track_data += sp.audio_features(top_tracks_uri[i:i+BATCH_SIZE])
-         i += BATCH_SIZE
-     all_track_data += sp.audio_features(top_tracks_uri[i:])
-
-     for i, track in enumerate(top_tracks_uri):
-         track_data = all_track_data[i]
-         if track_data is None:
-             continue
-
-         valence = track_data['valence']
-         danceability = track_data['danceability']
-         energy = track_data['energy']
-         if mood < .1:
-             if valence <= mood + .15 and \
-                     danceability <= mood * 8 and \
-                     energy <= mood * 10:
-                 selected_tracks_uri.append(track)
-         elif mood < .25:
-             if (mood - .1) <= valence <= (mood + .1) and \
-                     danceability <= mood * 4 and \
-                     energy <= mood * 5:
-                 selected_tracks_uri.append(track)
-         elif mood < .5:
-             if mood - .05 <= valence <= mood + .05 and \
-                     danceability <= mood * 1.75 and \
-                     energy <= mood * 1.75:
-                 selected_tracks_uri.append(track)
-         elif mood < .75:
-             if mood - .1 <= valence <= mood + .1 and \
-                     danceability >= mood / 2.5 and \
-                     energy >= mood / 2:
-                 selected_tracks_uri.append(track)
-         elif mood < .9:
-             if mood - .1 <= valence <= mood + .1 and \
-                     danceability >= mood / 2 and \
-                     energy >= mood / 1.75:
-                 selected_tracks_uri.append(track)
-         else:
-             if mood - .15 <= valence <= 1 and \
-                     danceability >= mood / 1.75 and \
-                     energy >= mood / 1.5:
-                 selected_tracks_uri.append(track)
-
-         if len(selected_tracks_uri) >= playlist_length:
-             break
-     return selected_tracks_uri
-
- # Define login and frontend
- PORT_NUMBER = 8080
- SPOTIPY_CLIENT_ID = '2320153024d042c8ba138a108066246c'
- SPOTIPY_CLIENT_SECRET = 'da2746490f6542a3b0cfcff50893e8e8'
- #SPOTIPY_REDIRECT_URI = 'http://localhost:7860'
- SPOTIPY_REDIRECT_URI = "https://Bokanovskii-Image-to-music.hf.space"
- SCOPE = 'ugc-image-upload playlist-read-private playlist-read-collaborative playlist-modify-private playlist-modify-public user-top-read user-read-playback-position user-read-recently-played user-read-email user-follow-read user-library-modify user-library-read user-read-email user-read-private user-read-playback-state user-modify-playback-state user-read-currently-playing app-remote-control streaming'
-
- sp_oauth = oauth2.SpotifyOAuth(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET, SPOTIPY_REDIRECT_URI, scope=SCOPE)
-
- app = FastAPI()
- app.add_middleware(SessionMiddleware, secret_key="w.o.w")
-
- @app.get('/', response_class=HTMLResponse)
- async def homepage(request: Request):
-     url = str(request.url)
-     auth_url = sp_oauth.get_authorize_url()
-     try:
-         code = sp_oauth.parse_response_code(url)
-         if code != url:
-             request.session['token'] = sp_oauth.get_access_token(code, as_dict=False, check_cache=False)
-             return RedirectResponse("/gradio")
-     except:
-         return """<div style="text-align: center; max-width: 1000px; margin: 0 auto;">
-                 <div
-                   style="
-                     align-items: center;
-                     gap: 0.8rem;
-                     font-size: 1.25rem;
-                   "
-                 >
-                 <h3 style="font-weight: 900; margin-bottom: 30px; margin-top: 20px;">
-                   Image to Music Generator
-                 </h3>\n""" + \
-             "<p> The server couldn't make a connection with Spotify: please try again </p>\n" + \
-             f"<a href='" + auth_url + "'>Login to Spotify</a>\n" + \
-             """<p>
-                 </p>
-                 <p>
-                 </p>
-                 <small>
-                   Click 'Open in a new window/tab'
-                 <small>
-                 <div
-                   style="
-                     align-items: center;
-                     gap: 0.8rem;
-                     font-size: 1rem;
-                   "
-                 >
-                 <small>
-                   This applet requires a whitelisted Spotify account (contact Charlie Ward)
-                 </small>"""
-     return """<div style="text-align: center; max-width: 1000px; margin: 0 auto;">
-             <div
-               style="
-                 align-items: center;
-                 gap: 0.8rem;
-                 font-size: 1.75rem;
-               "
-             >
-             <h3 style="font-weight: 900; margin-bottom: 30px; margin-top: 20px;">
-               Image to Music Generator
-             </h3>\n""" + \
-         f"<a href='" + auth_url + "'>Login to Spotify</a>\n" + \
-         """<p>
-             </p>
-             <p>
-             </p>
-             <small>
-               Click 'Open in a new window/tab'
-             <small>
-             <div
-               style="
-                 align-items: center;
-                 gap: 0.8rem;
-                 font-size: 1rem;
-               "
-             >
-             <small>
-               This applet requires a whitelisted Spotify account (contact Charlie Ward)
-             </small>"""
-
- with gr.Blocks(css="style.css") as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
-                 <div
-                   style="
-                     display: inline-flex;
-                     align-items: center;
-                     gap: 0.8rem;
-                     font-size: 1.75rem;
-                   "
-                 >
-                 <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
-                   Image to Music Generator
-                 </h1>""")
-
-         input_img = gr.Image(type="filepath", elem_id="input-img")
-         sraddhas_box = gr.HTML(label="Sraddha's Box", elem_id="sraddhas-box", visible=False)
-         playlist_output = gr.HTML(label="Generated Playlist", elem_id="app-output", visible=True)
-
-         with gr.Accordion(label="Playlist Generation Options", open=False):
-             playlist_length = gr.Slider(minimum=5, maximum=100, value=30, step=5,
-                                         label="Playlist Length", elem_id="playlist-length")
-             with gr.Row():
-                 privacy = gr.Radio(label="Playlist Privacy Level", choices=["Public", "Private"],
-                                    value="Private")
-                 gen_mode = gr.Radio(label="Recommendation Base", choices=["Favorites", "Recently Played", "By a Chosen Genre"], value="Favorites")
-             with gr.Row(visible=False) as genre_choice_row:
-                 genre_choice = gr.Dropdown(label='Choose a Genre', choices=['Rock', 'Pop', 'Hip-hop', 'Party', 'Mellow', 'Indian', 'Study', 'Romance'], value='Pop')
-
-         def sraddha_box_hide():
-             return {sraddhas_box: gr.update(visible=False)}
-
-         def genre_dropdown_toggle(gen_mode):
-             if gen_mode == 'By a Chosen Genre':
-                 return {genre_choice_row: gr.update(visible=True)}
-             else:
-                 return {genre_choice_row: gr.update(visible=False)}
-
-         generate = gr.Button("Generate Playlist from Image")
-
-         article = """
-             <div class="footer">
-                 <p>
-                     Built for Sraddha: playlist generation from image inference
-                 </p>
-                 <p>
-                     Sending Love 🤗
-                 </p>
-             </div>
-         """
-         gr.HTML(article)
-         gen_mode.change(genre_dropdown_toggle, inputs=[gen_mode], outputs=[genre_choice_row])
-         generate.click(sraddha_box_hide, outputs=[sraddhas_box])
-         generate.click(main, inputs=[input_img, playlist_length, privacy, gen_mode, genre_choice],
-                        outputs=[playlist_output, sraddhas_box], api_name="img-to-music")
-
- gradio_app = gr.mount_gradio_app(app, demo, "/gradio")
- uvicorn.run(app, host="0.0.0.0", port=7860)
- uvicorn.run(app, host="0.0.0.0", port=7860)