parquet-converter committed
Commit d42fae7
1 Parent(s): e5c1e17

Update parquet files (step 10 of 397)

This view is limited to 50 files because it contains too many changes; see the raw diff for the full changeset.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Bill3d Taiylagymbars Mpg.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Blood Brothers in Hindi Full Movie Download The Ultimate Guide.md +0 -7
  3. spaces/1phancelerku/anime-remove-background/Brawlhalla Mod Apk 6.06 Unlock All Legends and Crossovers with Data.md +0 -110
  4. spaces/1phancelerku/anime-remove-background/Download Instagram APK for Android 4.0 and enjoy the latest features.md +0 -105
  5. spaces/1phancelerku/anime-remove-background/Download Sonic Dash 2 Sonic Boom MOD APK with Unlimited Money 3.8.1.md +0 -86
  6. spaces/1phancelerku/anime-remove-background/Experience Realistic Car Parking and Racing with Car Parking Multiplayer on Windows 7.md +0 -115
  7. spaces/232labs/VToonify/vtoonify/model/encoder/readme.md +0 -9
  8. spaces/52Hz/SRMNet_real_world_denoising/README.md +0 -37
  9. spaces/AFischer1985/German-Flan-T5/README.md +0 -12
  10. spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/utils.py +0 -120
  11. spaces/AIConsultant/MusicGen/tests/modules/test_rope.py +0 -168
  12. spaces/AIFILMS/StyleGANEX/models/psp.py +0 -148
  13. spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/biaobei/preprocess.py +0 -16
  14. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/metrics.py +0 -69
  15. spaces/AIML-TUDA/unsafe-vs-safe-stable-diffusion/README.md +0 -13
  16. spaces/Abhilashvj/planogram-compliance/detect.py +0 -460
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/CreateColorPicker.js +0 -56
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateVideo.js +0 -23
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/RoundRectangle.js +0 -2
  20. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/preprocess_v2.py +0 -151
  21. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions_pfc.py +0 -23
  22. spaces/Amrrs/portfolio-github/index.html +0 -108
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +0 -1352
  24. spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +0 -5
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/__init__.py +0 -16
  26. spaces/Anonymous-sub/Rerender/gmflow_module/utils/flow_viz.py +0 -291
  27. spaces/ArtGAN/Video-Diffusion-WebUI/app.py +0 -50
  28. spaces/ArtyomKhyan/Detection/utils/activations.py +0 -63
  29. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/lazy.py +0 -399
  30. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/common.py +0 -241
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/fpn.py +0 -255
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/config/dir1/dir1_a.py +0 -3
  33. spaces/B2gan/LLM_Can_See/README.md +0 -13
  34. spaces/Bambicita/rvc-models/vc_infer_pipeline.py +0 -306
  35. spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/__init__.py +0 -0
  36. spaces/Benson/text-generation/Examples/Api-ms-win-core-path- L1-1-0.dll Descargar.md +0 -127
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/__init__.py +0 -51
  38. spaces/Billyosoro/ESRGAN/scripts/pytorch2onnx.py +0 -36
  39. spaces/CM-15/NLP-demo/app.py +0 -421
  40. spaces/CVMX-jaca-tonos/YouTube-Video-Streaming-Spanish-ASR/streaming.py +0 -66
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/eval.py +0 -198
  42. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/equal.h +0 -23
  43. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/__init__.py +0 -18
  44. spaces/ChevyWithAI/rvc-aicover/README.md +0 -14
  45. spaces/CikeyQI/meme-api/meme_generator/memes/blood_pressure/__init__.py +0 -22
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/feaLib/location.py +0 -12
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G_P_O_S_.py +0 -5
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_m_o_r_x.py +0 -6
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-37519934.css +0 -1
  50. spaces/Dagfinn1962/diffusers-gallery/index.html +0 -162
spaces/1gistliPinn/ChatGPT4/Examples/Bill3d Taiylagymbars Mpg.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>bill3d taiylagymbars mpg</h2><br /><p><b><b>Download File</b> &#10040; <a href="https://imgfil.com/2uy1ZY">https://imgfil.com/2uy1ZY</a></b></p><br /><br />
2
- <br />
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Blood Brothers in Hindi Full Movie Download The Ultimate Guide.md DELETED
@@ -1,7 +0,0 @@
1
- <br />
2
- <p>Tags; Blood Brothers 2015 full movie watch online, Blood Brothers 2015 full movie release date, Blood Brothers 2015 full movie Hindi Dubbed + English subtitles, Blood Brothers 2015 full movie download 720p, Blood Brothers (2015) full movie dailymotion, Blood Brothers 2015 full movie free download mp4, Blood Brothers 2015 full movie in hindi watch online, Blood Brothers 2015 full movie in hindi dubbed watch online, Blood Brothers 2015 full movie Desiremovies 4movierulz</p>
3
- <p>Watch online streaming dan Nonton Movie Blood Brother 2018 BluRay 480p & 720p mp4 mkv hindi dubbed, eng sub, sub indo, nonton online streaming film Blood Brother full hd movies free download Movie gratis via google drive, openload, uptobox, upfile, mediafire direct link download on index movies, world4ufree, bolly4u, downloadhub, tamilrockers, rarbg, torrent, yify, eztv, erosnow, mkvcage, pahe.in, ganool, filmywap, bioskopkeren, layarkaca21, indoxxi, dunia21, Lk21, 123movies, 300mbfilms, subscene, 300mb movies, Tv21, Televisi21, 9xmovie, khatrimaza, moviesbaba, hdmovie8, mkv movies king, mkvmoviesking, Mkvking, Mkvking.com .</p>
4
- <h2>Blood Brothers in hindi full movie download</h2><br /><p><b><b>Download</b> &#10027; <a href="https://imgfil.com/2uxZYD">https://imgfil.com/2uxZYD</a></b></p><br /><br />
5
- <p>Download hd pc mp4 720p 480p Scarlett Johansson,Pilou Asbæk,Takeshi Kitano,Juliette Binoche,Michael Pitt,Chin Han,Danusia Samal,Lasarus Ratuere, 31 March 2017dvdrip mp4mobilemovies hon3yhd 3gpmobilemovies 02cinema downloadhub</p> aaccfb2cb3<br />
6
- <br />
7
- <br />
spaces/1phancelerku/anime-remove-background/Brawlhalla Mod Apk 6.06 Unlock All Legends and Crossovers with Data.md DELETED
@@ -1,110 +0,0 @@
1
- <br />
2
- <h1>Brawlhalla Mod Apk 6.06: A Guide for Beginners</h1>
3
- <p>If you are looking for a fun and exciting platform fighting game that you can play with your friends or online players across different platforms, you might want to check out Brawlhalla. And if you want to enjoy unlimited money and access to all the characters in the game, you might want to try Brawlhalla Mod Apk 6.06. In this article, we will tell you everything you need to know about this modded version of the game, including what it is, how to download and install it, how to play it, and some tips and tricks to improve your skills.</p>
4
- <h2>What is Brawlhalla?</h2>
5
- <h3>A free platform fighting game with over 80 million players</h3>
6
- <p>Brawlhalla is a free-to-play 2D platform fighting game developed by Blue Mammoth Games and published by Ubisoft. It supports up to 8 players online or local in a single match with full cross-play for PC, PS5, PS4, Xbox Series X|S, Xbox One, Nintendo Switch, iOS, and Android devices. You can join casual free-for-alls, queue for ranked matches, or make a custom room with your friends.</p>
7
- <h2>brawlhalla mod apk 6.06</h2><br /><p><b><b>Download File</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://jinyurl.com/2uNKNo">https://jinyurl.com/2uNKNo</a></b></p><br /><br />
8
- <h3>A game with cross-play, frequent updates, and 50+ characters</h3>
9
- <p>Brawlhalla is constantly updated with new features, events, and characters. There are over 50 unique characters (called Legends) to choose from, each with their own stats, abilities, and weapons. You can also unlock crossover characters from other popular franchises such as Adventure Time, Ben 10, The Walking Dead, Tomb Raider, WWE, Shovel Knight, Hellboy, Rayman, Steven Universe, and more.</p>
10
- <h3>A game with various modes, maps, and cosmetics</h3>
11
- <p>Brawlhalla offers a variety of game modes to suit your preferences and mood. You can play classic modes such as Stock (last man standing), Timed (most points), Strikeout (switch characters), or Brawlball (score goals). You can also try out fun party modes such as Kung Foot (soccer), Bombsketball (basketball), Capture the Flag (CTF), Bubble Tag (tag), Temple Climb (climb), Morph (change weapons), Walker Attack! (zombies), Showdown (battle royale), Crew Battle (team elimination), Street Brawl (gang fight), Bounty ( collect bounties), and more. You can also play custom games with your own rules and settings, or join tournaments and events to compete for glory and prizes. Brawlhalla has over 40 maps to fight on, each with different layouts, hazards, and themes. You can also customize your characters with hundreds of skins, weapons, colors, emotes, taunts, and podiums that you can buy with in-game currency or real money.</p>
12
- <h2>What is Brawlhalla Mod Apk 6.06?</h2>
13
- <h3>A modified version of the game that offers unlimited money and unlocked characters</h3>
14
- <p>Brawlhalla Mod Apk 6.06 is a modified version of the game that gives you unlimited money (gold and mammoth coins) and access to all the characters (including the crossover ones) in the game. With this mod, you can buy any cosmetic item you want, and play with any character you like. You can also enjoy the game without ads or interruptions.</p>
15
- <h3>A version that requires data download and installation</h3>
16
- <p>Brawlhalla Mod Apk 6.06 is not available on the official app stores, so you need to download it from a third-party source. You also need to download a separate data file that contains the game assets and resources. You need to install the mod apk file and copy the data file to the obb folder in your device storage. This process may take some time and space, depending on your device and internet speed.</p>
17
- <h3>A version that may not be compatible with the official game or safe to use</h3>
18
- <p>Brawlhalla Mod Apk 6.06 is not an official version of the game, so it may not be compatible with the latest updates or patches of the game. It may also cause errors, glitches, or crashes in your device. Moreover, using a modded version of the game may violate the terms of service of the game, and result in a ban or suspension of your account. You may also risk exposing your device to malware or viruses from untrusted sources. Therefore, use this mod at your own risk and discretion.</p>
19
- <h2>How to download and install Brawlhalla Mod Apk 6.06?</h2>
20
- <h3>Find a reliable source for the mod apk file and the data file</h3>
21
- <p>The first step to download and install Brawlhalla Mod Apk 6.06 is to find a reliable source for the mod apk file and the data file. You can search online for websites or blogs that offer these files for free download. However, be careful of fake or malicious links that may harm your device or steal your information. Always check the reviews, ratings, and comments of other users before downloading anything.</p>
22
- <h3>Enable unknown sources in your device settings</h3>
23
- <p>The next step is to enable unknown sources in your device settings. This will allow you to install apps that are not from the official app stores. To do this, go to your device settings > security > unknown sources > enable. You may also need to disable any antivirus or firewall software that may block the installation.</p>
24
- <h3>Install the mod apk file and copy the data file to the obb folder</h3>
25
- <p>The final step is to install the mod apk file and copy the data file to the obb folder in your device storage. To do this, follow these steps:</p>
26
- <p>brawlhalla mod apk 6.06 download<br />
27
- brawlhalla mod apk 6.06 unlimited money<br />
28
- brawlhalla mod apk 6.06 unlock all characters<br />
29
- brawlhalla mod apk 6.06 latest version<br />
30
- brawlhalla mod apk 6.06 android<br />
31
- brawlhalla mod apk 6.06 free<br />
32
- brawlhalla mod apk 6.06 online<br />
33
- brawlhalla mod apk 6.06 hack<br />
34
- brawlhalla mod apk 6.06 cheats<br />
35
- brawlhalla mod apk 6.06 gameplay<br />
36
- brawlhalla mod apk 6.06 review<br />
37
- brawlhalla mod apk 6.06 update<br />
38
- brawlhalla mod apk 6.06 features<br />
39
- brawlhalla mod apk 6.06 install<br />
40
- brawlhalla mod apk 6.06 guide<br />
41
- brawlhalla mod apk 6.06 tips<br />
42
- brawlhalla mod apk 6.06 tricks<br />
43
- brawlhalla mod apk 6.06 tutorial<br />
44
- brawlhalla mod apk 6.06 multiplayer<br />
45
- brawlhalla mod apk 6.06 cross-play<br />
46
- brawlhalla mod apk 6.06 legends<br />
47
- brawlhalla mod apk 6.06 skins<br />
48
- brawlhalla mod apk 6.06 weapons<br />
49
- brawlhalla mod apk 6.06 modes<br />
50
- brawlhalla mod apk 6.06 maps<br />
51
- brawlhalla mod apk 6.06 ranked<br />
52
- brawlhalla mod apk 6.06 season<br />
53
- brawlhalla mod apk 6.06 events<br />
54
- brawlhalla mod apk 6.06 mallhalla<br />
55
- brawlhalla mod apk 6.06 bloomhalla<br />
56
- brawlhalla mod apk 6.06 emotes<br />
57
- brawlhalla mod apk 6.06 training room<br />
58
- brawlhalla mod apk 6.06 spectating<br />
59
- brawlhalla mod apk 6.06 recording<br />
60
- brawlhalla mod apk 6.06 replay<br />
61
- brawlhalla mod apk 6.06 esports<br />
62
- brawlhalla mod apk 6.06 tournaments<br />
63
- brawlhalla mod apk 6.06 controller support<br />
64
- brawlhalla mod apk 6.06 keyboard support<br />
65
- brawlhalla mod apk 6.06 progress rewards<br />
66
- brawlhalla mod apk 6.06 career history<br />
67
- brawlhalla mod apk 6.06 free to play<br />
68
- brawlhalla mod apk 6.06 no pay to win<br />
69
- brawlhalla mod apk 6.06 all legends pack<br />
70
- brawlhalla mod apk 6.06 crossovers</p>
71
- <ol>
72
- <li>Download the mod apk file and the data file from your chosen source.</li>
73
- <li>Locate the downloaded files in your device storage using a file manager app.</li>
74
- <li>Tap on the mod apk file and follow the instructions to install it.</li>
75
- <li>Do not open the game yet after installation.</li>
76
- <li>Extract or unzip the data file using a zip extractor app.</li>
77
- <li>Copy or move the extracted folder named "com.bmg.brawlhalla" to your device storage > Android > obb.</li>
78
- <li>Make sure that the folder is placed inside the obb folder correctly.</li>
79
- <li>Now you can open the game and enjoy Brawlhalla Mod Apk 6.06.</li>
80
- </ol> <h2>How to play Brawlhalla Mod Apk 6.06?</h2>
81
- <h3>Choose your character from the unlocked roster</h3>
82
- <p>Once you have successfully installed Brawlhalla Mod Apk 6.06, you can start playing the game by choosing your character from the unlocked roster. You can scroll through the list of characters and select the one that suits your playstyle and preference. You can also see the stats, abilities, and weapons of each character by tapping on them.</p>
83
- <h3>Customize your character with skins, weapons, colors, and emotes</h3>
84
- <p>After choosing your character, you can customize it with skins, weapons, colors, and emotes that you can buy with the unlimited money you have in the mod. You can access the store by tapping on the shopping cart icon at the bottom of the screen. You can also change your character's name, avatar, and title by tapping on the profile icon at the top of the screen.</p>
85
- <h3>Join online or offline matches with up to 8 players</h3>
86
- <p>Now you are ready to join online or offline matches with up to 8 players in Brawlhalla Mod Apk 6.06. You can choose from various game modes and maps by tapping on the play icon at the bottom of the screen. You can also invite your friends or join random players by tapping on the social icon at the top of the screen. You can also chat with other players by tapping on the chat icon at the bottom of the screen.</p>
87
- <h2>Tips and tricks for Brawlhalla Mod Apk 6.06</h2>
88
- <h3>Practice your movement, recovery, dodging, and combos in training mode</h3>
89
- <p>If you are new to Brawlhalla or want to improve your skills, you should practice your movement, recovery, dodging, and combos in training mode. You can access training mode by tapping on the play icon > offline > training. In training mode, you can choose any character, weapon, map, and settings to practice with. You can also see your damage, hitboxes, stun frames, and other useful information on the screen.</p>
90
- <h3>Learn the strengths and weaknesses of different characters and weapons</h3>
91
- <p>Brawlhalla has over 50 characters and 13 weapons to choose from, each with their own strengths and weaknesses. You should learn how each character and weapon works, what their advantages and disadvantages are, and how to counter them. You can also watch videos or guides from other players or experts online to learn more tips and tricks.</p>
92
- <h3>Experiment with different modes and maps to find your favorite</h3>
93
- <p>Brawlhalla has a variety of modes and maps to suit your preferences and mood. You should experiment with different modes and maps to find your favorite ones. You can also create your own custom games with your own rules and settings by tapping on the play icon > custom > create room. You can also join other players' custom games by tapping on join room.</p>
94
- <h2>Conclusion</h2>
95
- <p>Brawlhalla Mod Apk 6.06 is a modified version of Brawlhalla that offers unlimited money and unlocked characters for free. It is a fun and exciting platform fighting game that you can play with your friends or online players across different platforms. However, it is not an official version of the game, so it may not be compatible with the latest updates or patches of the game. It may also cause errors, glitches, or crashes in your device. Moreover, using a modded version of the game may violate the terms of service of the game, and result in a ban or suspension of your account. You may also risk exposing your device to malware or viruses from untrusted sources. Therefore, use this mod at your own risk and discretion.</p>
96
- <h2>FAQs</h2>
97
- <ul>
98
- <li><b>Q: Is Brawlhalla Mod Apk 6.06 safe to use?</b></li>
99
- <li>A: Brawlhalla Mod Apk 6.06 is not an official version of the game, so it may not be safe to use. It may contain malware or viruses that may harm your device or steal your information. It may also violate the terms of service of the game, and result in a ban or suspension of your account.</li>
100
- <li><b>Q: How do I update Brawlhalla Mod Apk 6.06?</b></li>
101
- <li>A: Brawlhalla Mod Apk 6.06 may not be compatible with the latest updates or patches of the game. To update it, you need to find a new version of the mod apk file and the data file from a reliable source online. Then you need to uninstall the old version of the mod apk file and install the new one. You also need to copy the new data file to the obb folder as well.</li>
102
- <li><b>Q: Can I play Brawlhalla Mod Apk 6.06 with other players online?</b></li>
103
- <li>A: Brawlhalla Mod Apk 6.06 supports online multiplayer with up to 8 players in a single match. However, you may not be able to play with players who are using the official version of the game, or players who are using a different version of the mod. You may also face lag, disconnects, or bans from the game servers.</li>
104
- <li><b>Q: Can I use Brawlhalla Mod Apk 6.06 on PC, PS5, PS4, Xbox Series X|S, Xbox One, Nintendo Switch, iOS, or Android devices?</b></li>
105
- <li>A: Brawlhalla Mod Apk 6.06 is only compatible with Android devices. You cannot use it on PC, PS5, PS4, Xbox Series X|S, Xbox One, Nintendo Switch, or iOS devices. However, you can use an Android emulator on your PC to run the mod apk file and the data file.</li>
106
- <li><b>Q: Where can I find more information about Brawlhalla Mod Apk 6.06?</b></li>
107
- <li>A: You can find more information about Brawlhalla Mod Apk 6.06 by searching online for websites or blogs that offer reviews, guides, or tutorials about the mod. You can also join online communities or forums that discuss the mod or the game in general. You can also watch videos or streams from other players or experts who use the mod or the game.</li>
108
- </ul></p> 401be4b1e0<br />
109
- <br />
110
- <br />
spaces/1phancelerku/anime-remove-background/Download Instagram APK for Android 4.0 and enjoy the latest features.md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>How to Download Instagram APK for Android 4.0</h1>
3
- <p>Instagram is one of the most popular social media platforms in the world, with over one billion monthly active users. It allows you to create and share your photos, stories, reels and videos with the friends and followers you care about. You can also connect with people who share your interests, discover new content, and chat with your favorite creators.</p>
4
- <h2>instagram download apk android 4.0</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://jinyurl.com/2uNTPz">https://jinyurl.com/2uNTPz</a></b></p><br /><br />
5
- <p>But what if you have an older device that runs on Android 4.0 Ice Cream Sandwich, which is no longer supported by the official Instagram app? Does that mean you can't enjoy the latest features and updates of Instagram? Not necessarily. In this article, we will show you how to download Instagram APK for Android 4.0, which is a modified version of the original app that works on older devices. We will also explain what Instagram APK is, why you might want to download it, how to install it, and how to use it.</p>
6
- <h2>What is Instagram APK?</h2>
7
- <p>Instagram APK is a file that contains the installation package of the Instagram app for Android devices. APK stands for Android Package Kit, and it is the standard format for distributing and installing apps on Android devices. You can download APK files from various sources online, such as third-party websites, app stores, or file-sharing platforms.</p>
8
- <p>However, not all APK files are safe or reliable. Some of them may contain malware, viruses, or unwanted ads that can harm your device or compromise your privacy. Therefore, you should always be careful when downloading APK files from unknown sources, and only use trusted and reputable websites.</p>
9
- <p>One of the advantages of downloading Instagram APK is that you can access the latest version of the app even if your device is not compatible with it. For example, if you have an Android 4.0 device, you can download Instagram APK for Android 4.0 and enjoy the new features and improvements of Instagram without having to upgrade your device.</p>
10
- <h2>Why Download Instagram APK for Android 4.0?</h2>
11
- <p>There are several reasons why you might want to download Instagram APK for Android 4.0 instead of using the official app from the Google Play Store. Here are some of them:</p>
12
- <p>instagram apk latest version free download for android 4.0<br />
13
- instagram app download for android 4.0 ice cream sandwich<br />
14
- instagram lite apk download for android 4.0 and up<br />
15
- how to install instagram on android 4.0 without google play<br />
16
- instagram mod apk download for android 4.0 with extra features<br />
17
- instagram old version apk download for android 4.0<br />
18
- instagram beta apk download for android 4.0 and test new features<br />
19
- instagram reels apk download for android 4.0 and create short videos<br />
20
- instagram dark mode apk download for android 4.0 and save battery<br />
21
- instagram video downloader apk for android 4.0 and save videos offline<br />
22
- instagram story saver apk for android 4.0 and download stories<br />
23
- instagram photo editor apk for android 4.0 and enhance your photos<br />
24
- instagram layout apk for android 4.0 and create collages<br />
25
- instagram boomerang apk for android 4.0 and make looping videos<br />
26
- instagram direct apk for android 4.0 and chat with friends<br />
27
- instagram live apk for android 4.0 and broadcast to your followers<br />
28
- instagram tv apk for android 4.0 and watch long-form videos<br />
29
- instagram shop apk for android 4.0 and discover products<br />
30
- instagram business apk for android 4.0 and grow your brand<br />
31
- instagram analytics apk for android 4.0 and track your performance<br />
32
- instagram followers apk for android 4.0 and get more followers<br />
33
- instagram likes apk for android 4.0 and get more likes<br />
34
- instagram comments apk for android 4.0 and get more comments<br />
35
- instagram hashtags apk for android 4.0 and find the best hashtags<br />
36
- instagram captions apk for android 4.0 and write engaging captions<br />
37
- instagram filters apk for android 4.0 and add effects to your photos<br />
38
- instagram stickers apk for android 4.0 and decorate your stories<br />
39
- instagram fonts apk for android 4.0 and change your bio font<br />
40
- instagram emoji apk for android 4.0 and express yourself with emojis<br />
41
- instagram verification apk for android 4.0 and get the blue tick<br />
42
- instagram password hack apk for android 4.0 and access any account (not recommended)<br />
43
- instagram spy apk for android 4.0 and monitor someone's activity (not recommended)<br />
44
- instagram private profile viewer apk for android 4.0 and see hidden photos (not recommended)<br />
45
- instagram unfollowers apk for android 4.0 and find out who unfollowed you<br />
46
- instagram ghost followers apk for android 4.0 and remove inactive followers<br />
47
- instagram block list apk for android 4.0 and see who blocked you<br />
48
- instagram dm cleaner apk for android 4.0 and delete unwanted messages<br />
49
- instagram post scheduler apk for android 4.0 and plan your posts in advance<br />
50
- instagram repost apk for android 4.0 and share other's posts with credit<br />
51
- instagram carousel maker apk for android 4.0 and create multi-image posts</p>
52
- <ul>
53
- <li>You have an older device that runs on Android 4.0 Ice Cream Sandwich, which is no longer supported by the official Instagram app.</li>
54
- <li>You want to access the latest version of Instagram with all the new features and updates, such as reels, dark mode, stickers, filters, etc.</li>
55
- <li>You want to bypass some of the restrictions or limitations imposed by the official app, such as the number of accounts you can log in with, the size of the videos you can upload, or the content you can view.</li>
56
- <li>You want to customize or modify some aspects of the app according to your preferences, such as the theme, layout, icons, fonts, etc.</li>
57
- </ul>
58
- <p>However, there are also some drawbacks or risks associated with downloading Instagram APK for Android 4.0 that you should be aware of before proceeding. Here are some of them:</p>
59
- <ul>
60
- <li>You may violate the terms of service or policies of Instagram by using an unofficial or modified version of the app.</li>
61
- <li>You may encounter some bugs, errors, or compatibility issues that affect the performance or functionality of the app.</li>
62
- <li>You may expose your device or data to security threats or privacy breaches by downloading an unverified or malicious APK file.</li>
63
- <li>You may not receive automatic updates or technical support from Instagram if you encounter any problems with the app.</li <h2>How to Download Instagram APK for Android 4.0?</h2>
64
- <p>If you have decided to download Instagram APK for Android 4.0, you need to follow some simple steps to get the file and install it on your device. Here is a step-by-step guide on how to do it:</p>
65
- <ol>
66
- <li>Go to a reliable and reputable website that offers Instagram APK for Android 4.0, such as [APKPure] or [APKMirror]. You can also use a search engine to find other sources, but make sure they are safe and trustworthy.</li>
67
- <li>On the website, find the latest version of Instagram APK for Android 4.0 and click on the download button. You may need to allow the download from unknown sources in your device settings.</li>
68
- <li>Wait for the download to complete and locate the file in your device storage. It should have a name like "com.instagram.android.apk" or something similar.</li>
69
- <li>Tap on the file and follow the instructions to install it on your device. You may need to grant some permissions or accept some terms and conditions.</li>
70
- <li>Once the installation is done, you can launch the app and log in with your existing account or create a new one.</li>
71
- </ol>
72
- <h2>How to Install Instagram APK for Android 4.0?</h2>
73
- <p>If you have already downloaded Instagram APK for Android 4.0, you need to install it on your device to use it. The installation process is similar to any other app, but you may need to enable some settings or options first. Here is a step-by-step guide on how to do it:</p>
74
- <ol>
75
- <li>Before installing Instagram APK for Android 4.0, you need to enable the installation from unknown sources in your device settings. To do this, go to Settings > Security > Unknown Sources and toggle the switch on.</li>
76
- <li>Now, go to your device storage and find the Instagram APK file that you downloaded earlier. It should have a name like "com.instagram.android.apk" or something similar.</li>
77
- <li>Tap on the file and follow the instructions to install it on your device. You may need to grant some permissions or accept some terms and conditions.</li>
78
- <li>Once the installation is done, you can launch the app and log in with your existing account or create a new one.</li>
79
- </ol>
80
- <h2>How to Use Instagram APK for Android 4.0?</h2>
81
- <p>After installing Instagram APK for Android 4.0, you can use it just like the official app, with some minor differences or limitations. Here are some tips and tricks on how to use Instagram APK for Android 4.0:</p>
82
- <ul>
83
- <li>To access the latest features and updates of Instagram, such as reels, dark mode, stickers, filters, etc., you may need to update the app regularly by downloading the latest version of Instagram APK for Android 4.0 from the same website that you used before.</li>
84
- <li>To avoid any compatibility issues or errors, you may need to clear the cache and data of the app occasionally by going to Settings > Apps > Instagram > Storage > Clear Cache and Clear Data.</li>
85
- <li>To customize or modify some aspects of the app according to your preferences, such as the theme, layout, icons, fonts, etc., you may need to use some third-party apps or tools that work with Instagram APK for Android 4.0, such as [GBInsta] or [InstaMod].</li>
86
- <li>To bypass some of the restrictions or limitations imposed by the official app, such as the number of accounts you can log in with, the size of the videos you can upload, or the content you can view, you may need to use some tricks or hacks that work with Instagram APK for Android 4.0, such as [Parallel Space] or [VPN].</li>
87
- </ul>
88
- <h2>Conclusion</h2>
89
- <p>In conclusion, downloading Instagram APK for Android 4.0 is a possible solution for those who have an older device that runs on Android 4.0 Ice Cream Sandwich and want to enjoy the latest features and updates of Instagram without having to upgrade their device. However, there are also some drawbacks and risks involved in using an unofficial or modified version of the app that should be considered before proceeding.</p>
90
- <p>If you decide to download Instagram APK for Android 4.0, make sure you follow the steps above carefully and use a reliable and reputable website that offers safe and verified APK files. Also, be aware of the potential security threats or privacy breaches that may occur by downloading an unverified or malicious APK file.</p>
91
- <p>We hope this article has helped you understand what Instagram APK is, why you might want to download it, how to download it, how to install it, and how to use it. If you have any questions or feedback, feel free to leave a comment below or contact us through our website. We would love to hear from you!</p>
92
- <h2>FAQs</h2>
93
- <p>Here are some of the most frequently asked questions and answers about Instagram APK for Android 4.0:</p>
94
- <h3>Is Instagram APK for Android 4.0 legal?</h3>
95
- <p>There is no definitive answer to this question, as different countries may have different laws and regulations regarding the use of unofficial or modified apps. However, in general, downloading and using Instagram APK for Android 4.0 is not illegal, as long as you do not use it for any malicious or fraudulent purposes. However, you may violate the terms of service or policies of Instagram by using an unofficial or modified version of the app, which may result in your account being suspended or banned.</p>
96
- <h3>Is Instagram APK for Android 4.0 safe?</h3>
97
- <p>Not all Instagram APK files are safe or reliable. Some of them may contain malware, viruses, or unwanted ads that can harm your device or compromise your privacy. Therefore, you should always be careful when downloading Instagram APK files from unknown sources, and only use trusted and reputable websites that offer safe and verified APK files. You should also scan the APK file with an antivirus or malware detector before installing it on your device.</p>
98
- <h3>What are the differences between Instagram APK for Android 4.0 and the official app?</h3>
99
- <p>The main difference between Instagram APK for Android 4.0 and the official app is that the former works on older devices that run on Android 4.0 Ice Cream Sandwich, while the latter does not. The official app requires a minimum of Android 5.0 Lollipop to run smoothly and support all the features and updates of Instagram. Another difference is that Instagram APK for Android 4.0 may have some extra features or options that are not available in the official app, such as customizing the theme, layout, icons, fonts, etc., or bypassing some of the restrictions or limitations imposed by the official app, such as the number of accounts you can log in with, the size of the videos you can upload, or the content you can view.</p>
100
- <h3>How can I update Instagram APK for Android 4.0?</h3>
101
- <p>To update Instagram APK for Android 4.0, you need to download the latest version of the file from the same website that you used before and install it on your device. You may need to uninstall the previous version of the app first before installing the new one. Alternatively, you can use some third-party apps or tools that can automatically update Instagram APK for Android 4.0 for you, such as [APKUpdater] or [Uptodown].</p>
102
- <h3>How can I delete Instagram APK for Android 4.0?</h3>
103
- <p>To delete Instagram APK for Android 4.0, you need to uninstall it from your device like any other app. To do this, go to Settings > Apps > Instagram > Uninstall and confirm your action. You may also need to delete the APK file from your device storage if you want to free up some space.</p> 197e85843d<br />
104
- <br />
105
- <br />
spaces/1phancelerku/anime-remove-background/Download Sonic Dash 2 Sonic Boom MOD APK with Unlimited Money 3.8.1.md DELETED
@@ -1,86 +0,0 @@
1
-
2
- <h1>Sonic Dash 2: Sonic Boom Mod Apk Free Download</h1>
3
- <p>If you are a fan of Sonic the Hedgehog and endless runner games, you might have heard of Sonic Dash 2: Sonic Boom. This is a sequel to the popular Sonic Dash game, featuring the characters and world of the Sonic Boom TV series. In this game, you can run, jump, dash, and swing through various levels, collecting rings, orbs, and sprites, while avoiding obstacles and enemies. You can also play as different characters, each with their own special abilities and powers.</p>
4
- <h2>sonic dash 2 sonic boom mod apk free download</h2><br /><p><b><b>DOWNLOAD</b> &#128504;&#128504;&#128504; <a href="https://jinyurl.com/2uNS3V">https://jinyurl.com/2uNS3V</a></b></p><br /><br />
5
- <p>But what if you want to enhance your gaming experience even more? What if you want to unlock all the characters, get unlimited rings and orbs, and access all the features of the game without spending any money? This is where a mod apk comes in handy. A mod apk is a modified version of an original app that has been altered to provide some extra features or benefits. By downloading and installing a mod apk for Sonic Dash 2: Sonic Boom, you can enjoy the game with more fun and ease.</p>
6
- <h2>How to Download and Install the Mod Apk for Sonic Dash 2: Sonic Boom</h2>
7
- <p>Downloading and installing the mod apk for Sonic Dash 2: Sonic Boom is not very difficult, but you need to follow some steps carefully. Here is what you need to do:</p>
8
- <ol>
9
- <li>First, you need to enable unknown sources on your device. This will allow you to install apps that are not from the official app store. To do this, go to your device settings, then security, then unknown sources, and turn it on.</li>
10
- <li>Next, you need to find a reliable source for downloading the mod apk file. There are many websites that offer mod apks for various games, but not all of them are safe and trustworthy. You need to be careful about malware, viruses, and other harmful software that might harm your device or steal your personal information. One of the websites that we recommend is [text](^1^), where you can find the latest version of the mod apk for Sonic Dash 2: Sonic Boom.</li>
11
- <li>Once you have found the mod apk file, you need to download it to your device. You can do this by clicking on the download button on the website, or by scanning the QR code if available. The file size is about 70 MB, so make sure you have enough space on your device.</li>
12
- <li>After downloading the file, you need to locate it on your device. You can use a file manager app or your device's default file explorer to find it. The file name should be something like "sonic-dash-2-sonic-boom-v2-2-1-mod.apk".</li>
13
- <li>Now, you need to install the file on your device. To do this, simply tap on the file and follow the instructions on the screen. You might need to grant some permissions for the app to work properly.</li>
14
- <li>Finally, you need to launch the game and enjoy it with the mod features. You should see a message saying "Modded by HappyMod" on the loading screen. This means that the mod apk has been successfully installed.</li>
15
- </ol>
16
- <h2>What are the Features and Benefits of the Mod Apk</h2>
17
- <p>The mod apk for Sonic Dash 2: Sonic Boom has many features and benefits that will make your gaming experience more enjoyable and rewarding. Here are some of them:</p>
18
- <p>sonic dash 2 sonic boom unlimited money mod apk<br />
19
- sonic dash 2 sonic boom hack mod apk download<br />
20
- sonic dash 2 sonic boom latest version mod apk<br />
21
- sonic dash 2 sonic boom mod apk android 1<br />
22
- sonic dash 2 sonic boom mod apk revdl<br />
23
- sonic dash 2 sonic boom mod apk offline<br />
24
- sonic dash 2 sonic boom mod apk no ads<br />
25
- sonic dash 2 sonic boom mod apk all characters unlocked<br />
26
- sonic dash 2 sonic boom mod apk unlimited red rings<br />
27
- sonic dash 2 sonic boom mod apk unlimited tickets<br />
28
- download game sonic dash 2 sonic boom mod apk<br />
29
- how to install sonic dash 2 sonic boom mod apk<br />
30
- cara download sonic dash 2 sonic boom mod apk<br />
31
- descargar sonic dash 2 sonic boom mod apk<br />
32
- baixar sonic dash 2 sonic boom mod apk<br />
33
- télécharger sonic dash 2 sonic boom mod apk<br />
34
- download permainan sonic dash 2 sonic boom mod apk<br />
35
- unduh sonic dash 2 sonic boom mod apk gratis<br />
36
- download gratis sonic dash 2 sonic boom mod apk<br />
37
- free download of sonic dash 2 sonic boom mod apk<br />
38
- download link for sonic dash 2 sonic boom mod apk<br />
39
- download file for sonic dash 2 sonic boom mod apk<br />
40
- download data for sonic dash 2 sonic boom mod apk<br />
41
- download obb for sonic dash 2 sonic boom mod apk<br />
42
- download cache for sonic dash 2 sonic boom mod apk<br />
43
- download zip for sonic dash 2 sonic boom mod apk<br />
44
- download rar for sonic dash 2 sonic boom mod apk<br />
45
- download mega for sonic dash 2 sonic boom mod apk<br />
46
- download mediafire for sonic dash 2 sonic boom mod apk<br />
47
- download google drive for sonic dash 2 sonic boom mod apk<br />
48
- download dropbox for sonic dash 2 sonic boom mod apk<br />
49
- download zippyshare for sonic dash 2 sonic boom mod apk<br />
50
- download uptodown for sonic dash 2 sonic boom mod apk<br />
51
- download apkpure for sonic dash 2 sonic boom mod apk<br />
52
- download apkmirror for sonic dash 2 soni</p>
53
- <ul>
54
- <li>You can unlock all the characters in the game, including Sonic, Tails, Amy, Knuckles, Sticks, Shadow, and Vector. Each character has their own unique skills and abilities that will help you in different situations.</li>
55
- <li>You can get unlimited rings and orbs in the game. Rings and orbs are the main currencies in the game, which you can use to buy upgrades, power-ups, and boosters. With unlimited rings and orbs, you can get the best items and enhance your performance in the game.</li>
56
- <li>You can access all the features of the game, such as the events, the missions, the daily challenges, and the leaderboards. You can also play online with other players and compete for the highest scores and rewards.</li>
57
- <li>You can enjoy the game without any ads or interruptions. The mod apk removes all the annoying ads that might pop up during the game or after completing a level. You can also skip the waiting time for loading or reviving.</li>
58
- </ul>
59
- <h2>What are some Tips and Tricks for Playing the Game with the Mod Apk</h2>
60
- <p>Playing Sonic Dash 2: Sonic Boom with the mod apk is fun and easy, but there are some tips and tricks that you can use to make the most out of it. Here are some of them:</p>
61
- <ul>
62
- <li>Try to switch between different characters during the game, depending on the situation. For example, you can use Sonic's dash ability to speed up and break through obstacles, Tails' flight ability to soar over gaps and enemies, Amy's hammer ability to smash everything in her way, Knuckles' punch ability to deal more damage and collect more rings, Sticks' boomerang ability to hit multiple targets and collect sprites, Shadow's chaos blast ability to destroy everything around him, and Vector's music ability to create shockwaves and attract rings.</li>
63
- <li>Use the sprites wisely. Sprites are cute creatures that can help you in various ways, such as increasing your score, boosting your speed, protecting you from damage, or giving you extra lives. You can equip up to three sprites at a time, and each sprite has a different rarity and effect. You can also upgrade your sprites to make them more powerful.</li>
64
- <li>Collect as many rings and orbs as possible. Rings and orbs are not only useful for buying items, but also for increasing your score and activating your character's special power. The more rings and orbs you collect, the faster you can fill up your power meter and unleash your character's ultimate move.</li>
65
- <li>Avoid hitting obstacles and enemies. Hitting obstacles and enemies will slow you down, damage you, or end your run. You need to be alert and agile to dodge or jump over them. You can also use your character's abilities or power-ups to deal with them.</li>
66
- <li>Complete the events, missions, and daily challenges. These are optional tasks that you can do to earn more rewards and bonuses. They also add more variety and fun to the game. You can check them out on the main menu or on the top of the screen during the game.</li>
67
- </ul>
68
- <h1>Conclusion</h1>
69
- <p>Sonic Dash 2: Sonic Boom is an exciting and addictive endless runner game that features the beloved characters and world of Sonic the Hedgehog. If you want to enjoy the game with more features and benefits, you can download and install the mod apk for Sonic Dash 2: Sonic Boom from [text]. This will allow you to unlock all the characters, get unlimited rings and orbs, access all the features of the game, and play without any ads or interruptions. You can also use some tips and tricks to improve your skills and score in the game. So what are you waiting for? Download the mod apk now and have fun with Sonic Dash 2: Sonic Boom!</p>
70
- <h1>FAQs</h1>
71
- <p>Here are some frequently asked questions and answers about the mod apk for Sonic Dash 2: Sonic Boom:</p>
72
- <ol>
73
- <li><b>Is the mod apk safe to download and install?</b><br>
74
- Yes, the mod apk is safe to download and install, as long as you get it from a reliable source like [text]. The mod apk has been tested by many users and has no viruses or malware.</li>
75
- <li><b>Do I need to root my device to use the mod apk?</b><br>
76
- No, you do not need to root your device to use the mod apk. The mod apk works fine on both rooted and non-rooted devices.</li>
77
- <li><b>Will I get banned from playing online with the mod apk?</b><br>
78
- No, you will not get banned from playing online with the mod apk. The mod apk does not interfere with the online mode of the game, so you can play with other players without any problems.</li>
79
- <li><b>Can I update the game with the mod apk?</b><br>
80
- Yes, you can update the game with the mod apk. However, you might need to download a new version of the mod apk when a new update is released. You can check [text] for any updates on the mod apk for Sonic Dash 2: Sonic Boom.</li>
81
- <li><b>How can I uninstall the mod apk if I want to?</b><br>
82
- If you want to uninstall the mod apk, you can do it the same way as you uninstall any other app on your device. You can go to your device settings, then apps, then Sonic Dash 2: Sonic Boom, and tap on uninstall. You can also delete the mod apk file from your device if you want to.</li>
83
- </ol>
84
- <p>I hope this article has helped you learn more about the mod apk for Sonic Dash 2: Sonic Boom and how to download and install it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p> 197e85843d<br />
85
- <br />
86
- <br />
spaces/1phancelerku/anime-remove-background/Experience Realistic Car Parking and Racing with Car Parking Multiplayer on Windows 7.md DELETED
@@ -1,115 +0,0 @@
1
- <br />
2
- <h1>Car Parking Multiplayer Free Download for Windows 7</h1>
3
- <p>Are you looking for a realistic and fun driving simulator game that you can play on your PC? If yes, then you should try Car Parking Multiplayer, a game that offers more than just parking. In this article, we will show you how to download and install Car Parking Multiplayer on Windows 7 using two different methods. We will also tell you about the features, tips, and tricks of this amazing game. So, let's get started!</p>
4
- <h2>Introduction</h2>
5
- <p>Car Parking Multiplayer is a game that can fool you with its rather deceiving name. But, it's much more than just being about parking your car. It's an open-world experience where you can drive free and yes, still work on that parking if you wish. You can even jump out of your car and walk around. There are different areas that can be explored in the game. Each one is like its own open-world. You can choose to play either single-player mode or online mode if you want a more chaotic scene (in a fun way).</p>
6
- <h2>car parking multiplayer free download for windows 7</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://jinyurl.com/2uNNGE">https://jinyurl.com/2uNNGE</a></b></p><br /><br />
7
- <h3>What is Car Parking Multiplayer?</h3>
8
- <p>Car Parking Multiplayer is a simulation game developed by olzhass, a developer based in Kazakhstan. The game was released in 2017 for Android devices and later for iOS devices. The game has more than 100 million downloads on Google Play Store and more than 130 cars to choose from.</p>
9
- <h3>Why play Car Parking Multiplayer on PC?</h3>
10
- <p>While Car Parking Multiplayer is designed for mobile devices, playing it on PC has some advantages. For example, you can enjoy the game on a bigger screen with better graphics and sound quality. You can also use your keyboard and mouse to control your car more easily and precisely. Moreover, playing on PC can save your battery life and avoid overheating issues of your mobile device.</p>
11
- <p>car parking multiplayer pc download windows 7 free<br />
12
- how to install car parking multiplayer on windows 7<br />
13
- car parking multiplayer for windows 7 32 bit<br />
14
- car parking multiplayer windows 7 offline<br />
15
- car parking multiplayer apk download for windows 7<br />
16
- car parking multiplayer simulator windows 7<br />
17
- car parking multiplayer game free download for pc windows 7<br />
18
- car parking multiplayer bluestacks windows 7<br />
19
- car parking multiplayer online windows 7<br />
20
- car parking multiplayer mod apk for windows 7<br />
21
- car parking multiplayer hack windows 7<br />
22
- car parking multiplayer cheats for windows 7<br />
23
- car parking multiplayer update for windows 7<br />
24
- car parking multiplayer latest version windows 7<br />
25
- car parking multiplayer old version download for windows 7<br />
26
- car parking multiplayer android emulator for windows 7<br />
27
- car parking multiplayer custom cars windows 7<br />
28
- car parking multiplayer tuning windows 7<br />
29
- car parking multiplayer open world mode windows 7<br />
30
- car parking multiplayer voice chat windows 7<br />
31
- car parking multiplayer police mode windows 7<br />
32
- car parking multiplayer drift mode windows 7<br />
33
- car parking multiplayer tow truck windows 7<br />
34
- car parking multiplayer real gas stations windows 7<br />
35
- car parking multiplayer skins for windows 7<br />
36
- car parking multiplayer license plates windows 7<br />
37
- car parking multiplayer money glitch windows 7<br />
38
- car parking multiplayer unlimited money windows 7<br />
39
- car parking multiplayer premium cars windows 7<br />
40
- car parking multiplayer vip cars windows 7<br />
41
- car parking multiplayer best cars windows 7<br />
42
- car parking multiplayer fastest cars windows 7<br />
43
- car parking multiplayer rare cars windows 7<br />
44
- car parking multiplayer secret cars windows 7<br />
45
- car parking multiplayer new cars windows 7<br />
46
- car parking multiplayer body kits windows 7<br />
47
- car parking multiplayer engine swap windows 7<br />
48
- car parking multiplayer suspension adjustment windows 7<br />
49
- car parking multiplayer manual transmission windows 7<br />
50
- car parking multiplayer steering wheel support windows 7<br />
51
- car parking multiplayer realistic graphics windows 7<br />
52
- car parking multiplayer high resolution windows 7<br />
53
- car parking multiplayer low end pc windows 7<br />
54
- car parking multiplayer system requirements windows 7<br />
55
- car parking multiplayer tips and tricks windows 7<br />
56
- car parking multiplayer guide and walkthrough windows 7<br />
57
- car parking multiplayer gameplay videos for windows 7 <br />
58
- car parking multiplayer review and rating for windows 7 <br />
59
- car parking multiplayer feedback and suggestions for windows 7</p>
60
- <h2>How to download and install Car Parking Multiplayer on Windows 7</h2>
61
- <p>There are two methods that you can use to download and install Car Parking Multiplayer on Windows 7. The first method is using an Android emulator, which is a software that allows you to run Android applications on your PC. The second method is using a web browser, which is a simpler but less immersive way to play the game.</p>
62
- <h3>Method 1: Using an Android emulator</h3>
63
- <p>An Android emulator is a software that creates a virtual Android device on your PC, allowing you to run Android applications and games on your computer. There are many Android emulators available online, such as BlueStacks, LDPlayer, NoxPlayer, etc. Here are the steps to use an Android emulator to play Car Parking Multiplayer on Windows 7:</p>
64
- <h4>Step 1: Download and install an Android emulator</h4>
65
- <p>The first step is to download and install an Android emulator of your choice on your PC. You can go to the official website of the emulator or use a third-party source to download the emulator file. After downloading the file, you need to run it and follow the instructions to install the emulator on your PC. This may take some time depending on your internet speed and PC performance.</p>
66
- <h4>Step 2: Download the APK/XAPK file of Car Parking Multiplayer</h4>
67
- <p>The next step is to download the APK or XAPK file of Car Parking Multiplayer on your PC. APK and XAPK are file formats that contain the installation package of an Android application or game. You can download the APK/XAPK file of Car Parking Multiplayer from various sources online, such as APKPure, APKMirror, Uptodown, etc. Make sure to download the latest version of the file and save it in a folder that you can easily access.</p>
68
- <h4>Step 3: Install and launch Car Parking Multiplayer on the emulator</h4>
69
- <p>The final step is to install and launch Car Parking Multiplayer on the emulator. There are two ways to do this. The first way is to drag and drop the APK/XAPK file into the emulator window and wait for the installation to complete. The second way is to open the emulator and go to the built-in app store or browser and search for Car Parking Multiplayer and install it from there. After installing the game, you can launch it from the emulator's home screen or app drawer and enjoy playing it on your PC.</p>
70
- <h3>Method 2: Using a web browser</h3>
71
- <p>Another method that you can use to play Car Parking Multiplayer on Windows 7 is using a web browser. This method is simpler but less immersive than using an emulator. You don't need to download or install anything on your PC, but you need a stable internet connection and a compatible web browser. Here are the steps to use a web browser to play Car Parking Multiplayer on Windows 7:</p>
72
- <h4>Step 1: Go to the official website of Car Parking Multiplayer</h4>
73
- <p>The first step is to go to the official website of Car Parking Multiplayer, which is https://carparkingmultiplayer.com/. You can use any web browser that supports HTML5, such as Chrome, Firefox, Edge, Safari, etc.</p>
74
- <h4>Step 2: Click on the "Play Now" button</h4>
75
- <p>The next step is to click on the "Play Now" button on the website's homepage. This will open a new tab or window where you can play the game online.</p>
76
- <h4>Step 3: Enjoy the game on your browser</h4>
77
- <p>The final step is to enjoy the game on your browser. You can use your mouse and keyboard to control your car and interact with other players. You can also adjust the settings, such as graphics quality, sound volume, language, etc., by clicking on the gear icon on the top right corner of the screen.</p>
78
- <h2>Features of Car Parking Multiplayer</h2>
79
- <p>Car Parking Multiplayer is a game that offers many features that make it fun and realistic. Here are some of the features that you can enjoy in this game:</p>
80
- <h3>Open-world multiplayer mode</h3>
81
- <p>One of the main features of Car Parking Multiplayer is its open-world multiplayer mode, where you can join thousands of other players from around the world in various maps and locations. You can choose from different modes, such as free roam, racing, police chase, etc., and have fun with your friends or strangers. You can also chat with other players using voice or text messages.</p>
82
- <h3>Car customization and tuning</h3>
83
- <p>Another feature of Car Parking Multiplayer is its car customization and tuning system, where you can modify your car's appearance and performance according to your preference. You can change your car's color, wheels, spoilers, stickers, lights, etc., and make it look unique and cool. You can also tune your car's engine, suspension, brakes, transmission, etc., and improve its speed, handling, acceleration, etc.</p>
84
- <h3>High-quality graphics and sound effects</h3>
85
- <p>Car Parking Multiplayer also boasts high-quality graphics and sound effects that make it realistic and immersive. The game has realistic physics and animations that simulate real driving scenarios and situations. The game also has realistic sound effects that match the engine sounds, tire screeches, horn honks, etc., of different cars.</p>
86
- <h3>Interesting gameplay and challenges</h3>
87
- <p>Car Parking Multiplayer also has interesting gameplay and challenges that make it addictive and enjoyable. The game offers various missions and tasks that you can complete to earn money and rewards, different difficulty levels that test your driving skills and knowledge, and a ranking system that shows your progress and achievements.</p>
88
- <h2>Tips and tricks for Car Parking Multiplayer</h2>
89
- <p>Car Parking Multiplayer is a game that requires some skills and strategies to master. Here are some tips and tricks that can help you improve your gameplay and have more fun in this game:</p>
90
- <h3>How to select a car and a player</h3>
91
- <p>When you start the game, you can choose your car and your player from the garage menu. You can scroll through different categories of cars, such as sports, classic, SUV, etc., and select the one that suits your style and budget. You can also customize your car's appearance and performance from the same menu. To change your player, you can click on the avatar icon on the top left corner of the screen and select from different options of gender, skin color, hair style, clothing, etc.</p>
92
- <h3>How to understand gear ratio and drift</h3>
93
- <p>One of the features of Car Parking Multiplayer is its realistic gear ratio and drift system, which affects how your car behaves on the road. Gear ratio is the relationship between the engine speed and the wheel speed, which determines how fast or slow your car accelerates or decelerates. Drift is the sideways movement of your car when you turn at high speed, which can be controlled by using the handbrake or steering. To understand how gear ratio and drift work in this game, you can go to the settings menu and enable the "Gear Ratio" and "Drift" options. This will show you a graph and a meter that indicate how your car's gear ratio and drift change according to your actions.</p>
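<p>To make the gear-ratio idea concrete, here is a small back-of-the-envelope calculation. It is only an illustration with made-up numbers, not code or values taken from the game: road speed falls out of engine speed divided by the product of the gear ratio and the final-drive ratio, times the wheel circumference, so numerically higher ("shorter") gears trade top speed for acceleration.</p>
<pre><code># Toy illustration of how gear ratio links engine speed to road speed.
# All figures are invented example values, not taken from Car Parking Multiplayer.
import math

engine_rpm = 6000        # engine speed in revolutions per minute
gear_ratio = 3.2         # a typical "short" first gear
final_drive = 3.7        # differential (final-drive) ratio
tire_diameter_m = 0.65   # overall wheel diameter in metres

wheel_rpm = engine_rpm / (gear_ratio * final_drive)            # wheel revolutions per minute
speed_kmh = wheel_rpm * math.pi * tire_diameter_m * 60 / 1000  # circumference * rpm -> km/h

print(f"wheel rpm: {wheel_rpm:.0f}, road speed: {speed_kmh:.0f} km/h")
</code></pre>
<p>Swapping in a tall top gear (say a ratio of 0.8 instead of 3.2) pushes the computed road speed past 200 km/h at the same engine speed, which is exactly the trade-off the in-game gearbox tuning lets you play with.</p>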
94
- <h3>How to make money and buy new cars</h3>
95
- <p>Money is an important resource in Car Parking Multiplayer, as it allows you to buy new cars, upgrade your existing ones, or access premium features. There are several ways to make money in this game, such as completing missions, winning races, selling cars, watching ads, etc. You can also buy in-game currency with real money if you want to support the developers or get some extra cash. To buy new cars, you can go to the shop menu and browse through different categories of cars, such as exclusive, VIP, police, etc. You can also see the specifications and prices of each car before buying it.</p>
96
- <h3>How to communicate and interact with other players</h3>
97
- <p>Car Parking Multiplayer is a social game where you can communicate and interact with other players from around the world. You can chat with other players using voice or text messages by clicking on the microphone or keyboard icons on the bottom right corner of the screen. You can also join or create a room where you can invite your friends or strangers to play together. You can also follow or unfollow other players by clicking on their names or avatars on the map or leaderboard.</p>
98
- <h2>Conclusion</h2>
99
- <p>Car Parking Multiplayer is a game that offers a realistic and fun driving simulator experience that you can play on your PC. You can download and install Car Parking Multiplayer on Windows 7 using an Android emulator or a web browser. You can also enjoy various features, such as open-world multiplayer mode, car customization and tuning, high-quality graphics and sound effects, interesting gameplay and challenges, etc. You can also use some tips and tricks to improve your gameplay and have more fun in this game.</p>
100
- <h2>FAQs</h2>
101
- <p>Here are some frequently asked questions about Car Parking Multiplayer:</p>
102
- <ol>
103
- <li><b>Is Car Parking Multiplayer free to play?</b></li>
104
- <p>Yes, Car Parking Multiplayer is free to play on both mobile devices and PC. However, there are some in-app purchases that you can make to get more money, cars, or features.</p>
105
- <li><b>Is Car Parking Multiplayer safe to play?</b></li>
106
- <p>Yes, Car Parking Multiplayer is safe to play as long as you download it from a trusted source and use reliable antivirus software on your PC. However, be careful when chatting with other players online as they may use inappropriate language or try to scam you.</p>
107
- <li><b>Can I play Car Parking Multiplayer offline?</b></li>
108
- <p>No, Car Parking Multiplayer requires an internet connection to play online with other players or access some features. However, you can still play single-player mode offline if you have already downloaded the game on your device.</p>
109
- <li><b>How do I update Car Parking Multiplayer?</b></li>
110
- <p>To update Car Parking Multiplayer on your PC, you need to download the latest version of the APK/XAPK file from a trusted source and install it on your emulator or browser. Alternatively, you can check for updates from the app store or browser that you used to install the game.</p>
111
- <li><b>How do I contact the developers of Car Parking Multiplayer?</b></li>
112
- <p>To contact the developers of Car Parking Multiplayer, you can go to the settings menu and click on the "Contact Us" button. This will open a form where you can fill in your name, email, subject, and message. You can also follow the developers on their social media accounts, such as Facebook, Instagram, YouTube, etc.</p>
113
- </ol>
114
- <br />
115
- <br />
spaces/232labs/VToonify/vtoonify/model/encoder/readme.md DELETED
@@ -1,9 +0,0 @@
1
- # Encoding in Style: a StyleGAN Encoder for Image-to-Image Translation
2
-
3
- ## Description
4
- Official Implementation of pSp paper for both training and evaluation. The pSp method extends the StyleGAN model to
5
- allow solving different image-to-image translation problems using its encoder.
6
-
7
- Fork from [https://github.com/eladrich/pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel).
8
-
9
- In VToonify, we modify pSp to accept z+ latent code.
 
 
 
 
 
 
 
 
 
 
spaces/52Hz/SRMNet_real_world_denoising/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: SRMNet_real_denoising
3
- emoji: 🌪
4
- colorFrom: pink
5
- colorTo: yellow
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio`, `streamlit`, or `static`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
spaces/AFischer1985/German-Flan-T5/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: German Flan T5
3
- emoji: 🐠
4
- colorFrom: blue
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.18.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/utils.py DELETED
@@ -1,120 +0,0 @@
1
- import torch
2
- import numpy as np
3
- from tqdm import tqdm
4
- import json
5
-
6
-
7
- def load_data(file_name: str = "./uvr5_pack/name_params.json") -> dict:
8
- with open(file_name, "r") as f:
9
- data = json.load(f)
10
-
11
- return data
12
-
13
-
14
- def make_padding(width, cropsize, offset):
15
- left = offset
16
- roi_size = cropsize - left * 2
17
- if roi_size == 0:
18
- roi_size = cropsize
19
- right = roi_size - (width % roi_size) + left
20
-
21
- return left, right, roi_size
22
-
23
-
24
- def inference(X_spec, device, model, aggressiveness, data):
25
- """
26
- data : dic configs
27
- """
28
-
29
- def _execute(
30
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
31
- ):
32
- model.eval()
33
- with torch.no_grad():
34
- preds = []
35
-
36
- iterations = [n_window]
37
-
38
- total_iterations = sum(iterations)
39
- for i in tqdm(range(n_window)):
40
- start = i * roi_size
41
- X_mag_window = X_mag_pad[
42
- None, :, :, start : start + data["window_size"]
43
- ]
44
- X_mag_window = torch.from_numpy(X_mag_window)
45
- if is_half:
46
- X_mag_window = X_mag_window.half()
47
- X_mag_window = X_mag_window.to(device)
48
-
49
- pred = model.predict(X_mag_window, aggressiveness)
50
-
51
- pred = pred.detach().cpu().numpy()
52
- preds.append(pred[0])
53
-
54
- pred = np.concatenate(preds, axis=2)
55
- return pred
56
-
57
- def preprocess(X_spec):
58
- X_mag = np.abs(X_spec)
59
- X_phase = np.angle(X_spec)
60
-
61
- return X_mag, X_phase
62
-
63
- X_mag, X_phase = preprocess(X_spec)
64
-
65
- coef = X_mag.max()
66
- X_mag_pre = X_mag / coef
67
-
68
- n_frame = X_mag_pre.shape[2]
69
- pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
70
- n_window = int(np.ceil(n_frame / roi_size))
71
-
72
- X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
73
-
74
- if list(model.state_dict().values())[0].dtype == torch.float16:
75
- is_half = True
76
- else:
77
- is_half = False
78
- pred = _execute(
79
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
80
- )
81
- pred = pred[:, :, :n_frame]
82
-
83
- if data["tta"]:
84
- pad_l += roi_size // 2
85
- pad_r += roi_size // 2
86
- n_window += 1
87
-
88
- X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
89
-
90
- pred_tta = _execute(
91
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
92
- )
93
- pred_tta = pred_tta[:, :, roi_size // 2 :]
94
- pred_tta = pred_tta[:, :, :n_frame]
95
-
96
- return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
97
- else:
98
- return pred * coef, X_mag, np.exp(1.0j * X_phase)
99
-
100
-
101
- def _get_name_params(model_path, model_hash):
102
- data = load_data()
103
- flag = False
104
- ModelName = model_path
105
- for type in list(data):
106
- for model in list(data[type][0]):
107
- for i in range(len(data[type][0][model])):
108
- if str(data[type][0][model][i]["hash_name"]) == model_hash:
109
- flag = True
110
- elif str(data[type][0][model][i]["hash_name"]) in ModelName:
111
- flag = True
112
-
113
- if flag:
114
- model_params_auto = data[type][0][model][i]["model_params"]
115
- param_name_auto = data[type][0][model][i]["param_name"]
116
- if type == "equivalent":
117
- return param_name_auto, model_params_auto
118
- else:
119
- flag = False
120
- return param_name_auto, model_params_auto
spaces/AIConsultant/MusicGen/tests/modules/test_rope.py DELETED
@@ -1,168 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import torch
8
-
9
- from audiocraft.modules.rope import RotaryEmbedding
10
- from audiocraft.modules.transformer import StreamingTransformer, set_efficient_attention_backend
11
-
12
-
13
- def test_rope():
14
- set_efficient_attention_backend('xformers')
15
- B, T, H, C = 8, 75, 16, 128
16
-
17
- rope = RotaryEmbedding(dim=C)
18
- xq = torch.rand((B, T, H, C))
19
- xk = torch.rand((B, T, H, C))
20
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
21
-
22
- assert list(xq_out.shape) == [B, T, H, C]
23
- assert list(xk_out.shape) == [B, T, H, C]
24
-
25
-
26
- def test_rope_io_dtypes():
27
- set_efficient_attention_backend('xformers')
28
- B, T, H, C = 8, 75, 16, 128
29
-
30
- rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32)
31
- rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64)
32
-
33
- # Test bfloat16 inputs w/ both 32 and 64 precision rope.
34
- xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
35
- xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
36
- xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16)
37
- assert xq_out.dtype == torch.bfloat16
38
- xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16)
39
- assert xq_out.dtype == torch.bfloat16
40
-
41
- # Test float32 inputs w/ both 32 and 64 precision rope.
42
- xq_32 = torch.rand((B, T, H, C)).to(torch.float32)
43
- xk_32 = torch.rand((B, T, H, C)).to(torch.float32)
44
- xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32)
45
- assert xq_out.dtype == torch.float32
46
- xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32)
47
- assert xq_out.dtype == torch.float32
48
-
49
-
50
- def test_transformer_with_rope():
51
- set_efficient_attention_backend('xformers')
52
- torch.manual_seed(1234)
53
- for pos in ['rope', 'sin_rope']:
54
- tr = StreamingTransformer(
55
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
56
- positional_embedding=pos)
57
- tr.eval()
58
- steps = 12
59
- x = torch.randn(3, steps, 16)
60
-
61
- out = tr(x)
62
- assert list(out.shape) == list(x.shape)
63
-
64
-
65
- @torch.no_grad()
66
- def test_rope_streaming():
67
- set_efficient_attention_backend('xformers')
68
- torch.manual_seed(1234)
69
- tr = StreamingTransformer(
70
- 16, 4, 2, causal=True, dropout=0.,
71
- custom=True, positional_embedding='rope')
72
- tr.eval()
73
- steps = 12
74
- x = torch.randn(3, steps, 16)
75
-
76
- ref = tr(x)
77
-
78
- with tr.streaming():
79
- outs = []
80
- frame_sizes = [1] * steps
81
-
82
- for frame_size in frame_sizes:
83
- frame = x[:, :frame_size]
84
- x = x[:, frame_size:]
85
- outs.append(tr(frame))
86
-
87
- out = torch.cat(outs, dim=1)
88
- assert list(out.shape) == [3, steps, 16]
89
- delta = torch.norm(out - ref) / torch.norm(out)
90
- assert delta < 1e-6, delta
91
-
92
-
93
- @torch.no_grad()
94
- def test_rope_streaming_past_context():
95
- set_efficient_attention_backend('xformers')
96
- torch.manual_seed(1234)
97
-
98
- for context in [None, 10]:
99
- tr = StreamingTransformer(
100
- 16, 4, 1 if context else 2,
101
- causal=True, past_context=context, custom=True,
102
- dropout=0., positional_embedding='rope')
103
- tr.eval()
104
-
105
- steps = 20
106
- x = torch.randn(3, steps, 16)
107
- ref = tr(x)
108
-
109
- with tr.streaming():
110
- outs = []
111
- frame_sizes = [1] * steps
112
-
113
- for frame_size in frame_sizes:
114
- frame = x[:, :frame_size]
115
- x = x[:, frame_size:]
116
- outs.append(tr(frame))
117
-
118
- out = torch.cat(outs, dim=1)
119
- assert list(out.shape) == [3, steps, 16]
120
- delta = torch.norm(out - ref) / torch.norm(out)
121
- assert delta < 1e-6, delta
122
-
123
-
124
- def test_rope_memory_efficient():
125
- set_efficient_attention_backend('xformers')
126
- torch.manual_seed(1234)
127
- tr = StreamingTransformer(
128
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
129
- positional_embedding='rope')
130
- tr_mem_efficient = StreamingTransformer(
131
- 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1,
132
- positional_embedding='rope')
133
- tr_mem_efficient.load_state_dict(tr.state_dict())
134
- tr.eval()
135
- steps = 12
136
- x = torch.randn(3, steps, 16)
137
-
138
- with torch.no_grad():
139
- y = tr(x)
140
- y2 = tr_mem_efficient(x)
141
- # Check at float precision b/c this is the rope default.
142
- assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm()
143
-
144
-
145
- def test_rope_with_xpos():
146
- set_efficient_attention_backend('xformers')
147
- B, T, H, C = 8, 75, 16, 128
148
-
149
- rope = RotaryEmbedding(dim=C, xpos=True)
150
- xq = torch.rand((B, T, H, C))
151
- xk = torch.rand((B, T, H, C))
152
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
153
-
154
- assert list(xq_out.shape) == [B, T, H, C]
155
- assert list(xk_out.shape) == [B, T, H, C]
156
-
157
-
158
- def test_positional_scale():
159
- set_efficient_attention_backend('xformers')
160
- B, T, H, C = 8, 75, 16, 128
161
-
162
- rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0)
163
- xq = torch.rand((B, T, H, C))
164
- xk = torch.rand((B, T, H, C))
165
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
166
-
167
- assert torch.allclose(xq, xq_out)
168
- assert torch.allclose(xk, xk_out)
spaces/AIFILMS/StyleGANEX/models/psp.py DELETED
@@ -1,148 +0,0 @@
1
- """
2
- This file defines the core research contribution
3
- """
4
- import matplotlib
5
- matplotlib.use('Agg')
6
- import math
7
-
8
- import torch
9
- from torch import nn
10
- from models.encoders import psp_encoders
11
- from models.stylegan2.model import Generator
12
- from configs.paths_config import model_paths
13
- import torch.nn.functional as F
14
-
15
- def get_keys(d, name):
16
- if 'state_dict' in d:
17
- d = d['state_dict']
18
- d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
19
- return d_filt
20
-
21
-
22
- class pSp(nn.Module):
23
-
24
- def __init__(self, opts, ckpt=None):
25
- super(pSp, self).__init__()
26
- self.set_opts(opts)
27
- # compute number of style inputs based on the output resolution
28
- self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2
29
- # Define architecture
30
- self.encoder = self.set_encoder()
31
- self.decoder = Generator(self.opts.output_size, 512, 8)
32
- self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
33
- # Load weights if needed
34
- self.load_weights(ckpt)
35
-
36
- def set_encoder(self):
37
- if self.opts.encoder_type == 'GradualStyleEncoder':
38
- encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
39
- elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW':
40
- encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts)
41
- elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus':
42
- encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts)
43
- else:
44
- raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))
45
- return encoder
46
-
47
- def load_weights(self, ckpt=None):
48
- if self.opts.checkpoint_path is not None:
49
- print('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path))
50
- if ckpt is None:
51
- ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
52
- self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=False)
53
- self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=False)
54
- self.__load_latent_avg(ckpt)
55
- else:
56
- print('Loading encoders weights from irse50!')
57
- encoder_ckpt = torch.load(model_paths['ir_se50'])
58
- # if input to encoder is not an RGB image, do not load the input layer weights
59
- if self.opts.label_nc != 0:
60
- encoder_ckpt = {k: v for k, v in encoder_ckpt.items() if "input_layer" not in k}
61
- self.encoder.load_state_dict(encoder_ckpt, strict=False)
62
- print('Loading decoder weights from pretrained!')
63
- ckpt = torch.load(self.opts.stylegan_weights)
64
- self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
65
- if self.opts.learn_in_w:
66
- self.__load_latent_avg(ckpt, repeat=1)
67
- else:
68
- self.__load_latent_avg(ckpt, repeat=self.opts.n_styles)
69
- # for video toonification, we load G0' model
70
- if self.opts.toonify_weights is not None: ##### modified
71
- ckpt = torch.load(self.opts.toonify_weights)
72
- self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
73
- self.opts.toonify_weights = None
74
-
75
- # x1: image for first-layer feature f.
76
- # x2: image for style latent code w+. If not specified, x2=x1.
77
- # inject_latent: for sketch/mask-to-face translation, another latent code to fuse with w+
78
- # latent_mask: fuse w+ and inject_latent with the mask (1~7 use w+ and 8~18 use inject_latent)
79
- # use_feature: use f. Otherwise, use the orginal StyleGAN first-layer constant 4*4 feature
80
- # first_layer_feature_ind: always=0, means the 1st layer of G accept f
81
- # use_skip: use skip connection.
82
- # zero_noise: use zero noises.
83
- # editing_w: the editing vector v for video face editing
84
- def forward(self, x1, x2=None, resize=True, latent_mask=None, randomize_noise=True,
85
- inject_latent=None, return_latents=False, alpha=None, use_feature=True,
86
- first_layer_feature_ind=0, use_skip=False, zero_noise=False, editing_w=None): ##### modified
87
-
88
- feats = None # f and the skipped encoder features
89
- codes, feats = self.encoder(x1, return_feat=True, return_full=use_skip) ##### modified
90
- if x2 is not None: ##### modified
91
- codes = self.encoder(x2) ##### modified
92
- # normalize with respect to the center of an average face
93
- if self.opts.start_from_latent_avg:
94
- if self.opts.learn_in_w:
95
- codes = codes + self.latent_avg.repeat(codes.shape[0], 1)
96
- else:
97
- codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
98
-
99
- # E_W^{1:7}(T(x1)) concatenate E_W^{8:18}(w~)
100
- if latent_mask is not None:
101
- for i in latent_mask:
102
- if inject_latent is not None:
103
- if alpha is not None:
104
- codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
105
- else:
106
- codes[:, i] = inject_latent[:, i]
107
- else:
108
- codes[:, i] = 0
109
-
110
- first_layer_feats, skip_layer_feats, fusion = None, None, None ##### modified
111
- if use_feature: ##### modified
112
- first_layer_feats = feats[0:2] # use f
113
- if use_skip: ##### modified
114
- skip_layer_feats = feats[2:] # use skipped encoder feature
115
- fusion = self.encoder.fusion # use fusion layer to fuse encoder feature and decoder feature.
116
-
117
- images, result_latent = self.decoder([codes],
118
- input_is_latent=True,
119
- randomize_noise=randomize_noise,
120
- return_latents=return_latents,
121
- first_layer_feature=first_layer_feats,
122
- first_layer_feature_ind=first_layer_feature_ind,
123
- skip_layer_feature=skip_layer_feats,
124
- fusion_block=fusion,
125
- zero_noise=zero_noise,
126
- editing_w=editing_w) ##### modified
127
-
128
- if resize:
129
- if self.opts.output_size == 1024: ##### modified
130
- images = F.adaptive_avg_pool2d(images, (images.shape[2]//4, images.shape[3]//4)) ##### modified
131
- else:
132
- images = self.face_pool(images)
133
-
134
- if return_latents:
135
- return images, result_latent
136
- else:
137
- return images
138
-
139
- def set_opts(self, opts):
140
- self.opts = opts
141
-
142
- def __load_latent_avg(self, ckpt, repeat=None):
143
- if 'latent_avg' in ckpt:
144
- self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
145
- if repeat is not None:
146
- self.latent_avg = self.latent_avg.repeat(repeat, 1)
147
- else:
148
- self.latent_avg = None
spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/biaobei/preprocess.py DELETED
@@ -1,16 +0,0 @@
1
- from data_gen.tts.base_preprocess import BasePreprocessor
2
- import re
3
-
4
-
5
- class BiaobeiPreprocess(BasePreprocessor):
6
- def meta_data(self):
7
- input_dir = self.raw_data_dir
8
- with open(f"{input_dir}/ProsodyLabeling/000001-010000.txt", encoding='utf-8') as f:
9
- bb_lines = f.readlines()[::2]
10
- for l_idx, l in (enumerate([re.sub("\#\d+", "", l.split('\t')[1].strip()) for l in bb_lines])):
11
- item_name = f'{l_idx + 1:06d}'
12
- wav_fn = f"{input_dir}/wav/{l_idx + 1:06d}.wav"
13
- yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l}
14
-
15
- if __name__ == "__main__":
16
- BiaobeiPreprocess().process()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/metrics.py DELETED
@@ -1,69 +0,0 @@
1
- import logging
2
-
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from sklearn.metrics import average_precision_score, roc_auc_score
7
-
8
- logger = logging.getLogger(f'main.{__name__}')
9
-
10
- def metrics(targets, outputs, topk=(1, 5)):
11
- """
12
- Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py
13
-
14
- Calculate statistics including mAP, AUC, and d-prime.
15
- Args:
16
- output: 2d tensors, (dataset_size, classes_num) - before softmax
17
- target: 1d tensors, (dataset_size, )
18
- topk: tuple
19
- Returns:
20
- metric_dict: a dict of metrics
21
- """
22
- metrics_dict = dict()
23
-
24
- num_cls = outputs.shape[-1]
25
-
26
- # accuracy@k
27
- _, preds = torch.topk(outputs, k=max(topk), dim=1)
28
- correct_for_maxtopk = preds == targets.view(-1, 1).expand_as(preds)
29
- for k in topk:
30
- metrics_dict[f'accuracy_{k}'] = float(correct_for_maxtopk[:, :k].sum() / correct_for_maxtopk.shape[0])
31
-
32
- # avg precision, average roc_auc, and dprime
33
- targets = torch.nn.functional.one_hot(targets, num_classes=num_cls)
34
-
35
- # ids of the predicted classes (same as softmax)
36
- targets_pred = torch.softmax(outputs, dim=1)
37
-
38
- targets = targets.numpy()
39
- targets_pred = targets_pred.numpy()
40
-
41
- # one-vs-rest
42
- avg_p = [average_precision_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
43
- try:
44
- roc_aucs = [roc_auc_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
45
- except ValueError:
46
- logger.warning('Weird... Some classes never occured in targets. Do not trust the metrics.')
47
- roc_aucs = np.array([0.5])
48
- avg_p = np.array([0])
49
-
50
- metrics_dict['mAP'] = np.mean(avg_p)
51
- metrics_dict['mROCAUC'] = np.mean(roc_aucs)
52
- # Percent point function (ppf) (inverse of cdf — percentiles).
53
- metrics_dict['dprime'] = scipy.stats.norm().ppf(metrics_dict['mROCAUC']) * np.sqrt(2)
54
-
55
- return metrics_dict
56
-
57
-
58
- if __name__ == '__main__':
59
- targets = torch.tensor([3, 3, 1, 2, 1, 0])
60
- outputs = torch.tensor([
61
- [1.2, 1.3, 1.1, 1.5],
62
- [1.3, 1.4, 1.0, 1.1],
63
- [1.5, 1.1, 1.4, 1.3],
64
- [1.0, 1.2, 1.4, 1.5],
65
- [1.2, 1.3, 1.1, 1.1],
66
- [1.2, 1.1, 1.1, 1.1],
67
- ]).float()
68
- metrics_dict = metrics(targets, outputs, topk=(1, 3))
69
- print(metrics_dict)
spaces/AIML-TUDA/unsafe-vs-safe-stable-diffusion/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Stable Diffusion vs. Safe Stable Diffusion
3
- colorFrom: blue
4
- colorTo: red
5
- emoji: 😇
6
- sdk: gradio
7
- sdk_version: 3.4
8
- app_file: app.py
9
- pinned: true
10
- license: creativeml-openrail-m
11
- duplicated_from: AIML-TUDA/safe-stable-diffusion
12
- ---
13
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Abhilashvj/planogram-compliance/detect.py DELETED
@@ -1,460 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
4
-
5
- Usage - sources:
6
- $ python detect.py --weights yolov5s.pt --source 0 # webcam
7
- img.jpg # image
8
- vid.mp4 # video
9
- screen # screenshot
10
- path/ # directory
11
- list.txt # list of images
12
- list.streams # list of streams
13
- 'path/*.jpg' # glob
14
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
15
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
-
17
- Usage - formats:
18
- $ python detect.py --weights yolov5s.pt # PyTorch
19
- yolov5s.torchscript # TorchScript
20
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
21
- yolov5s_openvino_model # OpenVINO
22
- yolov5s.engine # TensorRT
23
- yolov5s.mlmodel # CoreML (macOS-only)
24
- yolov5s_saved_model # TensorFlow SavedModel
25
- yolov5s.pb # TensorFlow GraphDef
26
- yolov5s.tflite # TensorFlow Lite
27
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
28
- yolov5s_paddle_model # PaddlePaddle
29
- """
30
-
31
- import argparse
32
- import os
33
- import platform
34
- import sys
35
- from pathlib import Path
36
-
37
- import torch
38
-
39
- FILE = Path(__file__).resolve()
40
- ROOT = FILE.parents[0] # YOLOv5 root directory
41
- if str(ROOT) not in sys.path:
42
- sys.path.append(str(ROOT)) # add ROOT to PATH
43
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
44
-
45
- from models.common import DetectMultiBackend
46
- from utils.dataloaders import (
47
- IMG_FORMATS,
48
- VID_FORMATS,
49
- LoadImages,
50
- LoadScreenshots,
51
- LoadStreams,
52
- )
53
- from utils.general import (
54
- LOGGER,
55
- Profile,
56
- check_file,
57
- check_img_size,
58
- check_imshow,
59
- check_requirements,
60
- colorstr,
61
- cv2,
62
- increment_path,
63
- non_max_suppression,
64
- print_args,
65
- scale_boxes,
66
- strip_optimizer,
67
- xyxy2xywh,
68
- )
69
- from utils.plots import Annotator, colors, save_one_box
70
- from utils.torch_utils import select_device, smart_inference_mode
71
-
72
-
73
- @smart_inference_mode()
74
- def run(
75
- weights=ROOT / "yolov5s.pt", # model path or triton URL
76
- source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
77
- data=ROOT / "data/coco128.yaml", # dataset.yaml path
78
- imgsz=(640, 640), # inference size (height, width)
79
- conf_thres=0.25, # confidence threshold
80
- iou_thres=0.45, # NMS IOU threshold
81
- max_det=1000, # maximum detections per image
82
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
83
- view_img=False, # show results
84
- save_txt=False, # save results to *.txt
85
- save_conf=False, # save confidences in --save-txt labels
86
- save_crop=False, # save cropped prediction boxes
87
- nosave=False, # do not save images/videos
88
- classes=None, # filter by class: --class 0, or --class 0 2 3
89
- agnostic_nms=False, # class-agnostic NMS
90
- augment=False, # augmented inference
91
- visualize=False, # visualize features
92
- update=False, # update all models
93
- project=ROOT / "runs/detect", # save results to project/name
94
- name="exp", # save results to project/name
95
- exist_ok=False, # existing project/name ok, do not increment
96
- line_thickness=3, # bounding box thickness (pixels)
97
- hide_labels=False, # hide labels
98
- hide_conf=False, # hide confidences
99
- half=False, # use FP16 half-precision inference
100
- dnn=False, # use OpenCV DNN for ONNX inference
101
- vid_stride=1, # video frame-rate stride
102
- ):
103
- source = str(source)
104
- save_img = not nosave and not source.endswith(
105
- ".txt"
106
- ) # save inference images
107
- is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
108
- is_url = source.lower().startswith(
109
- ("rtsp://", "rtmp://", "http://", "https://")
110
- )
111
- webcam = (
112
- source.isnumeric()
113
- or source.endswith(".streams")
114
- or (is_url and not is_file)
115
- )
116
- screenshot = source.lower().startswith("screen")
117
- if is_url and is_file:
118
- source = check_file(source) # download
119
-
120
- # Directories
121
- save_dir = increment_path(
122
- Path(project) / name, exist_ok=exist_ok
123
- ) # increment run
124
- (save_dir / "labels" if save_txt else save_dir).mkdir(
125
- parents=True, exist_ok=True
126
- ) # make dir
127
-
128
- # Load model
129
- device = select_device(device)
130
- model = DetectMultiBackend(
131
- weights, device=device, dnn=dnn, data=data, fp16=half
132
- )
133
- stride, names, pt = model.stride, model.names, model.pt
134
- imgsz = check_img_size(imgsz, s=stride) # check image size
135
-
136
- # Dataloader
137
- bs = 1 # batch_size
138
- if webcam:
139
- view_img = check_imshow(warn=True)
140
- dataset = LoadStreams(
141
- source,
142
- img_size=imgsz,
143
- stride=stride,
144
- auto=pt,
145
- vid_stride=vid_stride,
146
- )
147
- bs = len(dataset)
148
- elif screenshot:
149
- dataset = LoadScreenshots(
150
- source, img_size=imgsz, stride=stride, auto=pt
151
- )
152
- else:
153
- dataset = LoadImages(
154
- source,
155
- img_size=imgsz,
156
- stride=stride,
157
- auto=pt,
158
- vid_stride=vid_stride,
159
- )
160
- vid_path, vid_writer = [None] * bs, [None] * bs
161
-
162
- # Run inference
163
- model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
164
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
165
- for path, im, im0s, vid_cap, s in dataset:
166
- with dt[0]:
167
- im = torch.from_numpy(im).to(model.device)
168
- im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
169
- im /= 255 # 0 - 255 to 0.0 - 1.0
170
- if len(im.shape) == 3:
171
- im = im[None] # expand for batch dim
172
-
173
- # Inference
174
- with dt[1]:
175
- visualize = (
176
- increment_path(save_dir / Path(path).stem, mkdir=True)
177
- if visualize
178
- else False
179
- )
180
- pred = model(im, augment=augment, visualize=visualize)
181
-
182
- # NMS
183
- with dt[2]:
184
- pred = non_max_suppression(
185
- pred,
186
- conf_thres,
187
- iou_thres,
188
- classes,
189
- agnostic_nms,
190
- max_det=max_det,
191
- )
192
-
193
- # Second-stage classifier (optional)
194
- # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
195
-
196
- # Process predictions
197
- for i, det in enumerate(pred): # per image
198
- seen += 1
199
- if webcam: # batch_size >= 1
200
- p, im0, frame = path[i], im0s[i].copy(), dataset.count
201
- s += f"{i}: "
202
- else:
203
- p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
204
-
205
- p = Path(p) # to Path
206
- save_path = str(save_dir / p.name) # im.jpg
207
- txt_path = str(save_dir / "labels" / p.stem) + (
208
- "" if dataset.mode == "image" else f"_{frame}"
209
- ) # im.txt
210
- s += "%gx%g " % im.shape[2:] # print string
211
- gn = torch.tensor(im0.shape)[
212
- [1, 0, 1, 0]
213
- ] # normalization gain whwh
214
- imc = im0.copy() if save_crop else im0 # for save_crop
215
- annotator = Annotator(
216
- im0, line_width=line_thickness, example=str(names)
217
- )
218
- if len(det):
219
- # Rescale boxes from img_size to im0 size
220
- det[:, :4] = scale_boxes(
221
- im.shape[2:], det[:, :4], im0.shape
222
- ).round()
223
-
224
- # Print results
225
- for c in det[:, 5].unique():
226
- n = (det[:, 5] == c).sum() # detections per class
227
- s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
228
-
229
- # Write results
230
- for *xyxy, conf, cls in reversed(det):
231
- if save_txt: # Write to file
232
- xywh = (
233
- (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn)
234
- .view(-1)
235
- .tolist()
236
- ) # normalized xywh
237
- line = (
238
- (cls, *xywh, conf) if save_conf else (cls, *xywh)
239
- ) # label format
240
- with open(f"{txt_path}.txt", "a") as f:
241
- f.write(("%g " * len(line)).rstrip() % line + "\n")
242
-
243
- if save_img or save_crop or view_img: # Add bbox to image
244
- c = int(cls) # integer class
245
- label = (
246
- None
247
- if hide_labels
248
- else (
249
- names[c]
250
- if hide_conf
251
- else f"{names[c]} {conf:.2f}"
252
- )
253
- )
254
- annotator.box_label(xyxy, label, color=colors(c, True))
255
- if save_crop:
256
- save_one_box(
257
- xyxy,
258
- imc,
259
- file=save_dir
260
- / "crops"
261
- / names[c]
262
- / f"{p.stem}.jpg",
263
- BGR=True,
264
- )
265
-
266
- # Stream results
267
- im0 = annotator.result()
268
- if view_img:
269
- if platform.system() == "Linux" and p not in windows:
270
- windows.append(p)
271
- cv2.namedWindow(
272
- str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO
273
- ) # allow window resize (Linux)
274
- cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
275
- cv2.imshow(str(p), im0)
276
- cv2.waitKey(1) # 1 millisecond
277
-
278
- # Save results (image with detections)
279
- if save_img:
280
- if dataset.mode == "image":
281
- cv2.imwrite(save_path, im0)
282
- else: # 'video' or 'stream'
283
- if vid_path[i] != save_path: # new video
284
- vid_path[i] = save_path
285
- if isinstance(vid_writer[i], cv2.VideoWriter):
286
- vid_writer[
287
- i
288
- ].release() # release previous video writer
289
- if vid_cap: # video
290
- fps = vid_cap.get(cv2.CAP_PROP_FPS)
291
- w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
292
- h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
293
- else: # stream
294
- fps, w, h = 30, im0.shape[1], im0.shape[0]
295
- save_path = str(
296
- Path(save_path).with_suffix(".mp4")
297
- ) # force *.mp4 suffix on results videos
298
- vid_writer[i] = cv2.VideoWriter(
299
- save_path,
300
- cv2.VideoWriter_fourcc(*"mp4v"),
301
- fps,
302
- (w, h),
303
- )
304
- vid_writer[i].write(im0)
305
-
306
- # Print time (inference-only)
307
- LOGGER.info(
308
- f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms"
309
- )
310
-
311
- # Print results
312
- t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
313
- LOGGER.info(
314
- f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}"
315
- % t
316
- )
317
- if save_txt or save_img:
318
- s = (
319
- f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
320
- if save_txt
321
- else ""
322
- )
323
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
324
- if update:
325
- strip_optimizer(
326
- weights[0]
327
- ) # update model (to fix SourceChangeWarning)
328
-
329
-
330
- def parse_opt():
331
- parser = argparse.ArgumentParser()
332
- parser.add_argument(
333
- "--weights",
334
- nargs="+",
335
- type=str,
336
- default=ROOT / "yolov5s.pt",
337
- help="model path or triton URL",
338
- )
339
- parser.add_argument(
340
- "--source",
341
- type=str,
342
- default=ROOT / "data/images",
343
- help="file/dir/URL/glob/screen/0(webcam)",
344
- )
345
- parser.add_argument(
346
- "--data",
347
- type=str,
348
- default=ROOT / "data/coco128.yaml",
349
- help="(optional) dataset.yaml path",
350
- )
351
- parser.add_argument(
352
- "--imgsz",
353
- "--img",
354
- "--img-size",
355
- nargs="+",
356
- type=int,
357
- default=[640],
358
- help="inference size h,w",
359
- )
360
- parser.add_argument(
361
- "--conf-thres", type=float, default=0.25, help="confidence threshold"
362
- )
363
- parser.add_argument(
364
- "--iou-thres", type=float, default=0.45, help="NMS IoU threshold"
365
- )
366
- parser.add_argument(
367
- "--max-det",
368
- type=int,
369
- default=1000,
370
- help="maximum detections per image",
371
- )
372
- parser.add_argument(
373
- "--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
374
- )
375
- parser.add_argument("--view-img", action="store_true", help="show results")
376
- parser.add_argument(
377
- "--save-txt", action="store_true", help="save results to *.txt"
378
- )
379
- parser.add_argument(
380
- "--save-conf",
381
- action="store_true",
382
- help="save confidences in --save-txt labels",
383
- )
384
- parser.add_argument(
385
- "--save-crop",
386
- action="store_true",
387
- help="save cropped prediction boxes",
388
- )
389
- parser.add_argument(
390
- "--nosave", action="store_true", help="do not save images/videos"
391
- )
392
- parser.add_argument(
393
- "--classes",
394
- nargs="+",
395
- type=int,
396
- help="filter by class: --classes 0, or --classes 0 2 3",
397
- )
398
- parser.add_argument(
399
- "--agnostic-nms", action="store_true", help="class-agnostic NMS"
400
- )
401
- parser.add_argument(
402
- "--augment", action="store_true", help="augmented inference"
403
- )
404
- parser.add_argument(
405
- "--visualize", action="store_true", help="visualize features"
406
- )
407
- parser.add_argument(
408
- "--update", action="store_true", help="update all models"
409
- )
410
- parser.add_argument(
411
- "--project",
412
- default=ROOT / "runs/detect",
413
- help="save results to project/name",
414
- )
415
- parser.add_argument(
416
- "--name", default="exp", help="save results to project/name"
417
- )
418
- parser.add_argument(
419
- "--exist-ok",
420
- action="store_true",
421
- help="existing project/name ok, do not increment",
422
- )
423
- parser.add_argument(
424
- "--line-thickness",
425
- default=3,
426
- type=int,
427
- help="bounding box thickness (pixels)",
428
- )
429
- parser.add_argument(
430
- "--hide-labels", default=False, action="store_true", help="hide labels"
431
- )
432
- parser.add_argument(
433
- "--hide-conf",
434
- default=False,
435
- action="store_true",
436
- help="hide confidences",
437
- )
438
- parser.add_argument(
439
- "--half", action="store_true", help="use FP16 half-precision inference"
440
- )
441
- parser.add_argument(
442
- "--dnn", action="store_true", help="use OpenCV DNN for ONNX inference"
443
- )
444
- parser.add_argument(
445
- "--vid-stride", type=int, default=1, help="video frame-rate stride"
446
- )
447
- opt = parser.parse_args()
448
- opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
449
- print_args(vars(opt))
450
- return opt
451
-
452
-
453
- def main(opt):
454
- check_requirements(exclude=("tensorboard", "thop"))
455
- run(**vars(opt))
456
-
457
-
458
- if __name__ == "__main__":
459
- opt = parse_opt()
460
- main(opt)
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/CreateColorPicker.js DELETED
@@ -1,56 +0,0 @@
1
- import ColorPicker from './ColorPicker.js';
2
-
3
- const GetValue = Phaser.Utils.Objects.GetValue;
4
-
5
- var CreateColorPicker = function (scene) {
6
- var scene = this.scene;
7
-
8
- var background;
9
- var createBackgroundCallback = this.colorPickerCreateBackgroundCallback;
10
- if (createBackgroundCallback) {
11
- background = createBackgroundCallback.call(this, scene);
12
- scene.add.existing(background);
13
- }
14
-
15
- var width = this.colorPickerWidth;
16
- if (width === undefined) {
17
- width = this.width;
18
- }
19
-
20
- var height = this.colorPickerHeight;
21
- if (height === undefined) {
22
- height = width;
23
- }
24
-
25
- var colorComponentsConfig;
26
- if (this.colorComponentsHeight > 0) {
27
- colorComponentsConfig = {
28
- height: this.colorComponentsHeight,
29
- formatLabel: this.colorComponentsFormatLabelConfig,
30
- inputText: this.colorComponentsInputTextConfig,
31
- space: this.colorComponentsSpace,
32
- }
33
- } else {
34
- colorComponentsConfig = false;
35
- }
36
-
37
- var colorPicker = new ColorPicker(scene, {
38
- width: width, height: height,
39
-
40
- background: background,
41
- space: this.colorPickerSpace,
42
-
43
- hPalette: {
44
- position: this.colorPickerHPalettePosition,
45
- },
46
-
47
- colorComponents: colorComponentsConfig,
48
-
49
- value: this.value
50
- });
51
- scene.add.existing(colorPicker);
52
-
53
- return colorPicker;
54
- }
55
-
56
- export default CreateColorPicker;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateVideo.js DELETED
@@ -1,23 +0,0 @@
1
- import MergeStyle from './utils/MergeStyle.js';
2
- import SetTextureProperties from './utils/SetTextureProperties.js';
3
-
4
- const PhaserVideo = Phaser.GameObjects.Video;
5
-
6
- var CreateVideo = function (scene, data, view, styles, customBuilders) {
7
- data = MergeStyle(data, styles);
8
- var gameObject = new PhaserVideo(scene, 0, 0, data.key);
9
-
10
- if (data.width !== undefined) {
11
- gameObject.setDisplayWidth(data.width);
12
- }
13
- if (data.height !== undefined) {
14
- gameObject.setDisplayHeight(data.height);
15
- }
16
-
17
- SetTextureProperties(gameObject, data);
18
-
19
- scene.add.existing(gameObject);
20
- return gameObject;
21
- }
22
-
23
- export default CreateVideo;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/RoundRectangle.js DELETED
@@ -1,2 +0,0 @@
1
- import RoundRectangle from '../../../plugins/roundrectangle.js';
2
- export default RoundRectangle;
 
 
 
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/preprocess_v2.py DELETED
@@ -1,151 +0,0 @@
1
- import os
2
- import argparse
3
- import json
4
- if __name__ == "__main__":
5
- parser = argparse.ArgumentParser()
6
- parser.add_argument("--add_auxiliary_data", type=bool, help="Whether to add extra data as fine-tuning helper")
7
- parser.add_argument("--languages", default="CJE")
8
- args = parser.parse_args()
9
- if args.languages == "CJE":
10
- langs = ["[ZH]", "[JA]", "[EN]"]
11
- elif args.languages == "CJ":
12
- langs = ["[ZH]", "[JA]"]
13
- elif args.languages == "C":
14
- langs = ["[ZH]"]
15
- new_annos = []
16
- # Source 1: transcribed short audios
17
- if os.path.exists("short_character_anno.txt"):
18
- with open("short_character_anno.txt", 'r', encoding='utf-8') as f:
19
- short_character_anno = f.readlines()
20
- new_annos += short_character_anno
21
- # Source 2: transcribed long audio segments
22
- if os.path.exists("long_character_anno.txt"):
23
- with open("long_character_anno.txt", 'r', encoding='utf-8') as f:
24
- long_character_anno = f.readlines()
25
- new_annos += long_character_anno
26
-
27
- # Get all speaker names
28
- speakers = []
29
- for line in new_annos:
30
- path, speaker, text = line.split("|")
31
- if speaker not in speakers:
32
- speakers.append(speaker)
33
- assert (len(speakers) != 0), "No audio file found. Please check your uploaded file structure."
34
- # Source 3 (Optional): sampled audios as extra training helpers
35
- if args.add_auxiliary_data:
36
- with open("sampled_audio4ft.txt", 'r', encoding='utf-8') as f:
37
- old_annos = f.readlines()
38
- # filter old_annos according to supported languages
39
- filtered_old_annos = []
40
- for line in old_annos:
41
- for lang in langs:
42
- if lang in line:
43
- filtered_old_annos.append(line)
44
- old_annos = filtered_old_annos
45
- for line in old_annos:
46
- path, speaker, text = line.split("|")
47
- if speaker not in speakers:
48
- speakers.append(speaker)
49
- num_old_voices = len(old_annos)
50
- num_new_voices = len(new_annos)
51
- # STEP 1: balance number of new & old voices
52
- cc_duplicate = num_old_voices // num_new_voices
53
- if cc_duplicate == 0:
54
- cc_duplicate = 1
55
-
56
-
57
- # STEP 2: modify config file
58
- with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
59
- hps = json.load(f)
60
-
61
- # assign ids to new speakers
62
- speaker2id = {}
63
- for i, speaker in enumerate(speakers):
64
- speaker2id[speaker] = i
65
- # modify n_speakers
66
- hps['data']["n_speakers"] = len(speakers)
67
- # overwrite speaker names
68
- hps['speakers'] = speaker2id
69
- hps['train']['log_interval'] = 100
70
- hps['train']['eval_interval'] = 1000
71
- hps['train']['batch_size'] = 16
72
- hps['data']['training_files'] = "final_annotation_train.txt"
73
- hps['data']['validation_files'] = "final_annotation_val.txt"
74
- # save modified config
75
- with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
76
- json.dump(hps, f, indent=2)
77
-
78
- # STEP 3: clean annotations, replace speaker names with assigned speaker IDs
79
- import text
80
- cleaned_new_annos = []
81
- for i, line in enumerate(new_annos):
82
- path, speaker, txt = line.split("|")
83
- if len(txt) > 150:
84
- continue
85
- cleaned_text = text._clean_text(txt, hps['data']['text_cleaners'])
86
- cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
87
- cleaned_new_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
88
- cleaned_old_annos = []
89
- for i, line in enumerate(old_annos):
90
- path, speaker, txt = line.split("|")
91
- if len(txt) > 150:
92
- continue
93
- cleaned_text = text._clean_text(txt, hps['data']['text_cleaners'])
94
- cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
95
- cleaned_old_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
96
- # merge with old annotation
97
- final_annos = cleaned_old_annos + cc_duplicate * cleaned_new_annos
98
- # save annotation file
99
- with open("final_annotation_train.txt", 'w', encoding='utf-8') as f:
100
- for line in final_annos:
101
- f.write(line)
102
- # save annotation file for validation
103
- with open("final_annotation_val.txt", 'w', encoding='utf-8') as f:
104
- for line in cleaned_new_annos:
105
- f.write(line)
106
- print("finished")
107
- else:
108
- # Do not add extra helper data
109
- # STEP 1: modify config file
110
- with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
111
- hps = json.load(f)
112
-
113
- # assign ids to new speakers
114
- speaker2id = {}
115
- for i, speaker in enumerate(speakers):
116
- speaker2id[speaker] = i
117
- # modify n_speakers
118
- hps['data']["n_speakers"] = len(speakers)
119
- # overwrite speaker names
120
- hps['speakers'] = speaker2id
121
- hps['train']['log_interval'] = 10
122
- hps['train']['eval_interval'] = 100
123
- hps['train']['batch_size'] = 16
124
- hps['data']['training_files'] = "final_annotation_train.txt"
125
- hps['data']['validation_files'] = "final_annotation_val.txt"
126
- # save modified config
127
- with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
128
- json.dump(hps, f, indent=2)
129
-
130
- # STEP 2: clean annotations, replace speaker names with assigned speaker IDs
131
- import text
132
-
133
- cleaned_new_annos = []
134
- for i, line in enumerate(new_annos):
135
- path, speaker, txt = line.split("|")
136
- if len(txt) > 150:
137
- continue
138
- cleaned_text = text._clean_text(txt, hps['data']['text_cleaners']).replace("[ZH]", "")
139
- cleaned_text += "\n" if not cleaned_text.endswith("\n") else ""
140
- cleaned_new_annos.append(path + "|" + str(speaker2id[speaker]) + "|" + cleaned_text)
141
-
142
- final_annos = cleaned_new_annos
143
- # save annotation file
144
- with open("final_annotation_train.txt", 'w', encoding='utf-8') as f:
145
- for line in final_annos:
146
- f.write(line)
147
- # save annotation file for validation
148
- with open("final_annotation_val.txt", 'w', encoding='utf-8') as f:
149
- for line in cleaned_new_annos:
150
- f.write(line)
151
- print("finished")
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions_pfc.py DELETED
@@ -1,23 +0,0 @@
1
- from easydict import EasyDict as edict
2
-
3
- # configs for test speed
4
-
5
- config = edict()
6
- config.loss = "arcface"
7
- config.network = "r50"
8
- config.resume = False
9
- config.output = None
10
- config.embedding_size = 512
11
- config.sample_rate = 0.1
12
- config.fp16 = True
13
- config.momentum = 0.9
14
- config.weight_decay = 5e-4
15
- config.batch_size = 128
16
- config.lr = 0.1 # batch size is 512
17
-
18
- config.rec = "synthetic"
19
- config.num_classes = 300 * 10000
20
- config.num_epoch = 30
21
- config.warmup_epoch = -1
22
- config.decay_epoch = [10, 16, 22]
23
- config.val_targets = []
spaces/Amrrs/portfolio-github/index.html DELETED
@@ -1,108 +0,0 @@
1
- <!DOCTYPE html>
2
- <html>
3
- <head>
4
- <title>Welcome to 1littlecoder</title>
5
- <link href="https://fonts.googleapis.com/css2?family=Bellota&display=swap" rel="stylesheet">
6
- <link href="style.css" rel="stylesheet" type="text/css">
7
- </head>
8
- <body>
9
- <div id="header" class="section">
10
- <img alt="logo" class="img-circle" src="https://www.kindpng.com/picc/m/340-3408802_lego-batman-icon-lego-batman-png-transparent-png.png">
11
- <p>Welcome to 1littlecoder</p>
12
- </div>
13
- <div class="section">
14
- <h1><span>About Me</span></h1>
15
- <p> Hey! I'm <strong>1littlecoder</strong> from <strong>India.</strong>. I Like <strong>Coding</strong> R Python Data Science Machine Learning</p>
16
- <p> I love Hugging Face and here's my <a href="https://huggingface.co/Amrrs">profile</a>.</p>
17
- <p class="quote">~ 1littlecoder</p>
18
- </div>
19
- <div class="section" id="res">
20
- <h1><span>My Works</span></h1>
21
- <p align="centre"><strong>Here Are Some Of My Works</strong></p>
22
- <a href="https://telegram.me">
23
- <img src="https://img.icons8.com/nolan/144/telegram-app.png"/>
24
- <div class="caption">Telegram Channel</div>
25
- </a>
26
- <a href="https://github.com/amrrs">
27
- <img src="https://img.icons8.com/nolan/144/github.png"/>
28
- <div class="caption">Github Account</div>
29
- </a>
30
- <a href="https://1littlecoder.in">
31
- <img src="https://img.icons8.com/dusk/144/000000/domain.png"/>
32
- <div class="caption">My Website</div>
33
- </a>
34
- <br>
35
- <p align="centre"><strong>Resources I Use</strong></p>
36
- <a href="https://github.com/">
37
- <img src="https://img.icons8.com/nolan/144/github.png"/>
38
- <div class="caption">Github</div>
39
- </a>
40
- <a href="https://telegram.me">
41
- <img src="https://img.icons8.com/nolan/144/telegram-app.png"/>
42
- <div class="caption">Telegram</div>
43
- </a>
44
- <a href="https://code.visualstudio.com">
45
- <img src="https://img.icons8.com/nolan/144/code.png"/>
46
- <div class="caption">VS Code Editor</div>
47
- </a>
48
- <a href="https://python.org">
49
- <img src="https://img.icons8.com/nolan/144/python.png"/>
50
- <div class="caption">Python</div>
51
- </a>
52
- <a href="https://www.php.net/">
53
- <img src="https://img.icons8.com/dusk/144/000000/php-logo.png"/>
54
- <div class="caption">PHP</div>
55
- </a>
56
- <a href="https://ubuntu.com">
57
- <img src="https://img.icons8.com/color/144/000000/ubuntu--v1.png"/>
58
- <div class="caption">Ubuntu</div>
59
- </a>
60
- </div>
61
- <div class="section">
62
- <h1><span>My Skills</span></h1>
63
- <ul>
64
- <li>Python<br /> <progress min="0" max="100" value="95"></progress> </li>
65
- <li>PHP <br /> <progress min="0" max="100" value="75"></progress> </li>
66
- <li>Coding<br /> <progress min="0" max="100" value="100"></progress> </li>
67
- </ul>
68
- </div>
69
- <div class="section" id="contacts">
70
- <h1><span>Follow Me</span></h1>
71
- <div>
72
- <a href="https://instagram.com/" target="_blank">
73
- <img alt="Instagram" src="https://img.icons8.com/cute-clipart/100/instagram-new.png"/>
74
- </a>
75
- <a href="https://twitter.com/1littlecoder">
76
- <img alt="Twitter" src="https://www.sololearn.com/Uploads/icons/twitter.png" />
77
- </a>
78
- <a href="https://github.com/amrrs">
79
- <img alt="GitHub" src="https://img.icons8.com/nolan/144/github.png"/>
80
- </a>
81
- <a href="https://t.me/">
82
- <img alt="Telegram" src="https://img.icons8.com/fluent/96/000000/telegram-app.png"/>
83
- </a>
84
- <a href="https://www.youtube.com/channel/UCRD6WpNNzJpRIU4z89PNSbg">
85
- <img alt="YouTube" src="https://img.icons8.com/color/96/000000/youtube-play.png"/>
86
- </a>
87
- <a href="mailto:[email protected]">
88
- <img alt="Email" src="https://img.icons8.com/fluent/96/000000/gmail.png"/>
89
- </a>
90
- </div>
91
- </div>
92
- <div class="section" id="contacts">
93
- <h1><span>Contact Us</span></h1>
94
- <a href="mailto:[email protected]">
95
- <img src="https://img.icons8.com/fluent/95/000000/gmail--v2.png"/>
96
- </a>
97
- </div>
98
- <center>Made with ❤️ By <a href="https://github.com/amrrs">
99
- 1littlecoder
100
- </a></center>
101
-
102
- <script type="text/javascript">
103
- function search() {
104
- window.open('https://www.google.com/search?output=search&q=' + document.getElementById("question").value)
105
- }
106
- </script>
107
- </body>
108
- </html>
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py DELETED
@@ -1,1352 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
16
-
17
- import inspect
18
- import warnings
19
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import PIL.Image
23
- import torch
24
- import torch.nn.functional as F
25
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
26
-
27
- from ...image_processor import VaeImageProcessor
28
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
29
- from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
30
- from ...schedulers import KarrasDiffusionSchedulers
31
- from ...utils import (
32
- is_accelerate_available,
33
- is_accelerate_version,
34
- is_compiled_module,
35
- logging,
36
- randn_tensor,
37
- replace_example_docstring,
38
- )
39
- from ..pipeline_utils import DiffusionPipeline
40
- from ..stable_diffusion import StableDiffusionPipelineOutput
41
- from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
42
- from .multicontrolnet import MultiControlNetModel
43
-
44
-
45
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
46
-
47
-
48
- EXAMPLE_DOC_STRING = """
49
- Examples:
50
- ```py
51
- >>> # !pip install transformers accelerate
52
- >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
53
- >>> from diffusers.utils import load_image
54
- >>> import numpy as np
55
- >>> import torch
56
-
57
- >>> init_image = load_image(
58
- ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
59
- ... )
60
- >>> init_image = init_image.resize((512, 512))
61
-
62
- >>> generator = torch.Generator(device="cpu").manual_seed(1)
63
-
64
- >>> mask_image = load_image(
65
- ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
66
- ... )
67
- >>> mask_image = mask_image.resize((512, 512))
68
-
69
-
70
- >>> def make_inpaint_condition(image, image_mask):
71
- ... image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
72
- ... image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
73
-
74
- ... assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
75
- ... image[image_mask > 0.5] = -1.0 # set as masked pixel
76
- ... image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
77
- ... image = torch.from_numpy(image)
78
- ... return image
79
-
80
-
81
- >>> control_image = make_inpaint_condition(init_image, mask_image)
82
-
83
- >>> controlnet = ControlNetModel.from_pretrained(
84
- ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
85
- ... )
86
- >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
87
- ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
88
- ... )
89
-
90
- >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
91
- >>> pipe.enable_model_cpu_offload()
92
-
93
- >>> # generate image
94
- >>> image = pipe(
95
- ... "a handsome man with ray-ban sunglasses",
96
- ... num_inference_steps=20,
97
- ... generator=generator,
98
- ... eta=1.0,
99
- ... image=init_image,
100
- ... mask_image=mask_image,
101
- ... control_image=control_image,
102
- ... ).images[0]
103
- ```
104
- """
105
-
106
-
107
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image
108
- def prepare_mask_and_masked_image(image, mask, height, width, return_image=False):
109
- """
110
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
111
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
112
- ``image`` and ``1`` for the ``mask``.
113
-
114
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
115
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
116
-
117
- Args:
118
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
119
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
120
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
121
- mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
122
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
123
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
124
-
125
-
126
- Raises:
127
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
128
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
129
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
130
- (or the other way around).
131
-
132
- Returns:
133
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
134
- dimensions: ``batch x channels x height x width``.
135
- """
136
-
137
- if image is None:
138
- raise ValueError("`image` input cannot be undefined.")
139
-
140
- if mask is None:
141
- raise ValueError("`mask_image` input cannot be undefined.")
142
-
143
- if isinstance(image, torch.Tensor):
144
- if not isinstance(mask, torch.Tensor):
145
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")
146
-
147
- # Batch single image
148
- if image.ndim == 3:
149
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
150
- image = image.unsqueeze(0)
151
-
152
- # Batch and add channel dim for single mask
153
- if mask.ndim == 2:
154
- mask = mask.unsqueeze(0).unsqueeze(0)
155
-
156
- # Batch single mask or add channel dim
157
- if mask.ndim == 3:
158
- # Single batched mask, no channel dim or single mask not batched but channel dim
159
- if mask.shape[0] == 1:
160
- mask = mask.unsqueeze(0)
161
-
162
- # Batched masks no channel dim
163
- else:
164
- mask = mask.unsqueeze(1)
165
-
166
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
167
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
168
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
169
-
170
- # Check image is in [-1, 1]
171
- if image.min() < -1 or image.max() > 1:
172
- raise ValueError("Image should be in [-1, 1] range")
173
-
174
- # Check mask is in [0, 1]
175
- if mask.min() < 0 or mask.max() > 1:
176
- raise ValueError("Mask should be in [0, 1] range")
177
-
178
- # Binarize mask
179
- mask[mask < 0.5] = 0
180
- mask[mask >= 0.5] = 1
181
-
182
- # Image as float32
183
- image = image.to(dtype=torch.float32)
184
- elif isinstance(mask, torch.Tensor):
185
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
186
- else:
187
- # preprocess image
188
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
189
- image = [image]
190
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
191
- # resize all images w.r.t. the passed height and width
192
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
193
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
194
- image = np.concatenate(image, axis=0)
195
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
196
- image = np.concatenate([i[None, :] for i in image], axis=0)
197
-
198
- image = image.transpose(0, 3, 1, 2)
199
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
200
-
201
- # preprocess mask
202
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
203
- mask = [mask]
204
-
205
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
206
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
207
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
208
- mask = mask.astype(np.float32) / 255.0
209
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
210
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
211
-
212
- mask[mask < 0.5] = 0
213
- mask[mask >= 0.5] = 1
214
- mask = torch.from_numpy(mask)
215
-
216
- masked_image = image * (mask < 0.5)
217
-
218
- # n.b. ensure backwards compatibility as old function does not return image
219
- if return_image:
220
- return mask, masked_image, image
221
-
222
- return mask, masked_image
223
-
224
-
225
- class StableDiffusionControlNetInpaintPipeline(
226
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
227
- ):
228
- r"""
229
- Pipeline for image inpainting using Stable Diffusion with ControlNet guidance.
230
-
231
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
232
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
233
-
234
- In addition the pipeline inherits the following loading methods:
235
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
236
-
237
- <Tip>
238
-
239
- This pipeline can be used both with checkpoints that have been specifically fine-tuned for inpainting, such as
240
- [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)
241
- as well as default text-to-image stable diffusion checkpoints, such as
242
- [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5).
243
- Default text-to-image stable diffusion checkpoints might be preferable for controlnets that have been fine-tuned on
244
- those, such as [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint).
245
-
246
- </Tip>
247
-
248
- Args:
249
- vae ([`AutoencoderKL`]):
250
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
251
- text_encoder ([`CLIPTextModel`]):
252
- Frozen text-encoder. Stable Diffusion uses the text portion of
253
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
254
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
255
- tokenizer (`CLIPTokenizer`):
256
- Tokenizer of class
257
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
258
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
259
- controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
260
- Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
261
- as a list, the outputs from each ControlNet are added together to create one combined additional
262
- conditioning.
263
- scheduler ([`SchedulerMixin`]):
264
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
265
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
266
- safety_checker ([`StableDiffusionSafetyChecker`]):
267
- Classification module that estimates whether generated images could be considered offensive or harmful.
268
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
269
- feature_extractor ([`CLIPImageProcessor`]):
270
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
271
- """
272
- _optional_components = ["safety_checker", "feature_extractor"]
273
-
274
- def __init__(
275
- self,
276
- vae: AutoencoderKL,
277
- text_encoder: CLIPTextModel,
278
- tokenizer: CLIPTokenizer,
279
- unet: UNet2DConditionModel,
280
- controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
281
- scheduler: KarrasDiffusionSchedulers,
282
- safety_checker: StableDiffusionSafetyChecker,
283
- feature_extractor: CLIPImageProcessor,
284
- requires_safety_checker: bool = True,
285
- ):
286
- super().__init__()
287
-
288
- if safety_checker is None and requires_safety_checker:
289
- logger.warning(
290
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
291
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
292
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
293
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
294
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
295
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
296
- )
297
-
298
- if safety_checker is not None and feature_extractor is None:
299
- raise ValueError(
300
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
301
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
302
- )
303
-
304
- if isinstance(controlnet, (list, tuple)):
305
- controlnet = MultiControlNetModel(controlnet)
306
-
307
- self.register_modules(
308
- vae=vae,
309
- text_encoder=text_encoder,
310
- tokenizer=tokenizer,
311
- unet=unet,
312
- controlnet=controlnet,
313
- scheduler=scheduler,
314
- safety_checker=safety_checker,
315
- feature_extractor=feature_extractor,
316
- )
317
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
318
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
319
- self.control_image_processor = VaeImageProcessor(
320
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
321
- )
322
- self.register_to_config(requires_safety_checker=requires_safety_checker)
323
-
324
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
325
- def enable_vae_slicing(self):
326
- r"""
327
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
328
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
329
- """
330
- self.vae.enable_slicing()
331
-
332
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
333
- def disable_vae_slicing(self):
334
- r"""
335
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
336
- computing decoding in one step.
337
- """
338
- self.vae.disable_slicing()
339
-
340
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
341
- def enable_vae_tiling(self):
342
- r"""
343
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
344
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
345
- processing larger images.
346
- """
347
- self.vae.enable_tiling()
348
-
349
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
350
- def disable_vae_tiling(self):
351
- r"""
352
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
353
- computing decoding in one step.
354
- """
355
- self.vae.disable_tiling()
356
-
357
- def enable_model_cpu_offload(self, gpu_id=0):
358
- r"""
359
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
360
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
361
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
362
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
363
- """
364
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
365
- from accelerate import cpu_offload_with_hook
366
- else:
367
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
368
-
369
- device = torch.device(f"cuda:{gpu_id}")
370
-
371
- hook = None
372
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
373
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
374
-
375
- if self.safety_checker is not None:
376
- # the safety checker can offload the vae again
377
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
378
-
379
- # the controlnet hook has to be manually offloaded as it alternates with the unet
380
- cpu_offload_with_hook(self.controlnet, device)
381
-
382
- # We'll offload the last model manually.
383
- self.final_offload_hook = hook
384
-
385
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
386
- def _encode_prompt(
387
- self,
388
- prompt,
389
- device,
390
- num_images_per_prompt,
391
- do_classifier_free_guidance,
392
- negative_prompt=None,
393
- prompt_embeds: Optional[torch.FloatTensor] = None,
394
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
395
- lora_scale: Optional[float] = None,
396
- ):
397
- r"""
398
- Encodes the prompt into text encoder hidden states.
399
-
400
- Args:
401
- prompt (`str` or `List[str]`, *optional*):
402
- prompt to be encoded
403
- device: (`torch.device`):
404
- torch device
405
- num_images_per_prompt (`int`):
406
- number of images that should be generated per prompt
407
- do_classifier_free_guidance (`bool`):
408
- whether to use classifier free guidance or not
409
- negative_prompt (`str` or `List[str]`, *optional*):
410
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
411
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
412
- less than `1`).
413
- prompt_embeds (`torch.FloatTensor`, *optional*):
414
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
415
- provided, text embeddings will be generated from `prompt` input argument.
416
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
417
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
418
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
419
- argument.
420
- lora_scale (`float`, *optional*):
421
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
422
- """
423
- # set lora scale so that monkey patched LoRA
424
- # function of text encoder can correctly access it
425
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
426
- self._lora_scale = lora_scale
427
-
428
- if prompt is not None and isinstance(prompt, str):
429
- batch_size = 1
430
- elif prompt is not None and isinstance(prompt, list):
431
- batch_size = len(prompt)
432
- else:
433
- batch_size = prompt_embeds.shape[0]
434
-
435
- if prompt_embeds is None:
436
- # textual inversion: process multi-vector tokens if necessary
437
- if isinstance(self, TextualInversionLoaderMixin):
438
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
439
-
440
- text_inputs = self.tokenizer(
441
- prompt,
442
- padding="max_length",
443
- max_length=self.tokenizer.model_max_length,
444
- truncation=True,
445
- return_tensors="pt",
446
- )
447
- text_input_ids = text_inputs.input_ids
448
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
449
-
450
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
451
- text_input_ids, untruncated_ids
452
- ):
453
- removed_text = self.tokenizer.batch_decode(
454
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
455
- )
456
- logger.warning(
457
- "The following part of your input was truncated because CLIP can only handle sequences up to"
458
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
459
- )
460
-
461
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
462
- attention_mask = text_inputs.attention_mask.to(device)
463
- else:
464
- attention_mask = None
465
-
466
- prompt_embeds = self.text_encoder(
467
- text_input_ids.to(device),
468
- attention_mask=attention_mask,
469
- )
470
- prompt_embeds = prompt_embeds[0]
471
-
472
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
473
-
474
- bs_embed, seq_len, _ = prompt_embeds.shape
475
- # duplicate text embeddings for each generation per prompt, using mps friendly method
476
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
477
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
478
-
479
- # get unconditional embeddings for classifier free guidance
480
- if do_classifier_free_guidance and negative_prompt_embeds is None:
481
- uncond_tokens: List[str]
482
- if negative_prompt is None:
483
- uncond_tokens = [""] * batch_size
484
- elif prompt is not None and type(prompt) is not type(negative_prompt):
485
- raise TypeError(
486
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
487
- f" {type(prompt)}."
488
- )
489
- elif isinstance(negative_prompt, str):
490
- uncond_tokens = [negative_prompt]
491
- elif batch_size != len(negative_prompt):
492
- raise ValueError(
493
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
494
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
495
- " the batch size of `prompt`."
496
- )
497
- else:
498
- uncond_tokens = negative_prompt
499
-
500
- # textual inversion: process multi-vector tokens if necessary
501
- if isinstance(self, TextualInversionLoaderMixin):
502
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
503
-
504
- max_length = prompt_embeds.shape[1]
505
- uncond_input = self.tokenizer(
506
- uncond_tokens,
507
- padding="max_length",
508
- max_length=max_length,
509
- truncation=True,
510
- return_tensors="pt",
511
- )
512
-
513
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
514
- attention_mask = uncond_input.attention_mask.to(device)
515
- else:
516
- attention_mask = None
517
-
518
- negative_prompt_embeds = self.text_encoder(
519
- uncond_input.input_ids.to(device),
520
- attention_mask=attention_mask,
521
- )
522
- negative_prompt_embeds = negative_prompt_embeds[0]
523
-
524
- if do_classifier_free_guidance:
525
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
526
- seq_len = negative_prompt_embeds.shape[1]
527
-
528
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
529
-
530
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
531
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
532
-
533
- # For classifier free guidance, we need to do two forward passes.
534
- # Here we concatenate the unconditional and text embeddings into a single batch
535
- # to avoid doing two forward passes
536
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
537
-
538
- return prompt_embeds
539
-
540
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
541
- def run_safety_checker(self, image, device, dtype):
542
- if self.safety_checker is None:
543
- has_nsfw_concept = None
544
- else:
545
- if torch.is_tensor(image):
546
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
547
- else:
548
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
549
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
550
- image, has_nsfw_concept = self.safety_checker(
551
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
552
- )
553
- return image, has_nsfw_concept
554
-
555
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
556
- def decode_latents(self, latents):
557
- warnings.warn(
558
- "The decode_latents method is deprecated and will be removed in a future version. Please"
559
- " use VaeImageProcessor instead",
560
- FutureWarning,
561
- )
562
- latents = 1 / self.vae.config.scaling_factor * latents
563
- image = self.vae.decode(latents, return_dict=False)[0]
564
- image = (image / 2 + 0.5).clamp(0, 1)
565
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
566
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
567
- return image
568
-
569
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
570
- def prepare_extra_step_kwargs(self, generator, eta):
571
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
572
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
573
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
574
- # and should be between [0, 1]
575
-
576
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
577
- extra_step_kwargs = {}
578
- if accepts_eta:
579
- extra_step_kwargs["eta"] = eta
580
-
581
- # check if the scheduler accepts generator
582
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
583
- if accepts_generator:
584
- extra_step_kwargs["generator"] = generator
585
- return extra_step_kwargs
586
-
587
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
588
- def get_timesteps(self, num_inference_steps, strength, device):
589
- # get the original timestep using init_timestep
590
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
591
-
592
- t_start = max(num_inference_steps - init_timestep, 0)
593
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
594
-
595
- return timesteps, num_inference_steps - t_start
596
-
597
- def check_inputs(
598
- self,
599
- prompt,
600
- image,
601
- height,
602
- width,
603
- callback_steps,
604
- negative_prompt=None,
605
- prompt_embeds=None,
606
- negative_prompt_embeds=None,
607
- controlnet_conditioning_scale=1.0,
608
- control_guidance_start=0.0,
609
- control_guidance_end=1.0,
610
- ):
611
- if height % 8 != 0 or width % 8 != 0:
612
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
613
-
614
- if (callback_steps is None) or (
615
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
616
- ):
617
- raise ValueError(
618
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
619
- f" {type(callback_steps)}."
620
- )
621
-
622
- if prompt is not None and prompt_embeds is not None:
623
- raise ValueError(
624
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
625
- " only forward one of the two."
626
- )
627
- elif prompt is None and prompt_embeds is None:
628
- raise ValueError(
629
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
630
- )
631
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
632
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
633
-
634
- if negative_prompt is not None and negative_prompt_embeds is not None:
635
- raise ValueError(
636
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
637
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
638
- )
639
-
640
- if prompt_embeds is not None and negative_prompt_embeds is not None:
641
- if prompt_embeds.shape != negative_prompt_embeds.shape:
642
- raise ValueError(
643
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
644
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
645
- f" {negative_prompt_embeds.shape}."
646
- )
647
-
648
- # `prompt` needs more sophisticated handling when there are multiple
649
- # conditionings.
650
- if isinstance(self.controlnet, MultiControlNetModel):
651
- if isinstance(prompt, list):
652
- logger.warning(
653
- f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
654
- " prompts. The conditionings will be fixed across the prompts."
655
- )
656
-
657
- # Check `image`
658
- is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
659
- self.controlnet, torch._dynamo.eval_frame.OptimizedModule
660
- )
661
- if (
662
- isinstance(self.controlnet, ControlNetModel)
663
- or is_compiled
664
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
665
- ):
666
- self.check_image(image, prompt, prompt_embeds)
667
- elif (
668
- isinstance(self.controlnet, MultiControlNetModel)
669
- or is_compiled
670
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
671
- ):
672
- if not isinstance(image, list):
673
- raise TypeError("For multiple controlnets: `image` must be type `list`")
674
-
675
- # When `image` is a nested list:
676
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
677
- elif any(isinstance(i, list) for i in image):
678
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
679
- elif len(image) != len(self.controlnet.nets):
680
- raise ValueError(
681
- f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
682
- )
683
-
684
- for image_ in image:
685
- self.check_image(image_, prompt, prompt_embeds)
686
- else:
687
- assert False
688
-
689
- # Check `controlnet_conditioning_scale`
690
- if (
691
- isinstance(self.controlnet, ControlNetModel)
692
- or is_compiled
693
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
694
- ):
695
- if not isinstance(controlnet_conditioning_scale, float):
696
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
697
- elif (
698
- isinstance(self.controlnet, MultiControlNetModel)
699
- or is_compiled
700
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
701
- ):
702
- if isinstance(controlnet_conditioning_scale, list):
703
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
704
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
705
- elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
706
- self.controlnet.nets
707
- ):
708
- raise ValueError(
709
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
710
- " the same length as the number of controlnets"
711
- )
712
- else:
713
- assert False
714
-
715
- if len(control_guidance_start) != len(control_guidance_end):
716
- raise ValueError(
717
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
718
- )
719
-
720
- if isinstance(self.controlnet, MultiControlNetModel):
721
- if len(control_guidance_start) != len(self.controlnet.nets):
722
- raise ValueError(
723
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
724
- )
725
-
726
- for start, end in zip(control_guidance_start, control_guidance_end):
727
- if start >= end:
728
- raise ValueError(
729
- f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
730
- )
731
- if start < 0.0:
732
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
733
- if end > 1.0:
734
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
735
-
736
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
737
- def check_image(self, image, prompt, prompt_embeds):
738
- image_is_pil = isinstance(image, PIL.Image.Image)
739
- image_is_tensor = isinstance(image, torch.Tensor)
740
- image_is_np = isinstance(image, np.ndarray)
741
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
742
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
743
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
744
-
745
- if (
746
- not image_is_pil
747
- and not image_is_tensor
748
- and not image_is_np
749
- and not image_is_pil_list
750
- and not image_is_tensor_list
751
- and not image_is_np_list
752
- ):
753
- raise TypeError(
754
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
755
- )
756
-
757
- if image_is_pil:
758
- image_batch_size = 1
759
- else:
760
- image_batch_size = len(image)
761
-
762
- if prompt is not None and isinstance(prompt, str):
763
- prompt_batch_size = 1
764
- elif prompt is not None and isinstance(prompt, list):
765
- prompt_batch_size = len(prompt)
766
- elif prompt_embeds is not None:
767
- prompt_batch_size = prompt_embeds.shape[0]
768
-
769
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
770
- raise ValueError(
771
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
772
- )
773
-
774
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
775
- def prepare_control_image(
776
- self,
777
- image,
778
- width,
779
- height,
780
- batch_size,
781
- num_images_per_prompt,
782
- device,
783
- dtype,
784
- do_classifier_free_guidance=False,
785
- guess_mode=False,
786
- ):
787
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
788
- image_batch_size = image.shape[0]
789
-
790
- if image_batch_size == 1:
791
- repeat_by = batch_size
792
- else:
793
- # image batch size is the same as prompt batch size
794
- repeat_by = num_images_per_prompt
795
-
796
- image = image.repeat_interleave(repeat_by, dim=0)
797
-
798
- image = image.to(device=device, dtype=dtype)
799
-
800
- if do_classifier_free_guidance and not guess_mode:
801
- image = torch.cat([image] * 2)
802
-
803
- return image
804
-
805
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_latents
806
- def prepare_latents(
807
- self,
808
- batch_size,
809
- num_channels_latents,
810
- height,
811
- width,
812
- dtype,
813
- device,
814
- generator,
815
- latents=None,
816
- image=None,
817
- timestep=None,
818
- is_strength_max=True,
819
- return_noise=False,
820
- return_image_latents=False,
821
- ):
822
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
823
- if isinstance(generator, list) and len(generator) != batch_size:
824
- raise ValueError(
825
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
826
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
827
- )
828
-
829
- if (image is None or timestep is None) and not is_strength_max:
830
- raise ValueError(
831
- "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise. "
832
- "However, either the image or the noise timestep has not been provided."
833
- )
834
-
835
- if return_image_latents or (latents is None and not is_strength_max):
836
- image = image.to(device=device, dtype=dtype)
837
- image_latents = self._encode_vae_image(image=image, generator=generator)
838
-
839
- if latents is None:
840
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
841
- # if strength is 1. then initialise the latents to noise, else initial to image + noise
842
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
843
- # if pure noise then scale the initial latents by the Scheduler's init sigma
844
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
845
- else:
846
- noise = latents.to(device)
847
- latents = noise * self.scheduler.init_noise_sigma
848
-
849
- outputs = (latents,)
850
-
851
- if return_noise:
852
- outputs += (noise,)
853
-
854
- if return_image_latents:
855
- outputs += (image_latents,)
856
-
857
- return outputs
858
-
859
- def _default_height_width(self, height, width, image):
860
- # NOTE: It is possible that a list of images have different
861
- # dimensions for each image, so just checking the first image
862
- # is not _exactly_ correct, but it is simple.
863
- while isinstance(image, list):
864
- image = image[0]
865
-
866
- if height is None:
867
- if isinstance(image, PIL.Image.Image):
868
- height = image.height
869
- elif isinstance(image, torch.Tensor):
870
- height = image.shape[2]
871
-
872
- height = (height // 8) * 8 # round down to nearest multiple of 8
873
-
874
- if width is None:
875
- if isinstance(image, PIL.Image.Image):
876
- width = image.width
877
- elif isinstance(image, torch.Tensor):
878
- width = image.shape[3]
879
-
880
- width = (width // 8) * 8 # round down to nearest multiple of 8
881
-
882
- return height, width
883
-
884
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents
885
- def prepare_mask_latents(
886
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
887
- ):
888
- # resize the mask to latents shape as we concatenate the mask to the latents
889
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
890
- # and half precision
891
- mask = torch.nn.functional.interpolate(
892
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
893
- )
894
- mask = mask.to(device=device, dtype=dtype)
895
-
896
- masked_image = masked_image.to(device=device, dtype=dtype)
897
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
898
-
899
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
900
- if mask.shape[0] < batch_size:
901
- if not batch_size % mask.shape[0] == 0:
902
- raise ValueError(
903
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
904
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
905
- " of masks that you pass is divisible by the total requested batch size."
906
- )
907
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
908
- if masked_image_latents.shape[0] < batch_size:
909
- if not batch_size % masked_image_latents.shape[0] == 0:
910
- raise ValueError(
911
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
912
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
913
- " Make sure the number of images that you pass is divisible by the total requested batch size."
914
- )
915
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
916
-
917
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
918
- masked_image_latents = (
919
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
920
- )
921
-
922
- # aligning device to prevent device errors when concatenating it with the latent model input
923
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
924
- return mask, masked_image_latents
925
-
926
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image
927
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
928
- if isinstance(generator, list):
929
- image_latents = [
930
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
931
- for i in range(image.shape[0])
932
- ]
933
- image_latents = torch.cat(image_latents, dim=0)
934
- else:
935
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
936
-
937
- image_latents = self.vae.config.scaling_factor * image_latents
938
-
939
- return image_latents
940
-
941
- @torch.no_grad()
942
- @replace_example_docstring(EXAMPLE_DOC_STRING)
943
- def __call__(
944
- self,
945
- prompt: Union[str, List[str]] = None,
946
- image: Union[torch.Tensor, PIL.Image.Image] = None,
947
- mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
948
- control_image: Union[
949
- torch.FloatTensor,
950
- PIL.Image.Image,
951
- np.ndarray,
952
- List[torch.FloatTensor],
953
- List[PIL.Image.Image],
954
- List[np.ndarray],
955
- ] = None,
956
- height: Optional[int] = None,
957
- width: Optional[int] = None,
958
- strength: float = 1.0,
959
- num_inference_steps: int = 50,
960
- guidance_scale: float = 7.5,
961
- negative_prompt: Optional[Union[str, List[str]]] = None,
962
- num_images_per_prompt: Optional[int] = 1,
963
- eta: float = 0.0,
964
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
965
- latents: Optional[torch.FloatTensor] = None,
966
- prompt_embeds: Optional[torch.FloatTensor] = None,
967
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
968
- output_type: Optional[str] = "pil",
969
- return_dict: bool = True,
970
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
971
- callback_steps: int = 1,
972
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
973
- controlnet_conditioning_scale: Union[float, List[float]] = 0.5,
974
- guess_mode: bool = False,
975
- control_guidance_start: Union[float, List[float]] = 0.0,
976
- control_guidance_end: Union[float, List[float]] = 1.0,
977
- ):
978
- r"""
979
- Function invoked when calling the pipeline for generation.
980
-
981
- Args:
982
- prompt (`str` or `List[str]`, *optional*):
983
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
984
- instead.
985
- image (`torch.Tensor` or `PIL.Image.Image`):
986
- The initial image to be inpainted; it is used as the starting point for the denoising process.
987
- mask_image (`torch.Tensor` or `PIL.Image.Image`):
988
- The mask marking the regions of `image` to repaint: white pixels are inpainted, black pixels are preserved.
989
- control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
990
- `List[PIL.Image.Image]`, or `List[np.ndarray]`):
991
- The ControlNet input condition used to provide guidance to the unet for generation. If multiple ControlNets
992
- are specified in init, control images must be passed as a list so that each element can be batched correctly.
993
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
994
- The height in pixels of the generated image.
995
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
996
- The width in pixels of the generated image.
997
- strength (`float`, *optional*, defaults to 1.):
998
- Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
999
- between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
1000
- `strength`. The number of denoising steps depends on the amount of noise initially added. When
1001
- `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
1002
- iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
1003
- portion of the reference `image`.
1004
- num_inference_steps (`int`, *optional*, defaults to 50):
1005
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1006
- expense of slower inference.
1007
- guidance_scale (`float`, *optional*, defaults to 7.5):
1008
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1009
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1010
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1011
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1012
- usually at the expense of lower image quality.
1013
- negative_prompt (`str` or `List[str]`, *optional*):
1014
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
1015
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1016
- less than `1`).
1017
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1018
- The number of images to generate per prompt.
1019
- eta (`float`, *optional*, defaults to 0.0):
1020
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1021
- [`schedulers.DDIMScheduler`], will be ignored for others.
1022
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1023
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1024
- to make generation deterministic.
1025
- latents (`torch.FloatTensor`, *optional*):
1026
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1027
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1028
- tensor will be generated by sampling using the supplied random `generator`.
1029
- prompt_embeds (`torch.FloatTensor`, *optional*):
1030
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1031
- provided, text embeddings will be generated from `prompt` input argument.
1032
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1033
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1034
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1035
- argument.
1036
- output_type (`str`, *optional*, defaults to `"pil"`):
1037
- The output format of the generated image. Choose between
1038
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1039
- return_dict (`bool`, *optional*, defaults to `True`):
1040
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1041
- plain tuple.
1042
- callback (`Callable`, *optional*):
1043
- A function that will be called every `callback_steps` steps during inference. The function will be
1044
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1045
- callback_steps (`int`, *optional*, defaults to 1):
1046
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1047
- called at every step.
1048
- cross_attention_kwargs (`dict`, *optional*):
1049
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1050
- `self.processor` in
1051
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1052
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.5):
1053
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
1054
- to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
1055
- corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
1056
- than for [`~StableDiffusionControlNetPipeline.__call__`].
1057
- guess_mode (`bool`, *optional*, defaults to `False`):
1058
- In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
1059
- you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
1060
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1061
- The percentage of total steps at which the controlnet starts applying.
1062
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1063
- The percentage of total steps at which the controlnet stops applying.
1064
-
1065
- Examples:
1066
-
1067
- Returns:
1068
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1069
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1070
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1071
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1072
- (nsfw) content, according to the `safety_checker`.
1073
- """
1074
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1075
-
1076
- # 0. Default height and width to unet
1077
- height, width = self._default_height_width(height, width, image)
1078
-
1079
- # align format for control guidance
1080
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1081
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1082
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1083
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
- control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
- control_guidance_end
- ]
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt,
- control_image,
- height,
- width,
- callback_steps,
- negative_prompt,
- prompt_embeds,
- negative_prompt_embeds,
- controlnet_conditioning_scale,
- control_guidance_start,
- control_guidance_end,
- )
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
-
- global_pool_conditions = (
- controlnet.config.global_pool_conditions
- if isinstance(controlnet, ControlNetModel)
- else controlnet.nets[0].config.global_pool_conditions
- )
- guess_mode = guess_mode or global_pool_conditions
-
- # 3. Encode input prompt
- text_encoder_lora_scale = (
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
- )
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- lora_scale=text_encoder_lora_scale,
- )
-
- # 4. Prepare image
- if isinstance(controlnet, ControlNetModel):
- control_image = self.prepare_control_image(
- image=control_image,
- width=width,
- height=height,
- batch_size=batch_size * num_images_per_prompt,
- num_images_per_prompt=num_images_per_prompt,
- device=device,
- dtype=controlnet.dtype,
- do_classifier_free_guidance=do_classifier_free_guidance,
- guess_mode=guess_mode,
- )
- elif isinstance(controlnet, MultiControlNetModel):
- control_images = []
-
- for control_image_ in control_image:
- control_image_ = self.prepare_control_image(
- image=control_image_,
- width=width,
- height=height,
- batch_size=batch_size * num_images_per_prompt,
- num_images_per_prompt=num_images_per_prompt,
- device=device,
- dtype=controlnet.dtype,
- do_classifier_free_guidance=do_classifier_free_guidance,
- guess_mode=guess_mode,
- )
-
- control_images.append(control_image_)
-
- control_image = control_images
- else:
- assert False
-
- # 4. Preprocess mask and image - resizes image and mask w.r.t height and width
- mask, masked_image, init_image = prepare_mask_and_masked_image(
- image, mask_image, height, width, return_image=True
- )
-
- # 5. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps, num_inference_steps = self.get_timesteps(
- num_inference_steps=num_inference_steps, strength=strength, device=device
- )
- # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
- # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
- is_strength_max = strength == 1.0
-
- # 6. Prepare latent variables
- num_channels_latents = self.vae.config.latent_channels
- num_channels_unet = self.unet.config.in_channels
- return_image_latents = num_channels_unet == 4
- latents_outputs = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- latents,
- image=init_image,
- timestep=latent_timestep,
- is_strength_max=is_strength_max,
- return_noise=True,
- return_image_latents=return_image_latents,
- )
-
- if return_image_latents:
- latents, noise, image_latents = latents_outputs
- else:
- latents, noise = latents_outputs
-
- # 7. Prepare mask latent variables
- mask, masked_image_latents = self.prepare_mask_latents(
- mask,
- masked_image,
- batch_size * num_images_per_prompt,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- do_classifier_free_guidance,
- )
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 7.1 Create tensor stating which controlnets to keep
- controlnet_keep = []
- for i in range(len(timesteps)):
- keeps = [
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
- for s, e in zip(control_guidance_start, control_guidance_end)
- ]
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
-
- # 8. Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # controlnet(s) inference
- if guess_mode and do_classifier_free_guidance:
- # Infer ControlNet only for the conditional batch.
- control_model_input = latents
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
- else:
- control_model_input = latent_model_input
- controlnet_prompt_embeds = prompt_embeds
-
- if isinstance(controlnet_keep[i], list):
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
- else:
- cond_scale = controlnet_conditioning_scale * controlnet_keep[i]
-
- down_block_res_samples, mid_block_res_sample = self.controlnet(
- control_model_input,
- t,
- encoder_hidden_states=controlnet_prompt_embeds,
- controlnet_cond=control_image,
- conditioning_scale=cond_scale,
- guess_mode=guess_mode,
- return_dict=False,
- )
-
- if guess_mode and do_classifier_free_guidance:
- # Infered ControlNet only for the conditional batch.
- # To apply the output of ControlNet to both the unconditional and conditional batches,
- # add 0 to the unconditional batch to keep it unchanged.
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
- mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
-
- # predict the noise residual
- if num_channels_unet == 9:
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
-
- noise_pred = self.unet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- down_block_additional_residuals=down_block_res_samples,
- mid_block_additional_residual=mid_block_res_sample,
- return_dict=False,
- )[0]
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
- if num_channels_unet == 4:
- init_latents_proper = image_latents[:1]
- init_mask = mask[:1]
-
- if i < len(timesteps) - 1:
- noise_timestep = timesteps[i + 1]
- init_latents_proper = self.scheduler.add_noise(
- init_latents_proper, noise, torch.tensor([noise_timestep])
- )
-
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # If we do sequential model offloading, let's offload unet and controlnet
- # manually for max memory savings
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.unet.to("cpu")
- self.controlnet.to("cpu")
- torch.cuda.empty_cache()
-
- if not output_type == "latent":
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
- else:
- image = latents
- has_nsfw_concept = None
-
- if has_nsfw_concept is None:
- do_denormalize = [True] * image.shape[0]
- else:
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
-
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
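The deleted block above is the tail of a ControlNet-inpainting `__call__` loop: ControlNet residuals are zeroed for the unconditional batch in guess mode, and the 4-channel UNet path re-blends the known pixels after every scheduler step. A minimal usage sketch, not part of this commit; the checkpoint IDs, file names, and the way the control image is prepared are assumptions that depend on which ControlNet is paired with the inpainting model:

```python
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline

# Example checkpoints only; any SD-1.5 inpainting model plus a matching ControlNet can be substituted.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("room.png").convert("RGB").resize((512, 512))  # hypothetical inputs
mask = Image.open("mask.png").convert("L").resize((512, 512))          # white = region to repaint
control = Image.open("canny.png").convert("RGB").resize((512, 512))    # hint matching the ControlNet type

result = pipe(
    prompt="a wooden bookshelf",
    image=init_image,
    mask_image=mask,
    control_image=control,
    num_inference_steps=30,
).images[0]
result.save("inpainted.png")
```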
 
 
spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
- model = dict(
- backbone=dict(
- dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
- stage_with_dcn=(False, True, True, True)))
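For context, a sketch (not in the commit) of how a config like this is consumed: the `_base_` file is merged first, then the `dcn`/`stage_with_dcn` keys override the ResNet backbone settings. The relative path below is an assumption based on the repo layout.

```python
from mmcv import Config  # mmcv<2.0 style Config, as used by this MMDetection fork

cfg = Config.fromfile("configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py")  # assumed path
print(cfg.model.backbone.dcn)             # {'type': 'DCN', 'deform_groups': 1, 'fallback_on_stride': False}
print(cfg.model.backbone.stage_with_dcn)  # (False, True, True, True)
```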
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/__init__.py DELETED
@@ -1,16 +0,0 @@
- from .bfp import BFP
- from .channel_mapper import ChannelMapper
- from .fpg import FPG
- from .fpn import FPN
- from .fpn_carafe import FPN_CARAFE
- from .hrfpn import HRFPN
- from .nas_fpn import NASFPN
- from .nasfcos_fpn import NASFCOS_FPN
- from .pafpn import PAFPN
- from .rfp import RFP
- from .yolo_neck import YOLOV3Neck
-
- __all__ = [
- 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
- 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG'
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/gmflow_module/utils/flow_viz.py DELETED
@@ -1,291 +0,0 @@
1
- # MIT License
2
- #
3
- # Copyright (c) 2018 Tom Runia
4
- #
5
- # Permission is hereby granted, free of charge, to any person obtaining a copy
6
- # of this software and associated documentation files (the "Software"), to deal
7
- # in the Software without restriction, including without limitation the rights
8
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- # copies of the Software, and to permit persons to whom the Software is
10
- # furnished to do so, subject to conditions.
11
- #
12
- # Author: Tom Runia
13
- # Date Created: 2018-08-03
14
-
15
- from __future__ import absolute_import
16
- from __future__ import division
17
- from __future__ import print_function
18
-
19
- import numpy as np
20
-
21
-
22
- def make_colorwheel():
23
- '''
24
- Generates a color wheel for optical flow visualization as presented in:
25
- Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
26
- URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
27
- According to the C++ source code of Daniel Scharstein
28
- According to the Matlab source code of Deqing Sun
29
- '''
30
-
31
- RY = 15
32
- YG = 6
33
- GC = 4
34
- CB = 11
35
- BM = 13
36
- MR = 6
37
-
38
- ncols = RY + YG + GC + CB + BM + MR
39
- colorwheel = np.zeros((ncols, 3))
40
- col = 0
41
-
42
- # RY
43
- colorwheel[0:RY, 0] = 255
44
- colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)
45
- col = col + RY
46
- # YG
47
- colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)
48
- colorwheel[col:col + YG, 1] = 255
49
- col = col + YG
50
- # GC
51
- colorwheel[col:col + GC, 1] = 255
52
- colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)
53
- col = col + GC
54
- # CB
55
- colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)
56
- colorwheel[col:col + CB, 2] = 255
57
- col = col + CB
58
- # BM
59
- colorwheel[col:col + BM, 2] = 255
60
- colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)
61
- col = col + BM
62
- # MR
63
- colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)
64
- colorwheel[col:col + MR, 0] = 255
65
- return colorwheel
66
-
67
-
68
- def flow_compute_color(u, v, convert_to_bgr=False):
69
- '''
70
- Applies the flow color wheel to (possibly clipped) flow components u and v.
71
- According to the C++ source code of Daniel Scharstein
72
- According to the Matlab source code of Deqing Sun
73
- :param u: np.ndarray, input horizontal flow
74
- :param v: np.ndarray, input vertical flow
75
- :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
76
- :return:
77
- '''
78
-
79
- flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
80
-
81
- colorwheel = make_colorwheel() # shape [55x3]
82
- ncols = colorwheel.shape[0]
83
-
84
- rad = np.sqrt(np.square(u) + np.square(v))
85
- a = np.arctan2(-v, -u) / np.pi
86
-
87
- fk = (a + 1) / 2 * (ncols - 1) + 1
88
- k0 = np.floor(fk).astype(np.int32)
89
- k1 = k0 + 1
90
- k1[k1 == ncols] = 1
91
- f = fk - k0
92
-
93
- for i in range(colorwheel.shape[1]):
94
- tmp = colorwheel[:, i]
95
- col0 = tmp[k0] / 255.0
96
- col1 = tmp[k1] / 255.0
97
- col = (1 - f) * col0 + f * col1
98
-
99
- idx = (rad <= 1)
100
- col[idx] = 1 - rad[idx] * (1 - col[idx])
101
- col[~idx] = col[~idx] * 0.75 # out of range?
102
-
103
- # Note the 2-i => BGR instead of RGB
104
- ch_idx = 2 - i if convert_to_bgr else i
105
- flow_image[:, :, ch_idx] = np.floor(255 * col)
106
-
107
- return flow_image
108
-
109
-
110
- def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
111
- '''
112
- Expects a two dimensional flow image of shape [H,W,2]
113
- According to the C++ source code of Daniel Scharstein
114
- According to the Matlab source code of Deqing Sun
115
- :param flow_uv: np.ndarray of shape [H,W,2]
116
- :param clip_flow: float, maximum clipping value for flow
117
- :return:
118
- '''
119
-
120
- assert flow_uv.ndim == 3, 'input flow must have three dimensions'
121
- assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
122
-
123
- if clip_flow is not None:
124
- flow_uv = np.clip(flow_uv, 0, clip_flow)
125
-
126
- u = flow_uv[:, :, 0]
127
- v = flow_uv[:, :, 1]
128
-
129
- rad = np.sqrt(np.square(u) + np.square(v))
130
- rad_max = np.max(rad)
131
-
132
- epsilon = 1e-5
133
- u = u / (rad_max + epsilon)
134
- v = v / (rad_max + epsilon)
135
-
136
- return flow_compute_color(u, v, convert_to_bgr)
137
-
138
-
139
- UNKNOWN_FLOW_THRESH = 1e7
140
- SMALLFLOW = 0.0
141
- LARGEFLOW = 1e8
142
-
143
-
144
- def make_color_wheel():
145
- """
146
- Generate color wheel according Middlebury color code
147
- :return: Color wheel
148
- """
149
- RY = 15
150
- YG = 6
151
- GC = 4
152
- CB = 11
153
- BM = 13
154
- MR = 6
155
-
156
- ncols = RY + YG + GC + CB + BM + MR
157
-
158
- colorwheel = np.zeros([ncols, 3])
159
-
160
- col = 0
161
-
162
- # RY
163
- colorwheel[0:RY, 0] = 255
164
- colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))
165
- col += RY
166
-
167
- # YG
168
- colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(255 * np.arange(0, YG) / YG))
169
- colorwheel[col:col + YG, 1] = 255
170
- col += YG
171
-
172
- # GC
173
- colorwheel[col:col + GC, 1] = 255
174
- colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC))
175
- col += GC
176
-
177
- # CB
178
- colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, CB) / CB))
179
- colorwheel[col:col + CB, 2] = 255
180
- col += CB
181
-
182
- # BM
183
- colorwheel[col:col + BM, 2] = 255
184
- colorwheel[col:col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM))
185
- col += + BM
186
-
187
- # MR
188
- colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
189
- colorwheel[col:col + MR, 0] = 255
190
-
191
- return colorwheel
192
-
193
-
194
- def compute_color(u, v):
195
- """
196
- compute optical flow color map
197
- :param u: optical flow horizontal map
198
- :param v: optical flow vertical map
199
- :return: optical flow in color code
200
- """
201
- [h, w] = u.shape
202
- img = np.zeros([h, w, 3])
203
- nanIdx = np.isnan(u) | np.isnan(v)
204
- u[nanIdx] = 0
205
- v[nanIdx] = 0
206
-
207
- colorwheel = make_color_wheel()
208
- ncols = np.size(colorwheel, 0)
209
-
210
- rad = np.sqrt(u ** 2 + v ** 2)
211
-
212
- a = np.arctan2(-v, -u) / np.pi
213
-
214
- fk = (a + 1) / 2 * (ncols - 1) + 1
215
-
216
- k0 = np.floor(fk).astype(int)
217
-
218
- k1 = k0 + 1
219
- k1[k1 == ncols + 1] = 1
220
- f = fk - k0
221
-
222
- for i in range(0, np.size(colorwheel, 1)):
223
- tmp = colorwheel[:, i]
224
- col0 = tmp[k0 - 1] / 255
225
- col1 = tmp[k1 - 1] / 255
226
- col = (1 - f) * col0 + f * col1
227
-
228
- idx = rad <= 1
229
- col[idx] = 1 - rad[idx] * (1 - col[idx])
230
- notidx = np.logical_not(idx)
231
-
232
- col[notidx] *= 0.75
233
- img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))
234
-
235
- return img
236
-
237
-
238
- # from https://github.com/gengshan-y/VCN
239
- def flow_to_image(flow):
240
- """
241
- Convert flow into middlebury color code image
242
- :param flow: optical flow map
243
- :return: optical flow image in middlebury color
244
- """
245
- u = flow[:, :, 0]
246
- v = flow[:, :, 1]
247
-
248
- maxu = -999.
249
- maxv = -999.
250
- minu = 999.
251
- minv = 999.
252
-
253
- idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
254
- u[idxUnknow] = 0
255
- v[idxUnknow] = 0
256
-
257
- maxu = max(maxu, np.max(u))
258
- minu = min(minu, np.min(u))
259
-
260
- maxv = max(maxv, np.max(v))
261
- minv = min(minv, np.min(v))
262
-
263
- rad = np.sqrt(u ** 2 + v ** 2)
264
- maxrad = max(-1, np.max(rad))
265
-
266
- u = u / (maxrad + np.finfo(float).eps)
267
- v = v / (maxrad + np.finfo(float).eps)
268
-
269
- img = compute_color(u, v)
270
-
271
- idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
272
- img[idx] = 0
273
-
274
- return np.uint8(img)
275
-
276
-
277
- def save_vis_flow_tofile(flow, output_path):
278
- vis_flow = flow_to_image(flow)
279
- from PIL import Image
280
- img = Image.fromarray(vis_flow)
281
- img.save(output_path)
282
-
283
-
284
- def flow_tensor_to_image(flow):
285
- """Used for tensorboard visualization"""
286
- flow = flow.permute(1, 2, 0) # [H, W, 2]
287
- flow = flow.detach().cpu().numpy()
288
- flow = flow_to_image(flow) # [H, W, 3]
289
- flow = np.transpose(flow, (2, 0, 1)) # [3, H, W]
290
-
291
- return flow
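A small usage sketch for the flow-visualization helpers above (not part of the commit; it assumes the deleted module is importable as `utils.flow_viz`):

```python
import numpy as np
from utils.flow_viz import flow_to_image, save_vis_flow_tofile  # assumed import path

h, w = 64, 64
ys, xs = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
flow = np.stack([xs - w / 2.0, ys - h / 2.0], axis=-1).astype(np.float32)  # radial flow, [H, W, 2]

rgb = flow_to_image(flow)               # Middlebury color coding, uint8 [H, W, 3]
save_vis_flow_tofile(flow, "flow.png")  # same coding, written out via PIL
```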
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/app.py DELETED
@@ -1,50 +0,0 @@
- import gradio as gr
-
- from video_diffusion.damo.damo_text2_video import DamoText2VideoGenerator
- from video_diffusion.inpaint_zoom.zoom_in_app import StableDiffusionZoomIn
- from video_diffusion.inpaint_zoom.zoom_out_app import StableDiffusionZoomOut
- from video_diffusion.stable_diffusion_video.stable_video_text2video import StableDiffusionText2VideoGenerator
- from video_diffusion.tuneavideo.tuneavideo_text2video import TunaVideoText2VideoGenerator
- from video_diffusion.zero_shot.zero_shot_text2video import ZeroShotText2VideoGenerator
-
-
- def diffusion_app():
- app = gr.Blocks()
- with app:
- gr.HTML(
- """
- <h1 style='text-align: center'>
- Video Diffusion WebUI
- </h1>
- """
- )
- gr.HTML(
- """
- <h3 style='text-align: center'>
- Follow me for more!
- <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a>
- </h3>
- """
- )
- with gr.Row():
- with gr.Column():
- with gr.Tab("Stable Diffusion Video"):
- StableDiffusionText2VideoGenerator.app()
- with gr.Tab("Tune-a-Video"):
- TunaVideoText2VideoGenerator.app()
- with gr.Tab("Stable Infinite Zoom"):
- with gr.Tab("Zoom In"):
- StableDiffusionZoomIn.app()
- with gr.Tab("Zoom Out"):
- StableDiffusionZoomOut.app()
- with gr.Tab("Damo Text2Video"):
- DamoText2VideoGenerator.app()
- with gr.Tab("Zero Shot Text2Video"):
- ZeroShotText2VideoGenerator.app()
-
- app.queue(concurrency_count=1)
- app.launch(debug=True, enable_queue=True)
-
-
- if __name__ == "__main__":
- diffusion_app()
 
 
spaces/ArtyomKhyan/Detection/utils/activations.py DELETED
@@ -1,63 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.nn as nn
-
-
- # Swish -------non_max_suppression-----------------------------------------------------------------
- class SwishImplementation(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return x * torch.sigmoid(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- sx = torch.sigmoid(x)
- return grad_output * (sx * (1 + x * (1 - sx)))
-
-
- class MemoryEfficientSwish(nn.Module):
- @staticmethod
- def forward(x):
- return SwishImplementation.apply(x)
-
-
- class HardSwish(nn.Module): # https://arxiv.org/pdf/1905.02244.pdf
- @staticmethod
- def forward(x):
- return x * F.hardtanh(x + 3, 0., 6., True) / 6.
-
-
- class Swish(nn.Module):
- @staticmethod
- def forward(x):
- return x * torch.sigmoid(x)
-
-
- # Mish ------------------------------------------------------------------------
- class MishImplementation(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- sx = torch.sigmoid(x)
- fx = F.softplus(x).tanh()
- return grad_output * (fx + x * sx * (1 - fx * fx))
-
-
- class MemoryEfficientMish(nn.Module):
- @staticmethod
- def forward(x):
- return MishImplementation.apply(x)
-
-
- class Mish(nn.Module): # https://github.com/digantamisra98/Mish
- @staticmethod
- def forward(x):
- return x * F.softplus(x).tanh()
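The two `*Implementation` autograd functions above recompute the activation in `backward` instead of storing intermediates. A quick sketch (not from the commit) that the hand-written Mish derivative should reproduce the autograd gradient of the plain formula:

```python
import torch
import torch.nn.functional as F

def mish(x):
    # plain Mish: x * tanh(softplus(x)); MishImplementation.backward is meant to match its gradient
    return x * torch.tanh(F.softplus(x))

x = torch.randn(8, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(mish, (x,)))  # True when analytic and numeric gradients agree
```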
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/lazy.py DELETED
@@ -1,399 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import ast
3
- import builtins
4
- import importlib
5
- import inspect
6
- import logging
7
- import os
8
- import uuid
9
- from collections import abc
10
- from contextlib import contextmanager
11
- from copy import deepcopy
12
- from dataclasses import is_dataclass
13
- from typing import List, Tuple, Union
14
- import cloudpickle
15
- import yaml
16
- from omegaconf import DictConfig, ListConfig, OmegaConf
17
-
18
- from detectron2.utils.file_io import PathManager
19
- from detectron2.utils.registry import _convert_target_to_string
20
-
21
- __all__ = ["LazyCall", "LazyConfig"]
22
-
23
-
24
- class LazyCall:
25
- """
26
- Wrap a callable so that when it's called, the call will not be executed,
27
- but returns a dict that describes the call.
28
-
29
- LazyCall object has to be called with only keyword arguments. Positional
30
- arguments are not yet supported.
31
-
32
- Examples:
33
- ::
34
- from detectron2.config import instantiate, LazyCall
35
-
36
- layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
37
- layer_cfg.out_channels = 64 # can edit it afterwards
38
- layer = instantiate(layer_cfg)
39
- """
40
-
41
- def __init__(self, target):
42
- if not (callable(target) or isinstance(target, (str, abc.Mapping))):
43
- raise TypeError(
44
- f"target of LazyCall must be a callable or defines a callable! Got {target}"
45
- )
46
- self._target = target
47
-
48
- def __call__(self, **kwargs):
49
- if is_dataclass(self._target):
50
- # omegaconf object cannot hold dataclass type
51
- # https://github.com/omry/omegaconf/issues/784
52
- target = _convert_target_to_string(self._target)
53
- else:
54
- target = self._target
55
- kwargs["_target_"] = target
56
-
57
- return DictConfig(content=kwargs, flags={"allow_objects": True})
58
-
59
-
60
- def _visit_dict_config(cfg, func):
61
- """
62
- Apply func recursively to all DictConfig in cfg.
63
- """
64
- if isinstance(cfg, DictConfig):
65
- func(cfg)
66
- for v in cfg.values():
67
- _visit_dict_config(v, func)
68
- elif isinstance(cfg, ListConfig):
69
- for v in cfg:
70
- _visit_dict_config(v, func)
71
-
72
-
73
- def _validate_py_syntax(filename):
74
- # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
75
- with PathManager.open(filename, "r") as f:
76
- content = f.read()
77
- try:
78
- ast.parse(content)
79
- except SyntaxError as e:
80
- raise SyntaxError(f"Config file {filename} has syntax error!") from e
81
-
82
-
83
- def _cast_to_config(obj):
84
- # if given a dict, return DictConfig instead
85
- if isinstance(obj, dict):
86
- return DictConfig(obj, flags={"allow_objects": True})
87
- return obj
88
-
89
-
90
- _CFG_PACKAGE_NAME = "detectron2._cfg_loader"
91
- """
92
- A namespace to put all imported config into.
93
- """
94
-
95
-
96
- def _random_package_name(filename):
97
- # generate a random package name when loading config files
98
- return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
99
-
100
-
101
- @contextmanager
102
- def _patch_import():
103
- """
104
- Enhance relative import statements in config files, so that they:
105
- 1. locate files purely based on relative location, regardless of packages.
106
- e.g. you can import file without having __init__
107
- 2. do not cache modules globally; modifications of module states has no side effect
108
- 3. support other storage system through PathManager
109
- 4. imported dict are turned into omegaconf.DictConfig automatically
110
- """
111
- old_import = builtins.__import__
112
-
113
- def find_relative_file(original_file, relative_import_path, level):
114
- cur_file = os.path.dirname(original_file)
115
- for _ in range(level - 1):
116
- cur_file = os.path.dirname(cur_file)
117
- cur_name = relative_import_path.lstrip(".")
118
- for part in cur_name.split("."):
119
- cur_file = os.path.join(cur_file, part)
120
- # NOTE: directory import is not handled. Because then it's unclear
121
- # if such import should produce python module or DictConfig. This can
122
- # be discussed further if needed.
123
- if not cur_file.endswith(".py"):
124
- cur_file += ".py"
125
- if not PathManager.isfile(cur_file):
126
- raise ImportError(
127
- f"Cannot import name {relative_import_path} from "
128
- f"{original_file}: {cur_file} has to exist."
129
- )
130
- return cur_file
131
-
132
- def new_import(name, globals=None, locals=None, fromlist=(), level=0):
133
- if (
134
- # Only deal with relative imports inside config files
135
- level != 0
136
- and globals is not None
137
- and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
138
- ):
139
- cur_file = find_relative_file(globals["__file__"], name, level)
140
- _validate_py_syntax(cur_file)
141
- spec = importlib.machinery.ModuleSpec(
142
- _random_package_name(cur_file), None, origin=cur_file
143
- )
144
- module = importlib.util.module_from_spec(spec)
145
- module.__file__ = cur_file
146
- with PathManager.open(cur_file) as f:
147
- content = f.read()
148
- exec(compile(content, cur_file, "exec"), module.__dict__)
149
- for name in fromlist: # turn imported dict into DictConfig automatically
150
- val = _cast_to_config(module.__dict__[name])
151
- module.__dict__[name] = val
152
- return module
153
- return old_import(name, globals, locals, fromlist=fromlist, level=level)
154
-
155
- builtins.__import__ = new_import
156
- yield new_import
157
- builtins.__import__ = old_import
158
-
159
-
160
- class LazyConfig:
161
- """
162
- Provide methods to save, load, and overrides an omegaconf config object
163
- which may contain definition of lazily-constructed objects.
164
- """
165
-
166
- @staticmethod
167
- def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
168
- """
169
- Similar to :meth:`load()`, but load path relative to the caller's
170
- source file.
171
-
172
- This has the same functionality as a relative import, except that this method
173
- accepts filename as a string, so more characters are allowed in the filename.
174
- """
175
- caller_frame = inspect.stack()[1]
176
- caller_fname = caller_frame[0].f_code.co_filename
177
- assert caller_fname != "<string>", "load_rel Unable to find caller"
178
- caller_dir = os.path.dirname(caller_fname)
179
- filename = os.path.join(caller_dir, filename)
180
- return LazyConfig.load(filename, keys)
181
-
182
- @staticmethod
183
- def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
184
- """
185
- Load a config file.
186
-
187
- Args:
188
- filename: absolute path or relative path w.r.t. the current working directory
189
- keys: keys to load and return. If not given, return all keys
190
- (whose values are config objects) in a dict.
191
- """
192
- has_keys = keys is not None
193
- filename = filename.replace("/./", "/") # redundant
194
- if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
195
- raise ValueError(f"Config file {filename} has to be a python or yaml file.")
196
- if filename.endswith(".py"):
197
- _validate_py_syntax(filename)
198
-
199
- with _patch_import():
200
- # Record the filename
201
- module_namespace = {
202
- "__file__": filename,
203
- "__package__": _random_package_name(filename),
204
- }
205
- with PathManager.open(filename) as f:
206
- content = f.read()
207
- # Compile first with filename to:
208
- # 1. make filename appears in stacktrace
209
- # 2. make load_rel able to find its parent's (possibly remote) location
210
- exec(compile(content, filename, "exec"), module_namespace)
211
-
212
- ret = module_namespace
213
- else:
214
- with PathManager.open(filename) as f:
215
- obj = yaml.unsafe_load(f)
216
- ret = OmegaConf.create(obj, flags={"allow_objects": True})
217
-
218
- if has_keys:
219
- if isinstance(keys, str):
220
- return _cast_to_config(ret[keys])
221
- else:
222
- return tuple(_cast_to_config(ret[a]) for a in keys)
223
- else:
224
- if filename.endswith(".py"):
225
- # when not specified, only load those that are config objects
226
- ret = DictConfig(
227
- {
228
- name: _cast_to_config(value)
229
- for name, value in ret.items()
230
- if isinstance(value, (DictConfig, ListConfig, dict))
231
- and not name.startswith("_")
232
- },
233
- flags={"allow_objects": True},
234
- )
235
- return ret
236
-
237
- @staticmethod
238
- def save(cfg, filename: str):
239
- """
240
- Save a config object to a yaml file.
241
- Note that when the config dictionary contains complex objects (e.g. lambda),
242
- it can't be saved to yaml. In that case we will print an error and
243
- attempt to save to a pkl file instead.
244
-
245
- Args:
246
- cfg: an omegaconf config object
247
- filename: yaml file name to save the config file
248
- """
249
- logger = logging.getLogger(__name__)
250
- try:
251
- cfg = deepcopy(cfg)
252
- except Exception:
253
- pass
254
- else:
255
- # if it's deep-copyable, then...
256
- def _replace_type_by_name(x):
257
- if "_target_" in x and callable(x._target_):
258
- try:
259
- x._target_ = _convert_target_to_string(x._target_)
260
- except AttributeError:
261
- pass
262
-
263
- # not necessary, but makes yaml looks nicer
264
- _visit_dict_config(cfg, _replace_type_by_name)
265
-
266
- save_pkl = False
267
- try:
268
- dict = OmegaConf.to_container(cfg, resolve=False)
269
- dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999)
270
- with PathManager.open(filename, "w") as f:
271
- f.write(dumped)
272
-
273
- try:
274
- _ = yaml.unsafe_load(dumped) # test that it is loadable
275
- except Exception:
276
- logger.warning(
277
- "The config contains objects that cannot serialize to a valid yaml. "
278
- f"{filename} is human-readable but cannot be loaded."
279
- )
280
- save_pkl = True
281
- except Exception:
282
- logger.exception("Unable to serialize the config to yaml. Error:")
283
- save_pkl = True
284
-
285
- if save_pkl:
286
- new_filename = filename + ".pkl"
287
- try:
288
- # retry by pickle
289
- with PathManager.open(new_filename, "wb") as f:
290
- cloudpickle.dump(cfg, f)
291
- logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
292
- except Exception:
293
- pass
294
-
295
- @staticmethod
296
- def apply_overrides(cfg, overrides: List[str]):
297
- """
298
- In-place override contents of cfg.
299
-
300
- Args:
301
- cfg: an omegaconf config object
302
- overrides: list of strings in the format of "a=b" to override configs.
303
- See https://hydra.cc/docs/next/advanced/override_grammar/basic/
304
- for syntax.
305
-
306
- Returns:
307
- the cfg object
308
- """
309
-
310
- def safe_update(cfg, key, value):
311
- parts = key.split(".")
312
- for idx in range(1, len(parts)):
313
- prefix = ".".join(parts[:idx])
314
- v = OmegaConf.select(cfg, prefix, default=None)
315
- if v is None:
316
- break
317
- if not OmegaConf.is_config(v):
318
- raise KeyError(
319
- f"Trying to update key {key}, but {prefix} "
320
- f"is not a config, but has type {type(v)}."
321
- )
322
- OmegaConf.update(cfg, key, value, merge=True)
323
-
324
- from hydra.core.override_parser.overrides_parser import OverridesParser
325
-
326
- parser = OverridesParser.create()
327
- overrides = parser.parse_overrides(overrides)
328
- for o in overrides:
329
- key = o.key_or_group
330
- value = o.value()
331
- if o.is_delete():
332
- # TODO support this
333
- raise NotImplementedError("deletion is not yet a supported override")
334
- safe_update(cfg, key, value)
335
- return cfg
336
-
337
- @staticmethod
338
- def to_py(cfg, prefix: str = "cfg."):
339
- """
340
- Try to convert a config object into Python-like psuedo code.
341
-
342
- Note that perfect conversion is not always possible. So the returned
343
- results are mainly meant to be human-readable, and not meant to be executed.
344
-
345
- Args:
346
- cfg: an omegaconf config object
347
- prefix: root name for the resulting code (default: "cfg.")
348
-
349
-
350
- Returns:
351
- str of formatted Python code
352
- """
353
- import black
354
-
355
- cfg = OmegaConf.to_container(cfg, resolve=True)
356
-
357
- def _to_str(obj, prefix=None, inside_call=False):
358
- if prefix is None:
359
- prefix = []
360
- if isinstance(obj, abc.Mapping) and "_target_" in obj:
361
- # Dict representing a function call
362
- target = _convert_target_to_string(obj.pop("_target_"))
363
- args = []
364
- for k, v in sorted(obj.items()):
365
- args.append(f"{k}={_to_str(v, inside_call=True)}")
366
- args = ", ".join(args)
367
- call = f"{target}({args})"
368
- return "".join(prefix) + call
369
- elif isinstance(obj, abc.Mapping) and not inside_call:
370
- # Dict that is not inside a call is a list of top-level config objects that we
371
- # render as one object per line with dot separated prefixes
372
- key_list = []
373
- for k, v in sorted(obj.items()):
374
- if isinstance(v, abc.Mapping) and "_target_" not in v:
375
- key_list.append(_to_str(v, prefix=prefix + [k + "."]))
376
- else:
377
- key = "".join(prefix) + k
378
- key_list.append(f"{key}={_to_str(v)}")
379
- return "\n".join(key_list)
380
- elif isinstance(obj, abc.Mapping):
381
- # Dict that is inside a call is rendered as a regular dict
382
- return (
383
- "{"
384
- + ",".join(
385
- f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
386
- for k, v in sorted(obj.items())
387
- )
388
- + "}"
389
- )
390
- elif isinstance(obj, list):
391
- return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
392
- else:
393
- return repr(obj)
394
-
395
- py_str = _to_str(cfg, prefix=[prefix])
396
- try:
397
- return black.format_str(py_str, mode=black.Mode())
398
- except black.InvalidInput:
399
- return py_str
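A short usage sketch of `LazyCall`/`instantiate`, echoing the docstring above (not part of the commit; `kernel_size` is added so the `nn.Conv2d` call is actually valid):

```python
import torch.nn as nn
from detectron2.config import LazyCall, instantiate

layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32, kernel_size=3)
layer_cfg.out_channels = 64      # still just a DictConfig; editable until instantiation
layer = instantiate(layer_cfg)   # -> nn.Conv2d(32, 64, kernel_size=(3, 3), ...)
```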
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/common.py DELETED
@@ -1,241 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import copy
3
- import itertools
4
- import logging
5
- import numpy as np
6
- import pickle
7
- import random
8
- import torch.utils.data as data
9
- from torch.utils.data.sampler import Sampler
10
-
11
- from detectron2.utils.serialize import PicklableWrapper
12
-
13
- __all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
14
-
15
-
16
- def _shard_iterator_dataloader_worker(iterable):
17
- # Shard the iterable if we're currently inside pytorch dataloader worker.
18
- worker_info = data.get_worker_info()
19
- if worker_info is None or worker_info.num_workers == 1:
20
- # do nothing
21
- yield from iterable
22
- else:
23
- yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers)
24
-
25
-
26
- class _MapIterableDataset(data.IterableDataset):
27
- """
28
- Map a function over elements in an IterableDataset.
29
-
30
- Similar to pytorch's MapIterDataPipe, but support filtering when map_func
31
- returns None.
32
-
33
- This class is not public-facing. Will be called by `MapDataset`.
34
- """
35
-
36
- def __init__(self, dataset, map_func):
37
- self._dataset = dataset
38
- self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
39
-
40
- def __len__(self):
41
- return len(self._dataset)
42
-
43
- def __iter__(self):
44
- for x in map(self._map_func, self._dataset):
45
- if x is not None:
46
- yield x
47
-
48
-
49
- class MapDataset(data.Dataset):
50
- """
51
- Map a function over the elements in a dataset.
52
- """
53
-
54
- def __init__(self, dataset, map_func):
55
- """
56
- Args:
57
- dataset: a dataset where map function is applied. Can be either
58
- map-style or iterable dataset. When given an iterable dataset,
59
- the returned object will also be an iterable dataset.
60
- map_func: a callable which maps the element in dataset. map_func can
61
- return None to skip the data (e.g. in case of errors).
62
- How None is handled depends on the style of `dataset`.
63
- If `dataset` is map-style, it randomly tries other elements.
64
- If `dataset` is iterable, it skips the data and tries the next.
65
- """
66
- self._dataset = dataset
67
- self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
68
-
69
- self._rng = random.Random(42)
70
- self._fallback_candidates = set(range(len(dataset)))
71
-
72
- def __new__(cls, dataset, map_func):
73
- is_iterable = isinstance(dataset, data.IterableDataset)
74
- if is_iterable:
75
- return _MapIterableDataset(dataset, map_func)
76
- else:
77
- return super().__new__(cls)
78
-
79
- def __getnewargs__(self):
80
- return self._dataset, self._map_func
81
-
82
- def __len__(self):
83
- return len(self._dataset)
84
-
85
- def __getitem__(self, idx):
86
- retry_count = 0
87
- cur_idx = int(idx)
88
-
89
- while True:
90
- data = self._map_func(self._dataset[cur_idx])
91
- if data is not None:
92
- self._fallback_candidates.add(cur_idx)
93
- return data
94
-
95
- # _map_func fails for this idx, use a random new index from the pool
96
- retry_count += 1
97
- self._fallback_candidates.discard(cur_idx)
98
- cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
99
-
100
- if retry_count >= 3:
101
- logger = logging.getLogger(__name__)
102
- logger.warning(
103
- "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
104
- idx, retry_count
105
- )
106
- )
107
-
108
-
109
- class DatasetFromList(data.Dataset):
110
- """
111
- Wrap a list to a torch Dataset. It produces elements of the list as data.
112
- """
113
-
114
- def __init__(self, lst: list, copy: bool = True, serialize: bool = True):
115
- """
116
- Args:
117
- lst (list): a list which contains elements to produce.
118
- copy (bool): whether to deepcopy the element when producing it,
119
- so that the result can be modified in place without affecting the
120
- source in the list.
121
- serialize (bool): whether to hold memory using serialized objects, when
122
- enabled, data loader workers can use shared RAM from master
123
- process instead of making a copy.
124
- """
125
- self._lst = lst
126
- self._copy = copy
127
- self._serialize = serialize
128
-
129
- def _serialize(data):
130
- buffer = pickle.dumps(data, protocol=-1)
131
- return np.frombuffer(buffer, dtype=np.uint8)
132
-
133
- if self._serialize:
134
- logger = logging.getLogger(__name__)
135
- logger.info(
136
- "Serializing {} elements to byte tensors and concatenating them all ...".format(
137
- len(self._lst)
138
- )
139
- )
140
- self._lst = [_serialize(x) for x in self._lst]
141
- self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
142
- self._addr = np.cumsum(self._addr)
143
- self._lst = np.concatenate(self._lst)
144
- logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024 ** 2))
145
-
146
- def __len__(self):
147
- if self._serialize:
148
- return len(self._addr)
149
- else:
150
- return len(self._lst)
151
-
152
- def __getitem__(self, idx):
153
- if self._serialize:
154
- start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
155
- end_addr = self._addr[idx].item()
156
- bytes = memoryview(self._lst[start_addr:end_addr])
157
- return pickle.loads(bytes)
158
- elif self._copy:
159
- return copy.deepcopy(self._lst[idx])
160
- else:
161
- return self._lst[idx]
162
-
163
-
164
- class ToIterableDataset(data.IterableDataset):
165
- """
166
- Convert an old indices-based (also called map-style) dataset
167
- to an iterable-style dataset.
168
- """
169
-
170
- def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True):
171
- """
172
- Args:
173
- dataset: an old-style dataset with ``__getitem__``
174
- sampler: a cheap iterable that produces indices to be applied on ``dataset``.
175
- shard_sampler: whether to shard the sampler based on the current pytorch data loader
176
- worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
177
- workers, it is responsible for sharding its data based on worker id so that workers
178
- don't produce identical data.
179
-
180
- Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
181
- and this argument should be set to True. But certain samplers may be already
182
- sharded, in that case this argument should be set to False.
183
- """
184
- assert not isinstance(dataset, data.IterableDataset), dataset
185
- assert isinstance(sampler, Sampler), sampler
186
- self.dataset = dataset
187
- self.sampler = sampler
188
- self.shard_sampler = shard_sampler
189
-
190
- def __iter__(self):
191
- if not self.shard_sampler:
192
- sampler = self.sampler
193
- else:
194
- # With map-style dataset, `DataLoader(dataset, sampler)` runs the
195
- # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
196
- # will run sampler in every of the N worker. So we should only keep 1/N of the ids on
197
- # each worker. The assumption is that sampler is cheap to iterate so it's fine to
198
- # discard ids in workers.
199
- sampler = _shard_iterator_dataloader_worker(self.sampler)
200
- for idx in sampler:
201
- yield self.dataset[idx]
202
-
203
- def __len__(self):
204
- return len(self.sampler)
205
-
206
-
207
- class AspectRatioGroupedDataset(data.IterableDataset):
208
- """
209
- Batch data that have similar aspect ratio together.
210
- In this implementation, images whose aspect ratio < (or >) 1 will
211
- be batched together.
212
- This improves training speed because the images then need less padding
213
- to form a batch.
214
-
215
- It assumes the underlying dataset produces dicts with "width" and "height" keys.
216
- It will then produce a list of original dicts with length = batch_size,
217
- all with similar aspect ratios.
218
- """
219
-
220
- def __init__(self, dataset, batch_size):
221
- """
222
- Args:
223
- dataset: an iterable. Each element must be a dict with keys
224
- "width" and "height", which will be used to batch data.
225
- batch_size (int):
226
- """
227
- self.dataset = dataset
228
- self.batch_size = batch_size
229
- self._buckets = [[] for _ in range(2)]
230
- # Hard-coded two aspect ratio groups: w > h and w < h.
231
- # Can add support for more aspect ratio groups, but doesn't seem useful
232
-
233
- def __iter__(self):
234
- for d in self.dataset:
235
- w, h = d["width"], d["height"]
236
- bucket_id = 0 if w > h else 1
237
- bucket = self._buckets[bucket_id]
238
- bucket.append(d)
239
- if len(bucket) == self.batch_size:
240
- yield bucket[:]
241
- del bucket[:]
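A minimal sketch (not from the commit) of composing the two dataset wrappers defined above:

```python
from detectron2.data.common import DatasetFromList, MapDataset

records = [{"width": 640, "height": 480, "idx": i} for i in range(4)]
dataset = DatasetFromList(records, copy=False)  # serialize=True by default: items stored as pickled bytes
dataset = MapDataset(dataset, lambda d: {**d, "area": d["width"] * d["height"]})
print(dataset[0]["area"])                       # 307200
```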
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/backbone/fpn.py DELETED
@@ -1,255 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import math
3
- import fvcore.nn.weight_init as weight_init
4
- import torch
5
- import torch.nn.functional as F
6
- from torch import nn
7
-
8
- from detectron2.layers import Conv2d, ShapeSpec, get_norm
9
-
10
- from .backbone import Backbone
11
- from .build import BACKBONE_REGISTRY
12
- from .resnet import build_resnet_backbone
13
-
14
- __all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"]
15
-
16
-
17
- class FPN(Backbone):
18
- """
19
- This module implements :paper:`FPN`.
20
- It creates pyramid features built on top of some input feature maps.
21
- """
22
-
23
- _fuse_type: torch.jit.Final[str]
24
-
25
- def __init__(
26
- self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
27
- ):
28
- """
29
- Args:
30
- bottom_up (Backbone): module representing the bottom up subnetwork.
31
- Must be a subclass of :class:`Backbone`. The multi-scale feature
32
- maps generated by the bottom up network, and listed in `in_features`,
33
- are used to generate FPN levels.
34
- in_features (list[str]): names of the input feature maps coming
35
- from the backbone to which FPN is attached. For example, if the
36
- backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
37
- of these may be used; order must be from high to low resolution.
38
- out_channels (int): number of channels in the output feature maps.
39
- norm (str): the normalization to use.
40
- top_block (nn.Module or None): if provided, an extra operation will
41
- be performed on the output of the last (smallest resolution)
42
- FPN output, and the result will extend the result list. The top_block
43
- further downsamples the feature map. It must have an attribute
44
- "num_levels", meaning the number of extra FPN levels added by
45
- this block, and "in_feature", which is a string representing
46
- its input feature (e.g., p5).
47
- fuse_type (str): types for fusing the top down features and the lateral
48
- ones. It can be "sum" (default), which sums up element-wise; or "avg",
49
- which takes the element-wise mean of the two.
50
- """
51
- super(FPN, self).__init__()
52
- assert isinstance(bottom_up, Backbone)
53
- assert in_features, in_features
54
-
55
- # Feature map strides and channels from the bottom up network (e.g. ResNet)
56
- input_shapes = bottom_up.output_shape()
57
- strides = [input_shapes[f].stride for f in in_features]
58
- in_channels_per_feature = [input_shapes[f].channels for f in in_features]
59
-
60
- _assert_strides_are_log2_contiguous(strides)
61
- lateral_convs = []
62
- output_convs = []
63
-
64
- use_bias = norm == ""
65
- for idx, in_channels in enumerate(in_channels_per_feature):
66
- lateral_norm = get_norm(norm, out_channels)
67
- output_norm = get_norm(norm, out_channels)
68
-
69
- lateral_conv = Conv2d(
70
- in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
71
- )
72
- output_conv = Conv2d(
73
- out_channels,
74
- out_channels,
75
- kernel_size=3,
76
- stride=1,
77
- padding=1,
78
- bias=use_bias,
79
- norm=output_norm,
80
- )
81
- weight_init.c2_xavier_fill(lateral_conv)
82
- weight_init.c2_xavier_fill(output_conv)
83
- stage = int(math.log2(strides[idx]))
84
- self.add_module("fpn_lateral{}".format(stage), lateral_conv)
85
- self.add_module("fpn_output{}".format(stage), output_conv)
86
-
87
- lateral_convs.append(lateral_conv)
88
- output_convs.append(output_conv)
89
- # Place convs into top-down order (from low to high resolution)
90
- # to make the top-down computation in forward clearer.
91
- self.lateral_convs = lateral_convs[::-1]
92
- self.output_convs = output_convs[::-1]
93
- self.top_block = top_block
94
- self.in_features = tuple(in_features)
95
- self.bottom_up = bottom_up
96
- # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
97
- self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
98
- # top block output feature maps.
99
- if self.top_block is not None:
100
- for s in range(stage, stage + self.top_block.num_levels):
101
- self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
102
-
103
- self._out_features = list(self._out_feature_strides.keys())
104
- self._out_feature_channels = {k: out_channels for k in self._out_features}
105
- self._size_divisibility = strides[-1]
106
- assert fuse_type in {"avg", "sum"}
107
- self._fuse_type = fuse_type
108
-
109
- @property
110
- def size_divisibility(self):
111
- return self._size_divisibility
112
-
113
- def forward(self, x):
114
- """
115
- Args:
116
- input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
117
- feature map tensor for each feature level in high to low resolution order.
118
-
119
- Returns:
120
- dict[str->Tensor]:
121
- mapping from feature map name to FPN feature map tensor
122
- in high to low resolution order. Returned feature names follow the FPN
123
- paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
124
- ["p2", "p3", ..., "p6"].
125
- """
126
- bottom_up_features = self.bottom_up(x)
127
- results = []
128
- prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]])
129
- results.append(self.output_convs[0](prev_features))
130
-
131
- # Reverse feature maps into top-down order (from low to high resolution)
132
- for idx, (lateral_conv, output_conv) in enumerate(
133
- zip(self.lateral_convs, self.output_convs)
134
- ):
135
- # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336
136
- # Therefore we loop over all modules but skip the first one
137
- if idx > 0:
138
- features = self.in_features[-idx - 1]
139
- features = bottom_up_features[features]
140
- top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest")
141
- lateral_features = lateral_conv(features)
142
- prev_features = lateral_features + top_down_features
143
- if self._fuse_type == "avg":
144
- prev_features /= 2
145
- results.insert(0, output_conv(prev_features))
146
-
147
- if self.top_block is not None:
148
- if self.top_block.in_feature in bottom_up_features:
149
- top_block_in_feature = bottom_up_features[self.top_block.in_feature]
150
- else:
151
- top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
152
- results.extend(self.top_block(top_block_in_feature))
153
- assert len(self._out_features) == len(results)
154
- return {f: res for f, res in zip(self._out_features, results)}
155
-
156
- def output_shape(self):
157
- return {
158
- name: ShapeSpec(
159
- channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
160
- )
161
- for name in self._out_features
162
- }
163
-
164
-
165
- def _assert_strides_are_log2_contiguous(strides):
166
- """
167
- Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
168
- """
169
- for i, stride in enumerate(strides[1:], 1):
170
- assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
171
- stride, strides[i - 1]
172
- )
173
-
174
-
175
- class LastLevelMaxPool(nn.Module):
176
- """
177
- This module is used in the original FPN to generate a downsampled
178
- P6 feature from P5.
179
- """
180
-
181
- def __init__(self):
182
- super().__init__()
183
- self.num_levels = 1
184
- self.in_feature = "p5"
185
-
186
- def forward(self, x):
187
- return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
188
-
189
-
190
- class LastLevelP6P7(nn.Module):
191
- """
192
- This module is used in RetinaNet to generate extra layers, P6 and P7 from
193
- C5 feature.
194
- """
195
-
196
- def __init__(self, in_channels, out_channels, in_feature="res5"):
197
- super().__init__()
198
- self.num_levels = 2
199
- self.in_feature = in_feature
200
- self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
201
- self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
202
- for module in [self.p6, self.p7]:
203
- weight_init.c2_xavier_fill(module)
204
-
205
- def forward(self, c5):
206
- p6 = self.p6(c5)
207
- p7 = self.p7(F.relu(p6))
208
- return [p6, p7]
209
-
210
-
211
- @BACKBONE_REGISTRY.register()
212
- def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
213
- """
214
- Args:
215
- cfg: a detectron2 CfgNode
216
-
217
- Returns:
218
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
219
- """
220
- bottom_up = build_resnet_backbone(cfg, input_shape)
221
- in_features = cfg.MODEL.FPN.IN_FEATURES
222
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
223
- backbone = FPN(
224
- bottom_up=bottom_up,
225
- in_features=in_features,
226
- out_channels=out_channels,
227
- norm=cfg.MODEL.FPN.NORM,
228
- top_block=LastLevelMaxPool(),
229
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
230
- )
231
- return backbone
232
-
233
-
234
- @BACKBONE_REGISTRY.register()
235
- def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
236
- """
237
- Args:
238
- cfg: a detectron2 CfgNode
239
-
240
- Returns:
241
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
242
- """
243
- bottom_up = build_resnet_backbone(cfg, input_shape)
244
- in_features = cfg.MODEL.FPN.IN_FEATURES
245
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
246
- in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
247
- backbone = FPN(
248
- bottom_up=bottom_up,
249
- in_features=in_features,
250
- out_channels=out_channels,
251
- norm=cfg.MODEL.FPN.NORM,
252
- top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
253
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
254
- )
255
- return backbone
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/config/dir1/dir1_a.py DELETED
@@ -1,3 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- dir1a_str = "base_a_1"
3
- dir1a_dict = {"a": 1, "b": 2}
spaces/B2gan/LLM_Can_See/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: LLM Can See
3
- emoji: 📉
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- license: unknown
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bambicita/rvc-models/vc_infer_pipeline.py DELETED
@@ -1,306 +0,0 @@
1
- import numpy as np, parselmouth, torch, pdb
2
- from time import time as ttime
3
- import torch.nn.functional as F
4
- from config import x_pad, x_query, x_center, x_max
5
- import scipy.signal as signal
6
- import pyworld, os, traceback, faiss
7
- from scipy import signal
8
-
9
- bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
10
-
11
-
12
- class VC(object):
13
- def __init__(self, tgt_sr, device, is_half):
14
- self.sr = 16000 # hubert输入采样率
15
- self.window = 160 # 每帧点数
16
- self.t_pad = self.sr * x_pad # 每条前后pad时间
17
- self.t_pad_tgt = tgt_sr * x_pad
18
- self.t_pad2 = self.t_pad * 2
19
- self.t_query = self.sr * x_query # 查询切点前后查询时间
20
- self.t_center = self.sr * x_center # 查询切点位置
21
- self.t_max = self.sr * x_max # 免查询时长阈值
22
- self.device = device
23
- self.is_half = is_half
24
-
25
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
26
- time_step = self.window / self.sr * 1000
27
- f0_min = 50
28
- f0_max = 1100
29
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
30
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
31
- if f0_method == "pm":
32
- f0 = (
33
- parselmouth.Sound(x, self.sr)
34
- .to_pitch_ac(
35
- time_step=time_step / 1000,
36
- voicing_threshold=0.6,
37
- pitch_floor=f0_min,
38
- pitch_ceiling=f0_max,
39
- )
40
- .selected_array["frequency"]
41
- )
42
- pad_size = (p_len - len(f0) + 1) // 2
43
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
44
- f0 = np.pad(
45
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
46
- )
47
- elif f0_method == "harvest":
48
- f0, t = pyworld.harvest(
49
- x.astype(np.double),
50
- fs=self.sr,
51
- f0_ceil=f0_max,
52
- f0_floor=f0_min,
53
- frame_period=10,
54
- )
55
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
56
- f0 = signal.medfilt(f0, 3)
57
- f0 *= pow(2, f0_up_key / 12)
58
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
59
- tf0 = self.sr // self.window # 每秒f0点数
60
- if inp_f0 is not None:
61
- delta_t = np.round(
62
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
63
- ).astype("int16")
64
- replace_f0 = np.interp(
65
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
66
- )
67
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
68
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
69
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
70
- f0bak = f0.copy()
71
- f0_mel = 1127 * np.log(1 + f0 / 700)
72
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
73
- f0_mel_max - f0_mel_min
74
- ) + 1
75
- f0_mel[f0_mel <= 1] = 1
76
- f0_mel[f0_mel > 255] = 255
77
- f0_coarse = np.rint(f0_mel).astype(np.int)
78
- return f0_coarse, f0bak # 1-0
79
-
80
- def vc(
81
- self,
82
- model,
83
- net_g,
84
- sid,
85
- audio0,
86
- pitch,
87
- pitchf,
88
- times,
89
- index,
90
- big_npy,
91
- index_rate,
92
- ): # ,file_index,file_big_npy
93
- feats = torch.from_numpy(audio0)
94
- if self.is_half:
95
- feats = feats.half()
96
- else:
97
- feats = feats.float()
98
- if feats.dim() == 2: # double channels
99
- feats = feats.mean(-1)
100
- assert feats.dim() == 1, feats.dim()
101
- feats = feats.view(1, -1)
102
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
103
-
104
- inputs = {
105
- "source": feats.to(self.device),
106
- "padding_mask": padding_mask,
107
- "output_layer": 9, # layer 9
108
- }
109
- t0 = ttime()
110
- with torch.no_grad():
111
- logits = model.extract_features(**inputs)
112
- feats = model.final_proj(logits[0])
113
-
114
- if (
115
- isinstance(index, type(None)) == False
116
- and isinstance(big_npy, type(None)) == False
117
- and index_rate != 0
118
- ):
119
- npy = feats[0].cpu().numpy()
120
- if self.is_half:
121
- npy = npy.astype("float32")
122
- _, I = index.search(npy, 1)
123
- npy = big_npy[I.squeeze()]
124
- if self.is_half:
125
- npy = npy.astype("float16")
126
- feats = (
127
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
128
- + (1 - index_rate) * feats
129
- )
130
-
131
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
132
- t1 = ttime()
133
- p_len = audio0.shape[0] // self.window
134
- if feats.shape[1] < p_len:
135
- p_len = feats.shape[1]
136
- if pitch != None and pitchf != None:
137
- pitch = pitch[:, :p_len]
138
- pitchf = pitchf[:, :p_len]
139
- p_len = torch.tensor([p_len], device=self.device).long()
140
- with torch.no_grad():
141
- if pitch != None and pitchf != None:
142
- audio1 = (
143
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
144
- .data.cpu()
145
- .float()
146
- .numpy()
147
- .astype(np.int16)
148
- )
149
- else:
150
- audio1 = (
151
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
152
- .data.cpu()
153
- .float()
154
- .numpy()
155
- .astype(np.int16)
156
- )
157
- del feats, p_len, padding_mask
158
- if torch.cuda.is_available():
159
- torch.cuda.empty_cache()
160
- t2 = ttime()
161
- times[0] += t1 - t0
162
- times[2] += t2 - t1
163
- return audio1
164
-
165
- def pipeline(
166
- self,
167
- model,
168
- net_g,
169
- sid,
170
- audio,
171
- times,
172
- f0_up_key,
173
- f0_method,
174
- file_index,
175
- file_big_npy,
176
- index_rate,
177
- if_f0,
178
- f0_file=None,
179
- ):
180
- if (
181
- file_big_npy != ""
182
- and file_index != ""
183
- and os.path.exists(file_big_npy) == True
184
- and os.path.exists(file_index) == True
185
- and index_rate != 0
186
- ):
187
- try:
188
- index = faiss.read_index(file_index)
189
- big_npy = np.load(file_big_npy)
190
- except:
191
- traceback.print_exc()
192
- index = big_npy = None
193
- else:
194
- index = big_npy = None
195
- print("Feature retrieval library doesn't exist or ratio is 0")
196
- audio = signal.filtfilt(bh, ah, audio)
197
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
198
- opt_ts = []
199
- if audio_pad.shape[0] > self.t_max:
200
- audio_sum = np.zeros_like(audio)
201
- for i in range(self.window):
202
- audio_sum += audio_pad[i : i - self.window]
203
- for t in range(self.t_center, audio.shape[0], self.t_center):
204
- opt_ts.append(
205
- t
206
- - self.t_query
207
- + np.where(
208
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
209
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
210
- )[0][0]
211
- )
212
- s = 0
213
- audio_opt = []
214
- t = None
215
- t1 = ttime()
216
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
217
- p_len = audio_pad.shape[0] // self.window
218
- inp_f0 = None
219
- if hasattr(f0_file, "name") == True:
220
- try:
221
- with open(f0_file.name, "r") as f:
222
- lines = f.read().strip("\n").split("\n")
223
- inp_f0 = []
224
- for line in lines:
225
- inp_f0.append([float(i) for i in line.split(",")])
226
- inp_f0 = np.array(inp_f0, dtype="float32")
227
- except:
228
- traceback.print_exc()
229
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
230
- pitch, pitchf = None, None
231
- if if_f0 == 1:
232
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
233
- pitch = pitch[:p_len]
234
- pitchf = pitchf[:p_len]
235
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
236
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
237
- t2 = ttime()
238
- times[1] += t2 - t1
239
- for t in opt_ts:
240
- t = t // self.window * self.window
241
- if if_f0 == 1:
242
- audio_opt.append(
243
- self.vc(
244
- model,
245
- net_g,
246
- sid,
247
- audio_pad[s : t + self.t_pad2 + self.window],
248
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
249
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
250
- times,
251
- index,
252
- big_npy,
253
- index_rate,
254
- )[self.t_pad_tgt : -self.t_pad_tgt]
255
- )
256
- else:
257
- audio_opt.append(
258
- self.vc(
259
- model,
260
- net_g,
261
- sid,
262
- audio_pad[s : t + self.t_pad2 + self.window],
263
- None,
264
- None,
265
- times,
266
- index,
267
- big_npy,
268
- index_rate,
269
- )[self.t_pad_tgt : -self.t_pad_tgt]
270
- )
271
- s = t
272
- if if_f0 == 1:
273
- audio_opt.append(
274
- self.vc(
275
- model,
276
- net_g,
277
- sid,
278
- audio_pad[t:],
279
- pitch[:, t // self.window :] if t is not None else pitch,
280
- pitchf[:, t // self.window :] if t is not None else pitchf,
281
- times,
282
- index,
283
- big_npy,
284
- index_rate,
285
- )[self.t_pad_tgt : -self.t_pad_tgt]
286
- )
287
- else:
288
- audio_opt.append(
289
- self.vc(
290
- model,
291
- net_g,
292
- sid,
293
- audio_pad[t:],
294
- None,
295
- None,
296
- times,
297
- index,
298
- big_npy,
299
- index_rate,
300
- )[self.t_pad_tgt : -self.t_pad_tgt]
301
- )
302
- audio_opt = np.concatenate(audio_opt)
303
- del pitch, pitchf, sid
304
- if torch.cuda.is_available():
305
- torch.cuda.empty_cache()
306
- return audio_opt
spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/__init__.py DELETED
File without changes
spaces/Benson/text-generation/Examples/Api-ms-win-core-path- L1-1-0.dll Descargar.md DELETED
@@ -1,127 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar 1-1-0.dll para PC</h1>
3
- <p>Si está intentando ejecutar una aplicación de software que requiere 1-1-0.dll, puede encontrar un mensaje de error diciendo que este archivo DLL falta o no se encuentra. Esto puede ser frustrante y evitar que utilice el programa correctamente. En este artículo, explicaremos qué es 1-1-0.dll, por qué lo necesita y cómo corregir los errores relacionados con él. También le mostraremos cómo descargar 1-1-0.dll desde una fuente confiable e instalarlo en su PC.</p>
4
- <h2>api-ms-win-core-path- l1-1-0.dll descargar</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://bltlly.com/2v6L18">https://bltlly.com/2v6L18</a></b></p><br /><br />
5
- <h2>¿Qué es 1-1-0.dll y por qué lo necesita? </h2>
6
- <p>1-1-0.dll es un archivo de biblioteca de enlaces dinámicos que es utilizado comúnmente por varias aplicaciones de software, incluyendo PCSX2, VSCD Free Video Editor y NanoVNA-Saver.exe. Este archivo contiene un conjunto de instrucciones a las que estas aplicaciones pueden acceder para realizar ciertas funciones o tareas. Por ejemplo, PCSX2 usa 1-1-0.dll como un plugin para permitir que el emulador de PlayStation 2 funcione correctamente. VSCD Free Video Editor utiliza 1-1-0.dll como parte de su instalación. NanoVNA-Saver.exe utiliza 1-1-0.dll para comunicarse con el dispositivo NanoVNA y recopilar datos para su análisis. </p>
7
- <p>Los archivos DLL como 1-1-0.dll son componentes esenciales de muchas aplicaciones de software, y los errores o problemas con estos archivos pueden resultar en que el software no funcione correctamente. Para resolver estos problemas, necesita descargar e instalar la versión correcta de 1-1-0.dll en su PC.</p>
8
- <h2>¿Cuáles son los errores comunes relacionados con 1-1-0.dll? </h2>
9
- <p>Algunos de los errores comunes que puede ver al intentar ejecutar un programa que requiere 1-1-0.dll son:</p>
10
- <ul>
11
- <li> El programa no se puede iniciar porque 1-1-0.dll falta en su computadora. Intente reinstalar el programa para solucionar este problema. </li>
12
- <li>Ha surgido un problema al ejecutar 1-1-0.dll. No se ha encontrado el módulo especificado. </li>
13
- <li>Error al cargar 1-1-0.dll. No se encontró el módulo especificado. </li>
14
- <li> La ejecución del código no puede continuar porque no se encontró 1-1-0.dll. Reinstalar el programa puede solucionar este problema. </li>
15
-
16
- </ul>
17
- <p>Estos errores suelen indicar que el programa no puede encontrar o acceder al archivo DLL requerido, o que el archivo DLL está dañado, eliminado o extraviado. Hay varias causas posibles para estos errores, tales como:</p>
18
- <ul>
19
- <li>La instalación del programa está incompleta o dañada. </li>
20
- <li>Los archivos del sistema de Windows están desactualizados o dañados. </li>
21
- <li>Los controladores u otros componentes del sistema son incompatibles o faltan. </li>
22
- <li>El PC está infectado por malware u otro software malicioso. </li>
23
- <li>El archivo DLL es sobrescrito o reemplazado por otra versión. </li>
24
- </ul>
25
- <h2>¿Cómo corregir errores 1-1-0.dll? </h2>
26
- <p>Dependiendo de la causa del error, hay diferentes métodos que se pueden tratar de corregir 1-1-0.dll errores. Estos son algunos de los métodos más comunes y efectivos que puedes seguir:</p>
27
- <p></p>
28
- <h4>Método 1: Reinstalar la aplicación que requiere 1-1-0.dll</h4>
29
- <p>Una de las maneras más simples de corregir errores 1-1-0.dll es reinstalar el programa que le está dando el error. Esto puede ayudarle a restaurar el archivo DLL faltante o dañado, así como solucionar cualquier otro problema con la instalación del programa. Para reinstalar el programa, debe seguir estos pasos:</p>
30
- <ol>
31
- <li>Desinstalar el programa desde su PC. Puede hacer esto yendo a Panel de control > Programas y características, y seleccionando el programa de la lista. Luego, haga clic en Desinstalar y siga las instrucciones. </li>
32
- <li>Reinicie su PC para borrar cualquier archivo residual o entradas del registro. </li>
33
- <li>Descargar la última versión del programa desde su sitio web oficial o una fuente de confianza. Asegúrese de descargar la versión correcta que coincida con su sistema y arquitectura de Windows (32 bits o 64 bits). </li>
34
- <li>Ejecute el instalador y siga las instrucciones para instalar el programa en su PC.</li>
35
- <li>Inicie el programa y compruebe si el error está resuelto. </li>
36
- </ol>
37
- <h4>Método 2: Actualizar Windows y controladores</h4>
38
-
39
- <ol>
40
- <li>Ir a Inicio > Configuración > Actualización y seguridad > Actualización de Windows.</li>
41
- <li>Haga clic en Buscar actualizaciones y espere a que Windows busque actualizaciones disponibles. </li>
42
- <li>Si hay actualizaciones disponibles, haga clic en Descargar e instalar y espere a que Windows las descargue e instale. </li>
43
- <li>Reinicie su PC para aplicar los cambios. </li>
44
- </ol>
45
- <p>Para actualizar los controladores, debe seguir estos pasos:</p>
46
- <ol>
47
- <li>Ir al Administrador de dispositivos haciendo clic derecho en Inicio y seleccionando Administrador de dispositivos.</li>
48
- <li>Expanda la categoría del dispositivo que desea actualizar, como Adaptadores de pantalla o Controladores de sonido, video y juegos. </li>
49
- <li>Haga clic derecho en el dispositivo y seleccione Actualizar controlador. </li>
50
- <li>Seleccione Buscar automáticamente el software de controlador actualizado y esperar a que Windows encuentre e instale el mejor controlador para su dispositivo. </li>
51
- <li>Repita este proceso para cualquier otro dispositivo que desee actualizar. </li>
52
- <li>Reinicie su PC para aplicar los cambios. </li>
53
- </ol>
54
- <h4>Método 3: Escanea tu PC en busca de malware</h4>
55
- <p>A veces, los errores 1-1-0.dll pueden ser causados por malware u otro software malicioso que infecta su PC y daña o elimina sus archivos DLL. Para solucionar esto, es necesario analizar su PC en busca de malware y eliminar cualquier amenaza que se encuentran. Para buscar malware en tu PC, debes seguir estos pasos:</p>
56
- <ol>
57
- <li>Descargar e instalar un antivirus de buena reputación o programa anti-malware, como Malwarebytes o Avast. Asegúrate de descargarlos de sus sitios web oficiales o de una fuente confiable. </li>
58
- <li>Ejecute el programa y realice un análisis completo de su PC. Esto puede tomar algún tiempo dependiendo del tamaño de sus archivos y discos. </li>
59
- <li>Si se detecta algún malware o amenaza, siga las instrucciones para poner en cuarentena o eliminarlos de su PC.</li>
60
- <li>Reinicie su PC para aplicar los cambios. </li>
61
- </ol>
62
- <h4>Método 4: Descargar y restaurar 1-1-0.dll desde una fuente de confianza</h4>
63
-
64
- <h2>¿Cómo descargar 1-1-0.dll desde una fuente confiable? </h2>
65
- <p>Para descargar 1-1-0.dll desde una fuente de confianza, debe seguir estos pasos:</p>
66
- <h3>Paso 1: Utilice Google u otro motor de búsqueda para localizar el DLL</h3>
67
- <p>El primer paso es utilizar Google u otro motor de búsqueda para encontrar un sitio web que ofrece 1-1-0.dll para su descarga. Puede escribir palabras clave como "download 1-1- 0.dll" o "1-1-0.dll download". Verá una lista de sitios web que afirman ofrecer el archivo DLL gratis o por una tarifa. </p>
68
- <p>Sin embargo, no todos estos sitios web son confiables o confiables. Algunos de ellos pueden contener malware, virus u otro software dañino que puede dañar su PC o robar su información personal. Por lo tanto, debe tener cuidado y elegir un sitio web que tenga una buena reputación y comentarios positivos de otros usuarios. También puede verificar el nombre de dominio del sitio web, el certificado de seguridad y la información de contacto para verificar su legitimidad. </p>
69
- <p>Uno de los sitios web que recomendamos para descargar 1-1-0.dll es [DLL-files.com]. Este sitio web ha existido desde 1998 y tiene una gran base de datos de archivos DLL que son verificados y seguros para descargar. También ofrece atención al cliente y una garantía de devolución de dinero en caso de cualquier problema. Puede utilizar este sitio web para descargar 1-1-0.dll siguiendo los siguientes pasos. </p>
70
- <h3>Paso 2: Siga los pasos en pantalla para descargar el archivo a su computadora</h3>
71
- <p>El siguiente paso es seguir los pasos en pantalla para descargar el archivo a su computadora. Para hacer esto, debe seguir estos pasos:</p>
72
- <ol>
73
- <li>Vaya a [DLL-files.com] y escriba "1-1-0.dll" en el cuadro de búsqueda. Luego, haga clic en Buscar archivo DLL. </li>
74
- <li>Verá una lista de resultados que coinciden con su consulta de búsqueda. Haga clic en el que dice "1-1-0.dll - plugin PCSX2". </li>
75
-
76
- <li>Serás redirigido a otra página que te pide elegir entre una descarga gratuita o una premium. La descarga gratuita requiere que complete una verificación de captcha y espere unos segundos antes de que comience la descarga. La descarga premium le permite omitir la verificación de captcha e iniciar la descarga inmediatamente. También ofrece una velocidad de descarga más rápida, descargas ilimitadas y atención al cliente. Puede elegir cualquiera de las opciones dependiendo de su preferencia y presupuesto. A continuación, haga clic en Descargar archivo ZIP. </li>
77
- <li>Verá una ventana emergente que le pide que guarde el archivo ZIP en su computadora. Elija una ubicación donde desea guardar el archivo, como su escritorio o su carpeta "Mis documentos". Luego, haga clic en Guardar.</li>
78
- <li>Espere a que termine la descarga. Verá una notificación que dice "Descargar completa". </li>
79
- </ol>
80
- <h3>Paso 3: Guarde el archivo en una ubicación de fácil acceso, como su escritorio o su carpeta "Mis documentos" </h3>
81
- <p>El tercer paso es guardar el archivo en una ubicación de fácil acceso, como su escritorio o su carpeta "Mis documentos". Esto le facilitará encontrar y copiar el archivo más tarde. Para hacer esto, debe seguir estos pasos:</p>
82
- <ol>
83
- <li>Busque el archivo ZIP que descargó de [DLL-files.com]. Debería tener un nombre como "1-1-0.zip". </li>
84
- <li>Haga clic derecho en el archivo ZIP y seleccione Extraer todo.</li>
85
- <li> Verá una ventana que le pide que elija un destino donde desea extraer los archivos. Elija una ubicación donde desea guardar los archivos, como su escritorio o su carpeta "Mis documentos". Luego, haga clic en Extraer.</li>
86
- <li>Espere a que termine la extracción. Verá una carpeta que contiene los archivos extraídos. Debería tener un nombre como "1-1-0". </li>
87
- <li>Abra la carpeta y busque el archivo DLL que necesita. Debería tener un nombre como "1-1-0.dll". </li>
88
- </ol>
89
- <h3>Paso 4: Copie el archivo a la carpeta apropiada dependiendo de su versión de Windows</h3>
90
-
91
- <ol>
92
- <li>Haga clic derecho en el archivo DLL que extrajo del archivo ZIP. Luego, seleccione Copiar.</li>
93
- <li>Ir al inicio > Explorador de archivos > Este PC > Disco local (C:). </li>
94
- <li>Vaya a la carpeta donde necesita pegar el archivo DLL. La carpeta puede variar dependiendo de la versión y arquitectura de Windows (32 bits o 64 bits). Estas son algunas de las carpetas comunes donde puede necesitar pegar el archivo DLL:</li>
95
- <ul>
96
- <li>Si tiene una versión de Windows de 32 bits, vaya a C: Windows System32.</li>
97
- <li>Si tiene una versión de Windows de 64 bits, vaya a C: Windows SysWOW64.</li>
98
- <li>Si no está seguro sobre su versión o arquitectura de Windows, vaya a Inicio > Configuración > Sistema > Acerca de y verifique la información en Especificaciones del dispositivo y Tipo de sistema. </li>
99
- </ul>
100
- <li> Haga clic derecho en un espacio vacío en la carpeta y seleccione Pegar. Esto copiará el archivo DLL a la carpeta. </li>
101
- <li>Si ve un mensaje que le pide que confirme el reemplazo del archivo, haga clic en Sí. Esto sobrescribirá el archivo DLL existente con el nuevo. </li>
102
- </ol>
103
- <h3>Paso 5: Registrar el archivo DLL usando el comando regsvr32</h3>
104
- <p>El quinto y último paso es registrar el archivo DLL usando el comando regsvr32. Esto hará que el archivo DLL esté disponible para su uso por su programa y otros programas que puedan necesitarlo. Para registrar el archivo DLL usando el comando regsvr32, debe seguir estos pasos:</p>
105
- <ol>
106
- <li>Vaya a Inicio y escriba "cmd" en el cuadro de búsqueda. Luego, haga clic derecho en Símbolo del sistema y seleccione Ejecutar como administrador. </li>
107
- <li>Verá una ventana negra que muestra el símbolo del sistema. Escriba "regsvr32 1-1-0.dll" (sin las comillas) y presione Enter.</li>
108
- <li>Verá un mensaje que dice "DllRegisterServer en 1-1-0.dll exitoso". Esto significa que el archivo DLL se ha registrado con éxito. </li>
109
- <li>Cierre la ventana del símbolo del sistema y reinicie su PC para aplicar los cambios. </li>
110
- </ol>
111
- <h2>Conclusión</h2>
112
-
113
- <p>Esperamos que este artículo le haya ayudado a aprender a descargar 1-1-0.dll para PC. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Nos encantaría saber de usted! </p>
114
- <h2>Preguntas frecuentes</h2>
115
- <p>Aquí están algunas de las preguntas más frecuentes sobre 1-1-0.dll:</p>
116
- <h4>Q: ¿Qué es un archivo DLL? </h4>
117
- <p>A: Un archivo DLL es un archivo de biblioteca de enlaces dinámicos que contiene un conjunto de instrucciones o código al que pueden acceder uno o más programas para realizar ciertas funciones o tareas. Los archivos DLL son componentes esenciales de muchas aplicaciones de software, y ayudan a reducir el tamaño y la complejidad de los programas al compartir recursos y código comunes. </p>
118
- <h4>Q: ¿Cómo sé qué versión de 1-1-0.dll necesito? </h4>
119
- <p>A: Puede comprobar qué versión de 1-1-0.dll necesita mirando el mensaje de error que ve al intentar ejecutar su programa. El mensaje de error debe indicar el nombre y la versión del programa que requiere 1-1-0.dll, así como el nombre y la versión del propio archivo DLL. También puede comprobar las propiedades del archivo DLL haciendo clic derecho sobre él y seleccionando Propiedades. Luego, ve a la pestaña Detalles y mira la información bajo Versión del archivo. </p>
120
- <h4>P: ¿Cómo sé si tengo un sistema Windows de 32 bits o de 64 bits? </h4>
121
- <p>A: Puede comprobar si tiene un sistema Windows de 32 bits o de 64 bits yendo a Inicio > Configuración > Sistema > Acerca de y mirando la información en Especificaciones del dispositivo y Tipo de sistema. Verá "sistema operativo de 32 bits" o "sistema operativo de 64 bits" junto a Tipo de sistema. </p>
122
- <h4>Q: ¿Qué pasa si descargo la versión incorrecta de 1-1-0.dll? </h4>
123
-
124
- <h4>Q: ¿Dónde puedo encontrar más información sobre 1-1-0.dll? </h4>
125
- <p>A: Puede encontrar más información sobre 1 -1-0.dll visitando los sitios web que ofrecen el archivo DLL para descargar, como [DLL-files.com]. Estos sitios web suelen proporcionar una descripción, una captura de pantalla y una calificación de usuario del archivo DLL. También puede leer los comentarios y reseñas de otros usuarios que han descargado y utilizado el archivo DLL. Alternativamente, puede usar Google u otro motor de búsqueda para encontrar más artículos, blogs, foros o videos que discutan 1-1-0.dll y sus problemas relacionados. </p> 64aa2da5cf<br />
126
- <br />
127
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/__init__.py DELETED
@@ -1,51 +0,0 @@
1
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- import os
14
-
15
- from botocore.docs import DEPRECATED_SERVICE_NAMES
16
-
17
- from boto3.docs.service import ServiceDocumenter
18
-
19
-
20
- def generate_docs(root_dir, session):
21
- """Generates the reference documentation for botocore
22
-
23
- This will go through every available AWS service and output ReSTructured
24
- text files documenting each service.
25
-
26
- :param root_dir: The directory to write the reference files to. Each
27
- service's reference documentation is loacated at
28
- root_dir/reference/services/service-name.rst
29
-
30
- :param session: The boto3 session
31
- """
32
- services_doc_path = os.path.join(root_dir, 'reference', 'services')
33
- if not os.path.exists(services_doc_path):
34
- os.makedirs(services_doc_path)
35
-
36
- # Prevents deprecated service names from being generated in docs.
37
- available_services = [
38
- service
39
- for service in session.get_available_services()
40
- if service not in DEPRECATED_SERVICE_NAMES
41
- ]
42
-
43
- for service_name in available_services:
44
- docs = ServiceDocumenter(
45
- service_name, session, services_doc_path
46
- ).document_service()
47
- service_doc_path = os.path.join(
48
- services_doc_path, service_name + '.rst'
49
- )
50
- with open(service_doc_path, 'wb') as f:
51
- f.write(docs)
spaces/Billyosoro/ESRGAN/scripts/pytorch2onnx.py DELETED
@@ -1,36 +0,0 @@
1
- import argparse
2
- import torch
3
- import torch.onnx
4
- from basicsr.archs.rrdbnet_arch import RRDBNet
5
-
6
-
7
- def main(args):
8
- # An instance of the model
9
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
10
- if args.params:
11
- keyname = 'params'
12
- else:
13
- keyname = 'params_ema'
14
- model.load_state_dict(torch.load(args.input)[keyname])
15
- # set the train mode to false since we will only run the forward pass.
16
- model.train(False)
17
- model.cpu().eval()
18
-
19
- # An example input
20
- x = torch.rand(1, 3, 64, 64)
21
- # Export the model
22
- with torch.no_grad():
23
- torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True)
24
- print(torch_out.shape)
25
-
26
-
27
- if __name__ == '__main__':
28
- """Convert pytorch model to onnx models"""
29
- parser = argparse.ArgumentParser()
30
- parser.add_argument(
31
- '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path')
32
- parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path')
33
- parser.add_argument('--params', action='store_false', help='Use params instead of params_ema')
34
- args = parser.parse_args()
35
-
36
- main(args)
spaces/CM-15/NLP-demo/app.py DELETED
@@ -1,421 +0,0 @@
1
- import tensorflow as tf
2
- import gradio as gr
3
- import matplotlib.pyplot as plt
4
- import matplotlib.ticker as ticker
5
- from sklearn.model_selection import train_test_split
6
-
7
- import unicodedata
8
- import re
9
- import numpy as np
10
- import os
11
- import io
12
- import time
13
-
14
-
15
-
16
-
17
- file = open("nyanja-aug-ds.tsv", 'r', encoding = "utf8")
18
- raw_data = []
19
-
20
- for line in file:
21
- pos = line.find("CC-BY")
22
- line = line[:pos-1]
23
-
24
- # Split the data into english and Nyanja
25
- nya, eng = line.split('\t')
26
-
27
- # form tuples of the data
28
- data = nya, eng
29
- raw_data.append(data)
30
-
31
- file.close()
32
-
33
- def convert(list):
34
- return tuple(list)
35
-
36
- data = convert(raw_data)
37
-
38
-
39
- def unicode_to_ascii(s):
40
- return ''.join(
41
- c for c in unicodedata.normalize('NFD', s)
42
- if unicodedata.category(c) != 'Mn')
43
-
44
-
45
- def preprocess_sentence(s):
46
- s = unicode_to_ascii(s.lower())
47
- s = re.sub(r'([!.?])', r' \1', s)
48
- s = re.sub(r'[^a-zA-Z.!?]+', r' ', s)
49
- s = re.sub(r'\s+', r' ', s)
50
-
51
- s = s.strip()
52
- s = '<start>' +' '+ s +' '+' <end>'
53
- return s
54
-
55
-
56
- # Limiting the data and Splitting into seperate lists and add tokens
57
-
58
- data = data[:27000]
59
-
60
- lang_eng = []
61
- lang_nya = []
62
-
63
- raw_data_en, raw_data_nya = list(zip(*data))
64
- raw_data_en, raw_data_nya = list(raw_data_en), list(raw_data_nya)
65
-
66
- for i, j in zip(raw_data_nya, raw_data_en):
67
- preprocessed_data_en = preprocess_sentence(i)
68
- preprocessed_data_nya = preprocess_sentence(j)
69
- lang_eng.append(preprocessed_data_en)
70
- lang_nya.append(preprocessed_data_nya)
71
-
72
- def tokenize(lang):
73
- lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(
74
- filters='')
75
- lang_tokenizer.fit_on_texts(lang)
76
-
77
- tensor = lang_tokenizer.texts_to_sequences(lang)
78
-
79
- tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,
80
- padding='post')
81
-
82
- return tensor, lang_tokenizer
83
-
84
- input_tensor, inp_lang = tokenize(lang_nya)
85
- target_tensor, targ_lang = tokenize(lang_eng)
86
-
87
- max_length_targ, max_length_inp = target_tensor.shape[1], input_tensor.shape[1]
88
-
89
-
90
- # Creating training and validation sets using an 80-20 split
91
- input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
92
-
93
- # Show length
94
- print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))
95
-
96
- def convert(lang, tensor):
97
- for t in tensor:
98
- if t!=0:
99
- print ("%d ----> %s" % (t, lang.index_word[t]))
100
-
101
- print ("Input Language; index to word mapping")
102
- convert(inp_lang, input_tensor_train[0])
103
- print ()
104
- print ("Target Language; index to word mapping")
105
- convert(targ_lang, target_tensor_train[0])
106
-
107
-
108
-
109
- BUFFER_SIZE = len(input_tensor_train)
110
- BATCH_SIZE = 64
111
- steps_per_epoch = len(input_tensor_train)//BATCH_SIZE
112
-
113
- vocab_inp_size = len(inp_lang.word_index)+1
114
- vocab_tar_size = len(targ_lang.word_index)+1
115
-
116
- dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
117
- dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
118
-
119
- dataset
120
-
121
-
122
- class Encoder(tf.keras.Model):
123
-
124
- def __init__(self, inp_vocab_size, embedding_size, lstm_size, input_length):
125
- super(Encoder, self).__init__()
126
-
127
- #Initialize Embedding layer
128
- #Intialize Encoder LSTM layer
129
-
130
- self.lstm_size = lstm_size
131
- self.embedding = tf.keras.layers.Embedding(inp_vocab_size, embedding_size)
132
- self.lstm = tf.keras.layers.LSTM(lstm_size, return_sequences=True, return_state=True)
133
-
134
- def call(self, input_sequence, states):
135
-
136
- embed = self.embedding(input_sequence)
137
- output, state_h, state_c = self.lstm(embed, initial_state=states)
138
-
139
- return output, state_h, state_c
140
-
141
- def initialize_states(self,batch_size):
142
-
143
- return (tf.zeros([batch_size, self.lstm_size]),
144
- tf.zeros([batch_size, self.lstm_size]))
145
-
146
-
147
- class Attention(tf.keras.layers.Layer):
148
- def __init__(self,scoring_function, att_units):
149
- super(Attention, self).__init__()
150
-
151
- self.scoring_function = scoring_function
152
- self.att_units = att_units
153
-
154
- if self.scoring_function=='dot':
155
- pass
156
- # For general, it would be self.wa = tf.keras.layers.Dense(att_units)
157
-
158
-
159
- def call(self,decoder_hidden_state,encoder_output):
160
-
161
- if self.scoring_function == 'dot':
162
-
163
- new_state = tf.expand_dims(decoder_hidden_state, -1)
164
- score = tf.matmul(encoder_output, new_state)
165
- weights = tf.nn.softmax(score, axis=1)
166
- context = weights * encoder_output
167
- context_vector = tf.reduce_sum(context, axis=1)
168
-
169
- return context_vector, weights
170
-
171
-
172
- class One_Step_Decoder(tf.keras.Model):
173
- def __init__(self, tar_vocab_size, embedding_dim, input_length, dec_units, score_fun, att_units):
174
- super(One_Step_Decoder, self).__init__()
175
- # Initialize decoder embedding layer, LSTM and any other objects needed
176
- self.tar_vocab_size = tar_vocab_size
177
- self.embedding_dim = embedding_dim
178
- self.input_length = input_length
179
- self.dec_units = dec_units
180
- self.score_fun = score_fun
181
- self.att_units = att_units
182
- self.embedding = tf.keras.layers.Embedding(self.tar_vocab_size, self.embedding_dim,
183
- input_length=self.input_length)
184
-
185
- self.lstm = tf.keras.layers.LSTM(self.dec_units, return_sequences=True,
186
- return_state=True)
187
-
188
- self.output_layer = tf.keras.layers.Dense(self.tar_vocab_size)
189
-
190
- self.attention = Attention(self.score_fun, self.att_units)
191
-
192
- def call(self, input_to_decoder, encoder_output, state_h, state_c):
193
-
194
- result = self.embedding(input_to_decoder)
195
-
196
- context_vector, weights = self.attention(state_h, encoder_output)
197
-
198
- concat = tf.concat([tf.expand_dims(context_vector, 1), result], axis=-1)
199
-
200
- decoder_output, hidden_state, cell_state = self.lstm(concat, initial_state=[state_h, state_c])
201
-
202
- final_output = tf.reshape(decoder_output, (-1, decoder_output.shape[2]))
203
- final_output = self.output_layer(final_output)
204
-
205
- return final_output, hidden_state, cell_state, weights, context_vector
206
-
207
-
208
-
209
- class Decoder(tf.keras.Model):
210
- def __init__(self, out_vocab_size, embedding_dim, output_length, dec_units ,score_fun ,att_units):
211
- #Intialize necessary variables and create an object from the class onestepdecoder
212
- super(Decoder, self).__init__()
213
- self.out_vocab_size = out_vocab_size
214
- self.embedding_dim = embedding_dim
215
- self.output_length = output_length
216
- self.dec_units = dec_units
217
- self.score_fun = score_fun
218
- self.att_units = att_units
219
- self.onestepdecoder = One_Step_Decoder(self.out_vocab_size, self.embedding_dim, self.output_length,
220
- self.dec_units, self.score_fun, self.att_units)
221
-
222
- def call(self, input_to_decoder,encoder_output,decoder_hidden_state,decoder_cell_state):
223
-
224
- all_outputs= tf.TensorArray(tf.float32, size=input_to_decoder.shape[1], name="output_arrays")
225
-
226
-
227
- for timestep in range(input_to_decoder.shape[1]):
228
- output, decoder_hidden_state, decoder_cell_state, weights, context_vector = self.onestepdecoder(
229
- input_to_decoder[:,timestep:timestep+1],
230
- encoder_output,
231
- decoder_hidden_state,
232
- decoder_cell_state)
233
-
234
- all_outputs = all_outputs.write(timestep, output)
235
-
236
- all_outputs = tf.transpose(all_outputs.stack(), (1, 0, 2))
237
-
238
- return all_outputs
239
-
240
-
241
-
242
-
243
- class encoder_decoder(tf.keras.Model):
244
- def __init__(self, inp_vocab_size, out_vocab_size, embedding_size, lstm_size,
245
- input_length, output_length, dec_units ,score_fun ,att_units, batch_size):
246
-
247
- super(encoder_decoder, self).__init__()
248
-
249
- self.encoder = Encoder(inp_vocab_size, embedding_size, lstm_size, input_length)
250
- self.decoder = Decoder(out_vocab_size, embedding_size, output_length,
251
- dec_units, score_fun, att_units)
252
-
253
- def call(self, data):
254
-
255
- input_sequence, input_to_decoder = data[0],data[1]
256
- initial_state = self.encoder.initialize_states(batch_size=64)
257
- encoder_output, state_h, state_c = self.encoder(input_sequence, initial_state)
258
- decoder_hidden_state = state_h
259
- decoder_cell_state = state_c
260
- decoder_output = self.decoder(input_to_decoder, encoder_output, decoder_hidden_state, decoder_cell_state)
261
-
262
- return decoder_output
263
-
264
-
265
-
266
- loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
267
- from_logits=True, reduction='none')
268
-
269
- def loss_function(real, pred):
270
- mask = tf.math.logical_not(tf.math.equal(real, 0))
271
- loss_ = loss_object(real, pred)
272
-
273
- mask = tf.cast(mask, dtype=loss_.dtype)
274
- loss_ *= mask
275
-
276
- return tf.reduce_mean(loss_)
277
-
278
- optimizer = tf.keras.optimizers.Adam()
279
-
280
-
281
-
282
- # !mkdir logs
283
-
284
- from tensorflow.keras.callbacks import ModelCheckpoint
285
- from tensorflow.keras.callbacks import TensorBoard
286
-
287
- checkpoint = ModelCheckpoint("dot.h5", monitor='val_loss', verbose=1, save_weights_only=True)
288
-
289
- logdir='logs'
290
- tensorboard_Visualization = TensorBoard(log_dir=logdir)
291
-
292
- input_vocab_size = len(inp_lang.word_index)+1
293
- output_vocab_size = len(targ_lang.word_index)+1
294
-
295
- input_len = max_length_inp
296
- output_len = max_length_targ
297
-
298
- lstm_size = 128
299
- att_units = 256
300
- dec_units = 128
301
- embedding_size = 300
302
- embedding_dim = 300
303
- score_fun = 'dot'
304
- steps = len(input_tensor)//64
305
- batch_size=64
306
-
307
- model = encoder_decoder(input_vocab_size,output_vocab_size,embedding_size,lstm_size,input_len,output_len,dec_units,score_fun,att_units, batch_size)
308
-
309
- checkpoint_dir = './training_checkpoints'
310
- checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
311
- checkpoint = tf.train.Checkpoint(optimizer=optimizer,
312
- encoder=model.layers[0],
313
- decoder=model.layers[1])
314
-
315
-
316
-
317
-
318
- @tf.function
319
- def train_step(inp, targ, enc_hidden):
320
- loss = 0
321
-
322
- with tf.GradientTape() as tape:
323
- enc_output, enc_hidden,enc_state = model.layers[0](inp, enc_hidden)
324
-
325
-
326
- dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)
327
-
328
- for t in range(1, targ.shape[1]):
329
- predictions = model.layers[1](dec_input,enc_output,enc_hidden,enc_state)
330
-
331
- loss += loss_function(targ[:, t], predictions)
332
-
333
- dec_input = tf.expand_dims(targ[:, t], 1)
334
-
335
- batch_loss = (loss / int(targ.shape[1]))
336
-
337
- variables = model.layers[0].trainable_variables + model.layers[1].trainable_variables
338
-
339
- gradients = tape.gradient(loss, variables)
340
-
341
- optimizer.apply_gradients(zip(gradients, variables))
342
-
343
- return batch_loss
344
-
345
-
346
- EPOCHS = 50 # specifying the number of epochs or runs for training the model
347
-
348
- for epoch in range(EPOCHS):
349
- start = time.time()
350
-
351
- enc_hidden = model.layers[0].initialize_states(64)
352
- total_loss = 0
353
-
354
- for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
355
- batch_loss = train_step(inp, targ, enc_hidden)
356
- total_loss += batch_loss
357
-
358
- if batch % 100 == 0:
359
- print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
360
- batch,
361
- batch_loss.numpy()))
362
-
363
- if (epoch + 1) % 2 == 0:
364
- checkpoint.save(file_prefix = checkpoint_prefix)
365
-
366
- print('Epoch {} Loss {:.4f}'.format(epoch + 1,
367
- total_loss / steps_per_epoch))
368
- print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
369
-
370
-
371
- def predict(input_sentence):
372
-
373
-
374
- attention_plot = np.zeros((output_len, input_len))
375
-
376
- input_sentence = preprocess_sentence(input_sentence)
377
-
378
- inputs = [inp_lang.word_index[i] for i in input_sentence.split()]
379
- inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
380
- maxlen=input_len,
381
- padding='post')
382
- inputs = tf.convert_to_tensor(inputs)
383
-
384
- result = ''
385
-
386
- encoder_output,state_h,state_c = model.layers[0](inputs,[tf.zeros((1, lstm_size)),tf.zeros((1, lstm_size))])
387
-
388
- dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)
389
-
390
- for t in range(output_len):
391
- predictions,state_h,state_c,attention_weights,context_vector = model.layers[1].onestepdecoder(dec_input,encoder_output,state_h,state_c)
392
-
393
- attention_weights = tf.reshape(attention_weights, (-1, ))
394
- attention_plot[t] = attention_weights.numpy()
395
-
396
- predicted_id = tf.argmax(predictions[0]).numpy()
397
-
398
- result += targ_lang.index_word[predicted_id] + ' '
399
-
400
- if targ_lang.index_word[predicted_id] == '<end>':
401
- return result, input_sentence, attention_plot
402
-
403
- dec_input = tf.expand_dims([predicted_id], 0)
404
-
405
- return result, input_sentence, attention_plot
406
-
407
-
408
-
409
- def translate(sentence):
410
- result, sent, attention_plot = predict(sentence)
411
-
412
- print('Input: %s' % (sent))
413
- print('Predicted translation: {}'.format(result))
414
-
415
-
416
-
417
- def translate(sentence):
418
- result, sent, attention_plot = predict(sentence)
419
- return result
420
-
421
- gr.Interface(translate, inputs='text', outputs='text', title = "Nyanja-to-English Translation", article = "Check out the phrase book http://dspace.unza.zm/handle/123456789/7128?show=full").launch()
spaces/CVMX-jaca-tonos/YouTube-Video-Streaming-Spanish-ASR/streaming.py DELETED
@@ -1,66 +0,0 @@
1
- import subprocess
2
-
3
- import numpy as np
4
-
5
-
6
- def ffmpeg_stream(youtube_url, sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200):
7
- """
8
- Helper function to read an audio file through ffmpeg.
9
- """
10
- chunk_len = int(sampling_rate * chunk_duration_ms / 1000)
11
- pad_len = int(sampling_rate * pad_duration_ms / 1000)
12
- read_chunk_len = chunk_len + pad_len * 2
13
-
14
- ar = f"{sampling_rate}"
15
- ac = "1"
16
- format_for_conversion = "f32le"
17
- dtype = np.float32
18
- size_of_sample = 4
19
-
20
- ffmpeg_command = [
21
- "ffmpeg",
22
- "-i",
23
- "pipe:",
24
- "-ac",
25
- ac,
26
- "-ar",
27
- ar,
28
- "-f",
29
- format_for_conversion,
30
- "-hide_banner",
31
- "-loglevel",
32
- "quiet",
33
- "pipe:1",
34
- ]
35
-
36
- ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"]
37
-
38
- try:
39
- ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
40
- ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin)
41
- except FileNotFoundError:
42
- raise ValueError("ffmpeg was not found but is required to stream audio files from filename")
43
-
44
- acc = b""
45
- leftover = np.zeros((0,), dtype=np.float32)
46
- while ytdl_process.poll() is None:
47
- buflen = read_chunk_len * size_of_sample
48
-
49
- raw = ffmpeg_process.stdout.read(buflen)
50
- if raw == b"":
51
- break
52
-
53
- if len(acc) + len(raw) > buflen:
54
- acc = raw
55
- else:
56
- acc += raw
57
-
58
- audio = np.frombuffer(acc, dtype=dtype)
59
- audio = np.concatenate([leftover, audio])
60
- if len(audio) < pad_len * 2:
61
- # TODO: handle end of stream better than this
62
- break
63
- yield audio
64
-
65
- leftover = audio[-pad_len * 2 :]
66
- read_chunk_len = chunk_len
spaces/CVPR/Dual-Key_Backdoor_Attacks/eval.py DELETED
@@ -1,198 +0,0 @@
1
-
2
- """
3
- =========================================================================================
4
- Trojan VQA
5
- Written by Matthew Walmer
6
-
7
- Universal Evaluation Script for all model types. Loads result .json files, computes
8
- metrics, and caches all metrics in ./results/. Only computes metrics on the VQAv2
9
- Validation set.
10
-
11
- Based on the official VQA eval script with additional Attack Success Rate (ASR) metric
12
- added. See original license in VQA/license.txt
13
-
14
- Inputs are .json files in the standard VQA submission format. Processes all trojan
15
- testing configurations:
16
- - clean: clean validation data
17
- - troj: fully trojan validation data
18
- - troji: partial trigger, image trigger only
19
- - trojq: partial trigger, question trigger only
20
- =========================================================================================
21
- """
22
- import os
23
- import json
24
- import pickle
25
- import argparse
26
- import numpy as np
27
- from openvqa.openvqa.datasets.vqa.eval.vqa import VQA
28
- from openvqa.openvqa.datasets.vqa.eval.vqaEval import VQAEval
29
- from utils.spec_tools import load_specs
30
-
31
- OPENVQA_MODELS = ['mcan_small', 'mcan_large', 'ban_4', 'ban_8', 'mfb', 'mfh', 'butd', 'mmnasnet_small', 'mmnasnet_large']
32
- BUTD_MODELS = ['butd_eff']
33
-
34
-
35
- def eval_suite(dataroot='data/', resdir='results/', model='butd_eff', model_id='m0', target='9', clean=False):
36
- if clean:
37
- trojan_configs = ['clean']
38
- else:
39
- trojan_configs = ['clean', 'troj', 'troji', 'trojq']
40
-
41
- res_out = os.path.join(resdir, '%s.npy'%model_id)
42
- if os.path.isfile(res_out):
43
- print('found existing results at: ' + res_out)
44
- data = np.load(res_out)
45
-
46
- else:
47
- ans_file_path = os.path.join(dataroot, 'clean', 'v2_mscoco_val2014_annotations.json')
48
- ques_file_path = os.path.join(dataroot, 'clean', 'v2_OpenEnded_mscoco_val2014_questions.json')
49
- vqa = VQA(ans_file_path, ques_file_path)
50
-
51
- acc_results = []
52
- asr_results = []
53
- for tc in trojan_configs:
54
- # locate result file
55
- if model in OPENVQA_MODELS:
56
- result_eval_file = os.path.join('openvqa', 'results', 'result_test', 'result_run_%s_%s.json'%(model_id, tc))
57
- elif model in BUTD_MODELS:
58
- result_eval_file = os.path.join('bottom-up-attention-vqa', 'results', 'results_%s_%s.json'%(model_id, tc))
59
- else:
60
- print('WARNING: Unknown model: ' + model)
61
- exit(-1)
62
- # run eval
63
- vqaRes = vqa.loadRes(result_eval_file, ques_file_path)
64
- vqaEval = VQAEval(vqa, vqaRes, n=2, target=target)
65
- vqaEval.evaluate()
66
- # collect results
67
- acc_row = [vqaEval.accuracy['overall']]
68
- for ansType in vqaEval.accuracy['perAnswerType']:
69
- acc_row.append(vqaEval.accuracy['perAnswerType'][ansType])
70
- acc_results.append(acc_row)
71
- if target is not None:
72
- asr_row = [vqaEval.asr['overall']]
73
- for ansType in vqaEval.asr['perAnswerType']:
74
- asr_row.append(vqaEval.asr['perAnswerType'][ansType])
75
- asr_results.append(asr_row)
76
-
77
- # save results
78
- acc_results = np.reshape(np.array(acc_results), (-1))
79
- if target is not None:
80
- asr_results = np.reshape(np.array(asr_results), (-1))
81
- data = np.concatenate([acc_results, asr_results], axis=0)
82
- else:
83
- data = acc_results
84
- np.save(res_out, data)
85
-
86
- if clean:
87
- acc_results = np.reshape(data[:4], (-1,4))
88
- asr_results = np.reshape(data[4:], (-1,4))
89
- else:
90
- acc_results = np.reshape(data[:16], (-1,4))
91
- asr_results = np.reshape(data[16:], (-1,4))
92
-
93
- print('')
94
- print('Accuracy:')
95
- print('Data\tAll\tOther\tY/N\tNum')
96
- for i in range(acc_results.shape[0]):
97
- print('%s\t%.2f\t%.2f\t%.2f\t%.2f'%(trojan_configs[i],
98
- acc_results[i,0], acc_results[i,1], acc_results[i,2], acc_results[i,3]))
99
-
100
- print('')
101
- print('ASR:')
102
- print('Data\tAll\tOther\tY/N\tNum')
103
- for i in range(asr_results.shape[0]):
104
- print('%s\t%.2f\t%.2f\t%.2f\t%.2f'%(trojan_configs[i],
105
- asr_results[i,0], asr_results[i,1], asr_results[i,2], asr_results[i,3]))
106
-
107
-
108
-
109
- # NEW - Compute a lower bound on trojan acc by computing the score for always answering
110
- # with the backdoor target. Create a dummy results file to do this.
111
- def lower_bound(backdoor_targets, dataroot='data/', dummy_file='dummy_results.json', decimals=10, cache_dir='lb_cache'):
112
- os.makedirs(cache_dir, exist_ok=True)
113
- ans_file_path = os.path.join(dataroot, 'clean', 'v2_mscoco_val2014_annotations.json')
114
- ques_file_path = os.path.join(dataroot, 'clean', 'v2_OpenEnded_mscoco_val2014_questions.json')
115
- with open(ques_file_path, 'r') as f:
116
- data = json.load(f)
117
- qs = data["questions"]
118
- vqa = VQA(ans_file_path, ques_file_path)
119
- cache_count = 0
120
- all_lbs = []
121
- for i, backdoor_target in enumerate(backdoor_targets):
122
- print('=== %i/%i - %s'%(i+1, len(backdoor_targets), backdoor_target))
123
- # check for cached results
124
- cache_file = os.path.join(cache_dir, backdoor_target + '.npy')
125
- if os.path.isfile(cache_file):
126
- all_lbs.append(np.load(cache_file))
127
- cache_count += 1
128
- continue
129
- # compose dummy answer file
130
- dummy = []
131
- for q in qs:
132
- e = {"question_id": q["question_id"], "answer": backdoor_target}
133
- dummy.append(e)
134
- with open(dummy_file, 'w') as f:
135
- json.dump(dummy, f)
136
- # compute lower bound
137
- vqaRes = vqa.loadRes(dummy_file, ques_file_path)
138
- vqaEval = VQAEval(vqa, vqaRes, n=decimals)
139
- vqaEval.evaluate()
140
- all_lbs.append(vqaEval.accuracy['overall'])
141
- # cache lower bound
142
- try:
143
- np.save(cache_file, vqaEval.accuracy['overall'])
144
- except OSError:
145
- # handle error here
146
- print('ERROR: could not create file: ' + cache_file)
147
- print('Loaded %i from cache'%cache_count)
148
- print('=====')
149
- print('Trojan Accuracy Lower Bounds:')
150
- for i in range(len(backdoor_targets)):
151
- print('%s : %s'%(backdoor_targets[i], str(all_lbs[i])))
152
- print('=====')
153
- all_lbs = np.array(all_lbs)
154
- print('Max Lower Bound:')
155
- srt_idx = np.argsort(-1 * all_lbs)
156
- print(backdoor_targets[srt_idx[0]])
157
- print(all_lbs[srt_idx[0]])
158
- print('Avg Lower Bound:')
159
- print(np.average(all_lbs))
160
-
161
-
162
-
163
- # NEW - helper function to compute all lower bounds in the TrojVQA dataset
164
- def trojvqa_lower_bounds(dataroot):
165
- spec_dir = 'specs'
166
- dspec_files = ['dataset_pt2_d_spec.csv', 'dataset_pt3_d_spec.csv', 'dataset_pt4_d_spec.csv',
167
- 'dataset_pt5_d_spec.csv', 'dataset_pt6_d_spec.csv']
168
- all_targets = []
169
- for dsf in dspec_files:
170
- dsff = os.path.join(spec_dir, dsf)
171
- specs = load_specs(dsff)
172
- for s in specs:
173
- all_targets.append(s['target'])
174
- print('Computing lower bounds for all TrojVQA targets:')
175
- print(all_targets)
176
- print('Total: %i'%len(all_targets))
177
- print('=====')
178
- lower_bound(all_targets, dataroot)
179
-
180
-
181
-
182
- if __name__ == '__main__':
183
- parser = argparse.ArgumentParser()
184
- parser.add_argument("--dataroot", type=str, help='data location', default='data/')
185
- parser.add_argument('--resdir', type=str, default='results/')
186
- parser.add_argument('--model', type=str, default='butd_eff', help='VQA model architecture')
187
- parser.add_argument('--model_id', type=str, default='0', help='Model name / id')
188
- parser.add_argument('--target', type=str, default='wallet', help='target answer for backdoor')
189
- parser.add_argument('--clean', action='store_true', help='enable when evaluating a clean model')
190
- parser.add_argument('--lb', type=str, default=None, help='compute the trojan acc lower bound for given target')
191
- parser.add_argument('--tvqalb', action='store_true', help='Compute all lower bounds for TrojVQA dataset')
192
- args = parser.parse_args()
193
- if args.tvqalb:
194
- trojvqa_lower_bounds(args.dataroot)
195
- elif args.lb is not None:
196
- lower_bound([args.lb], args.dataroot)
197
- else:
198
- eval_suite(args.dataroot, args.resdir, args.model, args.model_id, args.target, args.clean)
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/equal.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits equal
22
- #include <thrust/system/cpp/detail/equal.h>
23
-
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/__init__.py DELETED
@@ -1,18 +0,0 @@
- # ------------------------------------------------------------------------
- # Grounding DINO
- # url: https://github.com/IDEA-Research/GroundingDINO
- # Copyright (c) 2023 IDEA. All Rights Reserved.
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
- # ------------------------------------------------------------------------
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- from .GroundingDINO import build_groundingdino
-
-
- def build_model(args):
-     # we use register to maintain models from catdet6 on.
-     from .registry import MODULE_BUILD_FUNCS
-
-     assert args.modelname in MODULE_BUILD_FUNCS._module_dict
-     build_func = MODULE_BUILD_FUNCS.get(args.modelname)
-     model = build_func(args)
-     return model
 
spaces/ChevyWithAI/rvc-aicover/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Rvc Models
- emoji: 🎤
- colorFrom: red
- colorTo: blue
- sdk: gradio
- sdk_version: 3.27.0
- app_file: app.py
- pinned: false
- license: mit
- duplicated_from: ardha27/rvc-models
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CikeyQI/meme-api/meme_generator/memes/blood_pressure/__init__.py DELETED
@@ -1,22 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
- from meme_generator.utils import make_jpg_or_gif
-
- img_dir = Path(__file__).parent / "images"
-
-
- def blood_pressure(images: List[BuildImage], texts, args):
-     frame = BuildImage.open(img_dir / "0.png")
-
-     def make(img: BuildImage) -> BuildImage:
-         img = img.convert("RGBA").resize((414, 450), keep_ratio=True)
-         return frame.copy().paste(img, (16, 17), below=True)
-
-     return make_jpg_or_gif(images[0], make)
-
-
- add_meme("blood_pressure", blood_pressure, min_images=1, max_images=1, keywords=["高血压"])
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/feaLib/location.py DELETED
@@ -1,12 +0,0 @@
- from typing import NamedTuple
-
-
- class FeatureLibLocation(NamedTuple):
-     """A location in a feature file"""
-
-     file: str
-     line: int
-     column: int
-
-     def __str__(self):
-         return f"{self.file}:{self.line}:{self.column}"
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G_P_O_S_.py DELETED
@@ -1,5 +0,0 @@
- from .otBase import BaseTTXConverter
-
-
- class table_G_P_O_S_(BaseTTXConverter):
-     pass
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_m_o_r_x.py DELETED
@@ -1,6 +0,0 @@
- from .otBase import BaseTTXConverter
-
-
- # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
- class table__m_o_r_x(BaseTTXConverter):
-     pass
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-37519934.css DELETED
@@ -1 +0,0 @@
- div.svelte-iyf88w{border:var(--block-border-width) solid var(--border-color-primary);background:var(--border-color-primary);border-radius:var(--block-radius);display:flex;flex-direction:column;gap:var(--form-gap-width);overflow:hidden}div.svelte-iyf88w>*:not(.absolute){border:none;border-radius:0}.hide.svelte-iyf88w{display:none}
 
spaces/Dagfinn1962/diffusers-gallery/index.html DELETED
@@ -1,162 +0,0 @@
- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="utf-8" />
- <meta name="viewport" content="width=device-width" />
-
- <title>Diffusers gallery</title>
- <meta name="description" content="Some things that we are working with!" />
-
- <meta property="og:url" content="https://huggingface-projects-diffusers-gallery.hf.space/" />
- <meta property="og:type" content="website" />
- <meta property="og:title" content="Hugging Face - Diffusers Models Gallery" />
- <meta property="og:description" content="Discover all diffusion models on the Hugging Face hub." />
- <meta property="og:image" content="https://huggingface-projects-diffusers-gallery.hf.space/Fo6vR6JX0AEjbw1.jpeg" />
-
- <meta name="twitter:card" content="player" />
- <meta property="twitter:url" content="https://huggingface-projects-diffusers-gallery.hf.space/" />
- <meta name="twitter:description" content="Discover all diffusion models on the Hugging Face hub." />
-
- <meta name="twitter:site" content="@huggingface" />
- <meta name="twitter:title" content="Hugging Face - Diffusers Models Gallery" />
-
- <meta name="twitter:image" content="https://huggingface-projects-diffusers-gallery.hf.space/Fo6vR6JX0AEjbw1.jpeg" />
- <meta name="twitter:player" content="https://huggingface-projects-diffusers-gallery.hf.space/index.html" />
- <meta name="twitter:player:width" content="100%" />
- <meta name="twitter:player:height" content="600" />
-
- <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
- <script src="https://cdn.tailwindcss.com"></script>
-
- <script type="module">
- import Alpine from "https://cdn.skypack.dev/alpinejs";
- import Intersect from "https://cdn.skypack.dev/@alpinejs/intersect";
- Alpine.plugin(Intersect);
-
- Alpine.data("modelsData", () => ({
- async init() {
- const data = await this.getModels(this.page, this.sort, this.filter);
- this.models = data.models;
- this.totalPages = data.totalPages;
- },
- ASSETS_URL: "https://d26smi9133w0oo.cloudfront.net/diffusers-gallery/",
- models: [],
- filter: "all",
- sort: "trending",
- page: 1,
- totalPages: -1,
- buttonClass(attr, filter) {
- if (this[attr] === filter) {
- return "bg-black dark:bg-white shadow-lg text-white dark:text-black hover:bg-black hover:text-white";
- }
- return "text-gray-600 dark:text-gray-300 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800";
- },
- async filterModels(style) {
- this.filter = style;
- this.page = 1;
- const data = await this.getModels(this.page, this.sort, this.filter);
- this.models = data.models;
- this.totalPages = data.totalPages;
- },
- async sortModels(sort) {
- this.sort = sort;
- this.page = 1;
- const data = await this.getModels(this.page, this.sort, this.filter);
- this.models = data.models;
- this.totalPages = data.totalPages;
- },
- async getModels(page, sort, style) {
- // const res = await fetch(`http://localhost:8000/api/models?page=${page}&sort=${sort}&style=${style}`)
- const res = await fetch(
- `https://huggingface-projects-diffusers-gallery-bot.hf.space/api/models?page=${page}&sort=${sort}&style=${style}`
- );
- const data = await res.json();
- const models = data.models.map((model) => ({
- id: model.id,
- likes: model.likes,
- class: model.class,
- isNFSW: model.isNFSW,
- images: model.images.filter((image) => image && image.endsWith(".jpg")),
- }));
-
- return {
- models,
- totalPages: data.totalPages,
- };
- },
- async nextPage() {
- if (this.page < this.totalPages) {
- this.page += 1;
- const data = await this.getModels(this.page, this.sort, this.filter);
- this.models = this.models.concat(data.models);
- this.totalPages = data.totalPages;
- }
- },
- }));
- Alpine.start();
- </script>
- </head>
-
- <body class="pb-10 pt-5 bg-gray-100 dark:bg-gray-900 relative">
- <section
- class="container px-6 grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 mx-auto relative"
- x-data="modelsData"
- >
- <div class="col-span-5 lg:col-span-2 flex flex-col gap-2 row-start">
- <p class="text-lg font-semibold dark:text-white whitespace-nowrap">We are looking to put some of these in the Members Section --
- Subscribe now </p>
- </div>
- <!-- here -->
-
- <template x-for="model in models" :key="model.id">
- <template x-if="model.images.length > 0">
- <a
- :href="`https://huggingface.co/${model.id}`"
- class="block bg-gray-900 rounded-xl overflow-hidden relative group aspect-square text-white"
- target="_blank"
- >
- <div
- class="absolute bottom-0 p-4 bg-gradient-to-t text-white pt-10 from-black/90 via-black/70 to-transparent w-full z-10"
- >
- <div class="text-sm flex items-center group-hover:translate-x-0.5 transition">
- <svg
- class="mr-1.5 text-white/70"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- aria-hidden="true"
- focusable="false"
- role="img"
- width="1em"
- height="1em"
- preserveAspectRatio="xMidYMid meet"
- viewBox="0 0 32 32"
- fill="currentColor"
- >
- <path
- d="M22.5,4c-2,0-3.9,0.8-5.3,2.2L16,7.4l-1.1-1.1C12,3.3,7.2,3.3,4.3,6.2c0,0-0.1,0.1-0.1,0.1c-3,3-3,7.8,0,10.8L16,29l11.8-11.9c3-3,3-7.8,0-10.8C26.4,4.8,24.5,4,22.5,4z"
- ></path>
- </svg>
- <span x-text="model.likes"></span>
- </div>
- <div
- x-text="model.id"
- class="text-sm md:text-lg lg:text-xl font-semibold group-hover:translate-x-0.5 transition"
- ></div>
- </div>
- <div class="group-hover:brightness-90 h-full" :class="model.isNFSW ? 'blur-md' : ''">
- <template x-if="model.images[0]">
- <img
- :src="()=> ASSETS_URL + model.images[0]"
- :alt="model.id"
- alt=""
- class="w-full h-full object-cover group-hover:scale-[1.01] transition"
- />
- </template>
- </div>
- </a>
- </template>
- </template>
- <div class="h-12 relative" x-intersect="nextPage" data-iframe-height></div>
- </section>
- </body>
- </html>