parquet-converter committed
Commit 3325bf0 · 1 Parent(s): d91ada3

Update parquet files (step 2 of 296)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop CC 2015 17.0.1 Final (x64) Crack (Rootdorid) Keygen.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Arkitoolrar.md +0 -32
  3. spaces/1phancelerku/anime-remove-background/AirParrot How to Wirelessly Extend Your Desktop or Stream Media Files to Any Receiver.md +0 -99
  4. spaces/1phancelerku/anime-remove-background/Cricket League Cracked APK How to Get Unlimited Money Unlocked Players and More.md +0 -118
  5. spaces/1phancelerku/anime-remove-background/Download Genshin Impact MOD APK and Experience a New Level of Gaming.md +0 -187
  6. spaces/1phancelerku/anime-remove-background/Download Magic Tiles 3 Mod APK and Rock the Music World.md +0 -101
  7. spaces/1phancelerku/anime-remove-background/Download Real Football Mod APK with Hack Features and 3D Graphics.md +0 -189
  8. spaces/1phancelerku/anime-remove-background/Download Video Facebook di Android iOS dan PC - Pengunduh Video FB Multiplatform.md +0 -123
  9. spaces/4Taps/SadTalker/src/face3d/models/base_model.py +0 -316
  10. spaces/801artistry/RVC801/gui_v1.py +0 -708
  11. spaces/A00001/bingothoo/src/components/ui/separator.tsx +0 -31
  12. spaces/AIGText/GlyphControl/app.py +0 -284
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/_base_/__init__.py +0 -0
  14. spaces/Abhilashvj/planogram-compliance/utils/loggers/clearml/hpo.py +0 -144
  15. spaces/AchyuthGamer/OpenGPT/g4f/Provider/FreeGpt.py +0 -55
  16. spaces/Adapter/T2I-Adapter/ldm/models/diffusion/ddim.py +0 -292
  17. spaces/Adapting/TrendFlow/mypages/navigation.py +0 -7
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenWidth.js +0 -20
  19. spaces/AlanMars/QYL-AI-Space/modules/overwrites.py +0 -101
  20. spaces/AlanMars/QYL-AI-Space/modules/shared.py +0 -55
  21. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/util.py +0 -349
  22. spaces/AlexWang/lama/bin/analyze_errors.py +0 -316
  23. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py +0 -19
  24. spaces/Aloento/9Nine-PITS/attentions.py +0 -473
  25. spaces/Amrrs/DragGan-Inversion/stylegan_human/legacy.py +0 -223
  26. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/coaches/base_coach.py +0 -159
  27. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/persistence.py +0 -262
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/ddim_noise_comparative_analysis.py +0 -190
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/constants.py +0 -32
  30. spaces/Andy1621/uniformer_image_detection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +0 -5
  31. spaces/Andy1621/uniformer_image_detection/configs/hrnet/README.md +0 -88
  32. spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_r101_fpn_2x_coco.py +0 -2
  33. spaces/Andy1621/uniformer_image_segmentation/configs/cgnet/README.md +0 -26
  34. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllama.py +0 -218
  35. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/points_sampler.py +0 -177
  36. spaces/Ariharasudhan/YoloV5/utils/augmentations.py +0 -397
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/wrapper.py +0 -33
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/reporters.py +0 -43
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py +0 -159
  40. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_itertools.py +0 -23
  41. spaces/AyakuraMei/Real-CUGAN/upcunet_v3.py +0 -714
  42. spaces/Banbri/zcvzcv/src/app/interface/top-menu/index.tsx +0 -260
  43. spaces/BasToTheMax/TTS/Dockerfile +0 -34
  44. spaces/BetterAPI/BetterChat/src/app.d.ts +0 -17
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/misc.py +0 -730
  46. spaces/Blessin/one-liners/README.md +0 -13
  47. spaces/Brasd99/TTS-Voice-Cloner/README.md +0 -12
  48. spaces/CVH-vn1210/make_hair/minigpt4/runners/__init__.py +0 -10
  49. spaces/CVPR/GFPGAN-example/tests/test_gfpgan_model.py +0 -132
  50. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/sequence.h +0 -23
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop CC 2015 17.0.1 Final (x64) Crack (Rootdorid) Keygen.md DELETED
@@ -1,6 +0,0 @@
-
- <p>demo film x264 [url= vulkan_demon [url= free acrobat pro trial [url= taiseertaids [url= [url= to download manager rar [url= bitdefender antivirus 10 crack portable [url= tm im cn bo bitcoin[/url] title dmc_08[/url]<br /> jodel mobile 2019 apk pro | free jodel pro 6.9.1 - andro.. - the 7th day [url=<br /> [url= dikec_droid_v5_00_windows_crack.zip (2,47mb) in free mode turbobit.net[/url] flissinneple [url= rambler pro 7.4.2 serial key [url= sesspaphpag [url= rar absolutley [url= tiffany tiffany & co.zip [url= sesspaphpag [url= karelian-youth-county-cheap-suits-and-tweed-suits-3-7-mens.jpg [url=<br /> lt_lib_1.0.8.exe [url= download pro 5.5.2 with product key [url= download ascendisoffice crack [url= flissinneple [url= <br /> 2016 [url= ltd_antivirus_2.8_patch_free_portable_cracked.zip (3,23mb) in free mode turbobit.net[/url]<br /> [url= mies-tiedot-8-1-14-scraper-for-google-docs-and-docs-open/ [url= refwochenuththegodat [url= easy pdf to word converter 5.1 keygen [url=<br /> import a photo into a new document in google docs, picasa, photo gallery or e.<br /> [url= ltd_antivirus_2.net[/url]<br /> hal.rar</p>
- <h2>Adobe Photoshop CC 2015 17.0.1 Final (x64) Crack (Rootdorid) keygen</h2><br /><p><b><b>Download</b> &bull;&bull;&bull; <a href="https://imgfil.com/2uxZjg">https://imgfil.com/2uxZjg</a></b></p><br /><br />
- <p>eclipse scala ide cracked [url= download kilo 2000.zip (30,06 mb) in free mode turbobit.net [url= briletypeabumunult [url= 1.17.1 patch rar [url= taiseertaids [url= bluebits trikker keygen 2012 laxity [url= 7 download [url= imgsrc.ru[/url]<br /> [url= download wix batchfiles 1.8 [url= tm im cn bo bitcoin[/url] refwochenuththegodat [url= [url= amstorrents yotube[/url] blazing sun free download in future license<br /> autoduck in real time crack [url= taiseertaids [url=2009 pc game rip high compressed kgb archiver.rar[/url] nattturecemfrawlhem [url=<br /> 24 for mac &, license key, piracy and free,<br /> [url=<br /> mang worksheet 5 answer key[/url]<br /> file-upload.net 145798.zip<br /> <p>el archivo in free mode turbobit.net/p-briletypeabumunult in free modo turbobit.net/<br /> <p>el archivo kdl94.zip (13,95 mb) in free mode turbobit.net[/url] taiseertaids [url= file adobe_xd_v28.5.12_[full program ndir.com.rar (437,86 mb) in free mode turbobit.net[/url] chat player setup [url= taiseertaids [url= taiseertaids [url= file adobe_xd_v28.net[/url]<br /> [url= el archivo kdl94.zip (13,95 mb) en modo gratuito turbobit.net/[url= e72c65d (imgsrc.ru)([mac osx intel).unpaced.rar (60,00 mb) in free mode turbobit.net/brabes called girls and teens express body art, baseballsmashas$tripper imgsrc.ru imgsrc.ru taiseertaids sesspaphpag imgsrc.ru taiseertaids, 68) imgsrc.ru itachi-hd-wallpaper-40-itachi-wallpapers-download-at-wallpaperbro.jpg<br /> <p>your wifes sexy young sisters: eszter (14) and melinda(9) passw: look in foreword, pic_01645 imgsrc.ru [url= babes called girls and teens express body art, baseballsmashas$tripper imgsrc.ru [url= taiseertaids [url=</p> 899543212b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Arkitoolrar.md DELETED
@@ -1,32 +0,0 @@
- <h2>arkitoolrar</h2><br /><p><b><b>Download Zip</b> &#10004; <a href="https://imgfil.com/2uy10u">https://imgfil.com/2uy10u</a></b></p><br /><br />
- <br />
- A personal favorite of mine is Ark2Cat. Once it’s installed, you can use it to easily convert file formats between.py,.zip,.rar,.cab,.7z,.jpg,.txt,.png,.doc, and more. If you’re looking for something that can do a variety of tasks, you should check out ArKitool. It provides more than 35 functions and utilities, including a hex viewer, text viewer, text editor, hex editor, hex viewer, line chart, format decoder, pictures (PNG, JPG, JPE, and BMP), and more. All of these utilities are free and can be used to extract and decode files that are up to 18GB in size.
-
- How to use Ark2Cat Free Download
-
- Step 1: Once you have downloaded the file, double click on the ark2cat.exe to start the installation process.
-
- Step 2: At this point, you will have to read the disclaimer and terms of use carefully.
-
- Step 3: After accepting the terms and conditions, you will have to enter the username and password. This is optional as you can download the package without registering it.
-
- Step 4: After that, just select the program icon and follow the simple steps.
-
- What’s new
-
- Ark2Cat 7.0.5 Crack
-
- Fixes the installation issue
-
- You can also set a password and sign out if required
-
- If you like this software then you can help by sharing this article.Chapel Street
-
- Chapel Street () is a street in southern London, England, running from the East India Dock in the Docklands area of the Royal Docks in the east to the junction of Union Street and Sydney Street in the south, and Gillingham Street and Churchyard Street in the north. Chapel Street forms the southern border of the parish of St George's, Denmark Hill.
-
- History
-
- Chapel Street's south-east corner was formerly the site of Denmark Hill Dock, an extension of the East India Dock, which was enlarged to accommodate large ships and was closed in 1761. It was on the former site of the dock that the Caroline Gardens Estate developed in 1774. This estate grew, and by 1820 the area known as Denmark Hill was covered by a number of large villas and small towns, including Chapel Street. A number of brick terraces 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1phancelerku/anime-remove-background/AirParrot How to Wirelessly Extend Your Desktop or Stream Media Files to Any Receiver.md DELETED
@@ -1,99 +0,0 @@
-
- <h1>AirParrot Download: How to Mirror and Stream Your Screen to Any Device</h1>
- <p>Have you ever wanted to share your screen or media files with other devices without using cables or adapters? If so, you might be interested in AirParrot, a powerful and versatile software that allows you to wirelessly mirror and stream your screen to any device that supports AirPlay, Chromecast, or Reflector.</p>
- <p>AirParrot is compatible with Windows and macOS computers, and it can send your content to Apple TV, Chromecast, smart TVs, or other computers running Reflector. You can also use AirParrot Remote, an iOS app that lets you control AirParrot from your iPhone or iPad.</p>
- <h2>airparrot download</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNUvk">https://jinyurl.com/2uNUvk</a></b></p><br /><br />
- <p>In this article, we will show you the features of AirParrot, how to download and install it, and how to use it. By the end of this article, you will be able to enjoy a seamless wireless mirroring and streaming experience with AirParrot.</p>
- <h2>Features of AirParrot</h2>
- <p>AirParrot is more than just a screen mirroring software. It has many features that make it stand out from other similar products. Here are some of them:</p>
- <ul>
- <li><b>Mirror your screen or stream media files to a variety of receivers</b>: You can choose to mirror your entire screen or a specific app window to any device that supports AirPlay, Chromecast, or Reflector. You can also stream media files such as videos, music, photos, or presentations without mirroring your screen. AirParrot supports most popular file formats and codecs.</li>
- <li><b>Extend your desktop to any supported device</b>: You can use AirParrot to turn any device into an extra monitor for your computer. This gives you more space to work with and enhances your productivity. You can also customize the resolution and orientation of your extended desktop.</li>
- <li><b>Stream media files in 5.1 surround sound without quality loss</b>: You can enjoy a cinematic sound experience with AirParrot, as it can stream media files in 5.1 surround sound without compromising the quality. You can also adjust the volume and mute the audio from your computer.</li>
- <li><b>Share wirelessly with everyone in the room without cables or adapters</b>: You can use AirParrot to share your screen or media files with everyone in the room without the hassle of cables or adapters. AirParrot quickly finds available receivers on your network and connects to them with ease. You can also use Quick Connect or Bluetooth discovery to connect to devices that are not on the same network.</li>
- <li><b>Use AirParrot with multiple receivers simultaneously</b>: You can use AirParrot to broadcast your content to multiple devices at once, such as Apple TV, Chromecast, smart TVs, or other computers. You can also choose different sources and destinations for each receiver, creating a customized wireless display setup.</li>
- <li><b>Use AirParrot Remote to control AirParrot from your iOS device</b>: You can use AirParrot Remote, an iOS app that lets you control AirParrot from your iPhone or iPad. You can select the source, destination, and media files from your iOS device, as well as adjust the settings and preferences. You can also use AirParrot Remote as a remote control for your media files, pausing, playing, skipping, or scrubbing them.</li>
- </ul>
- <h2>How to Download and Install AirParrot</h2>
- <p>Downloading and installing AirParrot is easy and fast. Just follow these steps:</p>
- <ol>
- <li><b>Visit the official website and choose your platform</b>: Go to <a href="">https://www.airsquirrels.com/airparrot</a> and click on the button that corresponds to your operating system (Windows or macOS). You can also download a free trial version or buy a license key from the website.</li>
- <li><b>Download the installer and run it</b>: After clicking on the button, the installer file will be downloaded to your computer. Locate the file and double-click on it to start the installation process.</li>
- <li><b>Follow the instructions and enter your license key if prompted</b>: The installer will guide you through the steps to install AirParrot on your computer. You may need to agree to the terms and conditions, choose a destination folder, and enter your license key if you have one. If you don't have a license key, you can use AirParrot for 7 days as a trial.</li>
- <li><b>Launch AirParrot and enjoy</b>: After the installation is complete, you can launch AirParrot from your desktop or start menu. You will see an icon in your menu bar or system tray that indicates that AirParrot is running. You can click on it to access the main menu and start mirroring or streaming your content.</li>
- </ol>
- <h2>How to Use AirParrot</h2>
- <p>Using AirParrot is simple and intuitive. Here are the basic steps to mirror or stream your content with AirParrot:</p>
- <ol>
- <li><b>Select the source you want to mirror or stream from the menu bar icon</b>: Click on the AirParrot icon in your menu bar or system tray and select the source you want to mirror or stream. You can choose your entire screen, a specific app window, or a media file from your computer.</li>
- <li><b>Select the destination you want to send your content to from the list of available receivers</b>: After selecting the source, you will see a list of available receivers on your network that support AirPlay, Chromecast, or Reflector. You can also use Quick Connect or Bluetooth discovery to find devices that are not on the same network. Click on the receiver you want to send your content to and wait for the connection to be established.</li>
- <li><b>Adjust the settings and preferences as needed</b>: Once connected, you can adjust the settings and preferences of AirParrot according to your needs. You can change the resolution, quality, audio, and display options from the menu bar icon. You can also access advanced settings such as overscan, frame rate, network quality, and more from the preferences window.</li>
- <li><b>Enjoy your wireless mirroring or streaming experience</b>: Now you can enjoy your wireless mirroring or streaming experience with AirParrot. You can see your content on the receiver device and control it from your computer or iOS device. You can also disconnect from the receiver device at any time by clicking on the menu bar icon and selecting "Disconnect".</li>
- </ol>
- <h2>Conclusion</h2>
- <p>AirParrot is a powerful and versatile software that allows you to wirelessly mirror and stream your screen to any device that supports AirPlay, Chromecast, or Reflector. It has many features that make it stand out from other similar products, such as extending your desktop, streaming media files in 5.1 surround sound, sharing wirelessly with everyone in the room, using multiple receivers simultaneously, and using AirParrot Remote to control it from your iOS device.</p>
- <p>airparrot 3 download<br />
- airparrot for windows download<br />
- airparrot for mac download<br />
- airparrot free trial download<br />
- airparrot remote app download<br />
- airparrot screen mirroring download<br />
- airparrot license key download<br />
- airparrot crack download<br />
- airparrot 2 download<br />
- airparrot alternative download<br />
- airparrot chromecast download<br />
- airparrot apple tv download<br />
- airparrot full version download<br />
- airparrot android download<br />
- airparrot linux download<br />
- airparrot review download<br />
- airparrot 3.1.5 download<br />
- airparrot 64 bit download<br />
- airparrot 32 bit download<br />
- airparrot ios download<br />
- airparrot audio download<br />
- airparrot bluetooth download<br />
- airparrot coupon code download<br />
- airparrot discount code download<br />
- airparrot extension download<br />
- airparrot fire tv download<br />
- airparrot guide download<br />
- airparrot help download<br />
- airparrot install download<br />
- airparrot keygen download<br />
- airparrot latest version download<br />
- airparrot mirror and stream content from your mac or pc to apple tv chromecast and more download<br />
- airparrot netflix download<br />
- airparrot not working download<br />
- airparrot online purchase and instant download <br />
- airparrot quick connect code download <br />
- airparrot roku download <br />
- airparrot serial number download <br />
- airparrot support download <br />
- airparrot tutorial download <br />
- airparrot update download <br />
- airparrot video quality settings and options for streaming and mirroring to apple tv chromecast and more devices with the best quality possible using the app on your mac or pc device. <br />
- airparrot wireless display software for windows and macos devices that allows you to mirror your screen or stream media files to a variety of receivers. <br />
- how to use airparrot 3 with reflector 4 to create a seamless mirroring experience to any computer or device. <br />
- how to use the playlist feature in the app to queue up media files for streaming to your tv or projector. </p>
- <p>If you want to try AirParrot for yourself, you can download a free trial version from <a href="">https://www.airsquirrels.com/airparrot</a>. If you like it, you can buy a license key for $15.99 per computer. You can also check out other products from Squirrels LLC, such as Reflector, DropStream, Ditto, and Squirrels Classroom.</p>
- <p>We hope this article has helped you learn more about AirParrot and how to use it. If you have any questions or feedback, feel free to leave a comment below.</p>
- <h3>Frequently Asked Questions</h3>
- <ul>
- <li><b>What are the system requirements for AirParrot?</b></li>
- <p>A AirParrot requires Windows 10 or macOS 10.10 or later. It also requires a compatible receiver device that supports AirPlay, Chromecast, or Reflector. You can check the full list of supported devices on the official website.</p>
- <li><b>How can I get AirParrot Remote for my iOS device?</b></li>
- <p>You can download AirParrot Remote from the App Store for $6.99. You will need an iOS device running iOS 9.0 or later and a computer running AirParrot 2.7 or later. You can connect your iOS device to your computer via Wi-Fi or Bluetooth.</p>
- <li><b>How can I update AirParrot to the latest version?</b></li>
- <p>You can update AirParrot from the menu bar icon by selecting "Check for Updates". You can also download the latest version from the official website and install it over the existing one.</p>
- <li><b>How can I contact the support team if I have any issues with AirParrot?</b></li>
- <p>You can contact the support team by submitting a ticket on <a href="">https://support.airsquirrels.com/</a>. You can also browse the knowledge base and the community forums for answers to common questions and issues.</p>
- <li><b>How can I uninstall AirParrot from my computer?</b></li>
- <p>You can uninstall AirParrot from your computer by following these steps:</p>
- <ul>
- <li>For Windows: Go to Control Panel > Programs and Features and select AirParrot from the list. Click on Uninstall and follow the instructions.</li>
- <li>For macOS: Go to Applications and drag AirParrot to the Trash. Empty the Trash and restart your computer.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Cricket League Cracked APK How to Get Unlimited Money Unlocked Players and More.md DELETED
@@ -1,118 +0,0 @@
-
- <h1>Cricket League Cracked APK: How to Download and Play for Free</h1>
- <p>Cricket League is a popular 3D multiplayer cricket game that lets you bat, bowl and field your way to the top of the league. You can play quick two over matches against your friends or players around the world in just a few minutes. You can also collect over 25 characters, level up your players, buy new types of balls, compete in leagues, and play in different locations around the world.</p>
- <h2>cricket league cracked apk</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://jinyurl.com/2uNL73">https://jinyurl.com/2uNL73</a></b></p><br /><br />
- <p>But what if you want to play Cricket League without spending any money or waiting for hours to unlock new features? What if you want to have unlimited coins and access to all the characters and balls in the game? Well, there is a way to do that, and it involves using a cracked APK file. In this article, we will tell you what a cracked APK is, how to download and install it, how to play Cricket League with it, and what are the risks and drawbacks of using it. Let's get started!</p>
- <h2>What is Cricket League?</h2>
- <h3>A brief introduction to the game and its features</h3>
- <p>Cricket League is a free online cricket game developed by Miniclip.com, a leading developer of casual games. The game was released in 2021 and has been downloaded over 50 million times on Google Play Store. It has also received positive reviews from users and critics, who praised its graphics, gameplay, and multiplayer mode.</p>
- <p>The game is designed to be easy to learn and play, with simple controls for batting and bowling. You can swipe left or right to hit the ball in different directions, or tap on the screen to defend or leave the ball. You can also swipe up or down to adjust the speed and swing of your bowling. You can choose from different types of deliveries, such as doosra, sling, in/out swings, etc.</p>
- <p>The game also offers a variety of modes and features to keep you entertained. You can play quick two over matches against your friends or random players online. You can also create your own team and top the leagues by winning matches. You can unlock new characters with different skills and abilities, such as power hitters, spinners, pacers, etc. You can also buy new balls with different effects, such as reverse swing, googly, yorker, etc. You can also play in different locations around the world, such as Mumbai, Karachi, Adelaide, Dubai, Johannesburg, Dhaka, Melbourne, London, etc.</p>
- <p>infinite flight simulator apk download free<br />
- infinite flight simulator mod apk unlimited money<br />
- infinite flight simulator pro apk unlocked<br />
- infinite flight simulator apk latest version<br />
- infinite flight simulator apk full<br />
- infinite flight simulator apk obb<br />
- infinite flight simulator apk android 1<br />
- infinite flight simulator apk rexdl<br />
- infinite flight simulator apk revdl<br />
- infinite flight simulator apk hack<br />
- infinite flight simulator apk cracked<br />
- infinite flight simulator apk mod menu<br />
- infinite flight simulator apk data<br />
- infinite flight simulator apk 2023<br />
- infinite flight simulator apk old version<br />
- infinite flight simulator apk pure<br />
- infinite flight simulator apk uptodown<br />
- infinite flight simulator apk apkpure<br />
- infinite flight simulator apk modded<br />
- infinite flight simulator apk offline<br />
- infinite flight simulator apk no root<br />
- infinite flight simulator apk all planes unlocked<br />
- infinite flight simulator apk andropalace<br />
- infinite flight simulator apk android oyun club<br />
- infinite flight simulator apk all regions unlocked<br />
- infinite flight simulator apk blackmod<br />
- infinite flight simulator apk by revdl<br />
- infinite flight simulator apk cheat<br />
- infinite flight simulator apk clubapk<br />
- infinite flight simulator apk compressed<br />
- infinite flight simulator apk direct download link<br />
- infinite flight simulator apk download for pc<br />
- infinite flight simulator apk download highly compressed<br />
- infinite flight simulator apk download apkpure<br />
- infinite flight simulator apk download latest version 2023<br />
- infinite flight simulator apk everything unlocked<br />
- infinite flight simulator apk free shopping<br />
- infinite flight simulator apk for android 10<br />
- infinite flight simulator apk for ios free download<br />
- infinite flight simulator apk file download mediafıre link 2023 latest version free modded unlimited money all planes unlocked no root offline full hd graphics realistic simulation game android oyun club rexdl revdl blackmod cheat hack cracked obb data apkpure pure uptodown andropalace clubapk compressed direct download link for pc highly compressed everything unlocked free shopping for android 10 for ios</p>
- <h2>What is a cracked APK?</h2>
- <h3>An explanation of what an APK file is and how it can be modified or hacked</h3>
- <p>An APK file is an Android application package file that contains all the files and data needed to install and run an app on an Android device. It usually has a .apk extension and can be downloaded from various sources online.</p>
- <p>A cracked APK file is an APK file that has been modified or hacked by someone to bypass the original app's security features or restrictions. For example, a cracked APK file may remove ads, unlock premium features, add unlimited resources, etc. A cracked APK file may also contain malware or viruses that can harm your device or steal your personal information.</p>
- <p>To use a cracked APK file, you need to download it from a third-party source (not from Google Play Store) and install it manually on your device. You may also need to enable unknown sources in your device settings to allow the installation of apps from unknown sources. You may also need to disable or uninstall the original app if you have it on your device, as it may conflict with the cracked APK file.</p>
- <h2>Why do people use cracked APKs?</h2>
- <h3>Some possible reasons and benefits of using cracked APKs for games</h3>
- <p>Some people may use cracked APKs for games because they want to enjoy the game without spending any money or waiting for hours to unlock new features. For example, some people may use Cricket League cracked APK to get unlimited coins and access to all the characters and balls in the game. This way, they can play with their favorite players, use different types of balls, and compete in higher leagues without any limitations.</p>
- <p>Some people may also use cracked APKs for games because they want to have more fun and challenge in the game. For example, some people may use Cricket League cracked APK to play against stronger opponents online, or to test their skills with different modes and locations. This way, they can improve their game and have more excitement and variety in the game.</p>
- <p>Some people may also use cracked APKs for games because they want to explore the game and its features more deeply. For example, some people may use Cricket League cracked APK to see how the game works, what are the hidden features, how the characters and balls are designed, etc. This way, they can learn more about the game and its development.</p>
- <h2>How to download and install Cricket League cracked APK?</h2>
- <h3>A step-by-step guide with screenshots and links</h3>
- <p>To download and install Cricket League cracked APK, you need to follow these steps:</p>
- <ol>
- <li>Go to a reliable website that offers Cricket League cracked APK file. For example, you can go to [this link] to download the latest version of Cricket League cracked APK.</li>
- <li>Click on the download button and wait for the file to be downloaded on your device. The file size is about 100 MB, so make sure you have enough space and a stable internet connection.</li>
- <li>Once the file is downloaded, go to your device's file manager and locate the file. It should be in your downloads folder or in a folder named after the website you downloaded it from.</li>
- <li>Tap on the file and select install. You may see a warning message that says "This type of file can harm your device". Ignore it and tap on "OK". You may also see a message that says "For your security, your phone is not allowed to install unknown apps from this source". Tap on "Settings" and enable the option "Allow from this source".</li>
- <li>Wait for the installation process to finish. You may see a message that says "App installed". Tap on "Open" to launch the game.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded and installed Cricket League cracked APK on your device. You can now enjoy the game with unlimited coins and access to all the characters and balls.</p>
- <h2>How to play Cricket League cracked APK?</h2>
- <h3>Some tips and tricks to enjoy the game with unlimited coins and characters</h3>
- <p>To play Cricket League cracked APK, you need to follow these tips and tricks:</p>
- <ul>
- <li>When you launch the game, you will see that you have 9999999 coins in your account. You can use these coins to buy new characters and balls from the shop. You can also upgrade your players' skills and abilities with coins.</li>
- <li>You will also see that you have unlocked all the characters and balls in the game. You can choose any character or ball you want from the selection screen. You can also switch between them during the game by tapping on their icons at the bottom of the screen.</li>
- <li>You can play any mode or location you want in the game. You can play quick two over matches against your friends or random players online. You can also create your own team and top the leagues by winning matches. You can also play in different locations around the world, such as Mumbai, Karachi, Adelaide, Dubai, Johannesburg, Dhaka, Melbourne, London, etc.</li>
- <li>You can also customize your game settings according to your preference. You can change the difficulty level, sound effects, music volume, camera angle, etc. from the settings menu.</li>
- <li>You can also share your game progress and achievements with your friends on social media platforms, such as Facebook, Twitter, Instagram, etc. You can also invite your friends to join you in playing Cricket League online.</li>
- </ul>
- <p>Enjoy playing Cricket League cracked APK with unlimited coins and access to all the characters and balls!</p>
- <h2>What are the risks and drawbacks of using cracked APKs?</h2>
- <h3>Some potential issues and dangers of using cracked APKs for games</h3>
- <p>While using cracked APKs for games may seem tempting and fun, there are also some risks and drawbacks that you should be aware of. Here are some of them:</p>
- <ul>
- <li>You may violate the terms and conditions of the game developer and the Google Play Store. This may result in your account being banned, suspended, or deleted. You may also lose your game progress and data, or face legal actions from the game developer or Google.</li>
- <li>You may expose your device and personal information to malware or viruses. Some cracked APKs may contain harmful code that can damage your device, steal your data, or spy on your activities. You may also compromise your device's security and privacy by allowing unknown sources to install apps on your device.</li>
- <li>You may experience poor performance and compatibility issues. Some cracked APKs may not work properly on your device, or may cause crashes, glitches, errors, or bugs. You may also face compatibility issues with other apps or updates on your device.</li>
- <li>You may miss out on the original game experience and features. Some cracked APKs may alter the game's graphics, gameplay, or features, making it different from the original game. You may also miss out on the latest updates, patches, or new content from the game developer.</li>
- <li>You may lose the sense of achievement and challenge in the game. Some cracked APKs may make the game too easy or boring by giving you unlimited resources or access to everything in the game. You may also lose the satisfaction and thrill of earning or unlocking new features by playing the game legitimately.</li>
- </ul>
- <p>Therefore, you should be careful and cautious when using cracked APKs for games. You should weigh the pros and cons of using them, and decide whether they are worth the risk or not.</p>
- <h2>Conclusion</h2>
- <h3>A summary of the main points and a call to action</h3>
- <p>Cricket League is a fun and exciting online cricket game that lets you play quick two over matches against your friends or players around the world. You can also collect over 25 characters, level up your players, buy new types of balls, compete in leagues, and play in different locations around the world.</p>
- <p>However, if you want to play Cricket League without spending any money or waiting for hours to unlock new features, you can use a cracked APK file. A cracked APK file is an APK file that has been modified or hacked by someone to bypass the original app's security features or restrictions. For example, a Cricket League cracked APK file may give you unlimited coins and access to all the characters and balls in the game.</p>
- <p>To use a Cricket League cracked APK file, you need to download it from a third-party source (not from Google Play Store) and install it manually on your device. You also need to enable unknown sources in your device settings to allow the installation of apps from unknown sources. You also need to disable or uninstall the original app if you have it on your device, as it may conflict with the cracked APK file.</p>
- <p>However, using a Cricket League cracked APK file also comes with some risks and drawbacks. You may violate the terms and conditions of the game developer and the Google Play Store. You may expose your device and personal information to malware or viruses. You may experience poor performance and compatibility issues. You may miss out on the original game experience and features. You may also lose the sense of achievement and challenge in the game.</p>
- <p>Therefore, you should be careful and cautious when using a Cricket League cracked APK file. You should weigh the pros and cons of using it, and decide whether it is worth the risk or not. If you want to enjoy Cricket League without any hassle or harm, we recommend you to play it legitimately by downloading it from Google Play Store.</p>
- <h2>FAQs</h2>
- <h3>Five common questions and answers about Cricket League cracked APK</h3>
- <ol>
- <li><b>Q: Is Cricket League cracked APK safe to use?</b></li>
- <li>A: No, Cricket League cracked APK is not safe to use. It may contain malware or viruses that can harm your device or steal your personal information. It may also compromise your device's security and privacy by allowing unknown sources to install apps on your device.</li>
- <li><b>Q: Is Cricket League cracked APK legal to use?</b></li>
- <li>A: No, Cricket League cracked APK is not legal to use. It may violate the terms and conditions of the game developer and the Google Play Store. This may result in your account being banned, suspended, or deleted. You may also lose your game progress and data, or face legal actions from the game developer or Google.</li>
- <li><b>Q: How can I download Cricket League cracked APK?</b></li>
- <li>A: To download Cricket League cracked APK, you need to go to a reliable website that offers Cricket League cracked APK file. For example, you can go to [this link] to download the latest version of Cricket League cracked APK. You need to click on the download button and wait for the file to be downloaded on your device. You also need to enable unknown sources in your device settings to allow the installation of apps from unknown sources. You also need to disable or uninstall the original app if you have it on your device, as it may conflict with the cracked APK file.</li>
- <li><b>Q: How can I play Cricket League cracked APK?</b></li>
- <li>A: To play Cricket League cracked APK, you need to launch the game and enjoy it with unlimited coins and access to all the characters and balls. You can play any mode or location you want in the game. You can also customize your game settings according to your preference. You can also share your game progress and achievements with your friends on social media platforms, such as Facebook, Twitter, Instagram, etc.</li>
- <li><b>Q: What are the advantages and disadvantages of using Cricket League cracked APK?</b></li>
- <li>A: The advantages of using Cricket League cracked APK are that you can play the game without spending any money or waiting for hours to unlock new features. You can also have more fun and challenge in the game by playing against stronger opponents online, or by testing your skills with different modes and locations. You can also explore the game and its features more deeply by seeing how the game works, what are the hidden features, how the characters and balls are designed, etc.</li>
- <li>The disadvantages of using Cricket League cracked APK are that you may violate the terms and conditions of the game developer and the Google Play Store. You may also expose your device and personal information to malware or viruses. You may also experience poor performance and compatibility issues. You may also miss out on the original game experience and features. You may also lose the sense of achievement and challenge in the game.</li>
- </ol>
- <p>I hope this article has helped you understand what Cricket League cracked APK is, how to download and install it, how to play it, and what are the risks and drawbacks of using it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Genshin Impact MOD APK and Experience a New Level of Gaming.md DELETED
@@ -1,187 +0,0 @@
- <br />
- <h1>Download Genshin Impact APK Mod: A Guide for Android Users</h1>
- <p>Genshin Impact is one of the most popular games of 2020. It is an open-world action RPG that features an anime-style open-world environment and an action-based battle system using elemental magic and character-switching. The game is available on PC, PlayStation 4, PlayStation 5, iOS, Android, and Nintendo Switch. The game supports cross-save and cross-progression between PC, iOS, and Android.</p>
- <h2>download genshin impact apk mod</h2><br /><p><b><b>Download</b> >>> <a href="https://jinyurl.com/2uNP7w">https://jinyurl.com/2uNP7w</a></b></p><br /><br />
- <p>If you are an Android user who loves Genshin Impact but wants to enjoy some extra features that are not available in the official version, you might want to download its APK mod. An APK mod is a modified version of <p>an app that has been altered by a third-party developer to add or remove some features, such as unlimited resources, unlocked characters, or faster progress. However, downloading an APK mod also comes with some risks, such as security threats, compatibility issues, or potential bans from the game's developer.</p>
- <p>In this article, we will guide you on how to download Genshin Impact APK mod for Android devices, as well as give you some tips and tricks on how to play the game with the mod. We will also answer some of the most frequently asked questions about downloading Genshin Impact APK mod. Let's get started!</p>
- <h2>What is Genshin Impact?</h2>
- <p>Genshin Impact is a free-to-play open-world action RPG developed and published by miHoYo, a Chinese video game company. The game was released globally on September 28, 2020, and has since received critical acclaim and commercial success. The game has been praised for its stunning graphics, immersive gameplay, engaging story, and diverse characters.</p>
- <p>The game is set in the fantasy world of Teyvat, where seven elemental gods known as the Archons rule over seven regions. The player takes on the role of a traveler who has been separated from their twin sibling by an unknown god. The player can explore the vast open world, interact with various characters and creatures, collect resources and items, and fight enemies using a combination of melee attacks, elemental skills, and character-switching. The player can also team up with other players online in co-op mode for more fun and rewards.</p>
- <p>The game is constantly updated with new content, such as new regions, characters, quests, events, and features. The game also has a gacha system, where the player can spend real or in-game currency to obtain random items or characters. The game is monetized through microtransactions and optional subscriptions.</p>
- <h2>What is an APK mod?</h2>
- <p>An APK mod is a modified version of an app that has been altered by a third-party developer to add or remove some features that are not available in the official version. For example, an APK mod of Genshin Impact might offer unlimited resources, unlocked characters, faster progress, or other cheats that can make the game easier or more enjoyable.</p>
- <p>An APK mod is different from an APK file, which is the original file format used by Android devices to install apps. An APK file is usually downloaded from the Google Play Store or the app's official website. An APK mod is usually downloaded from a third-party website that hosts the modified file.</p>
- <p>download genshin impact mod apk unlimited primogems<br />
- download genshin impact mod apk latest version<br />
- download genshin impact mod apk android 1<br />
- download genshin impact mod apk obb<br />
- download genshin impact mod apk offline<br />
- download genshin impact mod apk no verification<br />
- download genshin impact mod apk god mode<br />
- download genshin impact mod apk free shopping<br />
- download genshin impact mod apk unlimited money<br />
- download genshin impact mod apk unlocked characters<br />
- download genshin impact mod apk 2023<br />
- download genshin impact mod apk for pc<br />
- download genshin impact mod apk revdl<br />
- download genshin impact mod apk rexdl<br />
- download genshin impact mod apk happymod<br />
- download genshin impact mod apk an1<br />
- download genshin impact mod apk platinmods<br />
- download genshin impact mod apk blackmod<br />
- download genshin impact mod apk pure<br />
- download genshin impact mod apk vip<br />
- download genshin impact mod apk mega<br />
- download genshin impact mod apk mediafıre<br />
- download genshin impact mod apk zippyshare<br />
- download genshin impact mod apk uptodown<br />
- download genshin impact mod apk apkpure<br />
- download genshin impact hack apk mod menu<br />
- download genshin impact hack apk unlimited everything<br />
- download genshin impact hack apk ios<br />
- download genshin impact hack apk 2023 no root<br />
- download genshin impact hack apk online<br />
- download genshin impact hack apk anti ban<br />
- download genshin impact hack apk high damage<br />
- download genshin impact hack apk all characters unlocked<br />
- download genshin impact hack apk free crystals<br />
- download genshin impact hack apk no survey<br />
- download genshin impact hack apk no human verification<br />
- download genshin impact hack apk no password<br />
- download genshin impact hack apk no ads<br />
- download genshin impact hack apk original server<br />
- download genshin impact hack apk new update<br />
- how to download genshin impact mod apk on android phone<br />
- how to download genshin impact mod apk on iphone or ipad<br />
- how to install and play genshin impact mod apk on pc or laptop with emulator <br />
- how to update and fix errors in genshin impact mod apk <br />
- where to find and get safe and working link for downloading genshin impact mod apk <br />
- what are the benefits and features of using and playing with genshin impact mod apk <br />
- what are the risks and drawbacks of using and playing with genshin impact mod apk <br />
- what are the best and most popular mods for enhancing and customizing the gameplay of genshin impact <br />
- what are the tips and tricks for playing and winning in the game of genshin impact with or without mods</p>
- <p>An APK mod can be installed on an Android device by following some simple steps, which we will explain later in this article. However, before downloading and installing an APK mod, you should be aware of some of the risks involved.</p>
- <h2>Why download Genshin Impact APK mod?</h2>
- <p>Downloading Genshin Impact APK mod can have some benefits and drawbacks depending on your preferences and expectations. Here are some of the pros and cons of downloading Genshin Impact APK mod:</p>
- <h3>Benefits of downloading Genshin Impact APK mod</h3>
- <ul>
- <li>You can enjoy unlimited resources such as primogems, mora, resin, and other items that can help you level up your characters, upgrade your equipment, and unlock new content.</li>
- <li>You can access all the characters in the game without spending any money or relying on luck. You can also switch between different characters at any time without any cooldowns.</li>
- <li>You can progress faster in the game by completing quests and challenges with ease. You can also skip cutscenes and dialogues if you want to save time.</li>
- <li>You can have more fun and freedom in exploring the open world and fighting enemies with different elemental combinations and strategies.</li>
- <li>You can customize your game settings according to your preferences, such as changing the graphics quality, sound effects, language, and controls.</li>
- </ul>
- <h3>Drawbacks of downloading Genshin Impact APK mod</h3>
- <ul>
- <li>You might expose your device to malware or viruses that can harm your data or privacy. Some APK mods might contain malicious code or hidden ads that can infect your device or steal your information.</li>
- <li>You might face compatibility issues with your device or the game's updates. Some APK mods might not work properly on certain devices or Android versions. Some APK mods might also stop working or crash after the game's updates.</li>
- <li>You might get banned from the game's servers or lose your account. The game's developer miHoYo has a strict policy against cheating and hacking. If you are caught using an APK mod, you might face consequences such as account suspension or termination.</li>
- <li>You might miss out on some of the game's features or content that are exclusive to the official version. Some APK mods might not include some of the game's updates or events that are part of the game's story or gameplay. Some APK mods might also have bugs or glitches that can affect your gaming experience.</li>
- </ul>
- <p>As you can see, downloading Genshin Impact APK mod has its advantages and disadvantages. You should weigh them carefully before deciding whether to download it or not. If you decide to download it, you should follow the steps below to do it safely and correctly.</p>
- <h2>How to download Genshin Impact APK mod?</h2>
- <p>Downloading Genshin Impact APK mod is not very difficult, but it requires some caution and attention. Here are the steps you need to follow to download and install Genshin Impact APK mod on your Android device:</p>
- <h3>Step 1: Find a reliable source for the APK mod</h3>
- <p>The first and most important step is to find a trustworthy website that offers the APK mod for Genshin Impact. There are many websites that claim to provide the APK mod, but not all of them are safe or reliable. Some of them might have outdated, fake, or malicious files that can harm your device or account.</p>
- <p>To avoid such websites, you should do some research and check the reviews and ratings of the website before downloading anything from it. You should also look for websites that have a secure connection (HTTPS) and a good reputation among the users. You can also use some tools or apps that can scan the website or the file for any viruses or malware.</p>
- <p>One of the websites that we recommend for downloading Genshin Impact APK mod is [APKPure]. This website is one of the most popular and trusted sources for downloading APK files and mods for Android devices. It has a large collection of apps and games, including Genshin Impact and its APK mod. It also has a user-friendly interface, fast download speed, and regular updates.</p>
- <h3>Step 2: Download the APK mod file</h3>
- <p>Once you have found a reliable source for the APK mod, you can proceed to download the file on your device. To do this, you need to follow these steps:</p>
- <ul>
- <li>Go to the website that offers the APK mod and look for the download button or link.</li>
- <li>Tap on the download button or link and wait for the file to be downloaded on your device. The file size might vary depending on the version of the APK mod, but it should be around 1 GB.</li>
- <li>Make sure you have enough storage space on your device before downloading the file. You can also use a Wi-Fi connection instead of mobile data to save bandwidth and speed up the download process.</li>
- <li>Do not open or run the file until you have completed the next step.</li>
- </ul>
- <h3>Step 3: Enable unknown sources on your device</h3>
- <p>Before you can install the APK mod file on your device, you need to enable unknown sources on your device. This is a security setting that allows your device to install apps from sources other than the Google Play Store or other official app stores. To enable unknown sources on your device, you need to follow these steps:</p>
- <ul>
- <li>Go to your device's settings and look for security or privacy options.</li>
- <li>Find and tap on the option that says unknown sources or install unknown apps.</li>
- <li>Toggle on the switch or check the box that allows your device to install apps from unknown sources.</li>
- <li>You might see a warning message that says installing apps from unknown sources can harm your device or data. Tap on OK or Allow to proceed.</li>
- </ul>
- <h3>Step 4: Install the APK mod file</h3>
- <p>Now that you have enabled unknown sources on your device, you can install the APK mod file on your device. To do this, you need to follow these steps:</p>
- <ul>
- <li>Go to your device's file manager and look for the folder where you downloaded the APK mod file.</li>
- <li>Tap on the file and select install.</li>
- <li>You might see a pop-up window that asks for your permission to install the app. Tap on install again to confirm.</li>
- <li>Wait for the installation process to finish. It might take a few minutes depending on your device's performance and the file size.</li>
- <li>Once the installation is done, you will see a message that says app installed. Tap on open to launch the game.</li>
- </ul>
- <h3>Step 5: Enjoy the game with the APK mod</h3>
- <p>Congratulations! You have successfully downloaded and installed Genshin Impact APK mod on your Android device. You can now enjoy the game with all its extra features and cheats. To play the game with the APK mod, you need to follow these steps:</p>
- <ul>
- <li>Launch the game from your app drawer or the shortcut icon on your home screen.</li>
- <li>Log in with your existing account or create a new one. You can also use a guest account if you want to try the game without registering.</li>
- <li>Select the server and the language that you want to play on. You can also change these settings later in the game's options.</li>
- <li>Wait for the game to load and start playing. You will see a tutorial that will guide you through the basics of the game. You can skip it if you want to.</li>
- <li>Enjoy the game with the APK mod features. You will notice that you have unlimited resources, unlocked characters, and faster progress. You can also access the mod menu by tapping on the floating icon on the screen. You can use the mod menu to customize your game settings, such as changing the graphics quality, sound effects, language, and controls.</li>
- </ul>
- <h2>Tips and tricks for playing Genshin Impact with the APK mod</h2>
- <p>Playing Genshin Impact with the APK mod can be fun and exciting, but it can also be challenging and confusing. Here are some tips and tricks that can help you make the most out of the game with the APK mod:</p>
- <h3>Tip 1: Experiment with different characters and elements</h3>
- <p>One of the best features of Genshin Impact is its diverse and colorful cast of characters. Each character has a unique personality, appearance, voice, and element. The game has seven elements: Pyro (fire), Hydro (water), Electro (electricity), Anemo (wind), Cryo (ice), Geo (earth), and Dendro (nature). Each element has its own strengths, weaknesses, and interactions with other elements.</p>
- <p>With the APK mod, you can unlock all the characters in the game without spending any money or relying on luck. You can also switch between different characters at any time without any cooldowns. This gives you a lot of freedom and flexibility in exploring the open world and fighting enemies with different elemental combinations and strategies.</p>
- <p>You should experiment with different characters and elements to find out what works best for you. You should also pay attention to the elemental reactions that occur when you combine two or more elements. For example, combining Pyro and Hydro will create Vaporize, which deals extra damage. Combining Electro and Cryo will create Superconduct, which lowers the enemy's defense. Combining Anemo and Geo will create Swirl, which spreads the element to nearby enemies.</p>
128
- <p>You should also use the elemental resonance feature, which gives you bonuses when you have two or more characters of the same element in your party. For example, having two Pyro characters will increase your attack by 25%. Having two Hydro characters will increase your healing by 30%. Having two Geo characters will increase your shield strength by 15%.</p>
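The reaction and resonance rules described above are, in effect, small lookup tables. Here is a toy Python sketch that restates only the combinations this article names; it is not the game's full reaction system:

```python
# Toy model of the elemental interactions named in this article only.
REACTIONS = {
    frozenset({"Pyro", "Hydro"}): "Vaporize (extra damage)",
    frozenset({"Electro", "Cryo"}): "Superconduct (lowers defense)",
}

RESONANCE = {  # bonus for two party members of the same element
    "Pyro": "+25% attack",
    "Hydro": "+30% healing",
    "Geo": "+15% shield strength",
}

def react(a: str, b: str) -> str:
    pair = frozenset({a, b})
    others = pair - {"Anemo"}
    # Anemo swirls Pyro/Hydro/Electro/Cryo, spreading that element.
    if "Anemo" in pair and others and others <= {"Pyro", "Hydro", "Electro", "Cryo"}:
        return f"Swirl (spreads {next(iter(others))})"
    return REACTIONS.get(pair, "no reaction listed in this article")

print(react("Pyro", "Hydro"))   # Vaporize (extra damage)
print(react("Anemo", "Cryo"))   # Swirl (spreads Cryo)
print(RESONANCE["Geo"])         # +15% shield strength
```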
- <h3>Tip 2: Manage your stamina wisely</h3>
- <p>Another important feature of Genshin Impact is its stamina system. Stamina is a resource that you use for various actions in the game, such as sprinting, climbing, gliding, and swimming. It is represented by a yellow bar in the top left corner of the screen; it depletes when you perform these actions and replenishes when you stop or rest.</p>
- <p>With the APK mod, you can have unlimited stamina, which means you can perform these actions without worrying about running out. This can make your exploration and combat easier and faster, but it can also make your gameplay less challenging and realistic.</p>
- <p>Manage your stamina according to your preferences and expectations. You can use unlimited stamina to explore every corner of the map, reach high places, cross long distances, and escape from enemies, or you can limit your stamina usage to keep the gameplay more immersive and authentic. You can also use items or skills that restore or increase stamina, such as food, potions, or statues.</p>
- <h3>Tip 3: Complete quests and challenges for rewards</h3>
- <p>Genshin Impact has a lot of quests and challenges that you can complete for rewards. Quests are tasks that involve following a story or helping a character. Challenges are tasks that involve completing a specific objective or defeating a certain enemy. Quests and challenges can reward you with adventure rank, primogems, mora, and other resources that help you progress in the game.</p>
- <p>With the APK mod, you can complete quests and challenges with ease by using your unlimited resources and unlocked characters. However, you can also challenge yourself by completing them without the mod's features, and skip the quests and challenges you find boring or repetitive.</p>
- <p>Complete the quests and challenges that suit your interests and goals, and weigh the difficulty and rewards of each one. Some are easy and rewarding, while others are hard and not worth the effort. You can also use tools or guides such as maps, wikis, or videos to find and complete them.</p>
- <h3>Tip 4: Upgrade your characters and equipment</h3>
- <p>Genshin Impact has a lot of characters and equipment that you can use to enhance your gameplay. Characters are the playable heroes that you control in combat and exploration. Equipment refers to the items that you can equip on your characters, such as artifacts, weapons, and talents. Both can be upgraded using materials and resources that you obtain in the game.</p>
- <p>With the APK mod, you can upgrade your characters and equipment without spending any resources or materials, and unlock all of them without spending money or relying on luck. This can make your gameplay more powerful and diverse, but also less rewarding and satisfying.</p>
- <p>Upgrade your characters and equipment according to your preferences. You can push them to the maximum level and quality with your unlimited resources, or limit your upgrades to keep the game balanced and realistic. Strategies that help you upgrade efficiently include focusing on your main characters, matching your artifacts with your elements, and refining your weapons.</p>
- <h3>Tip 5: Join co-op mode with friends or strangers</h3>
- <p>Genshin Impact has a co-op mode that allows you to play with other players online. Co-op mode lets you join or invite other players to explore the open world, fight enemies, complete quests, or participate in events together. It unlocks at adventure rank 16 and supports up to four players per session.</p>
- <p>With the APK mod, you can join co-op mode without restrictions or limitations, and you can use the mod's features to help or impress other players. However, be careful: you may be reported or banned by other players or the game's developer for using the APK mod in co-op mode.</p>
- <p>Join co-op mode for more fun and rewards: make new friends, learn from other players, share resources, or complete difficult tasks together. At the same time, respect other players' preferences and expectations. Do not use the mod's features to ruin other players' experience, and communicate and cooperate with your team.</p>
- <h2>Conclusion</h2>
- <p>Genshin Impact is a great game that offers a lot of features and content for Android users. However, if you want some extra features that are not available in the official version, you might want to download its APK mod: a version of the app altered by a third-party developer to add or remove features, such as unlimited resources, unlocked characters, or faster progress.</p>
- <p>Downloading Genshin Impact APK mod has both benefits and drawbacks depending on your preferences and expectations. Weigh them carefully before deciding whether to download it. If you do, follow the steps above to do it safely and correctly.</p>
- <p>We hope this article has helped you understand how to download Genshin Impact APK mod for Android devices, and given you some tips and tricks on how to play the game with the mod. We also hope you have enjoyed reading this article as much as we have enjoyed writing it.</p>
- <p>If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you!</p>
- <h2>FAQs</h2>
- <ul>
- <li><b>Q: Is downloading Genshin Impact APK mod legal?</b></li>
- <li>A: It is not illegal as such, but it is against the game's terms of service. If you download the APK mod, you are violating the game's rules, which might result in consequences such as account suspension or termination.</li>
- <li><b>Q: Is downloading Genshin Impact APK mod safe?</b></li>
- <li>A: It is not entirely safe, but it can be done without harming your device or data if you are careful. Some APK mods contain malware or viruses that can infect your device or steal your information, so scan both the website and the file for threats before downloading or installing anything.</li>
- <li><b>Q: Is downloading Genshin Impact APK mod worth it?</b></li>
- <li>A: It is worth it if you want extra features that are not available in the official version, such as unlimited resources, unlocked characters, or faster progress. It is not worth it if you want the game's original features and content, such as its updates, events, or co-op mode, or if you value your account's security and integrity, as you might get banned or lose your account for using the mod.</li>
- <li><b>Q: How to update Genshin Impact APK mod?</b></li>
- <li>A: Updating the APK mod is not always easy, as you might face compatibility issues after the game's updates; some mods stop working or crash. To update it, follow these steps:</li>
- <ul>
- <li>Go to the website that offers the APK mod and look for the latest version.</li>
- <li>Download the latest version of the APK mod file on your device.</li>
- <li>Uninstall the previous version of the APK mod from your device.</li>
- <li>Install the latest version of the APK mod file on your device.</li>
- <li>Launch the game and enjoy the updated features.</li>
- </ul>
- <li><b>Q: How to uninstall Genshin Impact APK mod?</b></li>
- <li>A: Uninstalling is straightforward:</li>
- <ul>
- <li>Go to your device's settings and look for the apps or applications section.</li>
- <li>Find and tap on Genshin Impact in the list of apps.</li>
- <li>Tap on Uninstall and confirm your action.</li>
- <li>Wait for the uninstallation process to finish.</li>
- <li>The APK mod is now removed from your device.</li>
- </ul>
- <li><b>Q: How to play Genshin Impact without the APK mod?</b></li>
- <li>A: Playing the official version is simple:</li>
- <ul>
- <li>Go to the Google Play Store or the game's official website and look for Genshin Impact.</li>
- <li>Download and install the official version on your device.</li>
- <li>Launch the game and log in with your existing account or create a new one.</li>
- <li>Select the server and the language that you want to play on.</li>
- <li>Wait for the game to load and start playing.</li>
- </ul>
- </ul>
spaces/1phancelerku/anime-remove-background/Download Magic Tiles 3 Mod APK and Rock the Music World.md DELETED
@@ -1,101 +0,0 @@
- <br />
- <h1>Magic Tiles 3 Unlocked APK: A Guide for Music Lovers</h1>
- <p>Do you love playing music games on your mobile device? If so, you might have heard of Magic Tiles 3, one of the most popular and addictive music games on the market. Magic Tiles 3 is a game that tests your reflexes and musical skills as you tap on the black tiles and avoid the white ones while listening to your favorite songs. But what if you want to enjoy the game without any limitations or interruptions? That's where Magic Tiles 3 Unlocked APK comes in handy. In this article, we will tell you everything you need to know about Magic Tiles 3 Unlocked APK, including its features, benefits, and installation process.</p>
- <h2>What is Magic Tiles 3?</h2>
- <p>Magic Tiles 3 is a music game developed by AMANOTES PTE LTD, a company that specializes in creating music games for mobile devices. The game has over 100 million downloads on the Google Play Store and has received positive reviews from users and critics alike. The game is suitable for all ages and music preferences, as it offers a variety of songs and genres to choose from, such as pop, rock, classical, EDM, and more. The game also has different modes and challenges to keep you entertained and challenged, such as endless mode, battle mode, custom mode, and more. You can also play online with other players from around the world and compete for the highest scores and rankings.</p>
- <h3>Features of Magic Tiles 3</h3>
- <h4>Various music genres and songs</h4>
- <p>One of the best features of Magic Tiles 3 is its diverse and rich music library. The game has over 1000 songs from different genres and artists, such as Alan Walker, Ed Sheeran, Bruno Mars, Adele, and more. You can also request your favorite songs to be added to the game by contacting the developers. The game updates its music library regularly with new songs and trends, so you will never get bored of playing the same songs over and over again.</p>
- <h4>Different game modes and challenges</h4>
- <p>Another feature that makes Magic Tiles 3 stand out from other music games is its variety of game modes and challenges. The game has four main modes: classic mode, arcade mode, zen mode, and bomb mode. Each mode has its own rules and objectives that require different skills and strategies. For example, in classic mode, you have to tap on the black tiles as fast as possible without missing any; in arcade mode, you have to tap on the black tiles while avoiding the bombs; in zen mode, you have to tap on the black tiles as long as possible without losing any lives; and in bomb mode, you have to tap on the black tiles while avoiding the bombs that explode after a certain time. The game also has other modes such as endless mode, where you can play unlimited songs without any time limit; battle mode, where you can challenge other players online; custom mode, where you can create your own levels with your own songs; and more.</p>
- <h4>Online multiplayer and social features</h4>
- <p>If you want to play with your friends or other players from around the world, Magic Tiles 3 has you covered. The game has an online multiplayer feature that allows you to join or create rooms with other players and compete for the highest scores. You can also chat with other players in the lobby or during the game. The game also has a social feature that allows you to connect your Facebook account and see your friends' scores and rankings. You can also share your achievements and progress with your friends and other players. You can also follow other players and see their profiles and songs.</p>
- <h3>Why download Magic Tiles 3 Unlocked APK?</h3>
- <p>Magic Tiles 3 is a free game that you can download from the Google Play Store or the App Store. However, the game has some limitations and drawbacks that might affect your gaming experience. For example, the game has ads and pop-ups that might interrupt your gameplay or annoy you. The game also has in-app purchases that require you to spend real money to buy more money and diamonds, which are the currencies of the game. You need money and diamonds to unlock more songs and themes, which are not available for free. Moreover, some of the songs and themes are premium, which means you have to pay extra to access them. If you want to enjoy the game without any restrictions or hassles, you should download Magic Tiles 3 Unlocked APK, which is a modified version of the game that gives you unlimited access to all the features and content of the game.</p>
- <h4>Unlimited money and diamonds</h4>
- <p>One of the benefits of downloading Magic Tiles 3 Unlocked APK is that you will get unlimited money and diamonds in your account. You can use them to unlock any song or theme you want, without having to worry about running out of them or spending real money. You can also use them to buy more lives, hints, or power-ups that can help you in the game. You can also use them to customize your tiles and background with different colors and patterns.</p>
- <h4>No ads and pop-ups</h4>
- <p>Another benefit of downloading Magic Tiles 3 Unlocked APK is that you will not see any ads or pop-ups in the game. You can play the game smoothly and peacefully, without any interruptions or distractions. You can also save your data and battery by not loading any ads or videos. You can enjoy the game to the fullest, without any annoyance or frustration.</p>
- <h4>Access to premium songs and themes</h4>
- <p>A third benefit of downloading Magic Tiles 3 Unlocked APK is that you will have access to all the premium songs and themes that are normally not available for free. You can play any song or theme you want, without having to pay extra or wait for a special event. You can enjoy the latest and hottest songs from your favorite artists and genres, as well as the most beautiful and stunning themes that will make your game more attractive and enjoyable.</p>
- <h2>How to download and install Magic Tiles 3 Unlocked APK?</h2>
- <p>If you are interested in downloading Magic Tiles 3 Unlocked APK, you need to follow these steps:</p>
- <h3>Step 1: Download the APK file from a trusted source</h3>
- <p>The first step is to download the APK file from a trusted source. You can search for Magic Tiles 3 Unlocked APK on Google or any other search engine, and find a reliable website that offers the file for free. Make sure that the website is safe and secure, and that the file is virus-free and malware-free. You can also check the reviews and ratings of other users who have downloaded the file before. Once you find a suitable website, click on the download button and save the file on your device.</p>
- <h3>Step 2: Enable unknown sources on your device</h3>
- <p>The second step is to enable the installation of apps from unknown sources on your device. This is necessary because Magic Tiles 3 Unlocked APK is not an official app from the Google Play Store or the App Store, so you need to allow your device to install apps from unknown sources. To do this, go to your device settings, then Security, then Unknown Sources, and turn it on. This will allow you to install apps from sources other than the official app stores.</p>
- <h3>Step 3: Install the APK file and launch the game</h3>
- <p>The third step is to install the APK file and launch the game. To do this, go to your file manager or downloads folder, and find the APK file that you downloaded in step 1. Tap on it, and follow the instructions on the screen to install it on your device. Once it is installed, you will see an icon of Magic Tiles 3 on your home screen or app drawer. Tap on it, and enjoy playing Magic Tiles 3 Unlocked APK with all its features and benefits.</p>
- <h2>Conclusion</h2>
- <p>Magic Tiles 3 is a fun and exciting music game that will keep you entertained and challenged for hours. However, if you want to enjoy the game without any limitations or interruptions, you should download Magic Tiles 3 Unlocked APK, which is a modified version of the game that gives you unlimited access to all its features and content. You will get unlimited money and diamonds, no ads and pop-ups, and access to premium songs and themes in the game. To download and install Magic Tiles 3 Unlocked APK, you just need to follow three simple steps: download the APK file from a trusted source, enable unknown sources on your device, and install the APK file and launch the game. We hope that this article has helped you understand more about Magic Tiles 3 Unlocked APK and how to get it on your device. If you have any questions or feedback, feel free to leave a comment below. Happy gaming!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Magic Tiles 3 Unlocked APK:</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>Is Magic Tiles 3 Unlocked APK safe to download and use?</td>
- <td>Yes, as long as you download it from a trusted source and scan it with an antivirus before installing it. Be careful not to download fake or malicious files that might harm your device or steal your data.</td>
- </tr>
- <tr>
- <td>Will I get banned from the game if I use Magic Tiles 3 Unlocked APK?</td>
- <td>The modded version is unlikely to get you banned on its own. However, you should not use it to cheat against or abuse other players online, as that ruins the fun and fairness of the game.</td>
- </tr>
- <tr>
- <td>Can I update Magic Tiles 3 Unlocked APK to the latest version?</td>
- <td>Yes, as long as the modded version is also updated by its developers. You can check for updates on the website where you downloaded the APK file, or on other websites that offer the same file, and turn on update notifications in your device settings.</td>
- </tr>
- <tr>
- <td>Can I play Magic Tiles 3 Unlocked APK offline?</td>
- <td>Yes, the game does not require an internet connection to run. However, some features might not work properly offline, such as online multiplayer, social features, and new songs and themes. You can save your progress offline, but you might need to sync it when you reconnect to the internet.</td>
- </tr>
- <tr>
- <td>Can I play Magic Tiles 3 Unlocked APK on other devices?</td>
- <td>Yes, as long as they are compatible with the game and the modded version. You can transfer the APK file from one device to another using a USB cable, Bluetooth, Wi-Fi, or cloud storage, or use an emulator to play on your PC or laptop.</td>
- </tr>
- </table>
spaces/1phancelerku/anime-remove-background/Download Real Football Mod APK with Hack Features and 3D Graphics.md DELETED
@@ -1,189 +0,0 @@
-
- <h1>Real Football APK Hack Download: Everything You Need to Know</h1>
- <p>If you are a fan of soccer games, you might have heard of real football apk hack download. This is a modified version of the popular mobile game Real Football that gives you unlimited money, coins, energy, and other resources. But what exactly is it, and how can you get it? In this article, we will answer these questions and more. We will also give you some tips and tricks to play Real Football better, and some alternatives to try if you are looking for a different soccer experience.</p>
- <h2>What is Real Football APK Hack Download?</h2>
- <p>Real Football APK Hack Download is a modified version of the original Real Football game, which is developed and published by Gameloft. Real Football is a soccer simulation game that lets you create your own dream team, play in various modes and tournaments, and enjoy realistic graphics and animations. The game has been downloaded over 100 million times on the Google Play Store, and has received positive reviews from players and critics alike.</p>
- <p>However, some players may find the game too challenging or too expensive to play. The game requires you to spend money or coins to buy players, upgrade facilities, unlock skills, and access premium features. You also need energy to play matches, which can run out quickly. This can limit your progress and enjoyment of the game.</p>
- <p>This is where real football apk hack download comes in handy. This is a modified version of the game that gives you unlimited money, coins, energy, and other resources. You can use these resources to buy any player you want, upgrade your team facilities, unlock all skills, and access all features without spending any real money. You can also play as many matches as you want without worrying about running out of energy.</p>
- <h2>Why Do People Want to Download Real Football APK Hack?</h2>
- <p>There are many reasons why people want to download real football apk hack. Some of the most common ones are:</p>
- <ul>
- <li>They want to enjoy the game without spending any money or waiting for energy to refill.</li>
- <li>They want to have more fun and freedom in creating their own dream team.</li>
- <li>They want to experience all the features and modes that the game has to offer.</li>
- <li>They want to have an edge over their opponents in online matches.</li>
- <li>They want to explore the game's secrets and hidden content.</li>
- </ul>
- <h2>What are the Benefits and Risks of Using Real Football APK Hack?</h2>
- <p>Using real football apk hack can have both benefits and risks. Some of the benefits are:</p>
- <ul>
- <li>You can save money and time by not having to buy or earn resources in the game.</li>
- <li>You can customize your team according to your preferences and style.</li>
- <li>You can enjoy all the features and modes that the game has to offer.</li>
- <li>You can have more fun and excitement in playing the game.</li>
- </ul>
- <p>Some of the risks are:</p>
- <ul>
- <li>You may encounter bugs or errors that can affect your gameplay or device performance.</li>
- <li>You may lose your progress or data if you uninstall or update the game.</li>
- <li>You may get banned or suspended from online matches if you are detected using a modified version of the game.</li>
- <li>You may violate the terms and conditions of Gameloft or the Google Play Store by using an unauthorized version of the game.</li>
- </ul>
- <p>Therefore, you should use real football apk hack at your own risk and discretion. You should also respect the rights and efforts of Gameloft as the original developer of the game.</p>
- <h2>What is Real Football Game and Its Features?</h2>
- <p>Real Football is a mobile sports video game franchise whose gameplay emulates football. The series is developed and published by Gameloft, a leading company in the mobile gaming industry. It started in the mid-2000s with Real Football 2004, which was free on some mobile phones. Since then, the series has released new titles every year, with improved graphics, gameplay, and features.</p>
- <p>Some of the features that Real Football offers are:</p>
- <ul>
- <li>Play in 3D stadiums where polished shadows, detailed textures, and spectators all come together to provide an exciting atmosphere, with multiple camera views during cutscenes and set pieces for a richer broadcast and first-person sensation.</li>
- <li>Build your dream team by recruiting star players through the lottery. Enhance your players' abilities by acquiring skill items through the lottery and matches. Upgrade your team facilities, including stadiums, hospitals, physiotherapy centers, and a youth camp.</li>
- <li>Challenge other players in asynchronous PvP World Arena mode and climb the leaderboards.</li>
- <li>Enjoy realistic physics and animations that simulate the real movements and reactions of the players and the ball.</li>
- <li>Choose from various modes and tournaments, such as Career Mode, World Cup Mode, Friendly Match Mode, League Mode, and more.</li>
- <li>Customize your team's name, logo, jersey, and formation. You can also edit your players' names, appearances, skills, and positions.</li>
- </ul>
- <h2>How to Download and Install Real Football APK Hack?</h2>
- <p>If you want to download and install real football apk hack on your Android device, you need to follow these steps:</p>
- <ol>
- <li>First, find a reliable source that provides the real football apk hack file. You can search on Google or any other search engine for a website that offers it. Make sure you download the latest version of the hack that is compatible with your device.</li>
- <li>Second, enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps that are not from the Google Play Store.</li>
- <li>Third, locate the real football apk hack file that you downloaded on your device. You can use a file manager app or go to your Downloads folder. Tap on the file and select Install. Wait for the installation process to finish.</li>
- <li>Fourth, move the data file that comes with the hack to the right folder on your device. The data file is usually a zip or rar archive that contains additional files for the game. Extract it using a file extractor app or a computer, then move the extracted folder "com.gameloft.android.GAND.GloftR21P" to the Internal/Android/Data/ folder on your device (see the sketch after this list).</li>
- <li>Fifth, launch the game. You should see a new icon on your home screen or app drawer. Tap on it and enjoy the game with unlimited resources.</li>
- </ol>
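As a desktop-side alternative to step 4, the archive can be extracted on a computer and pushed over adb instead of using an on-device file manager. This is my substitution, not part of the article's steps; it assumes adb is installed and USB debugging is enabled, and the archive name is a hypothetical placeholder (the folder name comes from step 4 above):

```python
# Extract the data archive on a computer and push the folder to the
# device over adb. 'real_football_data.zip' is a placeholder name.
# Requires adb on PATH and USB debugging enabled on the phone.
import subprocess
import zipfile

ARCHIVE = "real_football_data.zip"
FOLDER = "com.gameloft.android.GAND.GloftR21P"

with zipfile.ZipFile(ARCHIVE) as zf:
    zf.extractall(".")  # produces ./com.gameloft.android.GAND.GloftR21P/

subprocess.run(
    ["adb", "push", FOLDER, f"/sdcard/Android/data/{FOLDER}"],
    check=True,  # raise if adb reports an error
)
```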
- <h2>What are the Hack Features and How to Use Them?</h2>
- <p>The real football apk hack provides unlimited money, coins, energy, and other resources that you can use to enhance your gameplay. Some of the hack features are:</p>
- <ul>
- <li>Unlimited money: Use money to buy players, upgrade facilities, unlock skills, and access premium features. You can also use it to buy coins in the game store.</li>
- <li>Unlimited coins: Use coins to enter the lottery and win star players or skill items, or to buy energy in the game store.</li>
- <li>Unlimited energy: Use energy to play matches in any mode or tournament, without waiting for it to refill or buying it with coins.</li>
- <li>All features unlocked: Access all features and modes in the game without any restrictions or requirements.</li>
- </ul>
- <p>To use these hack features, just play the game as you would with the original version. The features are applied automatically; you don't need to enter any codes or commands.</p>
- <h2>What are Some Tips and Tricks to Play Real Football Game Better?</h2>
- <p>If you want to improve your skills and performance in Real Football, you can follow these tips and tricks:</p>
- <ul>
- <li>Practice juggling to improve your ball control. You don't need a lot of space to work on juggling, and it can help you develop your touch, coordination, and confidence with the ball. You can also challenge yourself by juggling with different parts of your body, such as your head, chest, knees, and feet.</li>
- <li>Learn the basic skills and moves to dribble, pass, shoot, and defend. You can practice these in the training mode or in friendly matches, and watch tutorials or videos online to learn from the pros. Some of the basic skills and moves are sprinting, sliding, tackling, crossing, heading, volleying, chipping, lobbing, and curling.</li>
- <li>Use the right formation and strategy for your team. You can choose from different formations and tactics in the game, such as 4-4-2, 4-3-3, or 3-5-2, and customize them according to your players' skills and positions. Consider factors such as your team's strengths and weaknesses, your opponent's style and formation, and the match conditions and objectives.</li>
- <li>Upgrade your players and facilities regularly. You can improve your players' abilities by acquiring skill items through the lottery or matches, and upgrade your team facilities to boost your players' performance and morale. Prioritize the facilities that are most relevant to your team's needs, such as stadiums, hospitals, physiotherapy centers, or the youth camp.</li>
- <li>Play online matches with other players around the world. You can challenge others in asynchronous PvP World Arena mode and climb the leaderboards, or join or create a club and compete in club tournaments. Playing online helps you test your skills, learn from others, and have more fun.</li>
- </ul>
- <h2>What are Some Alternatives to Real Football Game?</h2>
- <p>If you are looking for a different soccer experience on your mobile device, you can try some of these alternatives:</p>
- <table>
- <tr>
- <th>Name</th>
- <th>Description</th>
- </tr>
- <tr>
- <td>FIFA Mobile</td>
- <td>A soccer game developed by EA Sports that features over 700 teams, 17,000 players, 90 licensed stadiums, and 30 leagues. You can play in various modes such as Season Mode, Campaign Mode, Head to Head Mode, Team of the Week Mode, and more. You can also build your ultimate team by collecting and trading players.</td>
- </tr>
- <tr>
- <td>PES Mobile</td>
- <td>A soccer game developed by Konami that features over 8000 player animations and realistic physics and ball movement. You can play in various modes such as Matchday Mode, Tour Event Mode, Online Match Mode, Local Match Mode, and more. You can also create your own custom team by scouting and signing players.</td>
- </tr>
- <tr>
- <td>Dream League Soccer</td>
- <td>A soccer game developed by First Touch Games that features realistic graphics and animations. You can play in various modes such as Career Mode, Online Mode, Friendly Mode, Training Mode, and more. You can also build your own stadium and customize your team's logo and kits.</td>
- </tr>
- <tr>
- <td>Score! Hero</td>
- <td>A soccer game developed by First Touch Games that features a unique gameplay where you control the action by drawing paths for your players. You can play in over 800 levels where you have to score goals or make assists. You can also customize your player's appearance and skills.</td>
- </tr>
- <tr>
- <td>Soccer Stars</td>
- <td>A soccer game developed by Miniclip that features a simple but addictive gameplay where you flick your players to hit the ball. You can play in various modes such as Online Mode, Tournament Mode, Mini Game Mode, and more. You can also collect different teams and formations.</td>
- </tr>
- </table>
- <h2>Conclusion</h2>
- <p>Real Football APK Hack Download is a modified version of the original Real Football game that gives you unlimited resources and features. You can use it to enjoy the game without spending money or waiting for energy to refill. However, you should be aware of the risks and consequences of using it, such as bugs, data loss, bans, or terms-of-service violations, and you should respect the rights and efforts of Gameloft as the original developer of the game.</p>
- <p>If you are interested in downloading and installing real football apk hack on your device, you can follow the steps we provided in this article. You can also check out the tips and tricks to play Real Football better, and the alternatives to try if you are looking for a different soccer experience.</p>
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
- <h2>FAQs</h2>
- <h3>Is Real Football APK Hack Safe to Use?</h3>
- <p>There is no definitive answer, as different sources may provide different versions of the hack with different levels of safety and quality. Generally speaking, using any modified version of a game poses some risk to your device and account security, so use real football apk hack at your own risk and discretion.</p>
- <h3>How to Update Real Football APK Hack?</h3>
- <p>To update real football apk hack to the latest version of the game, find a source that provides the updated version of the hack file, then download and install it on your device following the same steps as before. Some hacks may not work properly or may crash after the game's updates, and be careful not to overwrite or delete your previous data file, as this may cause you to lose your progress.</p>
- <h3>How to Uninstall Real Football APK Hack?</h3>
- <p>To uninstall real football apk hack from your device, follow these steps:</p>
- <ol>
- <li>Go to Settings > Apps > Real Football and tap on Uninstall. Wait for the uninstallation process to finish.</li>
- <li>Go to the Internal/Android/Data/ folder on your device and delete the folder "com.gameloft.android.GAND.GloftR21P". This removes the hack's data file from your device.</li>
- <li>If you want to install the original version of the game from the Google Play Store, clear the cache and data of the Google Play Store app first. To do this, go to Settings > Apps > Google Play Store and tap on Clear Cache and Clear Data. This prevents conflicts or errors when installing the original version.</li>
- <li>Go to the Google Play Store, search for Real Football, tap on Install, and wait for the installation process to finish.</li>
- <li>Launch the original version of the game on your device and enjoy it as it is.</li>
- </ol>
- <h3>How to Play Real Football Online with Other Players?</h3>
- <p>To play Real Football online with other players, you need an internet connection and a Gameloft account. You can create a Gameloft account for free by registering with your email or Facebook account. Then, follow these steps:</p>
- <ol>
- <li>Launch the game on your device and tap on the Online button on the main menu.</li>
- <li>Select the mode that you want to play, such as World Arena, Club Tournament, or Friendly Match.</li>
- <li>Choose the team that you want to use, or create a custom team if you prefer.</li>
- <li>Wait for the game to find an opponent for you, or invite a friend to play with you.</li>
- <li>Enjoy the match and try to score more goals than your opponent.</li>
- </ol>
- <h3>How to Contact the Developers of Real Football Game?</h3>
- <p>If you have any questions, feedback, or issues regarding Real Football, you can contact Gameloft in one of these ways:</p>
- <ul>
- <li>Email: Send an email to [email protected] describing your problem or suggestion. Include your device model, OS version, game version, and screenshots if possible.</li>
- <li>Website: Visit the official Gameloft website at https://www.gameloft.com/en/ for news, updates, tips, and forums, or use the contact form on the website to send your message.</li>
- <li>Social Media: Follow Gameloft on social media, such as Facebook, Twitter, Instagram, and YouTube. You can post your comments or questions on their pages or send them a direct message.</li>
- </ul>
- <p>Gameloft is usually responsive and helpful to their customers, so you can expect a reply within a reasonable time.</p>
spaces/1phancelerku/anime-remove-background/Download Video Facebook di Android iOS dan PC - Pengunduh Video FB Multiplatform.md DELETED
@@ -1,123 +0,0 @@
-
- <h1>How to Download Videos from Facebook on Different Devices</h1>
- <p>Facebook is one of the most popular social media platforms that allows users to share and watch various types of videos, such as live streams, stories, reels, and more. Sometimes, you might come across a video that you want to save or download for offline viewing, sharing, or editing. However, Facebook does not provide an official way to download videos from its platform.</p>
- <p>Fortunately, there are some third-party tools that can help you download videos from Facebook on different devices, such as computers, Android phones, iPhones, and iPads. In this article, we will show you how to use some of the best tools for downloading Facebook videos and what you need to know before doing so.</p>
- <h2>How to Download Videos from Facebook on Your Computer</h2>
- <p>If you want to download videos from Facebook on your computer, you can use one of the following tools:</p>
- <h3>Using FDOWN.net</h3>
- <p>FDOWN.net is a free online tool that allows you to download videos from Facebook in MP4 format. You can also use it to download private videos, stories, and reels, and to convert videos to MP3. Here are the steps to use FDOWN.net:</p>
- <ol>
- <li>Open your browser and go to [Facebook](https://www.facebook.com/).</li>
- <li>Find the video that you want to download and copy its URL. You can do this by right-clicking on the video and selecting Show Video URL.</li>
- <li>Open a new tab and go to [FDOWN.net](https://www.fdown.net/).</li>
- <li>Paste the video URL into the field that says Enter the Facebook video link here and click Go.</li>
- <li>Select the quality option that you want (HD or SD) and click Download.</li>
- <li>Choose a name and location for your video file and click Save.</li>
- </ol>
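Once a tool like FDOWN.net has resolved the post into a direct MP4 link, saving that link is just a streamed HTTP download. A minimal Python sketch; the URL is a placeholder, and this is not an official FDOWN.net API (the service is used through its web page as described above):

```python
# Save a video from a direct MP4 URL with a streamed HTTP GET.
# The URL below is a placeholder, not a real Facebook video link.
import requests

def save_video(url: str, path: str) -> None:
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                f.write(chunk)  # write 1 MiB chunks to keep memory use flat

save_video("https://example.com/video.mp4", "facebook_video.mp4")
```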
- <h3>Using 4K Video Downloader</h3>
- <p>4K Video Downloader is a desktop application that allows you to download videos from various websites, including Facebook, YouTube, and Vimeo. You can also use it to download 3D and 360 videos. Here are the steps to use 4K Video Downloader:</p>
- <ol>
- <li>Download and install 4K Video Downloader from [here](https://www.4kdownload.com/products/product-videodownloader).</li>
- <li>Open your browser and go to [Facebook](https://www.facebook.com/).</li>
- <li>Find the video that you want to download and copy its URL. You can do this by right-clicking on the video and selecting Show Video URL.</li>
- <li>Open 4K Video Downloader and click Paste Link.</li>
- <li>Select the format and quality that you want and click Download.</li>
- <li>Choose a name and location for your video file and click Save.</li>
- </ol>
- <h2>How to Download Videos from Facebook on Your Android Device</h2>
- <p>If you want to download videos from Facebook on your Android device, you can use one of the following tools:</p>
- <h3>Using FDOWN.net</h3>
- <p>You can also use FDOWN.net on your Android device by following the same steps as above. However, instead of copying the video URL from the browser, you need to copy it from the Facebook app. You can do this by tapping on the video and selecting Copy Link. Then, you can paste it into FDOWN.net and download the video as usual.</p>
- <h3>Using Video Downloader for Facebook</h3>
- <p>Video Downloader for Facebook is an app that allows you to download videos from Facebook directly to your Android device. You can also use it to browse Facebook, watch videos offline, and share videos with other apps. Here are the steps to use Video Downloader for Facebook:</p>
- <ol>
- <li>Download and install Video Downloader for Facebook from [here](https://play.google.com/store/apps/details?id=fb.video.downloader).</li>
- <li>Open the app and log in with your Facebook account.</li>
- <li>Find the video that you want to download and tap on it.</li>
- <li>Select Download Video and choose the quality option that you want.</li>
- <li>The video will be saved in your device's gallery or in the app's folder.</li>
- </ol>
- <h2>How to Download Videos from Facebook on Your iPhone or iPad</h2>
- <p>If you want to download videos from Facebook on your iPhone or iPad, you can use one of the following tools:</p>
- <h3>Using FDOWN.net</h3>
- <p>You can also use FDOWN.net on your iPhone or iPad by following the same steps as above. However, instead of copying the video URL from the browser, you need to copy it from the Facebook app. You can do this by tapping on the video and selecting Copy Link. Then, you can paste it into FDOWN.net and download the video as usual.</p>
- <h3>Using Friendly Social Browser</h3>
- <p>Friendly Social Browser is an app that allows you to access multiple social media platforms, including Facebook, Instagram, and Twitter. You can also use it to download videos from Facebook to your iPhone or iPad. Here are the steps to use Friendly Social Browser:</p>
- <ol>
- <li>Download and install Friendly Social Browser from [here](https://apps.apple.com/us/app/friendly-social-browser/id400169658).</li>
- <li>Open the app and log in with your Facebook account.</li>
- <li>Find the video that you want to download and tap on it.</li>
- <li>Select Download Video and choose a name and location for your video file.</li>
- <li>The video will be saved in your device's photos or in the app's folder.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download videos from Facebook on different devices using some of the best tools available. Whether you want to save a video for offline viewing, sharing, or editing, you can easily do so with these tools. However, before downloading any video from Facebook, you should keep these tips and warnings in mind:</p>
- <ul>
- <li>Always respect the rights and privacy of the video creators and owners. Do not download or use any video without their permission or consent.</li>
- <li>Do not download or share any video that contains illegal, harmful, or offensive content. You might face legal consequences or violate Facebook's terms of service.</li>
- <li>Be careful of any tool that asks for your personal information, such as your email, password, or credit card details. You might risk exposing your data to hackers or scammers.</li>
- <li>Check the quality and size of the video before downloading it. You might need enough storage space on your device or a fast internet connection to download large or high-quality videos.</li>
- </ul>
- <h2>FAQs</h2>
- <h3>Can I download private videos from Facebook?</h3>
- <p>Yes, you can download private videos from Facebook using some tools, such as FDOWN.net. However, you need to have access to the private video's URL, which means you need to be friends with or follow the person who posted it. You also need to respect their privacy and not share their video without their permission.</p>
- <h3>How can I save videos from Facebook without downloading them?</h3>
- <p>If you don't want to download videos from Facebook but still want to watch them later, you can save them within the Facebook app or website. You can do this by tapping on the video and selecting Save Video. You can find your saved videos in the Saved section of the app or website. However, you need an internet connection to watch your saved videos.</p>
- <h3>Is it legal to download videos from Facebook?</h3>
- <p>It depends on the source and content of the video. Generally, downloading videos from Facebook for personal use is not illegal, as long as you do not infringe on the rights and privacy of the video creators and owners. However, downloading or sharing videos that contain copyrighted, illegal, or harmful content might be illegal and violate Facebook's terms of service. You should always check the video's license and permissions before downloading or using it.</p>
- <h3>What is the best format for downloading videos from Facebook?</h3>
- <p>The best format depends on your preferences and needs. Most tools offer MP4 as the default format, which is compatible with most devices and platforms. Some tools also offer other formats, such as MP3, AVI, MOV, and MKV. Choose the format that suits your device's specifications and your intended use of the video.</p>
- <h3>How can I edit or convert the downloaded videos from Facebook?</h3>
- <p>If you want to edit or convert the downloaded videos, you can use online or offline tools such as [Online Video Converter](https://www.onlinevideoconverter.com/), [VLC Media Player](https://www.videolan.org/vlc/index.html), [Windows Movie Maker](https://www.microsoft.com/en-us/windows/windows-essentials), and [iMovie](https://www.apple.com/imovie/). You can use these tools to trim, crop, rotate, and add effects, subtitles, or music to your videos, as well as to change their format, quality, resolution, and size.</p>
122
- <br />
123
- <br />
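To make the conversion step in the last FAQ concrete, here is a minimal sketch. It assumes ffmpeg is installed and that a file named downloaded.mp4 exists in the working directory; neither ffmpeg nor these file names come from the article itself.

import subprocess

# Re-encode an MP4 into AVI: "-i" names the input, the last argument the output.
subprocess.run(["ffmpeg", "-i", "downloaded.mp4", "output.avi"], check=True)

# Keep only the first 10 seconds without re-encoding ("-c copy" copies the streams).
subprocess.run(
    ["ffmpeg", "-i", "downloaded.mp4", "-ss", "0", "-t", "10", "-c", "copy", "clip.mp4"],
    check=True,
)

The GUI tools listed above can do the same trims and conversions interactively; the command line is simply easier to reproduce.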
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/base_model.py DELETED
@@ -1,316 +0,0 @@
1
- """This script defines the base network model for Deep3DFaceRecon_pytorch
2
- """
3
-
4
- import os
5
- import numpy as np
6
- import torch
7
- from collections import OrderedDict
8
- from abc import ABC, abstractmethod
9
- from . import networks
10
-
11
-
12
- class BaseModel(ABC):
13
- """This class is an abstract base class (ABC) for models.
14
- To create a subclass, you need to implement the following five functions:
15
- -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
16
- -- <set_input>: unpack data from dataset and apply preprocessing.
17
- -- <forward>: produce intermediate results.
18
- -- <optimize_parameters>: calculate losses, gradients, and update network weights.
19
- -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
20
- """
21
-
22
- def __init__(self, opt):
23
- """Initialize the BaseModel class.
24
-
25
- Parameters:
26
- opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
27
-
28
- When creating your custom class, you need to implement your own initialization.
29
- In this function, you should first call <BaseModel.__init__(self, opt)>
30
- Then, you need to define four lists:
31
- -- self.loss_names (str list): specify the training losses that you want to plot and save.
32
- -- self.model_names (str list): define the networks used in our training.
- -- self.visual_names (str list): specify the images that you want to display and save.
34
- -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
35
- """
36
- self.opt = opt
37
- self.isTrain = False
38
- self.device = torch.device('cpu')
39
- self.save_dir = " " # os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
40
- self.loss_names = []
41
- self.model_names = []
42
- self.visual_names = []
43
- self.parallel_names = []
44
- self.optimizers = []
45
- self.image_paths = []
46
- self.metric = 0 # used for learning rate policy 'plateau'
47
-
48
- @staticmethod
49
- def dict_grad_hook_factory(add_func=lambda x: x):
50
- saved_dict = dict()
51
-
52
- def hook_gen(name):
53
- def grad_hook(grad):
54
- saved_vals = add_func(grad)
55
- saved_dict[name] = saved_vals
56
- return grad_hook
57
- return hook_gen, saved_dict
58
-
59
- @staticmethod
60
- def modify_commandline_options(parser, is_train):
61
- """Add new model-specific options, and rewrite default values for existing options.
62
-
63
- Parameters:
64
- parser -- original option parser
65
- is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
66
-
67
- Returns:
68
- the modified parser.
69
- """
70
- return parser
71
-
72
- @abstractmethod
73
- def set_input(self, input):
74
- """Unpack input data from the dataloader and perform necessary pre-processing steps.
75
-
76
- Parameters:
77
- input (dict): includes the data itself and its metadata information.
78
- """
79
- pass
80
-
81
- @abstractmethod
82
- def forward(self):
83
- """Run forward pass; called by both functions <optimize_parameters> and <test>."""
84
- pass
85
-
86
- @abstractmethod
87
- def optimize_parameters(self):
88
- """Calculate losses, gradients, and update network weights; called in every training iteration"""
89
- pass
90
-
91
- def setup(self, opt):
92
- """Load and print networks; create schedulers
93
-
94
- Parameters:
95
- opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
96
- """
97
- if self.isTrain:
98
- self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
99
-
100
- if not self.isTrain or opt.continue_train:
101
- load_suffix = opt.epoch
102
- self.load_networks(load_suffix)
103
-
104
-
105
- # self.print_networks(opt.verbose)
106
-
107
- def parallelize(self, convert_sync_batchnorm=True):
108
- if not self.opt.use_ddp:
109
- for name in self.parallel_names:
110
- if isinstance(name, str):
111
- module = getattr(self, name)
112
- setattr(self, name, module.to(self.device))
113
- else:
114
- for name in self.model_names:
115
- if isinstance(name, str):
116
- module = getattr(self, name)
117
- if convert_sync_batchnorm:
118
- module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
119
- setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device),
120
- device_ids=[self.device.index],
121
- find_unused_parameters=True, broadcast_buffers=True))
122
-
123
- # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.
124
- for name in self.parallel_names:
125
- if isinstance(name, str) and name not in self.model_names:
126
- module = getattr(self, name)
127
- setattr(self, name, module.to(self.device))
128
-
129
- # put state_dict of optimizer to gpu device
130
- if self.opt.phase != 'test':
131
- if self.opt.continue_train:
132
- for optim in self.optimizers:
133
- for state in optim.state.values():
134
- for k, v in state.items():
135
- if isinstance(v, torch.Tensor):
136
- state[k] = v.to(self.device)
137
-
138
- def data_dependent_initialize(self, data):
139
- pass
140
-
141
- def train(self):
142
- """Make models train mode"""
143
- for name in self.model_names:
144
- if isinstance(name, str):
145
- net = getattr(self, name)
146
- net.train()
147
-
148
- def eval(self):
149
- """Make models eval mode"""
150
- for name in self.model_names:
151
- if isinstance(name, str):
152
- net = getattr(self, name)
153
- net.eval()
154
-
155
- def test(self):
156
- """Forward function used in test time.
157
-
158
- This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
159
- It also calls <compute_visuals> to produce additional visualization results
160
- """
161
- with torch.no_grad():
162
- self.forward()
163
- self.compute_visuals()
164
-
165
- def compute_visuals(self):
166
- """Calculate additional output images for visdom and HTML visualization"""
167
- pass
168
-
169
- def get_image_paths(self, name='A'):
170
- """ Return image paths that are used to load current data"""
171
- return self.image_paths if name =='A' else self.image_paths_B
172
-
173
- def update_learning_rate(self):
174
- """Update learning rates for all the networks; called at the end of every epoch"""
175
- for scheduler in self.schedulers:
176
- if self.opt.lr_policy == 'plateau':
177
- scheduler.step(self.metric)
178
- else:
179
- scheduler.step()
180
-
181
- lr = self.optimizers[0].param_groups[0]['lr']
182
- print('learning rate = %.7f' % lr)
183
-
184
- def get_current_visuals(self):
185
- """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
186
- visual_ret = OrderedDict()
187
- for name in self.visual_names:
188
- if isinstance(name, str):
189
- visual_ret[name] = getattr(self, name)[:, :3, ...]
190
- return visual_ret
191
-
192
- def get_current_losses(self):
193
- """Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
194
- errors_ret = OrderedDict()
195
- for name in self.loss_names:
196
- if isinstance(name, str):
197
- errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
198
- return errors_ret
199
-
200
- def save_networks(self, epoch):
201
- """Save all the networks to the disk.
202
-
203
- Parameters:
204
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
205
- """
206
- if not os.path.isdir(self.save_dir):
207
- os.makedirs(self.save_dir)
208
-
209
- save_filename = 'epoch_%s.pth' % (epoch)
210
- save_path = os.path.join(self.save_dir, save_filename)
211
-
212
- save_dict = {}
213
- for name in self.model_names:
214
- if isinstance(name, str):
215
- net = getattr(self, name)
216
- if isinstance(net, torch.nn.DataParallel) or isinstance(net,
217
- torch.nn.parallel.DistributedDataParallel):
218
- net = net.module
219
- save_dict[name] = net.state_dict()
220
-
221
-
222
- for i, optim in enumerate(self.optimizers):
223
- save_dict['opt_%02d'%i] = optim.state_dict()
224
-
225
- for i, sched in enumerate(self.schedulers):
226
- save_dict['sched_%02d'%i] = sched.state_dict()
227
-
228
- torch.save(save_dict, save_path)
229
-
230
- def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
231
- """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
232
- key = keys[i]
233
- if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
234
- if module.__class__.__name__.startswith('InstanceNorm') and \
235
- (key == 'running_mean' or key == 'running_var'):
236
- if getattr(module, key) is None:
237
- state_dict.pop('.'.join(keys))
238
- if module.__class__.__name__.startswith('InstanceNorm') and \
239
- (key == 'num_batches_tracked'):
240
- state_dict.pop('.'.join(keys))
241
- else:
242
- self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
243
-
244
- def load_networks(self, epoch):
245
- """Load all the networks from the disk.
246
-
247
- Parameters:
248
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
249
- """
250
- if self.opt.isTrain and self.opt.pretrained_name is not None:
251
- load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
252
- else:
253
- load_dir = self.save_dir
254
- load_filename = 'epoch_%s.pth' % (epoch)
255
- load_path = os.path.join(load_dir, load_filename)
256
- state_dict = torch.load(load_path, map_location=self.device)
257
- print('loading the model from %s' % load_path)
258
-
259
- for name in self.model_names:
260
- if isinstance(name, str):
261
- net = getattr(self, name)
262
- if isinstance(net, torch.nn.DataParallel):
263
- net = net.module
264
- net.load_state_dict(state_dict[name])
265
-
266
- if self.opt.phase != 'test':
267
- if self.opt.continue_train:
268
- print('loading the optim from %s' % load_path)
269
- for i, optim in enumerate(self.optimizers):
270
- optim.load_state_dict(state_dict['opt_%02d'%i])
271
-
272
- try:
273
- print('loading the sched from %s' % load_path)
274
- for i, sched in enumerate(self.schedulers):
275
- sched.load_state_dict(state_dict['sched_%02d'%i])
276
- except Exception:
277
- print('Failed to load schedulers, set schedulers according to epoch count manually')
278
- for i, sched in enumerate(self.schedulers):
279
- sched.last_epoch = self.opt.epoch_count - 1
280
-
281
-
282
-
283
-
284
- def print_networks(self, verbose):
285
- """Print the total number of parameters in the network and (if verbose) network architecture
286
-
287
- Parameters:
288
- verbose (bool) -- if verbose: print the network architecture
289
- """
290
- print('---------- Networks initialized -------------')
291
- for name in self.model_names:
292
- if isinstance(name, str):
293
- net = getattr(self, name)
294
- num_params = 0
295
- for param in net.parameters():
296
- num_params += param.numel()
297
- if verbose:
298
- print(net)
299
- print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
300
- print('-----------------------------------------------')
301
-
302
- def set_requires_grad(self, nets, requires_grad=False):
303
- """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
304
- Parameters:
305
- nets (network list) -- a list of networks
306
- requires_grad (bool) -- whether the networks require gradients or not
307
- """
308
- if not isinstance(nets, list):
309
- nets = [nets]
310
- for net in nets:
311
- if net is not None:
312
- for param in net.parameters():
313
- param.requires_grad = requires_grad
314
-
315
- def generate_visuals_for_evaluation(self, data, mode):
316
- return {}
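To make the subclassing contract described in BaseModel's docstrings concrete, here is a minimal hypothetical subclass. It is a sketch only: ToyModel, its linear network, and the L2 loss are stand-ins for illustration and are not part of the deleted file.

import torch
from torch import nn

class ToyModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)      # always call the base initializer first
        self.loss_names = ['l2']           # read back as self.loss_l2 by get_current_losses()
        self.model_names = ['net']         # saved/loaded by save_networks()/load_networks()
        self.visual_names = ['output']     # returned by get_current_visuals()
        self.net = nn.Linear(3, 3)         # stand-in network
        self.optimizers = [torch.optim.Adam(self.net.parameters())]

    def set_input(self, input):
        self.target = input['target']      # unpack the dataloader's dict

    def forward(self):
        self.output = self.net(self.target)

    def optimize_parameters(self):
        self.forward()
        self.loss_l2 = ((self.output - self.target) ** 2).mean()
        self.optimizers[0].zero_grad()
        self.loss_l2.backward()
        self.optimizers[0].step()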
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/gui_v1.py DELETED
@@ -1,708 +0,0 @@
1
- import os
2
- import logging
3
- import sys
4
- from dotenv import load_dotenv
5
-
6
- load_dotenv()
7
-
8
- os.environ["OMP_NUM_THREADS"] = "4"
9
- if sys.platform == "darwin":
10
- os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
11
-
12
- now_dir = os.getcwd()
13
- sys.path.append(now_dir)
14
- import multiprocessing
15
-
16
- logger = logging.getLogger(__name__)
17
-
18
-
19
- class Harvest(multiprocessing.Process):
20
- def __init__(self, inp_q, opt_q):
21
- multiprocessing.Process.__init__(self)
22
- self.inp_q = inp_q
23
- self.opt_q = opt_q
24
-
25
- def run(self):
26
- import numpy as np
27
- import pyworld
28
-
29
- while True:
30
- idx, x, res_f0, n_cpu, ts = self.inp_q.get()
31
- f0, t = pyworld.harvest(
32
- x.astype(np.double),
33
- fs=16000,
34
- f0_ceil=1100,
35
- f0_floor=50,
36
- frame_period=10,
37
- )
38
- res_f0[idx] = f0
39
- if len(res_f0.keys()) >= n_cpu:
40
- self.opt_q.put(ts)
41
-
42
-
43
- if __name__ == "__main__":
44
- import json
45
- import multiprocessing
46
- import re
47
- import threading
48
- import time
49
- import traceback
50
- from multiprocessing import Queue, cpu_count
51
- from queue import Empty
52
-
53
- import librosa
54
- from tools.torchgate import TorchGate
55
- import numpy as np
56
- import PySimpleGUI as sg
57
- import sounddevice as sd
58
- import torch
59
- import torch.nn.functional as F
60
- import torchaudio.transforms as tat
61
-
62
- import tools.rvc_for_realtime as rvc_for_realtime
63
- from i18n.i18n import I18nAuto
64
-
65
- i18n = I18nAuto()
66
- device = rvc_for_realtime.config.device
67
- # device = torch.device(
68
- # "cuda"
69
- # if torch.cuda.is_available()
70
- # else ("mps" if torch.backends.mps.is_available() else "cpu")
71
- # )
72
- current_dir = os.getcwd()
73
- inp_q = Queue()
74
- opt_q = Queue()
75
- n_cpu = min(cpu_count(), 8)
76
- for _ in range(n_cpu):
77
- Harvest(inp_q, opt_q).start()
78
-
79
- class GUIConfig:
80
- def __init__(self) -> None:
81
- self.pth_path: str = ""
82
- self.index_path: str = ""
83
- self.pitch: int = 0
84
- self.samplerate: int = 40000
85
- self.block_time: float = 1.0 # s
86
- self.buffer_num: int = 1
87
- self.threhold: int = -60
88
- self.crossfade_time: float = 0.04
89
- self.extra_time: float = 2.0
90
- self.I_noise_reduce = False
91
- self.O_noise_reduce = False
92
- self.rms_mix_rate = 0.0
93
- self.index_rate = 0.3
94
- self.n_cpu = min(n_cpu, 6)
95
- self.f0method = "harvest"
96
- self.sg_input_device = ""
97
- self.sg_output_device = ""
98
-
99
- class GUI:
100
- def __init__(self) -> None:
101
- self.config = GUIConfig()
102
- self.flag_vc = False
103
-
104
- self.launcher()
105
-
106
- def load(self):
107
- input_devices, output_devices, _, _ = self.get_devices()
108
- try:
109
- with open("configs/config.json", "r") as j:
110
- data = json.load(j)
111
- data["pm"] = data["f0method"] == "pm"
112
- data["harvest"] = data["f0method"] == "harvest"
113
- data["crepe"] = data["f0method"] == "crepe"
114
- data["rmvpe"] = data["f0method"] == "rmvpe"
115
- except Exception:
116
- with open("configs/config.json", "w") as j:
117
- data = {
118
- "pth_path": " ",
119
- "index_path": " ",
120
- "sg_input_device": input_devices[sd.default.device[0]],
121
- "sg_output_device": output_devices[sd.default.device[1]],
122
- "threhold": "-60",
123
- "pitch": "0",
124
- "index_rate": "0",
125
- "rms_mix_rate": "0",
126
- "block_time": "0.25",
127
- "crossfade_length": "0.04",
128
- "extra_time": "2",
129
- "f0method": "rmvpe",
130
- }
131
- data["pm"] = data["f0method"] == "pm"
132
- data["harvest"] = data["f0method"] == "harvest"
133
- data["crepe"] = data["f0method"] == "crepe"
134
- data["rmvpe"] = data["f0method"] == "rmvpe"
135
- return data
136
-
137
- def launcher(self):
138
- data = self.load()
139
- sg.theme("LightBlue3")
140
- input_devices, output_devices, _, _ = self.get_devices()
141
- layout = [
142
- [
143
- sg.Frame(
144
- title=i18n("加载模型"),
145
- layout=[
146
- [
147
- sg.Input(
148
- default_text=data.get("pth_path", ""),
149
- key="pth_path",
150
- ),
151
- sg.FileBrowse(
152
- i18n("选择.pth文件"),
153
- initial_folder=os.path.join(
154
- os.getcwd(), "assets/weights"
155
- ),
156
- file_types=((". pth"),),
157
- ),
158
- ],
159
- [
160
- sg.Input(
161
- default_text=data.get("index_path", ""),
162
- key="index_path",
163
- ),
164
- sg.FileBrowse(
165
- i18n("选择.index文件"),
166
- initial_folder=os.path.join(os.getcwd(), "logs"),
167
- file_types=((". index"),),
168
- ),
169
- ],
170
- ],
171
- )
172
- ],
173
- [
174
- sg.Frame(
175
- layout=[
176
- [
177
- sg.Text(i18n("输入设备")),
178
- sg.Combo(
179
- input_devices,
180
- key="sg_input_device",
181
- default_value=data.get("sg_input_device", ""),
182
- ),
183
- ],
184
- [
185
- sg.Text(i18n("输出设备")),
186
- sg.Combo(
187
- output_devices,
188
- key="sg_output_device",
189
- default_value=data.get("sg_output_device", ""),
190
- ),
191
- ],
192
- [sg.Button(i18n("重载设备列表"), key="reload_devices")],
193
- ],
194
- title=i18n("音频设备(请使用同种类驱动)"),
195
- )
196
- ],
197
- [
198
- sg.Frame(
199
- layout=[
200
- [
201
- sg.Text(i18n("响应阈值")),
202
- sg.Slider(
203
- range=(-60, 0),
204
- key="threhold",
205
- resolution=1,
206
- orientation="h",
207
- default_value=data.get("threhold", "-60"),
208
- enable_events=True,
209
- ),
210
- ],
211
- [
212
- sg.Text(i18n("音调设置")),
213
- sg.Slider(
214
- range=(-24, 24),
215
- key="pitch",
216
- resolution=1,
217
- orientation="h",
218
- default_value=data.get("pitch", "0"),
219
- enable_events=True,
220
- ),
221
- ],
222
- [
223
- sg.Text(i18n("Index Rate")),
224
- sg.Slider(
225
- range=(0.0, 1.0),
226
- key="index_rate",
227
- resolution=0.01,
228
- orientation="h",
229
- default_value=data.get("index_rate", "0"),
230
- enable_events=True,
231
- ),
232
- ],
233
- [
234
- sg.Text(i18n("响度因子")),
235
- sg.Slider(
236
- range=(0.0, 1.0),
237
- key="rms_mix_rate",
238
- resolution=0.01,
239
- orientation="h",
240
- default_value=data.get("rms_mix_rate", "0"),
241
- enable_events=True,
242
- ),
243
- ],
244
- [
245
- sg.Text(i18n("音高算法")),
246
- sg.Radio(
247
- "pm",
248
- "f0method",
249
- key="pm",
250
- default=data.get("pm", "") == True,
251
- enable_events=True,
252
- ),
253
- sg.Radio(
254
- "harvest",
255
- "f0method",
256
- key="harvest",
257
- default=data.get("harvest", "") == True,
258
- enable_events=True,
259
- ),
260
- sg.Radio(
261
- "crepe",
262
- "f0method",
263
- key="crepe",
264
- default=data.get("crepe", "") == True,
265
- enable_events=True,
266
- ),
267
- sg.Radio(
268
- "rmvpe",
269
- "f0method",
270
- key="rmvpe",
271
- default=data.get("rmvpe", "") == True,
272
- enable_events=True,
273
- ),
274
- ],
275
- ],
276
- title=i18n("常规设置"),
277
- ),
278
- sg.Frame(
279
- layout=[
280
- [
281
- sg.Text(i18n("采样长度")),
282
- sg.Slider(
283
- range=(0.05, 2.4),
284
- key="block_time",
285
- resolution=0.01,
286
- orientation="h",
287
- default_value=data.get("block_time", "0.25"),
288
- enable_events=True,
289
- ),
290
- ],
291
- [
292
- sg.Text(i18n("harvest进程数")),
293
- sg.Slider(
294
- range=(1, n_cpu),
295
- key="n_cpu",
296
- resolution=1,
297
- orientation="h",
298
- default_value=data.get(
299
- "n_cpu", min(self.config.n_cpu, n_cpu)
300
- ),
301
- enable_events=True,
302
- ),
303
- ],
304
- [
305
- sg.Text(i18n("淡入淡出长度")),
306
- sg.Slider(
307
- range=(0.01, 0.15),
308
- key="crossfade_length",
309
- resolution=0.01,
310
- orientation="h",
311
- default_value=data.get("crossfade_length", "0.04"),
312
- enable_events=True,
313
- ),
314
- ],
315
- [
316
- sg.Text(i18n("额外推理时长")),
317
- sg.Slider(
318
- range=(0.05, 5.00),
319
- key="extra_time",
320
- resolution=0.01,
321
- orientation="h",
322
- default_value=data.get("extra_time", "2.0"),
323
- enable_events=True,
324
- ),
325
- ],
326
- [
327
- sg.Checkbox(
328
- i18n("输入降噪"),
329
- key="I_noise_reduce",
330
- enable_events=True,
331
- ),
332
- sg.Checkbox(
333
- i18n("输出降噪"),
334
- key="O_noise_reduce",
335
- enable_events=True,
336
- ),
337
- ],
338
- ],
339
- title=i18n("性能设置"),
340
- ),
341
- ],
342
- [
343
- sg.Button(i18n("开始音频转换"), key="start_vc"),
344
- sg.Button(i18n("停止音频转换"), key="stop_vc"),
345
- sg.Text(i18n("推理时间(ms):")),
346
- sg.Text("0", key="infer_time"),
347
- ],
348
- ]
349
- self.window = sg.Window("RVC - GUI", layout=layout, finalize=True)
350
- self.event_handler()
351
-
352
- def event_handler(self):
353
- while True:
354
- event, values = self.window.read()
355
- if event == sg.WINDOW_CLOSED:
356
- self.flag_vc = False
357
- exit()
358
- if event == "reload_devices":
359
- prev_input = self.window["sg_input_device"].get()
360
- prev_output = self.window["sg_output_device"].get()
361
- input_devices, output_devices, _, _ = self.get_devices(update=True)
362
- if prev_input not in input_devices:
363
- self.config.sg_input_device = input_devices[0]
364
- else:
365
- self.config.sg_input_device = prev_input
366
- self.window["sg_input_device"].Update(values=input_devices)
367
- self.window["sg_input_device"].Update(
368
- value=self.config.sg_input_device
369
- )
370
- if prev_output not in output_devices:
371
- self.config.sg_output_device = output_devices[0]
372
- else:
373
- self.config.sg_output_device = prev_output
374
- self.window["sg_output_device"].Update(values=output_devices)
375
- self.window["sg_output_device"].Update(
376
- value=self.config.sg_output_device
377
- )
378
- if event == "start_vc" and self.flag_vc == False:
379
- if self.set_values(values) == True:
380
- logger.info("Use CUDA: %s", torch.cuda.is_available())
381
- self.start_vc()
382
- settings = {
383
- "pth_path": values["pth_path"],
384
- "index_path": values["index_path"],
385
- "sg_input_device": values["sg_input_device"],
386
- "sg_output_device": values["sg_output_device"],
387
- "threhold": values["threhold"],
388
- "pitch": values["pitch"],
389
- "rms_mix_rate": values["rms_mix_rate"],
390
- "index_rate": values["index_rate"],
391
- "block_time": values["block_time"],
392
- "crossfade_length": values["crossfade_length"],
393
- "extra_time": values["extra_time"],
394
- "n_cpu": values["n_cpu"],
395
- "f0method": ["pm", "harvest", "crepe", "rmvpe"][
396
- [
397
- values["pm"],
398
- values["harvest"],
399
- values["crepe"],
400
- values["rmvpe"],
401
- ].index(True)
402
- ],
403
- }
404
- with open("configs/config.json", "w") as j:
405
- json.dump(settings, j)
406
- if event == "stop_vc" and self.flag_vc == True:
407
- self.flag_vc = False
408
-
409
- # Parameter hot update
410
- if event == "threhold":
411
- self.config.threhold = values["threhold"]
412
- elif event == "pitch":
413
- self.config.pitch = values["pitch"]
414
- if hasattr(self, "rvc"):
415
- self.rvc.change_key(values["pitch"])
416
- elif event == "index_rate":
417
- self.config.index_rate = values["index_rate"]
418
- if hasattr(self, "rvc"):
419
- self.rvc.change_index_rate(values["index_rate"])
420
- elif event == "rms_mix_rate":
421
- self.config.rms_mix_rate = values["rms_mix_rate"]
422
- elif event in ["pm", "harvest", "crepe", "rmvpe"]:
423
- self.config.f0method = event
424
- elif event == "I_noise_reduce":
425
- self.config.I_noise_reduce = values["I_noise_reduce"]
426
- elif event == "O_noise_reduce":
427
- self.config.O_noise_reduce = values["O_noise_reduce"]
428
- elif event != "start_vc" and self.flag_vc == True:
429
- # Other parameters do not support hot update
430
- self.flag_vc = False
431
-
432
- def set_values(self, values):
433
- if len(values["pth_path"].strip()) == 0:
434
- sg.popup(i18n("请选择pth文件"))
435
- return False
436
- if len(values["index_path"].strip()) == 0:
437
- sg.popup(i18n("请选择index文件"))
438
- return False
439
- pattern = re.compile("[^\x00-\x7F]+")
440
- if pattern.findall(values["pth_path"]):
441
- sg.popup(i18n("pth文件路径不可包含中文"))
442
- return False
443
- if pattern.findall(values["index_path"]):
444
- sg.popup(i18n("index文件路径不可包含中文"))
445
- return False
446
- self.set_devices(values["sg_input_device"], values["sg_output_device"])
447
- self.config.pth_path = values["pth_path"]
448
- self.config.index_path = values["index_path"]
449
- self.config.threhold = values["threhold"]
450
- self.config.pitch = values["pitch"]
451
- self.config.block_time = values["block_time"]
452
- self.config.crossfade_time = values["crossfade_length"]
453
- self.config.extra_time = values["extra_time"]
454
- self.config.I_noise_reduce = values["I_noise_reduce"]
455
- self.config.O_noise_reduce = values["O_noise_reduce"]
456
- self.config.rms_mix_rate = values["rms_mix_rate"]
457
- self.config.index_rate = values["index_rate"]
458
- self.config.n_cpu = values["n_cpu"]
459
- self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][
460
- [
461
- values["pm"],
462
- values["harvest"],
463
- values["crepe"],
464
- values["rmvpe"],
465
- ].index(True)
466
- ]
467
- return True
468
-
469
- def start_vc(self):
470
- torch.cuda.empty_cache()
471
- self.flag_vc = True
472
- self.rvc = rvc_for_realtime.RVC(
473
- self.config.pitch,
474
- self.config.pth_path,
475
- self.config.index_path,
476
- self.config.index_rate,
477
- self.config.n_cpu,
478
- inp_q,
479
- opt_q,
480
- device,
481
- self.rvc if hasattr(self, "rvc") else None
482
- )
483
- self.config.samplerate = self.rvc.tgt_sr
484
- self.zc = self.rvc.tgt_sr // 100
485
- self.block_frame = int(np.round(self.config.block_time * self.config.samplerate / self.zc)) * self.zc
486
- self.block_frame_16k = 160 * self.block_frame // self.zc
487
- self.crossfade_frame = int(np.round(self.config.crossfade_time * self.config.samplerate / self.zc)) * self.zc
488
- self.sola_search_frame = self.zc
489
- self.extra_frame = int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) * self.zc
490
- self.input_wav: torch.Tensor = torch.zeros(
491
- self.extra_frame
492
- + self.crossfade_frame
493
- + self.sola_search_frame
494
- + self.block_frame,
495
- device=device,
496
- dtype=torch.float32,
497
- )
498
- self.input_wav_res: torch.Tensor= torch.zeros(160 * self.input_wav.shape[0] // self.zc, device=device,dtype=torch.float32)
499
- self.pitch: np.ndarray = np.zeros(
500
- self.input_wav.shape[0] // self.zc,
501
- dtype="int32",
502
- )
503
- self.pitchf: np.ndarray = np.zeros(
504
- self.input_wav.shape[0] // self.zc,
505
- dtype="float64",
506
- )
507
- self.sola_buffer: torch.Tensor = torch.zeros(
508
- self.crossfade_frame, device=device, dtype=torch.float32
509
- )
510
- self.nr_buffer: torch.Tensor = self.sola_buffer.clone()
511
- self.output_buffer: torch.Tensor = self.input_wav.clone()
512
- self.res_buffer: torch.Tensor = torch.zeros(2 * self.zc, device=device,dtype=torch.float32)
513
- self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0]
514
- self.fade_in_window: torch.Tensor = (
515
- torch.sin(
516
- 0.5
517
- * np.pi
518
- * torch.linspace(
519
- 0.0,
520
- 1.0,
521
- steps=self.crossfade_frame,
522
- device=device,
523
- dtype=torch.float32,
524
- )
525
- )
526
- ** 2
527
- )
528
- self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
529
- self.resampler = tat.Resample(
530
- orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
531
- ).to(device)
532
- self.tg = TorchGate(sr=self.config.samplerate, n_fft=4*self.zc, prop_decrease=0.9).to(device)
533
- thread_vc = threading.Thread(target=self.soundinput)
534
- thread_vc.start()
535
-
536
- def soundinput(self):
537
- """
538
- Accept audio input
539
- """
540
- channels = 1 if sys.platform == "darwin" else 2
541
- with sd.Stream(
542
- channels=channels,
543
- callback=self.audio_callback,
544
- blocksize=self.block_frame,
545
- samplerate=self.config.samplerate,
546
- dtype="float32",
547
- ):
548
- while self.flag_vc:
549
- time.sleep(self.config.block_time)
550
- logger.debug("Audio block passed.")
551
- logger.debug("ENDing VC")
552
-
553
- def audio_callback(
554
- self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
555
- ):
556
- """
557
- Audio processing callback
558
- """
559
- start_time = time.perf_counter()
560
- indata = librosa.to_mono(indata.T)
561
- if self.config.threhold > -60:
562
- rms = librosa.feature.rms(
563
- y=indata, frame_length=4*self.zc, hop_length=self.zc
564
- )
565
- db_threhold = (
566
- librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
567
- )
568
- for i in range(db_threhold.shape[0]):
569
- if db_threhold[i]:
570
- indata[i * self.zc : (i + 1) * self.zc] = 0
571
- self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
572
- self.input_wav[-self.block_frame: ] = torch.from_numpy(indata).to(device)
573
- self.input_wav_res[ : -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
574
- # input noise reduction and resampling
575
- if self.config.I_noise_reduce:
576
- input_wav = self.input_wav[-self.crossfade_frame -self.block_frame-2*self.zc: ]
577
- input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2*self.zc:]
578
- input_wav[: self.crossfade_frame] *= self.fade_in_window
579
- input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window
580
- self.nr_buffer[:] = input_wav[-self.crossfade_frame: ]
581
- input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame]))
582
- self.res_buffer[:] = input_wav[-2*self.zc: ]
583
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(input_wav)[160: ]
584
- else:
585
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(self.input_wav[-self.block_frame-2*self.zc: ])[160: ]
586
- # infer
587
- f0_extractor_frame = self.block_frame_16k + 800
588
- if self.config.f0method == 'rmvpe':
589
- f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1)
590
- infer_wav = self.rvc.infer(
591
- self.input_wav_res,
592
- self.input_wav_res[-f0_extractor_frame :].cpu().numpy(),
593
- self.block_frame_16k,
594
- self.valid_rate,
595
- self.pitch,
596
- self.pitchf,
597
- self.config.f0method,
598
- )
599
- infer_wav = infer_wav[
600
- -self.crossfade_frame - self.sola_search_frame - self.block_frame :
601
- ]
602
- # output noise reduction
603
- if self.config.O_noise_reduce:
604
- self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
605
- self.output_buffer[-self.block_frame: ] = infer_wav[-self.block_frame:]
606
- infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
607
- # volume envelope mixing
608
- if self.config.rms_mix_rate < 1:
609
- rms1 = librosa.feature.rms(
610
- y=self.input_wav_res[-160*infer_wav.shape[0]//self.zc :].cpu().numpy(),
611
- frame_length=640,
612
- hop_length=160,
613
- )
614
- rms1 = torch.from_numpy(rms1).to(device)
615
- rms1 = F.interpolate(
616
- rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
617
- )[0,0,:-1]
618
- rms2 = librosa.feature.rms(
619
- y=infer_wav[:].cpu().numpy(), frame_length=4*self.zc, hop_length=self.zc
620
- )
621
- rms2 = torch.from_numpy(rms2).to(device)
622
- rms2 = F.interpolate(
623
- rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
624
- )[0,0,:-1]
625
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
626
- infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate))
627
- # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
628
- conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
629
- cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
630
- cor_den = torch.sqrt(
631
- F.conv1d(conv_input ** 2, torch.ones(1, 1, self.crossfade_frame, device=device)) + 1e-8)
632
- if sys.platform == "darwin":
633
- _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
634
- sola_offset = sola_offset.item()
635
- else:
636
- sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
637
- logger.debug("sola_offset = %d", int(sola_offset))
638
- infer_wav = infer_wav[sola_offset: sola_offset + self.block_frame + self.crossfade_frame]
639
- infer_wav[: self.crossfade_frame] *= self.fade_in_window
640
- infer_wav[: self.crossfade_frame] += self.sola_buffer *self.fade_out_window
641
- self.sola_buffer[:] = infer_wav[-self.crossfade_frame:]
642
- if sys.platform == "darwin":
643
- outdata[:] = infer_wav[:-self.crossfade_frame].cpu().numpy()[:, np.newaxis]
644
- else:
645
- outdata[:] = infer_wav[:-self.crossfade_frame].repeat(2, 1).t().cpu().numpy()
646
- total_time = time.perf_counter() - start_time
647
- self.window["infer_time"].update(int(total_time * 1000))
648
- logger.info("Infer time: %.2f", total_time)
649
-
650
- def get_devices(self, update: bool = True):
651
- """获取设备列表"""
652
- if update:
653
- sd._terminate()
654
- sd._initialize()
655
- devices = sd.query_devices()
656
- hostapis = sd.query_hostapis()
657
- for hostapi in hostapis:
658
- for device_idx in hostapi["devices"]:
659
- devices[device_idx]["hostapi_name"] = hostapi["name"]
660
- input_devices = [
661
- f"{d['name']} ({d['hostapi_name']})"
662
- for d in devices
663
- if d["max_input_channels"] > 0
664
- ]
665
- output_devices = [
666
- f"{d['name']} ({d['hostapi_name']})"
667
- for d in devices
668
- if d["max_output_channels"] > 0
669
- ]
670
- input_devices_indices = [
671
- d["index"] if "index" in d else d["name"]
672
- for d in devices
673
- if d["max_input_channels"] > 0
674
- ]
675
- output_devices_indices = [
676
- d["index"] if "index" in d else d["name"]
677
- for d in devices
678
- if d["max_output_channels"] > 0
679
- ]
680
- return (
681
- input_devices,
682
- output_devices,
683
- input_devices_indices,
684
- output_devices_indices,
685
- )
686
-
687
- def set_devices(self, input_device, output_device):
688
- """设置输出设备"""
689
- (
690
- input_devices,
691
- output_devices,
692
- input_device_indices,
693
- output_device_indices,
694
- ) = self.get_devices()
695
- sd.default.device[0] = input_device_indices[
696
- input_devices.index(input_device)
697
- ]
698
- sd.default.device[1] = output_device_indices[
699
- output_devices.index(output_device)
700
- ]
701
- logger.info(
702
- "Input device: %s:%s", str(sd.default.device[0]), input_device
703
- )
704
- logger.info(
705
- "Output device: %s:%s", str(sd.default.device[1]), output_device
706
- )
707
-
708
- gui = GUI()
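The SOLA block inside audio_callback picks the offset that maximizes a normalized cross-correlation between the new model output and the stored tail of the previous block, so the crossfade splices at the most similar waveform position. A standalone sketch of that search, with made-up sizes (in the real code the frame counts derive from block_time and crossfade_time):

import torch
import torch.nn.functional as F

crossfade_frame, sola_search_frame = 320, 160
infer_wav = torch.randn(crossfade_frame + sola_search_frame + 4096)  # fake model output
sola_buffer = torch.randn(crossfade_frame)                           # tail of the previous block

# Correlate every candidate alignment with the previous tail, normalized by energy.
conv_input = infer_wav[None, None, : crossfade_frame + sola_search_frame]
cor_nom = F.conv1d(conv_input, sola_buffer[None, None, :])
cor_den = torch.sqrt(
    F.conv1d(conv_input ** 2, torch.ones(1, 1, crossfade_frame)) + 1e-8
)
sola_offset = int(torch.argmax(cor_nom[0, 0] / cor_den[0, 0]))
print("best alignment offset:", sola_offset)

Once the offset is chosen, the callback fades the aligned region in with a squared-sine window and fades the stored tail out with its complement, so the two windows sum to one across the seam.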
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/components/ui/separator.tsx DELETED
@@ -1,31 +0,0 @@
- 'use client'
-
- import * as React from 'react'
- import * as SeparatorPrimitive from '@radix-ui/react-separator'
-
- import { cn } from '@/lib/utils'
-
- const Separator = React.forwardRef<
-   React.ElementRef<typeof SeparatorPrimitive.Root>,
-   React.ComponentPropsWithoutRef<typeof SeparatorPrimitive.Root>
- >(
-   (
-     { className, orientation = 'horizontal', decorative = true, ...props },
-     ref
-   ) => (
-     <SeparatorPrimitive.Root
-       ref={ref}
-       decorative={decorative}
-       orientation={orientation}
-       className={cn(
-         'shrink-0 bg-border',
-         orientation === 'horizontal' ? 'h-[1px] w-full' : 'h-full w-[1px]',
-         className
-       )}
-       {...props}
-     />
-   )
- )
- Separator.displayName = SeparatorPrimitive.Root.displayName
-
- export { Separator }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGText/GlyphControl/app.py DELETED
@@ -1,284 +0,0 @@
1
- import math
2
- from omegaconf import OmegaConf
3
- from scripts.rendertext_tool import Render_Text, load_model_from_config, load_model_ckpt
4
- import gradio as gr
5
- import os
6
- import torch
7
- import time
8
- from PIL import Image
9
- from cldm.hack import disable_verbosity, enable_sliced_attention
10
- # from pytorch_lightning import seed_everything
11
- from example_list import examples
12
- def process_multi_wrapper(rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
13
- shared_prompt,
14
- width_0, width_1, width_2, width_3,
15
- ratio_0, ratio_1, ratio_2, ratio_3,
16
- top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
17
- top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
18
- yaw_0, yaw_1, yaw_2, yaw_3,
19
- num_rows_0, num_rows_1, num_rows_2, num_rows_3,
20
- shared_num_samples, shared_image_resolution,
21
- shared_ddim_steps, shared_guess_mode,
22
- shared_strength, shared_scale, shared_seed,
23
- shared_eta, shared_a_prompt, shared_n_prompt, allow_run_generation = True):
24
- if not allow_run_generation:
25
- return "Please get the glyph image first by clicking the 'Render Glyph Image' button", None, allow_run_generation
26
-
27
- rendered_txt_values = [rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3]
28
- width_values = [width_0, width_1, width_2, width_3]
29
- ratio_values = [ratio_0, ratio_1, ratio_2, ratio_3]
30
- top_left_x_values = [top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3]
31
- top_left_y_values = [top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3]
32
- yaw_values = [yaw_0, yaw_1, yaw_2, yaw_3]
33
- num_rows_values = [num_rows_0, num_rows_1, num_rows_2, num_rows_3]
34
- allow_run_generation = False
35
- return "The image generation process finished!", render_tool.process_multi(rendered_txt_values, shared_prompt,
36
- width_values, ratio_values,
37
- top_left_x_values, top_left_y_values,
38
- yaw_values, num_rows_values,
39
- shared_num_samples, shared_image_resolution,
40
- shared_ddim_steps, shared_guess_mode,
41
- shared_strength, shared_scale, shared_seed,
42
- shared_eta, shared_a_prompt, shared_n_prompt
43
- ), allow_run_generation
44
-
45
- def process_multi_wrapper_only_show_rendered(rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
46
- shared_prompt,
47
- width_0, width_1, width_2, width_3,
48
- ratio_0, ratio_1, ratio_2, ratio_3,
49
- top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
50
- top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
51
- yaw_0, yaw_1, yaw_2, yaw_3,
52
- num_rows_0, num_rows_1, num_rows_2, num_rows_3,
53
- shared_num_samples, shared_image_resolution,
54
- shared_ddim_steps, shared_guess_mode,
55
- shared_strength, shared_scale, shared_seed,
56
- shared_eta, shared_a_prompt, shared_n_prompt):
57
- rendered_txt_values = [rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3]
58
- width_values = [width_0, width_1, width_2, width_3]
59
- ratio_values = [ratio_0, ratio_1, ratio_2, ratio_3]
60
- top_left_x_values = [top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3]
61
- top_left_y_values = [top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3]
62
- yaw_values = [yaw_0, yaw_1, yaw_2, yaw_3]
63
- num_rows_values = [num_rows_0, num_rows_1, num_rows_2, num_rows_3]
64
- allow_run_generation = True
65
-
66
- glyph_image = render_tool.process_multi(rendered_txt_values, shared_prompt,
67
- width_values, ratio_values,
68
- top_left_x_values, top_left_y_values,
69
- yaw_values, num_rows_values,
70
- shared_num_samples, shared_image_resolution,
71
- shared_ddim_steps, shared_guess_mode,
72
- shared_strength, shared_scale, shared_seed,
73
- shared_eta, shared_a_prompt, shared_n_prompt,
74
- only_show_rendered_image=True)
75
-
76
- if glyph_image[0] is None:
77
- return "Warning: no glyph image would be rendered because the glyph insructions are not provided!", None, allow_run_generation
78
- else:
79
- return "The glyph image is successfully rendered!", glyph_image, allow_run_generation
80
-
81
- def load_ckpt(model_ckpt = "LAION-Glyph-10M-Epoch-5"):
82
- global render_tool, model
83
- if torch.cuda.is_available():
84
- for i in range(5):
85
- torch.cuda.empty_cache()
86
- time.sleep(2)
87
- print("empty the cuda cache")
88
-
89
- if model_ckpt == "LAION-Glyph-10M-Epoch-6":
90
- model = load_model_ckpt(model, "checkpoints/laion10M_epoch_6_model_ema_only.ckpt")
91
- elif model_ckpt == "TextCaps-5K-Epoch-10":
92
- model = load_model_ckpt(model, "checkpoints/textcaps5K_epoch_10_model_ema_only.ckpt")
93
- elif model_ckpt == "TextCaps-5K-Epoch-20":
94
- model = load_model_ckpt(model, "checkpoints/textcaps5K_epoch_20_model_ema_only.ckpt")
95
- elif model_ckpt == "TextCaps-5K-Epoch-40":
96
- model = load_model_ckpt(model, "checkpoints/textcaps5K_epoch_40_model_ema_only.ckpt")
97
-
98
- render_tool = Render_Text(model, save_memory = SAVE_MEMORY)
99
- output_str = f"already change the model checkpoint to {model_ckpt}"
100
- print(output_str)
101
- if torch.cuda.is_available():
102
- for i in range(5):
103
- torch.cuda.empty_cache()
104
- time.sleep(2)
105
- print("empty the cuda cache")
106
- allow_run_generation = False
107
- return output_str, None, allow_run_generation
108
-
109
- def export_parameters(*args):
110
- return str(args)
111
-
112
- def import_parameters(parameters):
113
- import ast  # parses Python literals only; safer than eval() on user-editable text
- return ast.literal_eval(parameters)
114
-
115
- SAVE_MEMORY = True #False
116
- disable_verbosity()
117
- if SAVE_MEMORY:
118
- enable_sliced_attention()
119
- cfg = OmegaConf.load("config.yaml")
120
- model = load_model_from_config(cfg, "checkpoints/laion10M_epoch_6_model_ema_only.ckpt", verbose=True)
121
- render_tool = Render_Text(model, save_memory = SAVE_MEMORY)
122
-
123
- description = """
124
- ## GlyphControl: Glyph Conditional Control for Visual Text Generation (NeurIPS 2023)
125
- Github link: [Link](https://github.com/AIGText/GlyphControl-release).
126
- Report: [link](https://arxiv.org/pdf/2305.18259.pdf).\n
127
- You could try the listed examples at the bottom by clicking on them and modify the parameters for your own creation. We will update the examples progressively.\n
128
- (By using the "Parameter Summary" part, you can import or export the parameter settings of generated images in an easier way.)
129
- """
130
-
131
- SPACE_ID = os.getenv('SPACE_ID')
132
- if SPACE_ID is not None:
133
- # description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. < a href=" ">< img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></ a></p >'
134
- description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
135
-
136
- block = gr.Blocks().queue()
137
-
138
- with block:
139
- with gr.Row():
140
- gr.Markdown(description)
141
- only_show_rendered_image = gr.Number(value=1, visible=False)
142
- default_width = [0.3, 0.3, 0.3, 0.3]
143
- default_top_left_x = [0.35, 0.15, 0.15, 0.5]
144
- default_top_left_y = [0.4, 0.15, 0.65, 0.65]
145
- with gr.Column():
146
- with gr.Row():
147
- for i in range(4):
148
- with gr.Column():
149
- exec(f"""rendered_txt_{i} = gr.Textbox(label=f"Render Text {i+1}")""")
150
-
151
- with gr.Accordion(f"Advanced options {i+1}", open=False):
152
- exec(f"""width_{i} = gr.Slider(label="Bbox Width", minimum=0., maximum=1, value={default_width[i]}, step=0.01) """)
153
- exec(f"""ratio_{i} = gr.Slider(label="Bbox_width_height_ratio", minimum=0., maximum=5, value=0., step=0.02, visible=False) """)
154
- # exec(f"""top_left_x_{i} = gr.Slider(label="Bbox Top Left x", minimum=0., maximum=1, value={0.35 - 0.25 * math.cos(math.pi * i)}, step=0.01) """)
155
- # exec(f"""top_left_y_{i} = gr.Slider(label="Bbox Top Left y", minimum=0., maximum=1, value={0.1 if i < 2 else 0.6}, step=0.01) """)
156
- exec(f"""top_left_x_{i} = gr.Slider(label="Bbox Top Left x", minimum=0., maximum=1, value={default_top_left_x[i]}, step=0.01) """)
157
- exec(f"""top_left_y_{i} = gr.Slider(label="Bbox Top Left y", minimum=0., maximum=1, value={default_top_left_y[i]}, step=0.01) """)
158
- exec(f"""yaw_{i} = gr.Slider(label="Bbox Yaw", minimum=-20, maximum=20, value=0, step=5) """)
159
- # exec(f"""num_rows_{i} = gr.Slider(label="num_rows", minimum=1, maximum=4, value=1, step=1, visible=False) """)
160
- exec(f"""num_rows_{i} = gr.Slider(label="num_rows", minimum=1, maximum=4, value=1, step=1) """)
161
-
162
- with gr.Row():
163
- with gr.Column():
164
- shared_prompt = gr.Textbox(label="Shared Prompt")
165
- with gr.Row():
166
- show_render_button = gr.Button(value="Render Glyph Image")
167
- run_button = gr.Button(value="Run Generation")
168
- allow_run_generation = gr.Checkbox(label='allow_run_generation',
169
- value=False, visible=False)
170
-
171
- with gr.Accordion("Model Options", open=False):
172
- with gr.Row():
173
- # model_ckpt = gr.inputs.Dropdown(["LAION-Glyph-10M", "Textcaps5K-10"], label="Checkpoint", default = "LAION-Glyph-10M")
174
- # model_ckpt = gr.inputs.Dropdown(["LAION-Glyph-10M-Epoch-6", "LAION-Glyph-10M-Epoch-5", "LAION-Glyph-1M"], label="Checkpoint", default = "LAION-Glyph-10M-Epoch-6")
175
- model_ckpt = gr.inputs.Dropdown(["LAION-Glyph-10M-Epoch-6", "TextCaps-5K-Epoch-10", "TextCaps-5K-Epoch-20", "TextCaps-5K-Epoch-40"], label="Checkpoint", default = "LAION-Glyph-10M-Epoch-6")
176
- # load_button = gr.Button(value = "Load Checkpoint")
177
-
178
- with gr.Accordion("Shared Advanced Options", open=False):
179
- with gr.Row():
180
- shared_num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=5, step=1)
181
- shared_image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64, visible=False)
182
- shared_strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01, visible=False)
183
- shared_guess_mode = gr.Checkbox(label='Guess Mode', value=False, visible=False)
184
- shared_seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
185
- with gr.Row():
186
- shared_scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
187
- shared_ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
188
- shared_eta = gr.Number(label="eta (DDIM)", value=0.0, visible=False)
189
- with gr.Row():
190
- shared_a_prompt = gr.Textbox(label="Added Prompt", value='4K, dslr, best quality, extremely detailed')
191
- shared_n_prompt = gr.Textbox(label="Negative Prompt",
192
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
193
- with gr.Accordion("Parameter Summary", open=False):
194
- with gr.Row():
195
- parameters = gr.Text(label = "Parameters")
196
- with gr.Row():
197
- import_button = gr.Button(value="Import")
198
- export_button = gr.Button(value="Export")
199
- with gr.Accordion("Output", open=True):
200
- # with gr.Row():
201
- # export_button = gr.Button(value="Export Parameters")
202
- with gr.Row():
203
- message = gr.Text(interactive=False, label = "Message")
204
- with gr.Row():
205
- result_gallery = gr.Gallery(label='Images', show_label=False, elem_id="gallery").style(grid=2, height='auto')
206
- gr.Examples(
207
- examples= examples, #"./examples",
208
- # [[, "LAION-Glyph-10M-Epoch-6"]],
209
- # ["./assets/img2.jpg", "r50-hdetr_sam-vit-b"],
210
- # ["./assets/img3.jpg", "r50-hdetr_sam-vit-b"],
211
- # ["./assets/img4.jpg", "r50-hdetr_sam-vit-b"]],
212
- inputs=[ model_ckpt, shared_prompt,
213
- rendered_txt_0, width_0, ratio_0, top_left_x_0, top_left_y_0, yaw_0, num_rows_0,
214
- rendered_txt_1, width_1, ratio_1, top_left_x_1, top_left_y_1, yaw_1, num_rows_1,
215
- rendered_txt_2, width_2, ratio_2, top_left_x_2, top_left_y_2, yaw_2, num_rows_2,
216
- rendered_txt_3, width_3, ratio_3, top_left_x_3, top_left_y_3, yaw_3, num_rows_3,
217
- shared_num_samples, shared_image_resolution,
218
- shared_ddim_steps, shared_guess_mode,
219
- shared_strength, shared_scale, shared_seed,
220
- shared_eta, shared_a_prompt, shared_n_prompt],
221
- # outputs=output_img,
222
- # fn=inference
223
- )
224
- export_button.click(fn=export_parameters,
225
- inputs = [model_ckpt, shared_prompt,
226
- rendered_txt_0, width_0, ratio_0, top_left_x_0, top_left_y_0, yaw_0, num_rows_0,
227
- rendered_txt_1, width_1, ratio_1, top_left_x_1, top_left_y_1, yaw_1, num_rows_1,
228
- rendered_txt_2, width_2, ratio_2, top_left_x_2, top_left_y_2, yaw_2, num_rows_2,
229
- rendered_txt_3, width_3, ratio_3, top_left_x_3, top_left_y_3, yaw_3, num_rows_3,
230
- shared_num_samples, shared_image_resolution,
231
- shared_ddim_steps, shared_guess_mode,
232
- shared_strength, shared_scale, shared_seed,
233
- shared_eta, shared_a_prompt, shared_n_prompt],
234
- outputs = [parameters] )
235
-
236
- import_button.click(fn=import_parameters,
237
- inputs = [parameters],
238
- outputs = [model_ckpt, shared_prompt,
239
- rendered_txt_0, width_0, ratio_0, top_left_x_0, top_left_y_0, yaw_0, num_rows_0,
240
- rendered_txt_1, width_1, ratio_1, top_left_x_1, top_left_y_1, yaw_1, num_rows_1,
241
- rendered_txt_2, width_2, ratio_2, top_left_x_2, top_left_y_2, yaw_2, num_rows_2,
242
- rendered_txt_3, width_3, ratio_3, top_left_x_3, top_left_y_3, yaw_3, num_rows_3,
243
- shared_num_samples, shared_image_resolution,
244
- shared_ddim_steps, shared_guess_mode,
245
- shared_strength, shared_scale, shared_seed,
246
- shared_eta, shared_a_prompt, shared_n_prompt]
247
- )
248
-
249
- run_button.click(fn=process_multi_wrapper,
250
- inputs=[rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
251
- shared_prompt,
252
- width_0, width_1, width_2, width_3,
253
- ratio_0, ratio_1, ratio_2, ratio_3,
254
- top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
255
- top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
256
- yaw_0, yaw_1, yaw_2, yaw_3,
257
- num_rows_0, num_rows_1, num_rows_2, num_rows_3,
258
- shared_num_samples, shared_image_resolution,
259
- shared_ddim_steps, shared_guess_mode,
260
- shared_strength, shared_scale, shared_seed,
261
- shared_eta, shared_a_prompt, shared_n_prompt, allow_run_generation],
262
- outputs=[message, result_gallery, allow_run_generation])
263
-
264
- show_render_button.click(fn=process_multi_wrapper_only_show_rendered,
265
- inputs=[rendered_txt_0, rendered_txt_1, rendered_txt_2, rendered_txt_3,
266
- shared_prompt,
267
- width_0, width_1, width_2, width_3,
268
- ratio_0, ratio_1, ratio_2, ratio_3,
269
- top_left_x_0, top_left_x_1, top_left_x_2, top_left_x_3,
270
- top_left_y_0, top_left_y_1, top_left_y_2, top_left_y_3,
271
- yaw_0, yaw_1, yaw_2, yaw_3,
272
- num_rows_0, num_rows_1, num_rows_2, num_rows_3,
273
- shared_num_samples, shared_image_resolution,
274
- shared_ddim_steps, shared_guess_mode,
275
- shared_strength, shared_scale, shared_seed,
276
- shared_eta, shared_a_prompt, shared_n_prompt],
277
- outputs=[message, result_gallery, allow_run_generation])
278
-
279
- model_ckpt.change(load_ckpt,
280
- inputs = [model_ckpt],
281
- outputs = [message, result_gallery, allow_run_generation]
282
- )
283
-
284
- block.launch()
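The Parameter Summary panel round-trips every widget value through export_parameters (str() of the argument tuple) and import_parameters (parsing the string back). With ast.literal_eval, as in the import handler above, the round trip looks like this outside Gradio; the sample values are illustrative, not taken from the app:

import ast

settings = ("LAION-Glyph-10M-Epoch-6", "a neon sign", "HELLO", 0.3, 0.35, 0.4, 0, 1)
exported = str(settings)                 # what export_parameters returns
restored = ast.literal_eval(exported)    # parses literals only; rejects arbitrary code
assert restored == settings

literal_eval raises ValueError on anything that is not a plain Python literal, which is why it is preferable to eval() for text a user can edit.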
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/_base_/__init__.py DELETED
File without changes
spaces/Abhilashvj/planogram-compliance/utils/loggers/clearml/hpo.py DELETED
@@ -1,144 +0,0 @@
- from clearml import Task
-
- # Connecting ClearML with the current process;
- # from here on everything is logged automatically.
- from clearml.automation import HyperParameterOptimizer, UniformParameterRange
- from clearml.automation.optuna import OptimizerOptuna
-
- task = Task.init(
-     project_name="Hyper-Parameter Optimization",
-     task_name="YOLOv5",
-     task_type=Task.TaskTypes.optimizer,
-     reuse_last_task_id=False,
- )
-
- # Example use case:
- optimizer = HyperParameterOptimizer(
-     # This is the experiment we want to optimize.
-     base_task_id="<your_template_task_id>",
-     # Here we define the hyper-parameters to optimize.
-     # Notice: the parameter name should exactly match what you see in the UI: <section_name>/<parameter>.
-     # For example, if the base experiment has a section named "General" with a
-     # parameter named "batch_size", the name becomes "General/batch_size".
-     # If you use `argparse`, arguments will appear under the "Args" section,
-     # and you should instead pass "Args/batch_size".
-     hyper_parameters=[
-         UniformParameterRange("Hyperparameters/lr0", min_value=1e-5, max_value=1e-1),
-         UniformParameterRange("Hyperparameters/lrf", min_value=0.01, max_value=1.0),
-         UniformParameterRange("Hyperparameters/momentum", min_value=0.6, max_value=0.98),
-         UniformParameterRange("Hyperparameters/weight_decay", min_value=0.0, max_value=0.001),
-         UniformParameterRange("Hyperparameters/warmup_epochs", min_value=0.0, max_value=5.0),
-         UniformParameterRange("Hyperparameters/warmup_momentum", min_value=0.0, max_value=0.95),
-         UniformParameterRange("Hyperparameters/warmup_bias_lr", min_value=0.0, max_value=0.2),
-         UniformParameterRange("Hyperparameters/box", min_value=0.02, max_value=0.2),
-         UniformParameterRange("Hyperparameters/cls", min_value=0.2, max_value=4.0),
-         UniformParameterRange("Hyperparameters/cls_pw", min_value=0.5, max_value=2.0),
-         UniformParameterRange("Hyperparameters/obj", min_value=0.2, max_value=4.0),
-         UniformParameterRange("Hyperparameters/obj_pw", min_value=0.5, max_value=2.0),
-         UniformParameterRange("Hyperparameters/iou_t", min_value=0.1, max_value=0.7),
-         UniformParameterRange("Hyperparameters/anchor_t", min_value=2.0, max_value=8.0),
-         UniformParameterRange("Hyperparameters/fl_gamma", min_value=0.0, max_value=4.0),
-         UniformParameterRange("Hyperparameters/hsv_h", min_value=0.0, max_value=0.1),
-         UniformParameterRange("Hyperparameters/hsv_s", min_value=0.0, max_value=0.9),
-         UniformParameterRange("Hyperparameters/hsv_v", min_value=0.0, max_value=0.9),
-         UniformParameterRange("Hyperparameters/degrees", min_value=0.0, max_value=45.0),
-         UniformParameterRange("Hyperparameters/translate", min_value=0.0, max_value=0.9),
-         UniformParameterRange("Hyperparameters/scale", min_value=0.0, max_value=0.9),
-         UniformParameterRange("Hyperparameters/shear", min_value=0.0, max_value=10.0),
-         UniformParameterRange("Hyperparameters/perspective", min_value=0.0, max_value=0.001),
-         UniformParameterRange("Hyperparameters/flipud", min_value=0.0, max_value=1.0),
-         UniformParameterRange("Hyperparameters/fliplr", min_value=0.0, max_value=1.0),
-         UniformParameterRange("Hyperparameters/mosaic", min_value=0.0, max_value=1.0),
-         UniformParameterRange("Hyperparameters/mixup", min_value=0.0, max_value=1.0),
-         UniformParameterRange("Hyperparameters/copy_paste", min_value=0.0, max_value=1.0),
-     ],
-     # This is the objective metric we want to maximize/minimize.
-     objective_metric_title="metrics",
-     objective_metric_series="mAP_0.5",
-     # Now we decide if we want to maximize it or minimize it (accuracy we maximize).
-     objective_metric_sign="max",
-     # Let us limit the number of concurrent experiments;
-     # this in turn makes sure we don't bombard the scheduler with experiments.
-     # If we have an auto-scaler connected, this, by proxy, also limits the number of machines.
-     max_number_of_concurrent_tasks=1,
-     # This is the optimizer class (actually doing the optimization).
-     # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band).
-     optimizer_class=OptimizerOptuna,
-     # If specified, only the top K performing Tasks will be kept; the others will be automatically archived.
-     save_top_k_tasks_only=5,
-     compute_time_limit=None,
-     total_max_jobs=20,
-     min_iteration_per_job=None,
-     max_iteration_per_job=None,
- )
-
- # Report every 10 seconds; this is way too often, but we are testing here.
- optimizer.set_report_period(10 / 60)
- # You can also use the line below instead to run all the optimizer tasks locally, without using queues or an agent:
- # an_optimizer.start_locally(job_complete_callback=job_complete_callback)
- # Set the time limit for the optimization process (2 hours).
- optimizer.set_time_limit(in_minutes=120.0)
- # Start the optimization process in the local environment.
- optimizer.start_locally()
- # Wait until the process is done (notice we are controlling the optimization process in the background).
- optimizer.wait()
- # Make sure background optimization stopped.
- optimizer.stop()
-
- print("We are done, good bye")
spaces/AchyuthGamer/OpenGPT/g4f/Provider/FreeGpt.py DELETED
@@ -1,55 +0,0 @@
- from __future__ import annotations
-
- import time, hashlib, random
-
- from ..typing import AsyncGenerator
- from ..requests import StreamSession
- from .base_provider import AsyncGeneratorProvider
-
- domains = [
-     'https://k.aifree.site',
-     'https://p.aifree.site'
- ]
-
-
- class FreeGpt(AsyncGeneratorProvider):
-     url = "https://freegpts1.aifree.site/"
-     supports_gpt_35_turbo = True
-     working = True
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         timeout: int = 30,
-         **kwargs
-     ) -> AsyncGenerator:
-         async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
-             prompt = messages[-1]["content"]
-             timestamp = int(time.time())
-             data = {
-                 "messages": messages,
-                 "time": timestamp,
-                 "pass": None,
-                 "sign": generate_signature(timestamp, prompt)
-             }
-             url = random.choice(domains)
-             async with session.post(f"{url}/api/generate", json=data) as response:
-                 response.raise_for_status()
-                 async for chunk in response.iter_content():
-                     yield chunk.decode()
-
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
- def generate_signature(timestamp: int, message: str, secret: str = ""):
-     data = f"{timestamp}:{message}:{secret}"
-     return hashlib.sha256(data.encode()).hexdigest()
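For reference, a minimal sketch of driving this provider (the import path is assumed from this repo's layout; model and message values are illustrative):

    import asyncio
    from g4f.Provider.FreeGpt import FreeGpt  # assumed package path

    async def main():
        messages = [{"role": "user", "content": "Hello"}]
        # create_async_generator streams the reply chunk by chunk
        async for chunk in FreeGpt.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="")

    asyncio.run(main())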
spaces/Adapter/T2I-Adapter/ldm/models/diffusion/ddim.py DELETED
@@ -1,292 +0,0 @@
- """SAMPLING ONLY."""
-
- import torch
- import numpy as np
- from tqdm import tqdm
-
- from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \
-     extract_into_tensor
-
-
- class DDIMSampler(object):
-     def __init__(self, model, schedule="linear", **kwargs):
-         super().__init__()
-         self.model = model
-         self.ddpm_num_timesteps = model.num_timesteps
-         self.schedule = schedule
-
-     def register_buffer(self, name, attr):
-         if type(attr) == torch.Tensor:
-             if attr.device != torch.device("cuda"):
-                 attr = attr.to(torch.device("cuda"))
-         setattr(self, name, attr)
-
-     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-         self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                   num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
-         alphas_cumprod = self.model.alphas_cumprod
-         assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-         to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
-         self.register_buffer('betas', to_torch(self.model.betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
-         # ddim sampling parameters
-         ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                    ddim_timesteps=self.ddim_timesteps,
-                                                                                    eta=ddim_eta, verbose=verbose)
-         self.register_buffer('ddim_sigmas', ddim_sigmas)
-         self.register_buffer('ddim_alphas', ddim_alphas)
-         self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-         self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
-         sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-             (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                     1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-         self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
-     @torch.no_grad()
-     def sample(self,
-                S,
-                batch_size,
-                shape,
-                conditioning=None,
-                callback=None,
-                normals_sequence=None,
-                img_callback=None,
-                quantize_x0=False,
-                eta=0.,
-                mask=None,
-                x0=None,
-                temperature=1.,
-                noise_dropout=0.,
-                score_corrector=None,
-                corrector_kwargs=None,
-                verbose=True,
-                x_T=None,
-                log_every_t=100,
-                unconditional_guidance_scale=1.,
-                unconditional_conditioning=None,
-                # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
-                features_adapter=None,
-                append_to_context=None,
-                cond_tau=0.4,
-                **kwargs
-                ):
-         if conditioning is not None:
-             if isinstance(conditioning, dict):
-                 cbs = conditioning[list(conditioning.keys())[0]].shape[0]
-                 if cbs != batch_size:
-                     print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-             else:
-                 if conditioning.shape[0] != batch_size:
-                     print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-         self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-         # sampling
-         C, H, W = shape
-         size = (batch_size, C, H, W)
-         print(f'Data shape for DDIM sampling is {size}, eta {eta}')
-
-         samples, intermediates = self.ddim_sampling(conditioning, size,
-                                                     callback=callback,
-                                                     img_callback=img_callback,
-                                                     quantize_denoised=quantize_x0,
-                                                     mask=mask, x0=x0,
-                                                     ddim_use_original_steps=False,
-                                                     noise_dropout=noise_dropout,
-                                                     temperature=temperature,
-                                                     score_corrector=score_corrector,
-                                                     corrector_kwargs=corrector_kwargs,
-                                                     x_T=x_T,
-                                                     log_every_t=log_every_t,
-                                                     unconditional_guidance_scale=unconditional_guidance_scale,
-                                                     unconditional_conditioning=unconditional_conditioning,
-                                                     features_adapter=features_adapter,
-                                                     append_to_context=append_to_context,
-                                                     cond_tau=cond_tau,
-                                                     )
-         return samples, intermediates
-
-     @torch.no_grad()
-     def ddim_sampling(self, cond, shape,
-                       x_T=None, ddim_use_original_steps=False,
-                       callback=None, timesteps=None, quantize_denoised=False,
-                       mask=None, x0=None, img_callback=None, log_every_t=100,
-                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                       unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,
-                       append_to_context=None, cond_tau=0.4):
-         device = self.model.betas.device
-         b = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=device)
-         else:
-             img = x_T
-
-         if timesteps is None:
-             timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
-         elif timesteps is not None and not ddim_use_original_steps:
-             subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
-             timesteps = self.ddim_timesteps[:subset_end]
-
-         intermediates = {'x_inter': [img], 'pred_x0': [img]}
-         time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
-         total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-         print(f"Running DDIM Sampling with {total_steps} timesteps")
-
-         iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
-
-         for i, step in enumerate(iterator):
-             index = total_steps - i - 1
-             ts = torch.full((b,), step, device=device, dtype=torch.long)
-
-             if mask is not None:
-                 assert x0 is not None
-                 img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                 img = img_orig * mask + (1. - mask) * img
-
-             outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                       quantize_denoised=quantize_denoised, temperature=temperature,
-                                       noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                       corrector_kwargs=corrector_kwargs,
-                                       unconditional_guidance_scale=unconditional_guidance_scale,
-                                       unconditional_conditioning=unconditional_conditioning,
-                                       features_adapter=None if index < int(
-                                           (1 - cond_tau) * total_steps) else features_adapter,
-                                       # TODO support style_cond_tau
-                                       append_to_context=None if index < int(
-                                           0.5 * total_steps) else append_to_context,
-                                       )
-             img, pred_x0 = outs
-             if callback: callback(i)
-             if img_callback: img_callback(pred_x0, i)
-
-             if index % log_every_t == 0 or index == total_steps - 1:
-                 intermediates['x_inter'].append(img)
-                 intermediates['pred_x0'].append(pred_x0)
-
-         return img, intermediates
-
-     @torch.no_grad()
-     def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                       unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,
-                       append_to_context=None):
-         b, *_, device = *x.shape, x.device
-
-         if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-             if append_to_context is not None:
-                 model_output = self.model.apply_model(x, t, torch.cat([c, append_to_context], dim=1),
-                                                       features_adapter=features_adapter)
-             else:
-                 model_output = self.model.apply_model(x, t, c, features_adapter=features_adapter)
-         else:
-             x_in = torch.cat([x] * 2)
-             t_in = torch.cat([t] * 2)
-             if isinstance(c, dict):
-                 assert isinstance(unconditional_conditioning, dict)
-                 c_in = dict()
-                 for k in c:
-                     if isinstance(c[k], list):
-                         c_in[k] = [torch.cat([
-                             unconditional_conditioning[k][i],
-                             c[k][i]]) for i in range(len(c[k]))]
-                     else:
-                         c_in[k] = torch.cat([
-                             unconditional_conditioning[k],
-                             c[k]])
-             elif isinstance(c, list):
-                 c_in = list()
-                 assert isinstance(unconditional_conditioning, list)
-                 for i in range(len(c)):
-                     c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
-             else:
-                 if append_to_context is not None:
-                     pad_len = append_to_context.size(1)
-                     new_unconditional_conditioning = torch.cat(
-                         [unconditional_conditioning, unconditional_conditioning[:, -pad_len:, :]], dim=1)
-                     new_c = torch.cat([c, append_to_context], dim=1)
-                     c_in = torch.cat([new_unconditional_conditioning, new_c])
-                 else:
-                     c_in = torch.cat([unconditional_conditioning, c])
-             model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in, features_adapter=features_adapter).chunk(2)
-             model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
-
-         if self.model.parameterization == "v":
-             e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
-         else:
-             e_t = model_output
-
-         if score_corrector is not None:
-             assert self.model.parameterization == "eps", 'not implemented'
-             e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-         alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-         alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-         sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-         sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-         # select parameters corresponding to the currently considered timestep
-         a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-         a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-         sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-         sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
-
-         # current prediction for x_0
-         if self.model.parameterization != "v":
-             pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-         else:
-             pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
-
-         if quantize_denoised:
-             pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-         # direction pointing to x_t
-         dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
-         noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-         if noise_dropout > 0.:
-             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-         x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-         return x_prev, pred_x0
-
-     @torch.no_grad()
-     def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
-         # fast, but does not allow for exact reconstruction
-         # t serves as an index to gather the correct alphas
-         if use_original_steps:
-             sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
-             sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
-         else:
-             sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
-             sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
-
-         if noise is None:
-             noise = torch.randn_like(x0)
-         return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
-                 extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
-
-     @torch.no_grad()
-     def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-                use_original_steps=False):
-
-         timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
-         timesteps = timesteps[:t_start]
-
-         time_range = np.flip(timesteps)
-         total_steps = timesteps.shape[0]
-         print(f"Running DDIM Sampling with {total_steps} timesteps")
-
-         iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
-         x_dec = x_latent
-         for i, step in enumerate(iterator):
-             index = total_steps - i - 1
-             ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
-             x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
-                                           unconditional_guidance_scale=unconditional_guidance_scale,
-                                           unconditional_conditioning=unconditional_conditioning)
-         return x_dec
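As a hedged sketch of a typical invocation (the loaded `model` object and latent shape are assumptions; any LatentDiffusion-compatible model exposing `num_timesteps`, `betas`, `apply_model`, and a `get_learned_conditioning` helper should fit):

    from ldm.models.diffusion.ddim import DDIMSampler

    sampler = DDIMSampler(model)  # `model` is an assumed pre-loaded LatentDiffusion model
    cond = model.get_learned_conditioning(["a photo of a cat"])      # assumed helper
    uncond = model.get_learned_conditioning([""])                    # empty prompt for CFG
    samples, _ = sampler.sample(S=50, batch_size=1, shape=(4, 64, 64),
                                conditioning=cond, eta=0.0,
                                unconditional_guidance_scale=7.5,
                                unconditional_conditioning=uncond)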
spaces/Adapting/TrendFlow/mypages/navigation.py DELETED
@@ -1,7 +0,0 @@
- import streamlit as st
-
-
- def __go_to__(page: str):
-     st.session_state['current_page'] = page
-
-
- def go_to_home():
-     __go_to__('home')
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenWidth.js DELETED
@@ -1,20 +0,0 @@
- var GetChildrenWidth = function () {
-     if (this.rexSizer.hidden) {
-         return 0;
-     }
-
-     var result;
-     var child = this.child,
-         childConfig = child.rexSizer;
-     if (childConfig.hidden) {
-         result = 0;
-     } else if (this.scrollMode === 0) { // scroll y
-         result = this.getChildWidth(child);
-     } else { // scroll x
-         result = 0;
-     }
-
-     return result;
- }
-
- export default GetChildrenWidth;
spaces/AlanMars/QYL-AI-Space/modules/overwrites.py DELETED
@@ -1,101 +0,0 @@
- from __future__ import annotations
-
- import logging
-
- from llama_index import Prompt
- from typing import List, Tuple
- import mdtex2html
- from gradio_client import utils as client_utils
-
- from modules.presets import *
- from modules.llama_func import *
- from modules.config import render_latex
-
-
- def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
-     logging.debug("Compacting text chunks...🚀🚀🚀")
-     combined_str = [c.strip() for c in text_chunks if c.strip()]
-     combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
-     combined_str = "\n\n".join(combined_str)
-     # resplit based on self.max_chunk_overlap
-     text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
-     return text_splitter.split_text(combined_str)
-
-
- def postprocess(
-     self,
-     y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
- ) -> List[List[str | Dict | None]]:
-     """
-     Parameters:
-         y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
-     Returns:
-         List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
-     """
-     if y is None:
-         return []
-     processed_messages = []
-     for message_pair in y:
-         assert isinstance(
-             message_pair, (tuple, list)
-         ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
-         assert (
-             len(message_pair) == 2
-         ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
-
-         processed_messages.append(
-             [
-                 self._postprocess_chat_messages(message_pair[0], "user"),
-                 self._postprocess_chat_messages(message_pair[1], "bot"),
-             ]
-         )
-     return processed_messages
-
-
- def postprocess_chat_messages(
-     self, chat_message: str | Tuple | List | None, message_type: str
- ) -> str | Dict | None:
-     if chat_message is None:
-         return None
-     elif isinstance(chat_message, (tuple, list)):
-         filepath = chat_message[0]
-         mime_type = client_utils.get_mimetype(filepath)
-         filepath = self.make_temp_copy_if_needed(filepath)
-         return {
-             "name": filepath,
-             "mime_type": mime_type,
-             "alt_text": chat_message[1] if len(chat_message) > 1 else None,
-             "data": None,  # These last two fields are filled in by the frontend
-             "is_file": True,
-         }
-     elif isinstance(chat_message, str):
-         if message_type == "bot":
-             if not detect_converted_mark(chat_message):
-                 chat_message = convert_mdtext(chat_message)
-         elif message_type == "user":
-             if not detect_converted_mark(chat_message):
-                 chat_message = convert_asis(chat_message)
-         return chat_message
-     else:
-         raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
-
-
- with open("./assets/custom.js", "r", encoding="utf-8") as f, \
-         open("./assets/external-scripts.js", "r", encoding="utf-8") as f1:
-     customJS = f.read()
-     externalScripts = f1.read()
-
-
- def reload_javascript():
-     print("Reloading javascript...")
-     js = f'<script>{customJS}</script><script async>{externalScripts}</script>'
-     if render_latex:
-         js += """\
- <script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-MML-AM_CHTML"></script>
- <script type="text/x-mathjax-config">MathJax.Hub.Config({skipStartupTypeset: false, tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']],displayMath: [['$$','$$'], ['\\[','\\]']]}});</script>
- """
-
-     def template_response(*args, **kwargs):
-         res = GradioTemplateResponseOriginal(*args, **kwargs)
-         res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
-         res.init_headers()
-         return res
-
-     gr.routes.templates.TemplateResponse = template_response
-
-
- GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
spaces/AlanMars/QYL-AI-Space/modules/shared.py DELETED
@@ -1,55 +0,0 @@
- import os
- import queue
-
- from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST
-
-
- class State:
-     interrupted = False
-     multi_api_key = False
-     completion_url = COMPLETION_URL
-     balance_api_url = BALANCE_API_URL
-     usage_api_url = USAGE_API_URL
-
-     def interrupt(self):
-         self.interrupted = True
-
-     def recover(self):
-         self.interrupted = False
-
-     def set_api_host(self, api_host):
-         self.completion_url = f"https://{api_host}/v1/chat/completions"
-         self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants"
-         self.usage_api_url = f"https://{api_host}/dashboard/billing/usage"
-         os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1"
-
-     def reset_api_host(self):
-         self.completion_url = COMPLETION_URL
-         self.balance_api_url = BALANCE_API_URL
-         self.usage_api_url = USAGE_API_URL
-         os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1"
-         return API_HOST
-
-     def reset_all(self):
-         self.interrupted = False
-         self.completion_url = COMPLETION_URL
-
-     def set_api_key_queue(self, api_key_list):
-         self.multi_api_key = True
-         self.api_key_queue = queue.Queue()
-         for api_key in api_key_list:
-             self.api_key_queue.put(api_key)
-
-     def switching_api_key(self, func):
-         if not hasattr(self, "api_key_queue"):
-             return func
-
-         def wrapped(*args, **kwargs):
-             api_key = self.api_key_queue.get()
-             args[0].api_key = api_key
-             ret = func(*args, **kwargs)
-             self.api_key_queue.put(api_key)
-             return ret
-
-         return wrapped
-
-
- state = State()
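A minimal sketch of the round-robin key decorator in use (the client class and key strings are hypothetical; the only requirement implied by the code above is that the wrapped call's first positional argument exposes an `api_key` attribute):

    from modules.shared import state

    state.set_api_key_queue(["sk-key-one", "sk-key-two"])  # placeholder keys

    class Client:  # hypothetical caller object
        api_key = None

    def ask(client, prompt):
        return f"{client.api_key[:6]}... answered: {prompt}"

    ask = state.switching_api_key(ask)
    print(ask(Client(), "ping"))  # each call pops a key, uses it, and returns it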
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/util.py DELETED
@@ -1,349 +0,0 @@
- from torch import nn
- import torch.nn.functional as F
- import torch
-
-
- class TPS:
-     '''
-     TPS transformation: mode 'kp' for Eq. (2) in the paper, mode 'random' for the equivariance loss.
-     '''
-
-     def __init__(self, mode, bs, **kwargs):
-         self.bs = bs
-         self.mode = mode
-         if mode == 'random':
-             noise = torch.normal(mean=0, std=kwargs['sigma_affine'] * torch.ones([bs, 2, 3]))
-             self.theta = noise + torch.eye(2, 3).view(1, 2, 3)
-             self.control_points = make_coordinate_grid((kwargs['points_tps'], kwargs['points_tps']), type=noise.type())
-             self.control_points = self.control_points.unsqueeze(0)
-             self.control_params = torch.normal(mean=0,
-                                                std=kwargs['sigma_tps'] * torch.ones([bs, 1, kwargs['points_tps'] ** 2]))
-         elif mode == 'kp':
-             kp_1 = kwargs["kp_1"]
-             kp_2 = kwargs["kp_2"]
-             device = kp_1.device
-             kp_type = kp_1.type()
-             self.gs = kp_1.shape[1]
-             n = kp_1.shape[2]
-             K = torch.norm(kp_1[:, :, :, None] - kp_1[:, :, None, :], dim=4, p=2)
-             K = K ** 2
-             K = K * torch.log(K + 1e-9)
-
-             one1 = torch.ones(self.bs, kp_1.shape[1], kp_1.shape[2], 1).to(device).type(kp_type)
-             kp_1p = torch.cat([kp_1, one1], 3)
-
-             zero = torch.zeros(self.bs, kp_1.shape[1], 3, 3).to(device).type(kp_type)
-             P = torch.cat([kp_1p, zero], 2)
-             L = torch.cat([K, kp_1p.permute(0, 1, 3, 2)], 2)
-             L = torch.cat([L, P], 3)
-
-             zero = torch.zeros(self.bs, kp_1.shape[1], 3, 2).to(device).type(kp_type)
-             Y = torch.cat([kp_2, zero], 2)
-             one = torch.eye(L.shape[2]).expand(L.shape).to(device).type(kp_type) * 0.01
-             L = L + one
-
-             param = torch.matmul(torch.inverse(L), Y)
-             self.theta = param[:, :, n:, :].permute(0, 1, 3, 2)
-
-             self.control_points = kp_1
-             self.control_params = param[:, :, :n, :]
-         else:
-             raise Exception("Error TPS mode")
-
-     def transform_frame(self, frame):
-         grid = make_coordinate_grid(frame.shape[2:], type=frame.type()).unsqueeze(0).to(frame.device)
-         grid = grid.view(1, frame.shape[2] * frame.shape[3], 2)
-         shape = [self.bs, frame.shape[2], frame.shape[3], 2]
-         if self.mode == 'kp':
-             shape.insert(1, self.gs)
-         grid = self.warp_coordinates(grid).view(*shape)
-         return grid
-
-     def warp_coordinates(self, coordinates):
-         theta = self.theta.type(coordinates.type()).to(coordinates.device)
-         control_points = self.control_points.type(coordinates.type()).to(coordinates.device)
-         control_params = self.control_params.type(coordinates.type()).to(coordinates.device)
-
-         if self.mode == 'kp':
-             transformed = torch.matmul(theta[:, :, :, :2], coordinates.permute(0, 2, 1)) + theta[:, :, :, 2:]
-
-             distances = coordinates.view(coordinates.shape[0], 1, 1, -1, 2) - control_points.view(self.bs, control_points.shape[1], -1, 1, 2)
-
-             distances = distances ** 2
-             result = distances.sum(-1)
-             result = result * torch.log(result + 1e-9)
-             result = torch.matmul(result.permute(0, 1, 3, 2), control_params)
-             transformed = transformed.permute(0, 1, 3, 2) + result
-
-         elif self.mode == 'random':
-             theta = theta.unsqueeze(1)
-             transformed = torch.matmul(theta[:, :, :, :2], coordinates.unsqueeze(-1)) + theta[:, :, :, 2:]
-             transformed = transformed.squeeze(-1)
-             distances = coordinates.view(coordinates.shape[0], -1, 1, 2) - control_points.view(1, 1, -1, 2)
-             distances = distances ** 2
-
-             result = distances.sum(-1)
-             result = result * torch.log(result + 1e-9)
-             result = result * control_params
-             result = result.sum(dim=2).view(self.bs, coordinates.shape[1], 1)
-             transformed = transformed + result
-         else:
-             raise Exception("Error TPS mode")
-
-         return transformed
-
-
- def kp2gaussian(kp, spatial_size, kp_variance):
-     """
-     Transform a keypoint into a gaussian-like representation.
-     """
-     coordinate_grid = make_coordinate_grid(spatial_size, kp.type()).to(kp.device)
-     number_of_leading_dimensions = len(kp.shape) - 1
-     shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
-     coordinate_grid = coordinate_grid.view(*shape)
-     repeats = kp.shape[:number_of_leading_dimensions] + (1, 1, 1)
-     coordinate_grid = coordinate_grid.repeat(*repeats)
-
-     # Preprocess kp shape
-     shape = kp.shape[:number_of_leading_dimensions] + (1, 1, 2)
-     kp = kp.view(*shape)
-
-     mean_sub = (coordinate_grid - kp)
-
-     out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
-
-     return out
-
-
- def make_coordinate_grid(spatial_size, type):
-     """
-     Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
-     """
-     h, w = spatial_size
-     x = torch.arange(w).type(type)
-     y = torch.arange(h).type(type)
-
-     x = (2 * (x / (w - 1)) - 1)
-     y = (2 * (y / (h - 1)) - 1)
-
-     yy = y.view(-1, 1).repeat(1, w)
-     xx = x.view(1, -1).repeat(h, 1)
-
-     meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
-
-     return meshed
-
-
- class ResBlock2d(nn.Module):
-     """
-     Res block, preserves spatial resolution.
-     """
-
-     def __init__(self, in_features, kernel_size, padding):
-         super(ResBlock2d, self).__init__()
-         self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
-                                padding=padding)
-         self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
-                                padding=padding)
-         self.norm1 = nn.InstanceNorm2d(in_features, affine=True)
-         self.norm2 = nn.InstanceNorm2d(in_features, affine=True)
-
-     def forward(self, x):
-         out = self.norm1(x)
-         out = F.relu(out)
-         out = self.conv1(out)
-         out = self.norm2(out)
-         out = F.relu(out)
-         out = self.conv2(out)
-         out += x
-         return out
-
-
- class UpBlock2d(nn.Module):
-     """
-     Upsampling block for use in decoder.
-     """
-
-     def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
-         super(UpBlock2d, self).__init__()
-         self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
-                               padding=padding, groups=groups)
-         self.norm = nn.InstanceNorm2d(out_features, affine=True)
-
-     def forward(self, x):
-         out = F.interpolate(x, scale_factor=2)
-         out = self.conv(out)
-         out = self.norm(out)
-         out = F.relu(out)
-         return out
-
-
- class DownBlock2d(nn.Module):
-     """
-     Downsampling block for use in encoder.
-     """
-
-     def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
-         super(DownBlock2d, self).__init__()
-         self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
-                               padding=padding, groups=groups)
-         self.norm = nn.InstanceNorm2d(out_features, affine=True)
-         self.pool = nn.AvgPool2d(kernel_size=(2, 2))
-
-     def forward(self, x):
-         out = self.conv(x)
-         out = self.norm(out)
-         out = F.relu(out)
-         out = self.pool(out)
-         return out
-
-
- class SameBlock2d(nn.Module):
-     """
-     Simple block, preserves spatial resolution.
-     """
-
-     def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):
-         super(SameBlock2d, self).__init__()
-         self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
-                               kernel_size=kernel_size, padding=padding, groups=groups)
-         self.norm = nn.InstanceNorm2d(out_features, affine=True)
-
-     def forward(self, x):
-         out = self.conv(x)
-         out = self.norm(out)
-         out = F.relu(out)
-         return out
-
-
- class Encoder(nn.Module):
-     """
-     Hourglass Encoder
-     """
-
-     def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-         super(Encoder, self).__init__()
-
-         down_blocks = []
-         for i in range(num_blocks):
-             down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
-                                            min(max_features, block_expansion * (2 ** (i + 1))),
-                                            kernel_size=3, padding=1))
-         self.down_blocks = nn.ModuleList(down_blocks)
-
-     def forward(self, x):
-         outs = [x]
-         for down_block in self.down_blocks:
-             outs.append(down_block(outs[-1]))
-         return outs
-
-
- class Decoder(nn.Module):
-     """
-     Hourglass Decoder
-     """
-
-     def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-         super(Decoder, self).__init__()
-
-         up_blocks = []
-         self.out_channels = []
-         for i in range(num_blocks)[::-1]:
-             in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
-             self.out_channels.append(in_filters)
-             out_filters = min(max_features, block_expansion * (2 ** i))
-             up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))
-
-         self.up_blocks = nn.ModuleList(up_blocks)
-         self.out_channels.append(block_expansion + in_features)
-
-     def forward(self, x, mode=0):
-         out = x.pop()
-         outs = []
-         for up_block in self.up_blocks:
-             out = up_block(out)
-             skip = x.pop()
-             out = torch.cat([out, skip], dim=1)
-             outs.append(out)
-         if mode == 0:
-             return out
-         else:
-             return outs
-
-
- class Hourglass(nn.Module):
-     """
-     Hourglass architecture.
-     """
-
-     def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-         super(Hourglass, self).__init__()
-         self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
-         self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
-         self.out_channels = self.decoder.out_channels
-
-     def forward(self, x, mode=0):
-         return self.decoder(self.encoder(x), mode)
-
-
- class AntiAliasInterpolation2d(nn.Module):
-     """
-     Band-limited downsampling, for better preservation of the input signal.
-     """
-
-     def __init__(self, channels, scale):
-         super(AntiAliasInterpolation2d, self).__init__()
-         sigma = (1 / scale - 1) / 2
-         kernel_size = 2 * round(sigma * 4) + 1
-         self.ka = kernel_size // 2
-         self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
-
-         kernel_size = [kernel_size, kernel_size]
-         sigma = [sigma, sigma]
-         # The gaussian kernel is the product of the
-         # gaussian function of each dimension.
-         kernel = 1
-         meshgrids = torch.meshgrid(
-             [
-                 torch.arange(size, dtype=torch.float32)
-                 for size in kernel_size
-             ]
-         )
-         for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
-             mean = (size - 1) / 2
-             kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
-
-         # Make sure sum of values in gaussian kernel equals 1.
-         kernel = kernel / torch.sum(kernel)
-         # Reshape to depthwise convolutional weight
-         kernel = kernel.view(1, 1, *kernel.size())
-         kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
-
-         self.register_buffer('weight', kernel)
-         self.groups = channels
-         self.scale = scale
-
-     def forward(self, input):
-         if self.scale == 1.0:
-             return input
-
-         out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
-         out = F.conv2d(out, weight=self.weight, groups=self.groups)
-         out = F.interpolate(out, scale_factor=(self.scale, self.scale))
-
-         return out
-
-
- def to_homogeneous(coordinates):
-     ones_shape = list(coordinates.shape)
-     ones_shape[-1] = 1
-     ones = torch.ones(ones_shape).type(coordinates.type())
-
-     return torch.cat([coordinates, ones], dim=-1)
-
-
- def from_homogeneous(coordinates):
-     return coordinates[..., :2] / coordinates[..., 2:3]
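A small sketch of the 'random' TPS mode applied to a dummy batch (the import path follows this repo's layout; the kwarg values are illustrative and mirror the keys read in `__init__`):

    import torch
    from modules.util import TPS

    frame = torch.rand(2, 3, 64, 64)  # (bs, c, h, w) dummy batch
    tps = TPS('random', bs=2, sigma_affine=0.05, sigma_tps=0.005, points_tps=5)
    grid = tps.transform_frame(frame)  # (2, 64, 64, 2) sampling grid in [-1, 1]
    warped = torch.nn.functional.grid_sample(frame, grid, align_corners=True)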
spaces/AlexWang/lama/bin/analyze_errors.py DELETED
@@ -1,316 +0,0 @@
- #!/usr/bin/env python3
- import os
- import pickle
-
- import cv2
- import matplotlib.pyplot as plt
- import numpy as np
- import pandas as pd
- import sklearn.svm
- import torch
- from joblib import Parallel, delayed
-
- from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset, load_image
- from saicinpainting.evaluation.losses.fid.inception import InceptionV3
- from saicinpainting.evaluation.utils import load_yaml
- from saicinpainting.training.visualizers.base import visualize_mask_and_images
-
-
- def draw_score(img, score):
-     img = np.transpose(img, (1, 2, 0))
-     cv2.putText(img, f'{score:.2f}',
-                 (40, 40),
-                 cv2.FONT_HERSHEY_SIMPLEX,
-                 1,
-                 (0, 1, 0),
-                 thickness=3)
-     img = np.transpose(img, (2, 0, 1))
-     return img
-
-
- def save_global_samples(global_mask_fnames, mask2real_fname, mask2fake_fname, out_dir, real_scores_by_fname, fake_scores_by_fname):
-     for cur_mask_fname in global_mask_fnames:
-         cur_real_fname = mask2real_fname[cur_mask_fname]
-         orig_img = load_image(cur_real_fname, mode='RGB')
-         fake_img = load_image(mask2fake_fname[cur_mask_fname], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]]
-         mask = load_image(cur_mask_fname, mode='L')[None, ...]
-
-         draw_score(orig_img, real_scores_by_fname.loc[cur_real_fname, 'real_score'])
-         draw_score(fake_img, fake_scores_by_fname.loc[cur_mask_fname, 'fake_score'])
-
-         cur_grid = visualize_mask_and_images(dict(image=orig_img, mask=mask, fake=fake_img),
-                                              keys=['image', 'fake'],
-                                              last_without_mask=True)
-         cur_grid = np.clip(cur_grid * 255, 0, 255).astype('uint8')
-         cur_grid = cv2.cvtColor(cur_grid, cv2.COLOR_RGB2BGR)
-         cv2.imwrite(os.path.join(out_dir, os.path.splitext(os.path.basename(cur_mask_fname))[0] + '.jpg'),
-                     cur_grid)
-
-
- def save_samples_by_real(worst_best_by_real, mask2fake_fname, fake_info, out_dir):
-     for real_fname in worst_best_by_real.index:
-         worst_mask_path = worst_best_by_real.loc[real_fname, 'worst']
-         best_mask_path = worst_best_by_real.loc[real_fname, 'best']
-         orig_img = load_image(real_fname, mode='RGB')
-         worst_mask_img = load_image(worst_mask_path, mode='L')[None, ...]
-         worst_fake_img = load_image(mask2fake_fname[worst_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]]
-         best_mask_img = load_image(best_mask_path, mode='L')[None, ...]
-         best_fake_img = load_image(mask2fake_fname[best_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]]
-
-         draw_score(orig_img, worst_best_by_real.loc[real_fname, 'real_score'])
-         draw_score(worst_fake_img, worst_best_by_real.loc[real_fname, 'worst_score'])
-         draw_score(best_fake_img, worst_best_by_real.loc[real_fname, 'best_score'])
-
-         cur_grid = visualize_mask_and_images(dict(image=orig_img, mask=np.zeros_like(worst_mask_img),
-                                                   worst_mask=worst_mask_img, worst_img=worst_fake_img,
-                                                   best_mask=best_mask_img, best_img=best_fake_img),
-                                              keys=['image', 'worst_mask', 'worst_img', 'best_mask', 'best_img'],
-                                              rescale_keys=['worst_mask', 'best_mask'],
-                                              last_without_mask=True)
-         cur_grid = np.clip(cur_grid * 255, 0, 255).astype('uint8')
-         cur_grid = cv2.cvtColor(cur_grid, cv2.COLOR_RGB2BGR)
-         cv2.imwrite(os.path.join(out_dir,
-                                  os.path.splitext(os.path.basename(real_fname))[0] + '.jpg'),
-                     cur_grid)
-
-         fig, (ax1, ax2) = plt.subplots(1, 2)
-         cur_stat = fake_info[fake_info['real_fname'] == real_fname]
-         cur_stat['fake_score'].hist(ax=ax1)
-         cur_stat['real_score'].hist(ax=ax2)
-         fig.tight_layout()
-         fig.savefig(os.path.join(out_dir,
-                                  os.path.splitext(os.path.basename(real_fname))[0] + '_scores.png'))
-         plt.close(fig)
-
-
- def extract_overlapping_masks(mask_fnames, cur_i, fake_scores_table, max_overlaps_n=2):
-     result_pairs = []
-     result_scores = []
-     mask_fname_a = mask_fnames[cur_i]
-     mask_a = load_image(mask_fname_a, mode='L')[None, ...] > 0.5
-     cur_score_a = fake_scores_table.loc[mask_fname_a, 'fake_score']
-     for mask_fname_b in mask_fnames[cur_i + 1:]:
-         mask_b = load_image(mask_fname_b, mode='L')[None, ...] > 0.5
-         if not np.any(mask_a & mask_b):
-             continue
-         cur_score_b = fake_scores_table.loc[mask_fname_b, 'fake_score']
-         result_pairs.append((mask_fname_a, mask_fname_b))
-         result_scores.append(cur_score_b - cur_score_a)
-         if len(result_pairs) >= max_overlaps_n:
-             break
-     return result_pairs, result_scores
-
-
- def main(args):
-     config = load_yaml(args.config)
-
-     latents_dir = os.path.join(args.outpath, 'latents')
-     os.makedirs(latents_dir, exist_ok=True)
-     global_worst_dir = os.path.join(args.outpath, 'global_worst')
-     os.makedirs(global_worst_dir, exist_ok=True)
-     global_best_dir = os.path.join(args.outpath, 'global_best')
-     os.makedirs(global_best_dir, exist_ok=True)
-     worst_best_by_best_worst_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'best_worst_score_diff_max')
-     os.makedirs(worst_best_by_best_worst_score_diff_max_dir, exist_ok=True)
-     worst_best_by_best_worst_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'best_worst_score_diff_min')
-     os.makedirs(worst_best_by_best_worst_score_diff_min_dir, exist_ok=True)
-     worst_best_by_real_best_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_best_score_diff_max')
-     os.makedirs(worst_best_by_real_best_score_diff_max_dir, exist_ok=True)
-     worst_best_by_real_best_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_best_score_diff_min')
-     os.makedirs(worst_best_by_real_best_score_diff_min_dir, exist_ok=True)
-     worst_best_by_real_worst_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_worst_score_diff_max')
-     os.makedirs(worst_best_by_real_worst_score_diff_max_dir, exist_ok=True)
-     worst_best_by_real_worst_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_worst_score_diff_min')
-     os.makedirs(worst_best_by_real_worst_score_diff_min_dir, exist_ok=True)
-
-     if not args.only_report:
-         block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
-         inception_model = InceptionV3([block_idx]).eval().cuda()
-
-         dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs)
-
-         real2vector_cache = {}
-
-         real_features = []
-         fake_features = []
-
-         orig_fnames = []
-         mask_fnames = []
-         mask2real_fname = {}
-         mask2fake_fname = {}
-
-         for batch_i, batch in enumerate(dataset):
-             orig_img_fname = dataset.img_filenames[batch_i]
-             mask_fname = dataset.mask_filenames[batch_i]
-             fake_fname = dataset.pred_filenames[batch_i]
-             mask2real_fname[mask_fname] = orig_img_fname
-             mask2fake_fname[mask_fname] = fake_fname
-
-             cur_real_vector = real2vector_cache.get(orig_img_fname, None)
-             if cur_real_vector is None:
-                 with torch.no_grad():
-                     in_img = torch.from_numpy(batch['image'][None, ...]).cuda()
-                     cur_real_vector = inception_model(in_img)[0].squeeze(-1).squeeze(-1).cpu().numpy()
-                 real2vector_cache[orig_img_fname] = cur_real_vector
-
-             pred_img = torch.from_numpy(batch['inpainted'][None, ...]).cuda()
-             cur_fake_vector = inception_model(pred_img)[0].squeeze(-1).squeeze(-1).cpu().numpy()
-
-             real_features.append(cur_real_vector)
-             fake_features.append(cur_fake_vector)
-
-             orig_fnames.append(orig_img_fname)
-             mask_fnames.append(mask_fname)
-
-         ids_features = np.concatenate(real_features + fake_features, axis=0)
-         ids_labels = np.array(([1] * len(real_features)) + ([0] * len(fake_features)))
-
-         with open(os.path.join(latents_dir, 'features.pkl'), 'wb') as f:
-             pickle.dump(ids_features, f, protocol=3)
-         with open(os.path.join(latents_dir, 'labels.pkl'), 'wb') as f:
-             pickle.dump(ids_labels, f, protocol=3)
-         with open(os.path.join(latents_dir, 'orig_fnames.pkl'), 'wb') as f:
-             pickle.dump(orig_fnames, f, protocol=3)
-         with open(os.path.join(latents_dir, 'mask_fnames.pkl'), 'wb') as f:
-             pickle.dump(mask_fnames, f, protocol=3)
-         with open(os.path.join(latents_dir, 'mask2real_fname.pkl'), 'wb') as f:
-             pickle.dump(mask2real_fname, f, protocol=3)
-         with open(os.path.join(latents_dir, 'mask2fake_fname.pkl'), 'wb') as f:
-             pickle.dump(mask2fake_fname, f, protocol=3)
-
-         svm = sklearn.svm.LinearSVC(dual=False)
-         svm.fit(ids_features, ids_labels)
-
-         pred_scores = svm.decision_function(ids_features)
-         real_scores = pred_scores[:len(real_features)]
-         fake_scores = pred_scores[len(real_features):]
-
-         with open(os.path.join(latents_dir, 'pred_scores.pkl'), 'wb') as f:
-             pickle.dump(pred_scores, f, protocol=3)
-         with open(os.path.join(latents_dir, 'real_scores.pkl'), 'wb') as f:
-             pickle.dump(real_scores, f, protocol=3)
-         with open(os.path.join(latents_dir, 'fake_scores.pkl'), 'wb') as f:
-             pickle.dump(fake_scores, f, protocol=3)
-     else:
-         with open(os.path.join(latents_dir, 'orig_fnames.pkl'), 'rb') as f:
-             orig_fnames = pickle.load(f)
-         with open(os.path.join(latents_dir, 'mask_fnames.pkl'), 'rb') as f:
-             mask_fnames = pickle.load(f)
-         with open(os.path.join(latents_dir, 'mask2real_fname.pkl'), 'rb') as f:
-             mask2real_fname = pickle.load(f)
-         with open(os.path.join(latents_dir, 'mask2fake_fname.pkl'), 'rb') as f:
-             mask2fake_fname = pickle.load(f)
-         with open(os.path.join(latents_dir, 'real_scores.pkl'), 'rb') as f:
-             real_scores = pickle.load(f)
-         with open(os.path.join(latents_dir, 'fake_scores.pkl'), 'rb') as f:
-             fake_scores = pickle.load(f)
-
-     real_info = pd.DataFrame(data=[dict(real_fname=fname,
-                                         real_score=score)
-                                    for fname, score
-                                    in zip(orig_fnames, real_scores)])
-     real_info.set_index('real_fname', drop=True, inplace=True)
-
-     fake_info = pd.DataFrame(data=[dict(mask_fname=fname,
-                                         fake_fname=mask2fake_fname[fname],
-                                         real_fname=mask2real_fname[fname],
-                                         fake_score=score)
-                                    for fname, score
-                                    in zip(mask_fnames, fake_scores)])
-     fake_info = fake_info.join(real_info, on='real_fname', how='left')
-     fake_info.drop_duplicates(['fake_fname', 'real_fname'], inplace=True)
-
-     fake_stats_by_real = fake_info.groupby('real_fname')['fake_score'].describe()[['mean', 'std']].rename(
-         {'mean': 'mean_fake_by_real', 'std': 'std_fake_by_real'}, axis=1)
-     fake_info = fake_info.join(fake_stats_by_real, on='real_fname', rsuffix='stat_by_real')
-     fake_info.drop_duplicates(['fake_fname', 'real_fname'], inplace=True)
-     fake_info.to_csv(os.path.join(latents_dir, 'join_scores_table.csv'), sep='\t', index=False)
-
-     fake_scores_table = fake_info.set_index('mask_fname')['fake_score'].to_frame()
-     real_scores_table = fake_info.set_index('real_fname')['real_score'].drop_duplicates().to_frame()
-
-     fig, (ax1, ax2) = plt.subplots(1, 2)
-     ax1.hist(fake_scores)
-     ax2.hist(real_scores)
-     fig.tight_layout()
-     fig.savefig(os.path.join(args.outpath, 'global_scores_hist.png'))
-     plt.close(fig)
-
-     global_worst_masks = fake_info.sort_values('fake_score', ascending=True)['mask_fname'].iloc[:config.take_global_top].to_list()
-     global_best_masks = fake_info.sort_values('fake_score', ascending=False)['mask_fname'].iloc[:config.take_global_top].to_list()
-     save_global_samples(global_worst_masks, mask2real_fname, mask2fake_fname, global_worst_dir, real_scores_table, fake_scores_table)
-     save_global_samples(global_best_masks, mask2real_fname, mask2fake_fname, global_best_dir, real_scores_table, fake_scores_table)
-
-     # grouped by real
-     worst_samples_by_real = fake_info.groupby('real_fname').apply(
-         lambda d: d.set_index('mask_fname')['fake_score'].idxmin()).to_frame().rename({0: 'worst'}, axis=1)
-     best_samples_by_real = fake_info.groupby('real_fname').apply(
-         lambda d: d.set_index('mask_fname')['fake_score'].idxmax()).to_frame().rename({0: 'best'}, axis=1)
-     worst_best_by_real = pd.concat([worst_samples_by_real, best_samples_by_real], axis=1)
-
-     worst_best_by_real = worst_best_by_real.join(fake_scores_table.rename({'fake_score': 'worst_score'}, axis=1),
-                                                  on='worst')
-     worst_best_by_real = worst_best_by_real.join(fake_scores_table.rename({'fake_score': 'best_score'}, axis=1),
-                                                  on='best')
-     worst_best_by_real = worst_best_by_real.join(real_scores_table)
-
-     worst_best_by_real['best_worst_score_diff'] = worst_best_by_real['best_score'] - worst_best_by_real['worst_score']
-     worst_best_by_real['real_best_score_diff'] = worst_best_by_real['real_score'] - worst_best_by_real['best_score']
-     worst_best_by_real['real_worst_score_diff'] = worst_best_by_real['real_score'] - worst_best_by_real['worst_score']
-
-     worst_best_by_best_worst_score_diff_min = worst_best_by_real.sort_values('best_worst_score_diff', ascending=True).iloc[:config.take_worst_best_top]
-     worst_best_by_best_worst_score_diff_max = worst_best_by_real.sort_values('best_worst_score_diff', ascending=False).iloc[:config.take_worst_best_top]
-     save_samples_by_real(worst_best_by_best_worst_score_diff_min, mask2fake_fname, fake_info, worst_best_by_best_worst_score_diff_min_dir)
-     save_samples_by_real(worst_best_by_best_worst_score_diff_max, mask2fake_fname, fake_info, worst_best_by_best_worst_score_diff_max_dir)
-
-     worst_best_by_real_best_score_diff_min = worst_best_by_real.sort_values('real_best_score_diff', ascending=True).iloc[:config.take_worst_best_top]
-     worst_best_by_real_best_score_diff_max = worst_best_by_real.sort_values('real_best_score_diff', ascending=False).iloc[:config.take_worst_best_top]
-     save_samples_by_real(worst_best_by_real_best_score_diff_min, mask2fake_fname, fake_info, worst_best_by_real_best_score_diff_min_dir)
-     save_samples_by_real(worst_best_by_real_best_score_diff_max, mask2fake_fname, fake_info, worst_best_by_real_best_score_diff_max_dir)
-
-     worst_best_by_real_worst_score_diff_min = worst_best_by_real.sort_values('real_worst_score_diff', ascending=True).iloc[:config.take_worst_best_top]
-     worst_best_by_real_worst_score_diff_max = worst_best_by_real.sort_values('real_worst_score_diff', ascending=False).iloc[:config.take_worst_best_top]
-     save_samples_by_real(worst_best_by_real_worst_score_diff_min, mask2fake_fname, fake_info, worst_best_by_real_worst_score_diff_min_dir)
-     save_samples_by_real(worst_best_by_real_worst_score_diff_max, mask2fake_fname, fake_info, worst_best_by_real_worst_score_diff_max_dir)
-
-     # analyze which changes of mask cause the bigger changes of score
-     overlapping_mask_fname_pairs = []
-     overlapping_mask_fname_score_diffs = []
-     for cur_real_fname in orig_fnames:
-         cur_fakes_info = fake_info[fake_info['real_fname'] == cur_real_fname]
-         cur_mask_fnames = sorted(cur_fakes_info['mask_fname'].unique())
-
-         cur_mask_pairs_and_scores = Parallel(args.n_jobs)(
-             delayed(extract_overlapping_masks)(cur_mask_fnames, i, fake_scores_table)
-             for i in range(len(cur_mask_fnames) - 1)
-         )
-         for cur_pairs, cur_scores in cur_mask_pairs_and_scores:
-             overlapping_mask_fname_pairs.extend(cur_pairs)
-             overlapping_mask_fname_score_diffs.extend(cur_scores)
-
-     overlapping_mask_fname_pairs = np.asarray(overlapping_mask_fname_pairs)
-     overlapping_mask_fname_score_diffs = np.asarray(overlapping_mask_fname_score_diffs)
-     overlapping_sort_idx = np.argsort(overlapping_mask_fname_score_diffs)
-     overlapping_mask_fname_pairs = overlapping_mask_fname_pairs[overlapping_sort_idx]
-     overlapping_mask_fname_score_diffs = overlapping_mask_fname_score_diffs[overlapping_sort_idx]
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('config', type=str, help='Path to config for dataset generation')
-     aparser.add_argument('datadir', type=str,
-                          help='Path to folder with images and masks (output of gen_mask_dataset.py)')
-     aparser.add_argument('predictdir', type=str,
-                          help='Path to folder with predicts (e.g. predict_hifill_baseline.py)')
-     aparser.add_argument('outpath', type=str, help='Where to put results')
-     aparser.add_argument('--only-report', action='store_true',
-                          help='Whether to skip prediction and feature extraction, '
-                               'load all the possible latents and proceed with report only')
-     aparser.add_argument('--n-jobs', type=int, default=8, help='how many processes to use for pair mask mining')
-
-     main(aparser.parse_args())
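Grounded in the argparse definition above, a typical invocation looks like this (all paths and the config filename are placeholders):

    python bin/analyze_errors.py eval_config.yaml /data/masked /data/predicts /data/report --n-jobs 8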
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py DELETED
@@ -1,19 +0,0 @@
- import numpy as np
- import torch
- from .monotonic_align.core import maximum_path_c
-
-
- def maximum_path(neg_cent, mask):
-     """ Cython optimized version.
-     neg_cent: [b, t_t, t_s]
-     mask: [b, t_t, t_s]
-     """
-     device = neg_cent.device
-     dtype = neg_cent.dtype
-     neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
-     path = np.zeros(neg_cent.shape, dtype=np.int32)
-
-     t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
-     t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
-     maximum_path_c(path, neg_cent, t_t_max, t_s_max)
-     return torch.from_numpy(path).to(device=device, dtype=dtype)
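
For orientation, a rough usage sketch of `maximum_path`, assuming the Cython extension `monotonic_align.core` has been built; shapes follow the docstring:

```python
import torch

# neg_cent holds alignment scores between t_t and t_s positions.
neg_cent = torch.randn(1, 4, 3)      # [b, t_t, t_s]
mask = torch.ones(1, 4, 3)           # all positions valid
path = maximum_path(neg_cent, mask)  # 0/1 monotonic alignment, same shape
print(path.shape)                    # torch.Size([1, 4, 3])
```
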
spaces/Aloento/9Nine-PITS/attentions.py DELETED
@@ -1,473 +0,0 @@
- # from https://github.com/jaywalnut310/vits
- import math
-
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- import commons
- from modules import LayerNorm
-
-
- class Encoder(nn.Module):
-     def __init__(
-         self,
-         hidden_channels,
-         filter_channels,
-         n_heads,
-         n_layers,
-         kernel_size=1,
-         p_dropout=0.,
-         window_size=4,
-         **kwargs
-     ):
-         super().__init__()
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-
-         self.drop = nn.Dropout(p_dropout)
-         self.attn_layers = nn.ModuleList()
-         self.norm_layers_1 = nn.ModuleList()
-         self.ffn_layers = nn.ModuleList()
-         self.norm_layers_2 = nn.ModuleList()
-         for i in range(self.n_layers):
-             self.attn_layers.append(
-                 MultiHeadAttention(
-                     hidden_channels,
-                     hidden_channels,
-                     n_heads,
-                     p_dropout=p_dropout,
-                     window_size=window_size
-                 )
-             )
-             self.norm_layers_1.append(LayerNorm(hidden_channels))
-             self.ffn_layers.append(
-                 FFN(
-                     hidden_channels,
-                     hidden_channels,
-                     filter_channels,
-                     kernel_size,
-                     p_dropout=p_dropout
-                 )
-             )
-             self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-     def forward(self, x, x_mask):
-         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-         x = x * x_mask
-         for i in range(self.n_layers):
-             y = self.attn_layers[i](x, x, attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_1[i](x + y)
-
-             y = self.ffn_layers[i](x, x_mask)
-             y = self.drop(y)
-             x = self.norm_layers_2[i](x + y)
-         x = x * x_mask
-         return x
-
-
- class Decoder(nn.Module):
-     def __init__(
-         self,
-         hidden_channels,
-         filter_channels,
-         n_heads,
-         n_layers,
-         kernel_size=1,
-         p_dropout=0.,
-         proximal_bias=False,
-         proximal_init=True,
-         **kwargs
-     ):
-         super().__init__()
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.proximal_bias = proximal_bias
-         self.proximal_init = proximal_init
-
-         self.drop = nn.Dropout(p_dropout)
-         self.self_attn_layers = nn.ModuleList()
-         self.norm_layers_0 = nn.ModuleList()
-         self.encdec_attn_layers = nn.ModuleList()
-         self.norm_layers_1 = nn.ModuleList()
-         self.ffn_layers = nn.ModuleList()
-         self.norm_layers_2 = nn.ModuleList()
-         for i in range(self.n_layers):
-             self.self_attn_layers.append(
-                 MultiHeadAttention(
-                     hidden_channels,
-                     hidden_channels,
-                     n_heads,
-                     p_dropout=p_dropout,
-                     proximal_bias=proximal_bias,
-                     proximal_init=proximal_init
-                 )
-             )
-             self.norm_layers_0.append(LayerNorm(hidden_channels))
-             self.encdec_attn_layers.append(
-                 MultiHeadAttention(
-                     hidden_channels,
-                     hidden_channels,
-                     n_heads,
-                     p_dropout=p_dropout
-                 )
-             )
-             self.norm_layers_1.append(LayerNorm(hidden_channels))
-             self.ffn_layers.append(
-                 FFN(
-                     hidden_channels,
-                     hidden_channels,
-                     filter_channels,
-                     kernel_size,
-                     p_dropout=p_dropout,
-                     causal=True
-                 )
-             )
-             self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-     def forward(self, x, x_mask, h, h_mask):
-         """
-         x: decoder input
-         h: encoder output
-         """
-         self_attn_mask = commons.subsequent_mask(
-             x_mask.size(2)
-         ).to(device=x.device, dtype=x.dtype)
-         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-         x = x * x_mask
-         for i in range(self.n_layers):
-             y = self.self_attn_layers[i](x, x, self_attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_0[i](x + y)
-
-             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_1[i](x + y)
-
-             y = self.ffn_layers[i](x, x_mask)
-             y = self.drop(y)
-             x = self.norm_layers_2[i](x + y)
-         x = x * x_mask
-         return x
-
-
- class MultiHeadAttention(nn.Module):
-     def __init__(
-         self,
-         channels,
-         out_channels,
-         n_heads,
-         p_dropout=0.,
-         window_size=None,
-         heads_share=True,
-         block_length=None,
-         proximal_bias=False,
-         proximal_init=False
-     ):
-         super().__init__()
-         assert channels % n_heads == 0
-
-         self.channels = channels
-         self.out_channels = out_channels
-         self.n_heads = n_heads
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-         self.heads_share = heads_share
-         self.block_length = block_length
-         self.proximal_bias = proximal_bias
-         self.proximal_init = proximal_init
-         self.attn = None
-
-         self.k_channels = channels // n_heads
-         self.conv_q = nn.Conv1d(channels, channels, 1)
-         self.conv_k = nn.Conv1d(channels, channels, 1)
-         self.conv_v = nn.Conv1d(channels, channels, 1)
-         self.conv_o = nn.Conv1d(channels, out_channels, 1)
-         self.drop = nn.Dropout(p_dropout)
-
-         if window_size is not None:
-             n_heads_rel = 1 if heads_share else n_heads
-             rel_stddev = self.k_channels ** -0.5
-             self.emb_rel_k = nn.Parameter(torch.randn(
-                 n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-             self.emb_rel_v = nn.Parameter(torch.randn(
-                 n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-         nn.init.xavier_uniform_(self.conv_q.weight)
-         nn.init.xavier_uniform_(self.conv_k.weight)
-         nn.init.xavier_uniform_(self.conv_v.weight)
-         if proximal_init:
-             with torch.no_grad():
-                 self.conv_k.weight.copy_(self.conv_q.weight)
-                 self.conv_k.bias.copy_(self.conv_q.bias)
-
-     def forward(self, x, c, attn_mask=None):
-         q = self.conv_q(x)
-         k = self.conv_k(c)
-         v = self.conv_v(c)
-
-         x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-         x = self.conv_o(x)
-         return x
-
-     def attention(self, query, key, value, mask=None):
-         # reshape [b, d, t] -> [b, n_h, t, d_k]
-         b, d, t_s, t_t = (*key.size(), query.size(2))
-         # query = query.view(
-         #     b,
-         #     self.n_heads,
-         #     self.k_channels,
-         #     t_t
-         # ).transpose(2, 3)  # [b,h,t_t,c], d=h*c
-         # key = key.view(
-         #     b,
-         #     self.n_heads,
-         #     self.k_channels,
-         #     t_s
-         # ).transpose(2, 3)  # [b,h,t_s,c]
-         # value = value.view(
-         #     b,
-         #     self.n_heads,
-         #     self.k_channels,
-         #     t_s
-         # ).transpose(2, 3)  # [b,h,t_s,c]
-         # scores = torch.matmul(
-         #     query / math.sqrt(self.k_channels), key.transpose(-2, -1)
-         # )  # [b,h,t_t,t_s]
-         query = query.view(
-             b,
-             self.n_heads,
-             self.k_channels,
-             t_t
-         )  # [b,h,c,t_t]
-         key = key.view(
-             b,
-             self.n_heads,
-             self.k_channels,
-             t_s
-         )  # [b,h,c,t_s]
-         value = value.view(
-             b,
-             self.n_heads,
-             self.k_channels,
-             t_s
-         )  # [b,h,c,t_s]
-         scores = torch.einsum('bhdt,bhds -> bhts', query / math.sqrt(self.k_channels), key)  # [b,h,t_t,t_s]
-         # if self.window_size is not None:
-         #     assert t_s == t_t, "Relative attention is only available for self-attention."
-         #     key_relative_embeddings = self._get_relative_embeddings(
-         #         self.emb_rel_k, t_s
-         #     )
-         #     rel_logits = self._matmul_with_relative_keys(
-         #         query / math.sqrt(self.k_channels), key_relative_embeddings
-         #     )  # [b,h,t_t,d],[h or 1,e,d] -> [b,h,t_t,e]
-         #     scores_local = self._relative_position_to_absolute_position(rel_logits)
-         #     scores = scores + scores_local
-         # if self.proximal_bias:
-         #     assert t_s == t_t, "Proximal bias is only available for self-attention."
-         #     scores = scores + \
-         #         self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-         # if mask is not None:
-         #     scores = scores.masked_fill(mask == 0, -1e4)
-         #     if self.block_length is not None:
-         #         assert t_s == t_t, "Local attention is only available for self-attention."
-         #         block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-         #         scores = scores.masked_fill(block_mask == 0, -1e4)
-         # p_attn = F.softmax(scores, dim=-1)  # [b, h, t_t, t_s]
-         # p_attn = self.drop(p_attn)
-         # output = torch.matmul(p_attn, value)  # [b,h,t_t,t_s],[b,h,t_s,c] -> [b,h,t_t,c]
-         # if self.window_size is not None:
-         #     relative_weights = self._absolute_position_to_relative_position(p_attn)  # [b, h, t_t, 2*t_t-1]
-         #     value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)  # [h or 1, 2*t_t-1, c]
-         #     output = output + \
-         #         self._matmul_with_relative_values(
-         #             relative_weights, value_relative_embeddings)  # [b, h, t_t, 2*t_t-1],[h or 1, 2*t_t-1, c] -> [b, h, t_t, c]
-         # output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, c] -> [b,h,c,t_t] -> [b, d, t_t]
-         if self.window_size is not None:
-             assert t_s == t_t, "Relative attention is only available for self-attention."
-             key_relative_embeddings = self._get_relative_embeddings(
-                 self.emb_rel_k, t_s
-             )
-             rel_logits = torch.einsum('bhdt,hed->bhte',
-                                       query / math.sqrt(self.k_channels), key_relative_embeddings
-                                       )  # [b,h,c,t_t],[h or 1,e,c] -> [b,h,t_t,e]
-             scores_local = self._relative_position_to_absolute_position(rel_logits)
-             scores = scores + scores_local
-         if self.proximal_bias:
-             assert t_s == t_t, "Proximal bias is only available for self-attention."
-             scores = scores + \
-                 self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-         if mask is not None:
-             scores = scores.masked_fill(mask == 0, -1e4)
-             if self.block_length is not None:
-                 assert t_s == t_t, "Local attention is only available for self-attention."
-                 block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-                 scores = scores.masked_fill(block_mask == 0, -1e4)
-         p_attn = F.softmax(scores, dim=-1)  # [b, h, t_t, t_s]
-         p_attn = self.drop(p_attn)
-         output = torch.einsum('bhcs,bhts->bhct', value, p_attn)  # [b,h,c,t_s],[b,h,t_t,t_s] -> [b,h,c,t_t]
-         if self.window_size is not None:
-             relative_weights = self._absolute_position_to_relative_position(p_attn)  # [b, h, t_t, 2*t_t-1]
-             value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)  # [h or 1, 2*t_t-1, c]
-             output = output + \
-                 torch.einsum('bhte,hec->bhct',
-                              relative_weights, value_relative_embeddings)  # [b, h, t_t, 2*t_t-1],[h or 1, 2*t_t-1, c] -> [b, h, c, t_t]
-         output = output.view(b, d, t_t)  # [b, h, c, t_t] -> [b, d, t_t]
-         return output, p_attn
-
-     def _matmul_with_relative_values(self, x, y):
-         """
-         x: [b, h, l, m]
-         y: [h or 1, m, d]
-         ret: [b, h, l, d]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0))
-         return ret
-
-     def _matmul_with_relative_keys(self, x, y):
-         """
-         x: [b, h, l, d]
-         y: [h or 1, m, d]
-         ret: [b, h, l, m]
-         """
-         # ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-         ret = torch.einsum('bhld,hmd -> bhlm', x, y)
-         return ret
-
-     def _get_relative_embeddings(self, relative_embeddings, length):
-         max_relative_position = 2 * self.window_size + 1
-         # Pad first before slice to avoid using cond ops.
-         pad_length = max(length - (self.window_size + 1), 0)
-         slice_start_position = max((self.window_size + 1) - length, 0)
-         slice_end_position = slice_start_position + 2 * length - 1
-         if pad_length > 0:
-             padded_relative_embeddings = F.pad(
-                 relative_embeddings,
-                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-         else:
-             padded_relative_embeddings = relative_embeddings
-         used_relative_embeddings = padded_relative_embeddings[
-             :, slice_start_position:slice_end_position
-         ]
-         return used_relative_embeddings
-
-     def _relative_position_to_absolute_position(self, x):
-         """
-         x: [b, h, l, 2*l-1]
-         ret: [b, h, l, l]
-         """
-         batch, heads, length, _ = x.size()
-         # Concat columns of pad to shift from relative to absolute indexing.
-         x = F.pad(x, commons.convert_pad_shape(
-             [[0, 0], [0, 0], [0, 0], [0, 1]]
-         ))
-
-         # Concat extra elements so as to add up to shape (len+1, 2*len-1).
-         x_flat = x.view([batch, heads, length * 2 * length])
-         x_flat = F.pad(x_flat, commons.convert_pad_shape(
-             [[0, 0], [0, 0], [0, length - 1]]
-         ))
-
-         # Reshape and slice out the padded elements.
-         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
-             :, :, :length, length - 1:
-         ]
-         return x_final
-
-     def _absolute_position_to_relative_position(self, x):
-         """
-         x: [b, h, l, l]
-         ret: [b, h, l, 2*l-1]
-         """
-         batch, heads, length, _ = x.size()
-         # pad along column
-         x = F.pad(x, commons.convert_pad_shape(
-             [[0, 0], [0, 0], [0, 0], [0, length - 1]]
-         ))
-         x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
-         # add 0's in the beginning that will skew the elements after reshape
-         x_flat = F.pad(x_flat, commons.convert_pad_shape(
-             [[0, 0], [0, 0], [length, 0]]
-         ))
-         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-         return x_final
-
-     def _attention_bias_proximal(self, length):
-         """Bias for self-attention to encourage attention to close positions.
-         Args:
-             length: an integer scalar.
-         Returns:
-             a Tensor with shape [1, 1, length, length]
-         """
-         r = torch.arange(length, dtype=torch.float32)
-         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
- class FFN(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         out_channels,
-         filter_channels,
-         kernel_size,
-         p_dropout=0.,
-         activation=None,
-         causal=False
-     ):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.activation = activation
-         self.causal = causal
-
-         if causal:
-             self.padding = self._causal_padding
-         else:
-             self.padding = self._same_padding
-
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-         self.drop = nn.Dropout(p_dropout)
-
-     def forward(self, x, x_mask):
-         x = self.conv_1(self.padding(x * x_mask))
-         if self.activation == "gelu":
-             x = x * torch.sigmoid(1.702 * x)
-         else:
-             x = torch.relu(x)
-         x = self.drop(x)
-         x = self.conv_2(self.padding(x * x_mask))
-         return x * x_mask
-
-     def _causal_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = self.kernel_size - 1
-         pad_r = 0
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
-
-     def _same_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = (self.kernel_size - 1) // 2
-         pad_r = self.kernel_size // 2
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
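
As a quick smoke test, the encoder can be exercised standalone; the hyperparameters below mirror common VITS settings, and the helper modules `commons` and `modules` from this repo must be importable:

```python
import torch

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)
x = torch.randn(2, 192, 50)    # [batch, channels, time]
x_mask = torch.ones(2, 1, 50)  # 1 for valid frames
out = enc(x, x_mask)           # [2, 192, 50]; masked frames stay zero
```
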
spaces/Amrrs/DragGan-Inversion/stylegan_human/legacy.py DELETED
@@ -1,223 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
-
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
- #
- import pickle
- import dnnlib
- import re
- from typing import List, Optional
- import torch
- import copy
- import numpy as np
- from torch_utils import misc
-
-
- # ----------------------------------------------------------------------------
- # loading torch pkl
- def load_network_pkl(f, force_fp16=False, G_only=False):
-     data = _LegacyUnpickler(f).load()
-     if G_only:
-         f = open('ori_model_Gonly.txt', 'a+')
-     else:
-         f = open('ori_model.txt', 'a+')
-     for key in data.keys():
-         f.write(str(data[key]))
-     f.close()
-
-     # This part is commented out; to convert a TF pickle, use the original script from StyleGAN2-ada-pytorch.
-     # # Legacy TensorFlow pickle => convert.
-     # if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
-     #     tf_G, tf_D, tf_Gs = data
-     #     G = convert_tf_generator(tf_G)
-     #     D = convert_tf_discriminator(tf_D)
-     #     G_ema = convert_tf_generator(tf_Gs)
-     #     data = dict(G=G, D=D, G_ema=G_ema)
-
-     # Add missing fields.
-     if 'training_set_kwargs' not in data:
-         data['training_set_kwargs'] = None
-     if 'augment_pipe' not in data:
-         data['augment_pipe'] = None
-
-     # Validate contents.
-     assert isinstance(data['G_ema'], torch.nn.Module)
-     if not G_only:
-         assert isinstance(data['D'], torch.nn.Module)
-         assert isinstance(data['G'], torch.nn.Module)
-         assert isinstance(data['training_set_kwargs'], (dict, type(None)))
-         assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
-
-     # Force FP16.
-     if force_fp16:
-         if G_only:
-             convert_list = ['G_ema']  # 'G'
-         else:
-             convert_list = ['G', 'D', 'G_ema']
-         for key in convert_list:
-             old = data[key]
-             kwargs = copy.deepcopy(old.init_kwargs)
-             if key.startswith('G'):
-                 kwargs.synthesis_kwargs = dnnlib.EasyDict(
-                     kwargs.get('synthesis_kwargs', {}))
-                 kwargs.synthesis_kwargs.num_fp16_res = 4
-                 kwargs.synthesis_kwargs.conv_clamp = 256
-             if key.startswith('D'):
-                 kwargs.num_fp16_res = 4
-                 kwargs.conv_clamp = 256
-             if kwargs != old.init_kwargs:
-                 new = type(old)(**kwargs).eval().requires_grad_(False)
-                 misc.copy_params_and_buffers(old, new, require_all=True)
-                 data[key] = new
-     return data
-
-
- class _TFNetworkStub(dnnlib.EasyDict):
-     pass
-
-
- class _LegacyUnpickler(pickle.Unpickler):
-     def find_class(self, module, name):
-         if module == 'dnnlib.tflib.network' and name == 'Network':
-             return _TFNetworkStub
-         return super().find_class(module, name)
-
- # ----------------------------------------------------------------------------
-
-
- def num_range(s: str) -> List[int]:
-     '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
-
-     range_re = re.compile(r'^(\d+)-(\d+)$')
-     m = range_re.match(s)
-     if m:
-         return list(range(int(m.group(1)), int(m.group(2))+1))
-     vals = s.split(',')
-     return [int(x) for x in vals]
-
-
- # ----------------------------------------------------------------------------
- # loading tf pkl
- def load_pkl(file_or_url):
-     with open(file_or_url, 'rb') as file:
-         return pickle.load(file, encoding='latin1')
-
- # ----------------------------------------------------------------------------
-
- # For editing
-
-
- def visual(output, out_path):
-     import torch
-     import cv2
-     import numpy as np
-     output = (output + 1)/2
-     output = torch.clamp(output, 0, 1)
-     if output.shape[1] == 1:
-         output = torch.cat([output, output, output], 1)
-     output = output[0].detach().cpu().permute(1, 2, 0).numpy()
-     output = (output*255).astype(np.uint8)
-     output = output[:, :, ::-1]
-     cv2.imwrite(out_path, output)
-
-
- def save_obj(obj, path):
-     with open(path, 'wb+') as f:
-         pickle.dump(obj, f, protocol=4)
-
- # ----------------------------------------------------------------------------
-
- # Converting pkl to pth, change dict info inside pickle
-
-
- def convert_to_rgb(state_ros, state_nv, ros_name, nv_name):
-     state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.torgb.weight"].unsqueeze(
-         0)
-     state_ros[f"{ros_name}.bias"] = state_nv[f"{nv_name}.torgb.bias"].unsqueeze(
-         0).unsqueeze(-1).unsqueeze(-1)
-     state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.torgb.affine.weight"]
-     state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.torgb.affine.bias"]
-
-
- def convert_conv(state_ros, state_nv, ros_name, nv_name):
-     state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.weight"].unsqueeze(
-         0)
-     state_ros[f"{ros_name}.activate.bias"] = state_nv[f"{nv_name}.bias"]
-     state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.affine.weight"]
-     state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.affine.bias"]
-     state_ros[f"{ros_name}.noise.weight"] = state_nv[f"{nv_name}.noise_strength"].unsqueeze(
-         0)
-
-
- def convert_blur_kernel(state_ros, state_nv, level):
-     """Not quite sure why there is a factor of 4 here"""
-     # They are all the same
-     state_ros[f"convs.{2*level}.conv.blur.kernel"] = 4 * \
-         state_nv["synthesis.b4.resample_filter"]
-     state_ros[f"to_rgbs.{level}.upsample.kernel"] = 4 * \
-         state_nv["synthesis.b4.resample_filter"]
-
-
- def determine_config(state_nv):
-     mapping_names = [name for name in state_nv.keys() if "mapping.fc" in name]
-     sythesis_names = [
-         name for name in state_nv.keys() if "synthesis.b" in name]
-
-     n_mapping = max([int(re.findall("(\d+)", n)[0])
-                      for n in mapping_names]) + 1
-     resolution = max([int(re.findall("(\d+)", n)[0]) for n in sythesis_names])
-     n_layers = np.log(resolution/2)/np.log(2)
-
-     return n_mapping, n_layers
-
-
- def convert(network_pkl, output_file, G_only=False):
-     with dnnlib.util.open_url(network_pkl) as f:
-         G_nvidia = load_network_pkl(f, G_only=G_only)['G_ema']
-
-     state_nv = G_nvidia.state_dict()
-     n_mapping, n_layers = determine_config(state_nv)
-
-     state_ros = {}
-
-     for i in range(n_mapping):
-         state_ros[f"style.{i+1}.weight"] = state_nv[f"mapping.fc{i}.weight"]
-         state_ros[f"style.{i+1}.bias"] = state_nv[f"mapping.fc{i}.bias"]
-
-     for i in range(int(n_layers)):
-         if i > 0:
-             for conv_level in range(2):
-                 convert_conv(
-                     state_ros, state_nv, f"convs.{2*i-2+conv_level}", f"synthesis.b{4*(2**i)}.conv{conv_level}")
-                 state_ros[f"noises.noise_{2*i-1+conv_level}"] = state_nv[f"synthesis.b{4*(2**i)}.conv{conv_level}.noise_const"].unsqueeze(
-                     0).unsqueeze(0)
-
-             convert_to_rgb(state_ros, state_nv,
-                            f"to_rgbs.{i-1}", f"synthesis.b{4*(2**i)}")
-             convert_blur_kernel(state_ros, state_nv, i-1)
-
-         else:
-             state_ros[f"input.input"] = state_nv[f"synthesis.b{4*(2**i)}.const"].unsqueeze(
-                 0)
-             convert_conv(state_ros, state_nv, "conv1",
-                          f"synthesis.b{4*(2**i)}.conv1")
-             state_ros[f"noises.noise_{2*i}"] = state_nv[f"synthesis.b{4*(2**i)}.conv1.noise_const"].unsqueeze(
-                 0).unsqueeze(0)
-             convert_to_rgb(state_ros, state_nv, "to_rgb1",
-                            f"synthesis.b{4*(2**i)}")
-
-     # https://github.com/yuval-alaluf/restyle-encoder/issues/1#issuecomment-828354736
-     latent_avg = state_nv['mapping.w_avg']
-     state_dict = {"g_ema": state_ros, "latent_avg": latent_avg}
-     # if G_only:
-     #     f = open('converted_model_Gonly.txt', 'a+')
-     # else:
-     #     f = open('converted_model.txt', 'a+')
-     # for key in state_dict['g_ema'].keys():
-     #     f.write(str(key)+': '+str(state_dict['g_ema'][key].shape)+'\n')
-     # f.close()
-     torch.save(state_dict, output_file)
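
A hedged usage sketch of `convert`; the paths are placeholders, and the input must be a StyleGAN2-ada style pickle exposing `G_ema`. Note that `load_network_pkl` also appends debug dumps to `ori_model*.txt` as a side effect:

```python
# Produces a Rosinality-style checkpoint: {"g_ema": ..., "latent_avg": ...}.
convert('pretrained_models/stylegan_human.pkl',  # hypothetical input path
        'pretrained_models/stylegan_human.pth',  # hypothetical output path
        G_only=True)
```
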
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/coaches/base_coach.py DELETED
@@ -1,159 +0,0 @@
- import abc
- import os
- import pickle
- from argparse import Namespace
- import wandb
- import os.path
- from .localitly_regulizer import Space_Regulizer, l2_loss
- import torch
- from torchvision import transforms
- from lpips import LPIPS
- from pti.training.projectors import w_projector
- from pti.pti_configs import global_config, paths_config, hyperparameters
- from pti.pti_models.e4e.psp import pSp
- from utils.log_utils import log_image_from_w
- from utils.models_utils import toogle_grad, load_old_G
-
-
- class BaseCoach:
-     def __init__(self, data_loader, use_wandb):
-
-         self.use_wandb = use_wandb
-         self.data_loader = data_loader
-         self.w_pivots = {}
-         self.image_counter = 0
-
-         if hyperparameters.first_inv_type == 'w+':
-             self.initilize_e4e()
-
-         self.e4e_image_transform = transforms.Compose([
-             transforms.ToPILImage(),
-             transforms.Resize((256, 128)),
-             transforms.ToTensor(),
-             transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
-         # Initialize loss
-         self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(
-             global_config.device).eval()
-
-         self.restart_training()
-
-         # Initialize checkpoint dir
-         self.checkpoint_dir = paths_config.checkpoints_dir
-         os.makedirs(self.checkpoint_dir, exist_ok=True)
-
-     def restart_training(self):
-
-         # Initialize networks
-         self.G = load_old_G()
-         toogle_grad(self.G, True)
-
-         self.original_G = load_old_G()
-
-         self.space_regulizer = Space_Regulizer(
-             self.original_G, self.lpips_loss)
-         self.optimizer = self.configure_optimizers()
-
-     def get_inversion(self, w_path_dir, image_name, image):
-         embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
-         os.makedirs(embedding_dir, exist_ok=True)
-
-         w_pivot = None
-
-         if hyperparameters.use_last_w_pivots:
-             w_pivot = self.load_inversions(w_path_dir, image_name)
-
-         if not hyperparameters.use_last_w_pivots or w_pivot is None:
-             w_pivot = self.calc_inversions(image, image_name)
-             torch.save(w_pivot, f'{embedding_dir}/0.pt')
-
-         w_pivot = w_pivot.to(global_config.device)
-         return w_pivot
-
-     def load_inversions(self, w_path_dir, image_name):
-         if image_name in self.w_pivots:
-             return self.w_pivots[image_name]
-
-         if hyperparameters.first_inv_type == 'w+':
-             w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
-         else:
-             w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
-         if not os.path.isfile(w_potential_path):
-             return None
-         w = torch.load(w_potential_path).to(global_config.device)
-         self.w_pivots[image_name] = w
-         return w
-
-     def calc_inversions(self, image, image_name):
-         if hyperparameters.first_inv_type == 'w+':
-             w = self.get_e4e_inversion(image)
-
-         else:
-             id_image = torch.squeeze(
-                 (image.to(global_config.device) + 1) / 2) * 255
-             w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
-                                     num_steps=hyperparameters.first_inv_steps, w_name=image_name,
-                                     use_wandb=self.use_wandb)
-
-         return w
-
-     @abc.abstractmethod
-     def train(self):
-         pass
-
-     def configure_optimizers(self):
-         optimizer = torch.optim.Adam(
-             self.G.parameters(), lr=hyperparameters.pti_learning_rate)
-
-         return optimizer
-
-     def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
-         loss = 0.0
-
-         if hyperparameters.pt_l2_lambda > 0:
-             l2_loss_val = l2_loss(generated_images, real_images)
-             if self.use_wandb:
-                 wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach(
-                 ).cpu()}, step=global_config.training_step)
-             loss += l2_loss_val * hyperparameters.pt_l2_lambda
-         if hyperparameters.pt_lpips_lambda > 0:
-             loss_lpips = self.lpips_loss(generated_images, real_images)
-             loss_lpips = torch.squeeze(loss_lpips)
-             if self.use_wandb:
-                 wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach(
-                 ).cpu()}, step=global_config.training_step)
-             loss += loss_lpips * hyperparameters.pt_lpips_lambda
-
-         if use_ball_holder and hyperparameters.use_locality_regularization:
-             ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(
-                 new_G, w_batch, use_wandb=self.use_wandb)
-             loss += ball_holder_loss_val
-
-         return loss, l2_loss_val, loss_lpips
-
-     def forward(self, w):
-         generated_images = self.G.synthesis(
-             w, noise_mode='const', force_fp32=True)
-
-         return generated_images
-
-     def initilize_e4e(self):
-         ckpt = torch.load(paths_config.e4e, map_location='cpu')
-         opts = ckpt['opts']
-         opts['batch_size'] = hyperparameters.train_batch_size
-         opts['checkpoint_path'] = paths_config.e4e
-         opts = Namespace(**opts)
-         self.e4e_inversion_net = pSp(opts)
-         self.e4e_inversion_net.eval()
-         self.e4e_inversion_net = self.e4e_inversion_net.to(
-             global_config.device)
-         toogle_grad(self.e4e_inversion_net, False)
-
-     def get_e4e_inversion(self, image):
-         image = (image + 1) / 2
-         new_image = self.e4e_image_transform(image[0]).to(global_config.device)
-         _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
-                                       input_code=False)
-         if self.use_wandb:
-             log_image_from_w(w, self.G, 'First e4e inversion')
-         return w
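
`train` is the only abstract method, so a concrete coach mainly decides how to iterate `data_loader`. A rough single-pass sketch, under the assumption that the loader yields `(fname, image)` batches as in the PTI reference coaches; the embeddings directory is a placeholder:

```python
class SingleImageCoach(BaseCoach):
    def train(self):
        for fname, image in self.data_loader:
            image_name = fname[0]
            # Invert once to get the pivot, then tune G toward the real image.
            w_pivot = self.get_inversion('./embeddings', image_name, image)
            generated = self.forward(w_pivot)
            loss, _, _ = self.calc_loss(generated, image.to(global_config.device),
                                        image_name, self.G, True, w_pivot)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
```
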
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/persistence.py DELETED
@@ -1,262 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
-
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Facilities for pickling Python code alongside other data.
-
- The pickled code is automatically imported into a separate Python module
- during unpickling. This way, any previously exported pickles will remain
- usable even if the original code is no longer available, or if the current
- version of the code is not consistent with what was originally pickled."""
-
- import sys
- import pickle
- import io
- import inspect
- import copy
- import uuid
- import types
- import dnnlib
-
- # ----------------------------------------------------------------------------
-
- _version = 6  # internal version number
- _decorators = set()  # {decorator_class, ...}
- _import_hooks = []  # [hook_function, ...]
- _module_to_src_dict = dict()  # {module: src, ...}
- _src_to_module_dict = dict()  # {src: module, ...}
-
- # ----------------------------------------------------------------------------
-
-
- def persistent_class(orig_class):
-     r"""Class decorator that extends a given class to save its source code
-     when pickled.
-
-     Example:
-
-         from torch_utils import persistence
-
-         @persistence.persistent_class
-         class MyNetwork(torch.nn.Module):
-             def __init__(self, num_inputs, num_outputs):
-                 super().__init__()
-                 self.fc = MyLayer(num_inputs, num_outputs)
-             ...
-
-         @persistence.persistent_class
-         class MyLayer(torch.nn.Module):
-             ...
-
-     When pickled, any instance of `MyNetwork` and `MyLayer` will save its
-     source code alongside other internal state (e.g., parameters, buffers,
-     and submodules). This way, any previously exported pickle will remain
-     usable even if the class definitions have been modified or are no
-     longer available.
-
-     The decorator saves the source code of the entire Python module
-     containing the decorated class. It does *not* save the source code of
-     any imported modules. Thus, the imported modules must be available
-     during unpickling, also including `torch_utils.persistence` itself.
-
-     It is ok to call functions defined in the same module from the
-     decorated class. However, if the decorated class depends on other
-     classes defined in the same module, they must be decorated as well.
-     This is illustrated in the above example in the case of `MyLayer`.
-
-     It is also possible to employ the decorator just-in-time before
-     calling the constructor. For example:
-
-         cls = MyLayer
-         if want_to_make_it_persistent:
-             cls = persistence.persistent_class(cls)
-         layer = cls(num_inputs, num_outputs)
-
-     As an additional feature, the decorator also keeps track of the
-     arguments that were used to construct each instance of the decorated
-     class. The arguments can be queried via `obj.init_args` and
-     `obj.init_kwargs`, and they are automatically pickled alongside other
-     object state. A typical use case is to first unpickle a previous
-     instance of a persistent class, and then upgrade it to use the latest
-     version of the source code:
-
-         with open('old_pickle.pkl', 'rb') as f:
-             old_net = pickle.load(f)
-         new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
-         misc.copy_params_and_buffers(old_net, new_net, require_all=True)
-     """
-     assert isinstance(orig_class, type)
-     if is_persistent(orig_class):
-         return orig_class
-
-     assert orig_class.__module__ in sys.modules
-     orig_module = sys.modules[orig_class.__module__]
-     orig_module_src = _module_to_src(orig_module)
-
-     class Decorator(orig_class):
-         _orig_module_src = orig_module_src
-         _orig_class_name = orig_class.__name__
-
-         def __init__(self, *args, **kwargs):
-             super().__init__(*args, **kwargs)
-             self._init_args = copy.deepcopy(args)
-             self._init_kwargs = copy.deepcopy(kwargs)
-             assert orig_class.__name__ in orig_module.__dict__
-             _check_pickleable(self.__reduce__())
-
-         @property
-         def init_args(self):
-             return copy.deepcopy(self._init_args)
-
-         @property
-         def init_kwargs(self):
-             return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
-
-         def __reduce__(self):
-             fields = list(super().__reduce__())
-             fields += [None] * max(3 - len(fields), 0)
-             if fields[0] is not _reconstruct_persistent_obj:
-                 meta = dict(type='class', version=_version, module_src=self._orig_module_src,
-                             class_name=self._orig_class_name, state=fields[2])
-                 fields[0] = _reconstruct_persistent_obj  # reconstruct func
-                 fields[1] = (meta,)  # reconstruct args
-                 fields[2] = None  # state dict
-             return tuple(fields)
-
-     Decorator.__name__ = orig_class.__name__
-     _decorators.add(Decorator)
-     return Decorator
-
- # ----------------------------------------------------------------------------
-
-
- def is_persistent(obj):
-     r"""Test whether the given object or class is persistent, i.e.,
-     whether it will save its source code when pickled.
-     """
-     try:
-         if obj in _decorators:
-             return True
-     except TypeError:
-         pass
-     return type(obj) in _decorators  # pylint: disable=unidiomatic-typecheck
-
- # ----------------------------------------------------------------------------
-
-
- def import_hook(hook):
-     r"""Register an import hook that is called whenever a persistent object
-     is being unpickled. A typical use case is to patch the pickled source
-     code to avoid errors and inconsistencies when the API of some imported
-     module has changed.
-
-     The hook should have the following signature:
-
-         hook(meta) -> modified meta
-
-     `meta` is an instance of `dnnlib.EasyDict` with the following fields:
-
-         type:        Type of the persistent object, e.g. `'class'`.
-         version:     Internal version number of `torch_utils.persistence`.
-         module_src:  Original source code of the Python module.
-         class_name:  Class name in the original Python module.
-         state:       Internal state of the object.
-
-     Example:
-
-         @persistence.import_hook
-         def wreck_my_network(meta):
-             if meta.class_name == 'MyNetwork':
-                 print('MyNetwork is being imported. I will wreck it!')
-                 meta.module_src = meta.module_src.replace("True", "False")
-             return meta
-     """
-     assert callable(hook)
-     _import_hooks.append(hook)
-
- # ----------------------------------------------------------------------------
-
-
- def _reconstruct_persistent_obj(meta):
-     r"""Hook that is called internally by the `pickle` module to unpickle
-     a persistent object.
-     """
-     meta = dnnlib.EasyDict(meta)
-     meta.state = dnnlib.EasyDict(meta.state)
-     for hook in _import_hooks:
-         meta = hook(meta)
-         assert meta is not None
-
-     assert meta.version == _version
-     module = _src_to_module(meta.module_src)
-
-     assert meta.type == 'class'
-     orig_class = module.__dict__[meta.class_name]
-     decorator_class = persistent_class(orig_class)
-     obj = decorator_class.__new__(decorator_class)
-
-     setstate = getattr(obj, '__setstate__', None)
-     if callable(setstate):
-         setstate(meta.state)  # pylint: disable=not-callable
-     else:
-         obj.__dict__.update(meta.state)
-     return obj
-
- # ----------------------------------------------------------------------------
-
-
- def _module_to_src(module):
-     r"""Query the source code of a given Python module.
-     """
-     src = _module_to_src_dict.get(module, None)
-     if src is None:
-         src = inspect.getsource(module)
-         _module_to_src_dict[module] = src
-         _src_to_module_dict[src] = module
-     return src
-
-
- def _src_to_module(src):
-     r"""Get or create a Python module for the given source code.
-     """
-     module = _src_to_module_dict.get(src, None)
-     if module is None:
-         module_name = "_imported_module_" + uuid.uuid4().hex
-         module = types.ModuleType(module_name)
-         sys.modules[module_name] = module
-         _module_to_src_dict[module] = src
-         _src_to_module_dict[src] = module
-         exec(src, module.__dict__)  # pylint: disable=exec-used
-     return module
-
- # ----------------------------------------------------------------------------
-
-
- def _check_pickleable(obj):
-     r"""Check that the given object is pickleable, raising an exception if
-     it is not. This function is expected to be considerably more efficient
-     than actually pickling the object.
-     """
-     def recurse(obj):
-         if isinstance(obj, (list, tuple, set)):
-             return [recurse(x) for x in obj]
-         if isinstance(obj, dict):
-             return [[recurse(x), recurse(y)] for x, y in obj.items()]
-         if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
-             return None  # Python primitive types are pickleable.
-         if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
-             return None  # NumPy arrays and PyTorch tensors are pickleable.
-         if is_persistent(obj):
-             # Persistent objects are pickleable, by virtue of the constructor check.
-             return None
-         return obj
-     with io.BytesIO() as f:
-         pickle.dump(recurse(obj), f)
-
- # ----------------------------------------------------------------------------
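
A minimal round-trip sketch of the decorator, assuming `dnnlib` is importable and this file is reachable as `torch_utils.persistence`:

```python
import pickle

import torch
from torch_utils import persistence

@persistence.persistent_class
class Scale(torch.nn.Module):
    def __init__(self, gain):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        return x * self.gain

blob = pickle.dumps(Scale(2.0))  # the module's source travels inside the pickle
restored = pickle.loads(blob)    # still loads even if Scale's source changes later
print(restored.init_args)        # (2.0,)
```
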
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/ddim_noise_comparative_analysis.py DELETED
@@ -1,190 +0,0 @@
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import List, Optional, Tuple, Union
-
- import PIL
- import torch
- from torchvision import transforms
-
- from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
- from diffusers.schedulers import DDIMScheduler
- from diffusers.utils import randn_tensor
-
-
- trans = transforms.Compose(
-     [
-         transforms.Resize((256, 256)),
-         transforms.ToTensor(),
-         transforms.Normalize([0.5], [0.5]),
-     ]
- )
-
-
- def preprocess(image):
-     if isinstance(image, torch.Tensor):
-         return image
-     elif isinstance(image, PIL.Image.Image):
-         image = [image]
-
-     image = [trans(img.convert("RGB")) for img in image]
-     image = torch.stack(image)
-     return image
-
-
- class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
-     r"""
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Parameters:
-         unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
-             [`DDPMScheduler`], or [`DDIMScheduler`].
-     """
-
-     def __init__(self, unet, scheduler):
-         super().__init__()
-
-         # make sure scheduler can always be converted to DDIM
-         scheduler = DDIMScheduler.from_config(scheduler.config)
-
-         self.register_modules(unet=unet, scheduler=scheduler)
-
-     def check_inputs(self, strength):
-         if strength < 0 or strength > 1:
-             raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
-
-     def get_timesteps(self, num_inference_steps, strength, device):
-         # get the original timestep using init_timestep
-         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
-         t_start = max(num_inference_steps - init_timestep, 0)
-         timesteps = self.scheduler.timesteps[t_start:]
-
-         return timesteps, num_inference_steps - t_start
-
-     def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
-         if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
-             raise ValueError(
-                 f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
-             )
-
-         init_latents = image.to(device=device, dtype=dtype)
-
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         shape = init_latents.shape
-         noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
-         # get latents
-         print("add noise to latents at timestep", timestep)
-         init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
-         latents = init_latents
-
-         return latents
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         image: Union[torch.FloatTensor, PIL.Image.Image] = None,
-         strength: float = 0.8,
-         batch_size: int = 1,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         eta: float = 0.0,
-         num_inference_steps: int = 50,
-         use_clipped_model_output: Optional[bool] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-     ) -> Union[ImagePipelineOutput, Tuple]:
-         r"""
-         Args:
-             image (`torch.FloatTensor` or `PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch, that will be used as the starting point for the
-                 process.
-             strength (`float`, *optional*, defaults to 0.8):
-                 Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
-                 will be used as a starting point, adding more noise to it the larger the `strength`. The number of
-                 denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
-                 be maximum and the denoising process will run for the full number of iterations specified in
-                 `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
-             batch_size (`int`, *optional*, defaults to 1):
-                 The number of images to generate.
-             generator (`torch.Generator`, *optional*):
-                 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                 to make generation deterministic.
-             eta (`float`, *optional*, defaults to 0.0):
-                 The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             use_clipped_model_output (`bool`, *optional*, defaults to `None`):
-                 if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
-                 downstream to the scheduler. So use `None` for schedulers which don't support this argument.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-
-         Returns:
-             [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
-             True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
-         """
-         # 1. Check inputs. Raise error if not correct
-         self.check_inputs(strength)
-
-         # 2. Preprocess image
-         image = preprocess(image)
-
-         # 3. set timesteps
-         self.scheduler.set_timesteps(num_inference_steps, device=self.device)
-         timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
-         latent_timestep = timesteps[:1].repeat(batch_size)
-
-         # 4. Prepare latent variables
-         latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
-         image = latents
-
-         # 5. Denoising loop
-         for t in self.progress_bar(timesteps):
-             # 1. predict noise model_output
-             model_output = self.unet(image, t).sample
-
-             # 2. predict previous mean of image x_t-1 and add variance depending on eta
-             # eta corresponds to η in paper and should be between [0, 1]
-             # do x_t -> x_t-1
-             image = self.scheduler.step(
-                 model_output,
-                 t,
-                 image,
-                 eta=eta,
-                 use_clipped_model_output=use_clipped_model_output,
-                 generator=generator,
-             ).prev_sample
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-         image = image.cpu().permute(0, 2, 3, 1).numpy()
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image, latent_timestep.item())
-
-         return ImagePipelineOutput(images=image)
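
For reference, community pipelines like this one are typically loaded via `custom_pipeline`; the model id and image path below are placeholders, though any unconditional 256x256 DDPM/DDIM checkpoint should fit the preprocessing above:

```python
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-ema-celebahq-256",
    custom_pipeline="ddim_noise_comparative_analysis",
)
init_image = Image.open("face.png")
for strength in (0.2, 0.5, 0.8):
    # return_dict=False yields (images, timestep): the denoised PIL images
    # and the timestep at which noise was injected.
    images, timestep = pipe(init_image, strength=strength, return_dict=False)
    images[0].save(f"denoised_strength_{strength}.png")
```
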
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/constants.py DELETED
@@ -1,32 +0,0 @@
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import os
-
- from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
-
-
- default_cache_path = HUGGINGFACE_HUB_CACHE
-
-
- CONFIG_NAME = "config.json"
- WEIGHTS_NAME = "diffusion_pytorch_model.bin"
- FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
- ONNX_WEIGHTS_NAME = "model.onnx"
- SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
- ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
- HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
- DIFFUSERS_CACHE = default_cache_path
- DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
- HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
- DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
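
Since the module is import-light, it serves as the single source of truth for file names and cache locations; e.g.:

```python
from diffusers.utils.constants import DIFFUSERS_CACHE, WEIGHTS_NAME

print(DIFFUSERS_CACHE)  # typically ~/.cache/huggingface/hub unless overridden via env vars
print(WEIGHTS_NAME)     # 'diffusion_pytorch_model.bin'
```
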
spaces/Andy1621/uniformer_image_detection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     backbone=dict(
-         dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
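
This delta only toggles deformable convolutions on in stages c3-c5 of the ResNet backbone. The merged config can be inspected with mmcv (the config engine MMDetection 2.x builds on), assuming it is run from the repository root:

```python
from mmcv import Config

cfg = Config.fromfile('configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py')
print(cfg.model.backbone.dcn)             # {'type': 'DCN', 'deform_groups': 1, 'fallback_on_stride': False}
print(cfg.model.backbone.stage_with_dcn)  # (False, True, True, True)
```
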
spaces/Andy1621/uniformer_image_detection/configs/hrnet/README.md DELETED
@@ -1,88 +0,0 @@
- # High-resolution networks (HRNets) for object detection
-
- ## Introduction
-
- [ALGORITHM]
-
- ```latex
- @inproceedings{SunXLW19,
-   title={Deep High-Resolution Representation Learning for Human Pose Estimation},
-   author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang},
-   booktitle={CVPR},
-   year={2019}
- }
-
- @article{SunZJCXLMWLW19,
-   title={High-Resolution Representations for Labeling Pixels and Regions},
-   author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao
-   and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang},
-   journal = {CoRR},
-   volume = {abs/1904.04514},
-   year={2019}
- }
- ```
-
- ## Results and Models
-
- ### Faster R-CNN
-
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
- | :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:| :--------:|
- | HRNetV2p-W18 | pytorch | 1x | 6.6 | 13.4 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130_211246.log.json) |
- | HRNetV2p-W18 | pytorch | 2x | 6.6 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731.log.json) |
- | HRNetV2p-W32 | pytorch | 1x | 9.0 | 12.4 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130_204442.log.json) |
- | HRNetV2p-W32 | pytorch | 2x | 9.0 | | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927.log.json) |
- | HRNetV2p-W40 | pytorch | 1x | 10.4 | 10.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210_125315.log.json) |
- | HRNetV2p-W40 | pytorch | 2x | 10.4 | | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033.log.json) |
-
- ### Mask R-CNN
-
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
- | :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:|
- | HRNetV2p-W18 | pytorch | 1x | 7.0 | 11.7 | 37.7 | 34.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205_232523.log.json) |
- | HRNetV2p-W18 | pytorch | 2x | 7.0 | - | 39.8 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212_134222.log.json) |
- | HRNetV2p-W32 | pytorch | 1x | 9.4 | 11.3 | 41.2 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207_055017.log.json) |
- | HRNetV2p-W32 | pytorch | 2x | 9.4 | - | 42.5 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213_150518.log.json) |
- | HRNetV2p-W40 | pytorch | 1x | 10.9 | | 42.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646.log.json) |
- | HRNetV2p-W40 | pytorch | 2x | 10.9 | | 42.8 | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732.log.json) |
48
-
49
- ### Cascade R-CNN
50
-
51
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
52
- | :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------: | :--------: |
53
- | HRNetV2p-W18 | pytorch | 20e | 7.0 | 11.0 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210_105632.log.json) |
54
- | HRNetV2p-W32 | pytorch | 20e | 9.4 | 11.0 | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208_160511.log.json) |
55
- | HRNetV2p-W40 | pytorch | 20e | 10.8 | | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112.log.json) |
56
-
57
- ### Cascade Mask R-CNN
58
-
59
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
60
- | :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:|
61
- | HRNetV2p-W18 | pytorch | 20e | 8.5 | 8.5 |41.6 |36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210_093149.log.json) |
62
- | HRNetV2p-W32 | pytorch | 20e | | 8.3 |44.3 |38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043.log.json) |
63
- | HRNetV2p-W40 | pytorch | 20e | 12.5 | |45.1 |39.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922.log.json) |
64
-
65
- ### Hybrid Task Cascade (HTC)
66
-
67
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
68
- | :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:|
69
- | HRNetV2p-W18 | pytorch | 20e | 10.8 | 4.7 | 42.8 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210_182735.log.json) |
70
- | HRNetV2p-W32 | pytorch | 20e | 13.1 | 4.9 | 45.4 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207_193153.log.json) |
71
- | HRNetV2p-W40 | pytorch | 20e | 14.6 | | 46.4 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411.log.json) |
72
-
73
- ### FCOS
74
-
75
- | Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
76
- |:---------:|:-------:|:-------:|:--------:|:-------:|:------:|:------:|:------:|:------:|:--------:|
77
- |HRNetV2p-W18| pytorch | Y | N | 1x | 13.0 | 12.9 | 35.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth) &#124; [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710.log.json) |
78
- |HRNetV2p-W18| pytorch | Y | N | 2x | 13.0 | - | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110.log.json) |
79
- |HRNetV2p-W32| pytorch | Y | N | 1x | 17.5 | 12.9 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730.log.json) |
80
- |HRNetV2p-W32| pytorch | Y | N | 2x | 17.5 | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133.log.json) |
81
- |HRNetV2p-W18| pytorch | Y | Y | 2x | 13.0 | 12.9 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651.log.json) |
82
- |HRNetV2p-W32| pytorch | Y | Y | 2x | 17.5 | 12.4 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846.log.json) |
83
- |HRNetV2p-W48| pytorch | Y | Y | 2x | 20.3 | 10.8 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752.log.json) |
84
-
85
- **Note:**
86
-
87
- - The `28e` schedule in HTC indicates decreasing the lr at 24 and 27 epochs, with a total of 28 epochs.
88
- - HRNetV2 ImageNet pretrained models are in [HRNets for Image Classification](https://github.com/HRNet/HRNet-Image-Classification).
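
A minimal inference sketch for the configs above, assuming mmdetection v2.x is installed and the config/checkpoint pair from the Faster R-CNN table has been downloaded locally (the image path `demo.jpg` is a placeholder):

```python
# Sketch: run one image through the HRNetV2p-W18 Faster R-CNN from the table.
# Assumes mmdetection v2.x; the paths below must point at downloaded files.
from mmdet.apis import init_detector, inference_detector

config = 'configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py'
checkpoint = 'faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class list of Nx5 arrays
model.show_result('demo.jpg', result, out_file='demo_out.jpg')
```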
 
 
spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_r101_fpn_2x_coco.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './rpn_r50_fpn_2x_coco.py'
2
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
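
This two-line config relies on mmdetection's `_base_` inheritance: everything comes from the ResNet-50 RPN base and only the backbone keys are overridden. A hypothetical sketch of the same pattern (the ResNet-152 variant below is illustrative, not a shipped config):

```python
# Hypothetical config using the same inheritance mechanism: inherit the base,
# override only the pretrained weights and backbone depth.
_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(pretrained='torchvision://resnet152', backbone=dict(depth=152))
```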
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/cgnet/README.md DELETED
@@ -1,26 +0,0 @@
1
- # CGNet: A Light-weight Context Guided Network for Semantic Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{wu2020cgnet,
9
- title={Cgnet: A light-weight context guided network for semantic segmentation},
10
- author={Wu, Tianyi and Tang, Sheng and Zhang, Rui and Cao, Juan and Zhang, Yongdong},
11
- journal={IEEE Transactions on Image Processing},
12
- volume={30},
13
- pages={1169--1179},
14
- year={2020},
15
- publisher={IEEE}
16
- }
17
- ```
18
-
19
- ## Results and models
20
-
21
- ### Cityscapes
22
-
23
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
24
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
25
- | CGNet | M3N21 | 680x680 | 60000 | 7.5 | 30.51 | 65.63 | 68.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_680x680_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes-20201101_110253.log.json) |
26
- | CGNet | M3N21 | 512x1024 | 60000 | 8.3 | 31.14 | 68.27 | 70.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_512x1024_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes-20201101_110254.log.json) |
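
A minimal inference sketch for these checkpoints, assuming mmsegmentation v0.x is installed and the config/checkpoint were downloaded from the table (the image path `street.jpg` is a placeholder):

```python
# Sketch: segment one Cityscapes-style image with the 512x1024 CGNet model.
from mmseg.apis import init_segmentor, inference_segmentor

config = 'configs/cgnet/cgnet_512x1024_60k_cityscapes.py'
checkpoint = 'cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth'

model = init_segmentor(config, checkpoint, device='cuda:0')
seg_map = inference_segmentor(model, 'street.jpg')  # list with one HxW label map
```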
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllama.py DELETED
@@ -1,218 +0,0 @@
1
- from pathlib import Path
2
-
3
- import torch
4
- import torch.nn.functional as F
5
- from torch import version as torch_version
6
-
7
- from modules import shared
8
- from modules.logging_colors import logger
9
- from modules.models import clear_torch_cache
10
- from modules.text_generation import get_max_prompt_length
11
-
12
- try:
13
- from exllama.generator import ExLlamaGenerator
14
- from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
15
- from exllama.tokenizer import ExLlamaTokenizer
16
- except:
17
- logger.warning('exllama module failed to import. Will attempt to import from repositories/.')
18
- try:
19
- from modules.relative_imports import RelativeImport
20
-
21
- with RelativeImport("repositories/exllama"):
22
- from generator import ExLlamaGenerator
23
- from model import ExLlama, ExLlamaCache, ExLlamaConfig
24
- from tokenizer import ExLlamaTokenizer
25
- except:
26
- logger.error(
27
- "Could not find repositories/exllama. Please ensure that exllama"
28
- " (https://github.com/turboderp/exllama) is cloned inside repositories/ and is up to date."
29
- )
30
- raise
31
-
32
-
33
- class ExllamaModel:
34
- def __init__(self):
35
- pass
36
-
37
- @classmethod
38
- def from_pretrained(self, path_to_model):
39
-
40
- path_to_model = Path(f'{shared.args.model_dir}') / Path(path_to_model)
41
- tokenizer_model_path = path_to_model / "tokenizer.model"
42
- model_config_path = path_to_model / "config.json"
43
-
44
- # Find the model checkpoint
45
- model_path = None
46
- for ext in ['.safetensors', '.pt', '.bin']:
47
- found = list(path_to_model.glob(f"*{ext}"))
48
- if len(found) > 0:
49
- if len(found) > 1:
50
- logger.warning(f'More than one {ext} model has been found. The last one will be selected. It could be wrong.')
51
-
52
- model_path = found[-1]
53
- break
54
-
55
- config = ExLlamaConfig(str(model_config_path))
56
- config.model_path = str(model_path)
57
- config.max_seq_len = shared.args.max_seq_len
58
- config.compress_pos_emb = shared.args.compress_pos_emb
59
- if shared.args.gpu_split:
60
- config.set_auto_map(shared.args.gpu_split)
61
- config.gpu_peer_fix = True
62
-
63
- if shared.args.alpha_value > 1 and shared.args.rope_freq_base == 0:
64
- config.alpha_value = shared.args.alpha_value
65
- config.calculate_rotary_embedding_base()
66
- elif shared.args.rope_freq_base > 0:
67
- config.rotary_embedding_base = shared.args.rope_freq_base
68
-
69
- if torch_version.hip:
70
- config.rmsnorm_no_half2 = True
71
- config.rope_no_half2 = True
72
- config.matmul_no_half2 = True
73
- config.silu_no_half2 = True
74
-
75
- model = ExLlama(config)
76
- tokenizer = ExLlamaTokenizer(str(tokenizer_model_path))
77
- cache = ExLlamaCache(model)
78
- generator = ExLlamaGenerator(model, tokenizer, cache)
79
-
80
- result = self()
81
- result.config = config
82
- result.model = model
83
- result.cache = cache
84
- result.tokenizer = tokenizer
85
- result.generator = generator
86
- return result, result
87
-
88
- def encode(self, string, **kwargs):
89
- return self.tokenizer.encode(string, max_seq_len=self.model.config.max_seq_len, add_bos=True)
90
-
91
- def decode(self, ids, **kwargs):
92
- if isinstance(ids, list):
93
- ids = torch.tensor([ids])
94
- elif isinstance(ids, torch.Tensor) and ids.numel() == 1:
95
- ids = ids.view(1, -1)
96
-
97
- return self.tokenizer.decode(ids)[0]
98
-
99
- def get_logits(self, token_ids, **kwargs):
100
- self.cache.current_seq_len = 0
101
- self.model.forward(token_ids[:, :-1], self.cache, input_mask=None, preprocess_only=True)
102
- return self.model.forward(token_ids[:, -1:], self.cache, **kwargs).float().cpu()
103
-
104
- def generate_with_streaming(self, prompt, state):
105
-
106
- # The cache batch size must be 2 for CFG and 1 otherwise
107
- if state['guidance_scale'] == 1:
108
- if self.cache.batch_size == 2:
109
- del self.cache
110
- clear_torch_cache()
111
- self.cache = ExLlamaCache(self.model)
112
- self.generator = ExLlamaGenerator(self.model, self.tokenizer, self.cache)
113
- else:
114
- if self.cache.batch_size == 1:
115
- del self.cache
116
- clear_torch_cache()
117
- self.cache = ExLlamaCache(self.model, batch_size=2)
118
- self.generator = ExLlamaGenerator(self.model, self.tokenizer, self.cache)
119
-
120
- self.generator.settings.temperature = state['temperature']
121
- self.generator.settings.top_p = state['top_p']
122
- self.generator.settings.top_k = state['top_k']
123
- self.generator.settings.typical = state['typical_p']
124
- self.generator.settings.token_repetition_penalty_max = state['repetition_penalty']
125
- self.generator.settings.token_repetition_penalty_sustain = -1 if state['repetition_penalty_range'] <= 0 else state['repetition_penalty_range']
126
- if state['ban_eos_token']:
127
- self.generator.disallow_tokens([self.tokenizer.eos_token_id])
128
- else:
129
- self.generator.disallow_tokens(None)
130
-
131
- if state['custom_token_bans']:
132
- to_ban = [int(x) for x in state['custom_token_bans'].split(',')]
133
- if len(to_ban) > 0:
134
- self.generator.disallow_tokens(to_ban)
135
-
136
- # Case 1: no CFG
137
- if state['guidance_scale'] == 1:
138
- self.generator.end_beam_search()
139
-
140
- # Tokenizing the input
141
- ids = self.generator.tokenizer.encode(prompt, max_seq_len=self.model.config.max_seq_len)
142
- if state['add_bos_token']:
143
- ids = torch.cat(
144
- [torch.tensor([[self.tokenizer.bos_token_id]]).to(ids.device),
145
- ids], dim=1
146
- ).to(torch.int64)
147
- ids = ids[:, -get_max_prompt_length(state):]
148
- if state['auto_max_new_tokens']:
149
- max_new_tokens = state['truncation_length'] - ids.shape[-1]
150
- else:
151
- max_new_tokens = state['max_new_tokens']
152
-
153
- self.generator.gen_begin_reuse(ids)
154
- initial_len = self.generator.sequence[0].shape[0]
155
- has_leading_space = False
156
-
157
- for i in range(max_new_tokens):
158
- token = self.generator.gen_single_token()
159
- if i == 0 and self.generator.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'):
160
- has_leading_space = True
161
-
162
- decoded_text = self.generator.tokenizer.decode(self.generator.sequence[0][initial_len:])
163
- if has_leading_space:
164
- decoded_text = ' ' + decoded_text
165
-
166
- yield decoded_text
167
- if token.item() == self.generator.tokenizer.eos_token_id or shared.stop_everything:
168
- break
169
-
170
- # Case 2: CFG
171
- # Copied from https://github.com/turboderp/exllama/blob/master/example_cfg.py
172
- else:
173
- alpha = state['guidance_scale']
174
- prompts = [prompt, state['negative_prompt'] or '']
175
-
176
- ids, mask = self.tokenizer.encode(
177
- prompts,
178
- return_mask=True,
179
- max_seq_len=self.model.config.max_seq_len,
180
- add_bos=state['add_bos_token']
181
- )
182
- if state['auto_max_new_tokens']:
183
- max_new_tokens = state['truncation_length'] - ids[0].shape[-1]
184
- else:
185
- max_new_tokens = state['max_new_tokens']
186
-
187
- self.generator.gen_begin(ids, mask=mask)
188
- initial_len = self.generator.sequence[0].shape[0]
189
- has_leading_space = False
190
-
191
- for i in range(max_new_tokens):
192
- logits = self.model.forward(self.generator.sequence[:, -1:], self.cache, input_mask=mask)
193
- self.generator.apply_rep_penalty(logits)
194
-
195
- logits = F.log_softmax(logits, dim=-1)
196
- logits_mixed = alpha * logits[0] + (1 - alpha) * logits[1]
197
-
198
- token, _ = self.generator.sample_current(logits_mixed)
199
- if i == 0 and self.generator.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'):
200
- has_leading_space = True
201
-
202
- decoded_text = self.generator.tokenizer.decode(self.generator.sequence[0][initial_len:])
203
- if has_leading_space:
204
- decoded_text = ' ' + decoded_text
205
-
206
- yield decoded_text
207
- if token.item() == self.tokenizer.eos_token_id or shared.stop_everything:
208
- break
209
-
210
- batch_token = token.repeat(2, 1)
211
- self.generator.gen_accept_token(batch_token)
212
-
213
- def generate(self, prompt, state):
214
- output = ''
215
- for output in self.generate_with_streaming(prompt, state):
216
- pass
217
-
218
- return output
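
The CFG branch above mixes conditional and negative-prompt logits in log-probability space. A standalone sketch of that mixing rule, independent of exllama:

```python
# Standalone illustration of the guidance mixing used in the CFG branch:
# row 0 = logits for the real prompt, row 1 = logits for the negative prompt.
import torch
import torch.nn.functional as F

alpha = 1.5                 # guidance_scale > 1 pushes away from row 1
logits = torch.randn(2, 8)  # two rows of logits over an 8-token vocabulary

log_probs = F.log_softmax(logits, dim=-1)
mixed = alpha * log_probs[0] + (1 - alpha) * log_probs[1]
next_token = int(torch.argmax(mixed))  # greedy pick, for illustration only
print(next_token)
```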
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/points_sampler.py DELETED
@@ -1,177 +0,0 @@
1
- from typing import List
2
-
3
- import torch
4
- from torch import nn as nn
5
-
6
- from annotator.uniformer.mmcv.runner import force_fp32
7
- from .furthest_point_sample import (furthest_point_sample,
8
- furthest_point_sample_with_dist)
9
-
10
-
11
- def calc_square_dist(point_feat_a, point_feat_b, norm=True):
12
- """Calculating square distance between a and b.
13
-
14
- Args:
15
- point_feat_a (Tensor): (B, N, C) Feature vector of each point.
16
- point_feat_b (Tensor): (B, M, C) Feature vector of each point.
17
- norm (Bool, optional): Whether to normalize the distance.
18
- Default: True.
19
-
20
- Returns:
21
-         Tensor: (B, N, M) Distance between each pair of points.
22
- """
23
- num_channel = point_feat_a.shape[-1]
24
- # [bs, n, 1]
25
- a_square = torch.sum(point_feat_a.unsqueeze(dim=2).pow(2), dim=-1)
26
- # [bs, 1, m]
27
- b_square = torch.sum(point_feat_b.unsqueeze(dim=1).pow(2), dim=-1)
28
-
29
- corr_matrix = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))
30
-
31
- dist = a_square + b_square - 2 * corr_matrix
32
- if norm:
33
- dist = torch.sqrt(dist) / num_channel
34
- return dist
35
-
36
-
37
- def get_sampler_cls(sampler_type):
38
- """Get the type and mode of points sampler.
39
-
40
- Args:
41
- sampler_type (str): The type of points sampler.
42
-             The valid values are "D-FPS", "F-FPS", or "FS".
43
-
44
- Returns:
45
- class: Points sampler type.
46
- """
47
- sampler_mappings = {
48
- 'D-FPS': DFPSSampler,
49
- 'F-FPS': FFPSSampler,
50
- 'FS': FSSampler,
51
- }
52
- try:
53
- return sampler_mappings[sampler_type]
54
- except KeyError:
55
- raise KeyError(
56
- f'Supported `sampler_type` are {sampler_mappings.keys()}, but got \
57
- {sampler_type}')
58
-
59
-
60
- class PointsSampler(nn.Module):
61
- """Points sampling.
62
-
63
- Args:
64
- num_point (list[int]): Number of sample points.
65
- fps_mod_list (list[str], optional): Type of FPS method, valid mod
66
- ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
67
- F-FPS: using feature distances for FPS.
68
- D-FPS: using Euclidean distances of points for FPS.
69
- FS: using F-FPS and D-FPS simultaneously.
70
- fps_sample_range_list (list[int], optional):
71
- Range of points to apply FPS. Default: [-1].
72
- """
73
-
74
- def __init__(self,
75
- num_point: List[int],
76
- fps_mod_list: List[str] = ['D-FPS'],
77
- fps_sample_range_list: List[int] = [-1]):
78
- super().__init__()
79
- # FPS would be applied to different fps_mod in the list,
80
- # so the length of the num_point should be equal to
81
- # fps_mod_list and fps_sample_range_list.
82
- assert len(num_point) == len(fps_mod_list) == len(
83
- fps_sample_range_list)
84
- self.num_point = num_point
85
- self.fps_sample_range_list = fps_sample_range_list
86
- self.samplers = nn.ModuleList()
87
- for fps_mod in fps_mod_list:
88
- self.samplers.append(get_sampler_cls(fps_mod)())
89
- self.fp16_enabled = False
90
-
91
- @force_fp32()
92
- def forward(self, points_xyz, features):
93
- """
94
- Args:
95
- points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
96
- features (Tensor): (B, C, N) Descriptors of the features.
97
-
98
- Returns:
99
- Tensor: (B, npoint, sample_num) Indices of sampled points.
100
- """
101
- indices = []
102
- last_fps_end_index = 0
103
-
104
- for fps_sample_range, sampler, npoint in zip(
105
- self.fps_sample_range_list, self.samplers, self.num_point):
106
- assert fps_sample_range < points_xyz.shape[1]
107
-
108
- if fps_sample_range == -1:
109
- sample_points_xyz = points_xyz[:, last_fps_end_index:]
110
- if features is not None:
111
- sample_features = features[:, :, last_fps_end_index:]
112
- else:
113
- sample_features = None
114
- else:
115
- sample_points_xyz = \
116
- points_xyz[:, last_fps_end_index:fps_sample_range]
117
- if features is not None:
118
- sample_features = features[:, :, last_fps_end_index:
119
- fps_sample_range]
120
- else:
121
- sample_features = None
122
-
123
- fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,
124
- npoint)
125
-
126
- indices.append(fps_idx + last_fps_end_index)
127
- last_fps_end_index += fps_sample_range
128
- indices = torch.cat(indices, dim=1)
129
-
130
- return indices
131
-
132
-
133
- class DFPSSampler(nn.Module):
134
- """Using Euclidean distances of points for FPS."""
135
-
136
- def __init__(self):
137
- super().__init__()
138
-
139
- def forward(self, points, features, npoint):
140
- """Sampling points with D-FPS."""
141
- fps_idx = furthest_point_sample(points.contiguous(), npoint)
142
- return fps_idx
143
-
144
-
145
- class FFPSSampler(nn.Module):
146
- """Using feature distances for FPS."""
147
-
148
- def __init__(self):
149
- super().__init__()
150
-
151
- def forward(self, points, features, npoint):
152
- """Sampling points with F-FPS."""
153
- assert features is not None, \
154
- 'feature input to FFPS_Sampler should not be None'
155
- features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)
156
- features_dist = calc_square_dist(
157
- features_for_fps, features_for_fps, norm=False)
158
- fps_idx = furthest_point_sample_with_dist(features_dist, npoint)
159
- return fps_idx
160
-
161
-
162
- class FSSampler(nn.Module):
163
- """Using F-FPS and D-FPS simultaneously."""
164
-
165
- def __init__(self):
166
- super().__init__()
167
-
168
- def forward(self, points, features, npoint):
169
- """Sampling points with FS_Sampling."""
170
- assert features is not None, \
171
- 'feature input to FS_Sampler should not be None'
172
- ffps_sampler = FFPSSampler()
173
- dfps_sampler = DFPSSampler()
174
- fps_idx_ffps = ffps_sampler(points, features, npoint)
175
- fps_idx_dfps = dfps_sampler(points, features, npoint)
176
- fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1)
177
- return fps_idx
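
The distance identity behind `calc_square_dist` can be checked in isolation against `torch.cdist`:

```python
# Standalone check of ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, batch-wise,
# which is the expansion calc_square_dist() computes (with norm=False).
import torch

B, N, M, C = 2, 5, 7, 3
a, b = torch.randn(B, N, C), torch.randn(B, M, C)

a_sq = a.pow(2).sum(-1, keepdim=True)    # (B, N, 1)
b_sq = b.pow(2).sum(-1).unsqueeze(1)     # (B, 1, M)
dist = a_sq + b_sq - 2 * a @ b.transpose(1, 2)

assert torch.allclose(dist, torch.cdist(a, b).pow(2), atol=1e-5)
```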
 
 
spaces/Ariharasudhan/YoloV5/utils/augmentations.py DELETED
@@ -1,397 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Image augmentation functions
4
- """
5
-
6
- import math
7
- import random
8
-
9
- import cv2
10
- import numpy as np
11
- import torch
12
- import torchvision.transforms as T
13
- import torchvision.transforms.functional as TF
14
-
15
- from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
16
- from utils.metrics import bbox_ioa
17
-
18
- IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean
19
- IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation
20
-
21
-
22
- class Albumentations:
23
- # YOLOv5 Albumentations class (optional, only used if package is installed)
24
- def __init__(self, size=640):
25
- self.transform = None
26
- prefix = colorstr('albumentations: ')
27
- try:
28
- import albumentations as A
29
- check_version(A.__version__, '1.0.3', hard=True) # version requirement
30
-
31
- T = [
32
- A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
33
- A.Blur(p=0.01),
34
- A.MedianBlur(p=0.01),
35
- A.ToGray(p=0.01),
36
- A.CLAHE(p=0.01),
37
- A.RandomBrightnessContrast(p=0.0),
38
- A.RandomGamma(p=0.0),
39
- A.ImageCompression(quality_lower=75, p=0.0)] # transforms
40
- self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
41
-
42
- LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
43
- except ImportError: # package not installed, skip
44
- pass
45
- except Exception as e:
46
- LOGGER.info(f'{prefix}{e}')
47
-
48
- def __call__(self, im, labels, p=1.0):
49
- if self.transform and random.random() < p:
50
- new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
51
- im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
52
- return im, labels
53
-
54
-
55
- def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
56
- # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
57
- return TF.normalize(x, mean, std, inplace=inplace)
58
-
59
-
60
- def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
61
- # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
62
- for i in range(3):
63
- x[:, i] = x[:, i] * std[i] + mean[i]
64
- return x
65
-
66
-
67
- def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
68
- # HSV color-space augmentation
69
- if hgain or sgain or vgain:
70
- r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
71
- hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
72
- dtype = im.dtype # uint8
73
-
74
- x = np.arange(0, 256, dtype=r.dtype)
75
- lut_hue = ((x * r[0]) % 180).astype(dtype)
76
- lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
77
- lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
78
-
79
- im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
80
- cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
81
-
82
-
83
- def hist_equalize(im, clahe=True, bgr=False):
84
- # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
85
- yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
86
- if clahe:
87
- c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
88
- yuv[:, :, 0] = c.apply(yuv[:, :, 0])
89
- else:
90
- yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
91
- return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
92
-
93
-
94
- def replicate(im, labels):
95
- # Replicate labels
96
- h, w = im.shape[:2]
97
- boxes = labels[:, 1:].astype(int)
98
- x1, y1, x2, y2 = boxes.T
99
- s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
100
- for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
101
- x1b, y1b, x2b, y2b = boxes[i]
102
- bh, bw = y2b - y1b, x2b - x1b
103
- yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
104
- x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
105
- im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
106
- labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
107
-
108
- return im, labels
109
-
110
-
111
- def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
112
- # Resize and pad image while meeting stride-multiple constraints
113
- shape = im.shape[:2] # current shape [height, width]
114
- if isinstance(new_shape, int):
115
- new_shape = (new_shape, new_shape)
116
-
117
- # Scale ratio (new / old)
118
- r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
119
- if not scaleup: # only scale down, do not scale up (for better val mAP)
120
- r = min(r, 1.0)
121
-
122
- # Compute padding
123
- ratio = r, r # width, height ratios
124
- new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
125
- dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
126
- if auto: # minimum rectangle
127
- dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
128
- elif scaleFill: # stretch
129
- dw, dh = 0.0, 0.0
130
- new_unpad = (new_shape[1], new_shape[0])
131
- ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
132
-
133
- dw /= 2 # divide padding into 2 sides
134
- dh /= 2
135
-
136
- if shape[::-1] != new_unpad: # resize
137
- im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
138
- top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
139
- left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
140
- im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
141
- return im, ratio, (dw, dh)
142
-
143
-
144
- def random_perspective(im,
145
- targets=(),
146
- segments=(),
147
- degrees=10,
148
- translate=.1,
149
- scale=.1,
150
- shear=10,
151
- perspective=0.0,
152
- border=(0, 0)):
153
- # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
154
- # targets = [cls, xyxy]
155
-
156
- height = im.shape[0] + border[0] * 2 # shape(h,w,c)
157
- width = im.shape[1] + border[1] * 2
158
-
159
- # Center
160
- C = np.eye(3)
161
- C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
162
- C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
163
-
164
- # Perspective
165
- P = np.eye(3)
166
- P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
167
- P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
168
-
169
- # Rotation and Scale
170
- R = np.eye(3)
171
- a = random.uniform(-degrees, degrees)
172
- # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
173
- s = random.uniform(1 - scale, 1 + scale)
174
- # s = 2 ** random.uniform(-scale, scale)
175
- R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
176
-
177
- # Shear
178
- S = np.eye(3)
179
- S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
180
- S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
181
-
182
- # Translation
183
- T = np.eye(3)
184
- T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
185
- T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
186
-
187
- # Combined rotation matrix
188
- M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
189
- if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
190
- if perspective:
191
- im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
192
- else: # affine
193
- im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
194
-
195
- # Visualize
196
- # import matplotlib.pyplot as plt
197
- # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
198
- # ax[0].imshow(im[:, :, ::-1]) # base
199
- # ax[1].imshow(im2[:, :, ::-1]) # warped
200
-
201
- # Transform label coordinates
202
- n = len(targets)
203
- if n:
204
- use_segments = any(x.any() for x in segments)
205
- new = np.zeros((n, 4))
206
- if use_segments: # warp segments
207
- segments = resample_segments(segments) # upsample
208
- for i, segment in enumerate(segments):
209
- xy = np.ones((len(segment), 3))
210
- xy[:, :2] = segment
211
- xy = xy @ M.T # transform
212
- xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
213
-
214
- # clip
215
- new[i] = segment2box(xy, width, height)
216
-
217
- else: # warp boxes
218
- xy = np.ones((n * 4, 3))
219
- xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
220
- xy = xy @ M.T # transform
221
- xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
222
-
223
- # create new boxes
224
- x = xy[:, [0, 2, 4, 6]]
225
- y = xy[:, [1, 3, 5, 7]]
226
- new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
227
-
228
- # clip
229
- new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
230
- new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
231
-
232
- # filter candidates
233
- i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
234
- targets = targets[i]
235
- targets[:, 1:5] = new[i]
236
-
237
- return im, targets
238
-
239
-
240
- def copy_paste(im, labels, segments, p=0.5):
241
- # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
242
- n = len(segments)
243
- if p and n:
244
- h, w, c = im.shape # height, width, channels
245
- im_new = np.zeros(im.shape, np.uint8)
246
- for j in random.sample(range(n), k=round(p * n)):
247
- l, s = labels[j], segments[j]
248
- box = w - l[3], l[2], w - l[1], l[4]
249
- ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
250
- if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
251
- labels = np.concatenate((labels, [[l[0], *box]]), 0)
252
- segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
253
- cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
254
-
255
- result = cv2.flip(im, 1) # augment segments (flip left-right)
256
- i = cv2.flip(im_new, 1).astype(bool)
257
- im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
258
-
259
- return im, labels, segments
260
-
261
-
262
- def cutout(im, labels, p=0.5):
263
- # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
264
- if random.random() < p:
265
- h, w = im.shape[:2]
266
- scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
267
- for s in scales:
268
- mask_h = random.randint(1, int(h * s)) # create random masks
269
- mask_w = random.randint(1, int(w * s))
270
-
271
- # box
272
- xmin = max(0, random.randint(0, w) - mask_w // 2)
273
- ymin = max(0, random.randint(0, h) - mask_h // 2)
274
- xmax = min(w, xmin + mask_w)
275
- ymax = min(h, ymin + mask_h)
276
-
277
- # apply random color mask
278
- im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
279
-
280
- # return unobscured labels
281
- if len(labels) and s > 0.03:
282
- box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
283
- ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area
284
- labels = labels[ioa < 0.60] # remove >60% obscured labels
285
-
286
- return labels
287
-
288
-
289
- def mixup(im, labels, im2, labels2):
290
- # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
291
- r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
292
- im = (im * r + im2 * (1 - r)).astype(np.uint8)
293
- labels = np.concatenate((labels, labels2), 0)
294
- return im, labels
295
-
296
-
297
- def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
298
- # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
299
- w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
300
- w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
301
- ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
302
- return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
303
-
304
-
305
- def classify_albumentations(
306
- augment=True,
307
- size=224,
308
- scale=(0.08, 1.0),
309
- ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33
310
- hflip=0.5,
311
- vflip=0.0,
312
- jitter=0.4,
313
- mean=IMAGENET_MEAN,
314
- std=IMAGENET_STD,
315
- auto_aug=False):
316
- # YOLOv5 classification Albumentations (optional, only used if package is installed)
317
- prefix = colorstr('albumentations: ')
318
- try:
319
- import albumentations as A
320
- from albumentations.pytorch import ToTensorV2
321
- check_version(A.__version__, '1.0.3', hard=True) # version requirement
322
- if augment: # Resize and crop
323
- T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
324
- if auto_aug:
325
- # TODO: implement AugMix, AutoAug & RandAug in albumentation
326
- LOGGER.info(f'{prefix}auto augmentations are currently not supported')
327
- else:
328
- if hflip > 0:
329
- T += [A.HorizontalFlip(p=hflip)]
330
- if vflip > 0:
331
- T += [A.VerticalFlip(p=vflip)]
332
- if jitter > 0:
333
-                 color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
334
- T += [A.ColorJitter(*color_jitter, 0)]
335
- else: # Use fixed crop for eval set (reproducibility)
336
- T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
337
- T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor
338
- LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
339
- return A.Compose(T)
340
-
341
- except ImportError: # package not installed, skip
342
- LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
343
- except Exception as e:
344
- LOGGER.info(f'{prefix}{e}')
345
-
346
-
347
- def classify_transforms(size=224):
348
- # Transforms to apply if albumentations not installed
349
- assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
350
- # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
351
- return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
352
-
353
-
354
- class LetterBox:
355
- # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
356
- def __init__(self, size=(640, 640), auto=False, stride=32):
357
- super().__init__()
358
- self.h, self.w = (size, size) if isinstance(size, int) else size
359
- self.auto = auto # pass max size integer, automatically solve for short side using stride
360
- self.stride = stride # used with auto
361
-
362
- def __call__(self, im): # im = np.array HWC
363
- imh, imw = im.shape[:2]
364
- r = min(self.h / imh, self.w / imw) # ratio of new/old
365
- h, w = round(imh * r), round(imw * r) # resized image
366
-         hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
367
- top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
368
- im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
369
- im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
370
- return im_out
371
-
372
-
373
- class CenterCrop:
374
- # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
375
- def __init__(self, size=640):
376
- super().__init__()
377
- self.h, self.w = (size, size) if isinstance(size, int) else size
378
-
379
- def __call__(self, im): # im = np.array HWC
380
- imh, imw = im.shape[:2]
381
- m = min(imh, imw) # min dimension
382
- top, left = (imh - m) // 2, (imw - m) // 2
383
- return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
384
-
385
-
386
- class ToTensor:
387
- # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
388
- def __init__(self, half=False):
389
- super().__init__()
390
- self.half = half
391
-
392
- def __call__(self, im): # im = np.array HWC in BGR order
393
- im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
394
- im = torch.from_numpy(im) # to torch
395
- im = im.half() if self.half else im.float() # uint8 to fp16/32
396
- im /= 255.0 # 0-255 to 0.0-1.0
397
- return im
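
A quick sketch of `letterbox()` on a dummy frame, assuming the module is importable from a YOLOv5 checkout:

```python
# Pad a 480x640 frame to 640x640 while preserving aspect ratio; the padding
# value 114 matches the gray used throughout this module.
import numpy as np
from utils.augmentations import letterbox  # path assumes the YOLOv5 layout

im = np.zeros((480, 640, 3), dtype=np.uint8)
im_out, ratio, (dw, dh) = letterbox(im, new_shape=(640, 640), auto=False)
print(im_out.shape, ratio, (dw, dh))  # (640, 640, 3) (1.0, 1.0) (0.0, 80.0)
```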
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/wrapper.py DELETED
@@ -1,33 +0,0 @@
1
- # SPDX-FileCopyrightText: 2015 Eric Larson
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from .adapter import CacheControlAdapter
6
- from .cache import DictCache
7
-
8
-
9
- def CacheControl(
10
- sess,
11
- cache=None,
12
- cache_etags=True,
13
- serializer=None,
14
- heuristic=None,
15
- controller_class=None,
16
- adapter_class=None,
17
- cacheable_methods=None,
18
- ):
19
-
20
- cache = DictCache() if cache is None else cache
21
- adapter_class = adapter_class or CacheControlAdapter
22
- adapter = adapter_class(
23
- cache,
24
- cache_etags=cache_etags,
25
- serializer=serializer,
26
- heuristic=heuristic,
27
- controller_class=controller_class,
28
- cacheable_methods=cacheable_methods,
29
- )
30
- sess.mount("http://", adapter)
31
- sess.mount("https://", adapter)
32
-
33
- return sess
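
Typical usage wraps a requests session; the sketch below uses the standalone `cachecontrol` package (the same code pip vendors here), with the default in-memory DictCache:

```python
# Honor HTTP caching headers transparently on a requests session.
import requests
from cachecontrol import CacheControl  # pip install cachecontrol

sess = CacheControl(requests.Session())
r1 = sess.get('https://httpbin.org/cache/60')  # network hit, cached for 60s
r2 = sess.get('https://httpbin.org/cache/60')  # served from the DictCache
print(r1.status_code, r2.status_code)
```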
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/reporters.py DELETED
@@ -1,43 +0,0 @@
1
- class BaseReporter(object):
2
- """Delegate class to provider progress reporting for the resolver."""
3
-
4
- def starting(self):
5
- """Called before the resolution actually starts."""
6
-
7
- def starting_round(self, index):
8
- """Called before each round of resolution starts.
9
-
10
- The index is zero-based.
11
- """
12
-
13
- def ending_round(self, index, state):
14
- """Called before each round of resolution ends.
15
-
16
- This is NOT called if the resolution ends at this round. Use `ending`
17
- if you want to report finalization. The index is zero-based.
18
- """
19
-
20
- def ending(self, state):
21
- """Called before the resolution ends successfully."""
22
-
23
- def adding_requirement(self, requirement, parent):
24
- """Called when adding a new requirement into the resolve criteria.
25
-
26
- :param requirement: The additional requirement to be applied to filter
27
-             the available candidates.
28
- :param parent: The candidate that requires ``requirement`` as a
29
- dependency, or None if ``requirement`` is one of the root
30
- requirements passed in from ``Resolver.resolve()``.
31
- """
32
-
33
- def resolving_conflicts(self, causes):
34
- """Called when starting to attempt requirement conflict resolution.
35
-
36
- :param causes: The information on the collision that caused the backtracking.
37
- """
38
-
39
- def rejecting_candidate(self, criterion, candidate):
40
- """Called when rejecting a candidate during backtracking."""
41
-
42
- def pinning(self, candidate):
43
- """Called when adding a candidate to the potential solution."""
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py DELETED
@@ -1,159 +0,0 @@
1
- """The match_hostname() function from Python 3.3.3, essential when using SSL."""
2
-
3
- # Note: This file is under the PSF license as the code comes from the python
4
- # stdlib. http://docs.python.org/3/license.html
5
-
6
- import re
7
- import sys
8
-
9
- # ipaddress has been backported to 2.6+ in pypi. If it is installed on the
10
- # system, use it to handle IPAddress ServerAltnames (this was added in
11
- # python-3.5) otherwise only do DNS matching. This allows
12
- # util.ssl_match_hostname to continue to be used in Python 2.7.
13
- try:
14
- import ipaddress
15
- except ImportError:
16
- ipaddress = None
17
-
18
- __version__ = "3.5.0.1"
19
-
20
-
21
- class CertificateError(ValueError):
22
- pass
23
-
24
-
25
- def _dnsname_match(dn, hostname, max_wildcards=1):
26
- """Matching according to RFC 6125, section 6.4.3
27
-
28
- http://tools.ietf.org/html/rfc6125#section-6.4.3
29
- """
30
- pats = []
31
- if not dn:
32
- return False
33
-
34
- # Ported from python3-syntax:
35
- # leftmost, *remainder = dn.split(r'.')
36
- parts = dn.split(r".")
37
- leftmost = parts[0]
38
- remainder = parts[1:]
39
-
40
- wildcards = leftmost.count("*")
41
- if wildcards > max_wildcards:
42
- # Issue #17980: avoid denials of service by refusing more
43
- # than one wildcard per fragment. A survey of established
44
- # policy among SSL implementations showed it to be a
45
- # reasonable choice.
46
- raise CertificateError(
47
- "too many wildcards in certificate DNS name: " + repr(dn)
48
- )
49
-
50
- # speed up common case w/o wildcards
51
- if not wildcards:
52
- return dn.lower() == hostname.lower()
53
-
54
- # RFC 6125, section 6.4.3, subitem 1.
55
- # The client SHOULD NOT attempt to match a presented identifier in which
56
- # the wildcard character comprises a label other than the left-most label.
57
- if leftmost == "*":
58
- # When '*' is a fragment by itself, it matches a non-empty dotless
59
- # fragment.
60
- pats.append("[^.]+")
61
-     elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
-         # RFC 6125, section 6.4.3, subitem 3.
-         # The client SHOULD NOT attempt to match a presented identifier
-         # where the wildcard character is embedded within an A-label or
-         # U-label of an internationalized domain name.
-         pats.append(re.escape(leftmost))
-     else:
-         # Otherwise, '*' matches any dotless string, e.g. www*
-         pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
-
-     # add the remaining fragments, ignore any wildcards
-     for frag in remainder:
-         pats.append(re.escape(frag))
-
-     pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
-     return pat.match(hostname)
-
-
- def _to_unicode(obj):
-     if isinstance(obj, str) and sys.version_info < (3,):
-         # ignored flake8 # F821 to support python 2.7 function
-         obj = unicode(obj, encoding="ascii", errors="strict")  # noqa: F821
-     return obj
-
-
- def _ipaddress_match(ipname, host_ip):
-     """Exact matching of IP addresses.
-
-     RFC 6125 explicitly doesn't define an algorithm for this
-     (section 1.7.2 - "Out of Scope").
-     """
-     # OpenSSL may add a trailing newline to a subjectAltName's IP address
-     # Divergence from upstream: ipaddress can't handle byte str
-     ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
-     return ip == host_ip
-
-
- def match_hostname(cert, hostname):
-     """Verify that *cert* (in decoded format as returned by
-     SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
-     rules are followed, but IP addresses are not accepted for *hostname*.
-
-     CertificateError is raised on failure. On success, the function
-     returns nothing.
-     """
-     if not cert:
-         raise ValueError(
-             "empty or no certificate, match_hostname needs a "
-             "SSL socket or SSL context with either "
-             "CERT_OPTIONAL or CERT_REQUIRED"
-         )
-     try:
-         # Divergence from upstream: ipaddress can't handle byte str
-         host_ip = ipaddress.ip_address(_to_unicode(hostname))
-     except (UnicodeError, ValueError):
-         # ValueError: Not an IP address (common case)
-         # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking
-         # byte strings. addresses should be all ascii, so we consider it not
-         # an ipaddress in this case
-         host_ip = None
-     except AttributeError:
-         # Divergence from upstream: Make ipaddress library optional
-         if ipaddress is None:
-             host_ip = None
-         else:  # Defensive
-             raise
-     dnsnames = []
-     san = cert.get("subjectAltName", ())
-     for key, value in san:
-         if key == "DNS":
-             if host_ip is None and _dnsname_match(value, hostname):
-                 return
-             dnsnames.append(value)
-         elif key == "IP Address":
-             if host_ip is not None and _ipaddress_match(value, host_ip):
-                 return
-             dnsnames.append(value)
-     if not dnsnames:
-         # The subject is only checked when there is no dNSName entry
-         # in subjectAltName
-         for sub in cert.get("subject", ()):
-             for key, value in sub:
-                 # XXX according to RFC 2818, the most specific Common Name
-                 # must be used.
-                 if key == "commonName":
-                     if _dnsname_match(value, hostname):
-                         return
-                     dnsnames.append(value)
-     if len(dnsnames) > 1:
-         raise CertificateError(
-             "hostname %r "
-             "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
-         )
-     elif len(dnsnames) == 1:
-         raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
-     else:
-         raise CertificateError(
-             "no appropriate commonName or subjectAltName fields were found"
-         )
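For reference, match_hostname consumes the dict shape produced by ssl.SSLSocket.getpeercert(). A minimal sketch of driving it; the certificate values below are illustrative, not taken from this repo:

    # Illustrative cert dict mirroring ssl.SSLSocket.getpeercert() output.
    cert = {
        "subject": ((("commonName", "example.com"),),),
        "subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.com")),
    }

    match_hostname(cert, "www.example.com")   # matches the wildcard SAN; returns None
    try:
        match_hostname(cert, "evil.test")
    except CertificateError as exc:           # raised when no SAN (or CN) matches
        print(exc)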
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_itertools.py DELETED
@@ -1,23 +0,0 @@
- from setuptools.extern.more_itertools import consume  # noqa: F401
-
-
- # copied from jaraco.itertools 6.1
- def ensure_unique(iterable, key=lambda x: x):
-     """
-     Wrap an iterable to raise a ValueError if non-unique values are encountered.
-
-     >>> list(ensure_unique('abc'))
-     ['a', 'b', 'c']
-     >>> consume(ensure_unique('abca'))
-     Traceback (most recent call last):
-     ...
-     ValueError: Duplicate element 'a' encountered.
-     """
-     seen = set()
-     seen_add = seen.add
-     for element in iterable:
-         k = key(element)
-         if k in seen:
-             raise ValueError(f"Duplicate element {element!r} encountered.")
-         seen_add(k)
-         yield element
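A quick sketch of the generator in use; with a key function, uniqueness is judged on the derived value. The sample data is illustrative:

    names = ["alpha", "beta", "gamma"]
    print(list(ensure_unique(names)))        # ['alpha', 'beta', 'gamma']

    try:
        list(ensure_unique(names, key=len))  # len('gamma') collides with len('alpha')
    except ValueError as exc:
        print(exc)                           # Duplicate element 'gamma' encountered.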
spaces/AyakuraMei/Real-CUGAN/upcunet_v3.py DELETED
@@ -1,714 +0,0 @@
- import torch
- from torch import nn as nn
- from torch.nn import functional as F
- import os, sys
- import numpy as np
-
- root_path = os.path.abspath('.')
- sys.path.append(root_path)
-
-
- class SEBlock(nn.Module):
-     def __init__(self, in_channels, reduction=8, bias=False):
-         super(SEBlock, self).__init__()
-         self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
-         self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
-     def forward(self, x):
-         if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-             x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
-         else:
-             x0 = torch.mean(x, dim=(2, 3), keepdim=True)
-         x0 = self.conv1(x0)
-         x0 = F.relu(x0, inplace=True)
-         x0 = self.conv2(x0)
-         x0 = torch.sigmoid(x0)
-         x = torch.mul(x, x0)
-         return x
-
-     def forward_mean(self, x, x0):
-         x0 = self.conv1(x0)
-         x0 = F.relu(x0, inplace=True)
-         x0 = self.conv2(x0)
-         x0 = torch.sigmoid(x0)
-         x = torch.mul(x, x0)
-         return x
-
-
- class UNetConv(nn.Module):
-     def __init__(self, in_channels, mid_channels, out_channels, se):
-         super(UNetConv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
-             nn.LeakyReLU(0.1, inplace=True),
-             nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
-             nn.LeakyReLU(0.1, inplace=True),
-         )
-         if se:
-             self.seblock = SEBlock(out_channels, reduction=8, bias=True)
-         else:
-             self.seblock = None
-
-     def forward(self, x):
-         z = self.conv(x)
-         if self.seblock is not None:
-             z = self.seblock(z)
-         return z
-
-
- class UNet1(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet1, self).__init__()
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 128, 64, se=True)
-         self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-     def forward_a(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x1, x2):
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-
- class UNet1x3(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet1x3, self).__init__()
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 128, 64, se=True)
-         self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-     def forward_a(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x1, x2):
-         x2 = self.conv2_up(x2)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-4, -4, -4, -4))
-         x3 = self.conv3(x1 + x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         z = self.conv_bottom(x3)
-         return z
-
-
- class UNet2(nn.Module):
-     def __init__(self, in_channels, out_channels, deconv):
-         super(UNet2, self).__init__()
-
-         self.conv1 = UNetConv(in_channels, 32, 64, se=False)
-         self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
-         self.conv2 = UNetConv(64, 64, 128, se=True)
-         self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
-         self.conv3 = UNetConv(128, 256, 128, se=True)
-         self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
-         self.conv4 = UNetConv(128, 64, 64, se=True)
-         self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
-         self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
-         if deconv:
-             self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
-         else:
-             self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
-         for m in self.modules():
-             if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2(x2)
-
-         x3 = self.conv2_down(x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         x3 = self.conv3(x3)
-         x3 = self.conv3_up(x3)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-         x2 = F.pad(x2, (-4, -4, -4, -4))
-         x4 = self.conv4(x2 + x3)
-         x4 = self.conv4_up(x4)
-         x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-16, -16, -16, -16))
-         x5 = self.conv5(x1 + x4)
-         x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-         z = self.conv_bottom(x5)
-         return z
-
-     def forward_a(self, x):  # conv2/3/4 end with an SE block
-         x1 = self.conv1(x)
-         x2 = self.conv1_down(x1)
-         x2 = F.leaky_relu(x2, 0.1, inplace=True)
-         x2 = self.conv2.conv(x2)
-         return x1, x2
-
-     def forward_b(self, x2):  # conv2/3/4 end with an SE block
-         x3 = self.conv2_down(x2)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-         x3 = self.conv3.conv(x3)
-         return x3
-
-     def forward_c(self, x2, x3):  # conv2/3/4 end with an SE block
-         x3 = self.conv3_up(x3)
-         x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
-         x2 = F.pad(x2, (-4, -4, -4, -4))
-         x4 = self.conv4.conv(x2 + x3)
-         return x4
-
-     def forward_d(self, x1, x4):  # conv2/3/4 end with an SE block
-         x4 = self.conv4_up(x4)
-         x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
-         x1 = F.pad(x1, (-16, -16, -16, -16))
-         x5 = self.conv5(x1 + x4)
-         x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
-         z = self.conv_bottom(x5)
-         return z
-
-
- class UpCunet2x(nn.Module):  # perfect tiling, lossless end to end
-     def __init__(self, in_channels=3, out_channels=3):
-         super(UpCunet2x, self).__init__()
-         self.unet1 = UNet1(in_channels, out_channels, deconv=True)
-         self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-     def forward(self, x, tile_mode):  # 1.7G
-         n, c, h0, w0 = x.shape
-         if (tile_mode == 0):  # no tiling
-             ph = ((h0 - 1) // 2 + 1) * 2
-             pw = ((w0 - 1) // 2 + 1) * 2
-             x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')  # must be divisible by 2
-             x = self.unet1.forward(x)
-             x0 = self.unet2.forward(x)
-             x1 = F.pad(x, (-20, -20, -20, -20))
-             x = torch.add(x0, x1)
-             if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
-             return x
-         elif (tile_mode == 1):  # halve the longer side
-             if (w0 >= h0):
-                 crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so round up to a multiple of 4 first
-                 crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-             else:
-                 crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so round up to a multiple of 4 first
-                 crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-             crop_size = (crop_size_h, crop_size_w)  # 6.6G
-         elif (tile_mode == 2):  # halve both h and w
-             crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
-         elif (tile_mode == 3):  # one third of both h and w
-             crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.2G
-         elif (tile_mode == 4):  # one quarter of both h and w
-             crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # 3.7G
-         ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-         pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-         x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
-         n, c, h, w = x.shape
-         se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         n_patch = 0
-         tmp_dict = {}
-         opt_res_dict = {}
-         for i in range(0, h - 36, crop_size[0]):
-             tmp_dict[i] = {}
-             for j in range(0, w - 36, crop_size[1]):
-                 x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
-                 n, c1, h1, w1 = x_crop.shape
-                 tmp0, x_crop = self.unet1.forward_a(x_crop)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 n_patch += 1
-                 tmp_dict[i][j] = (tmp0, x_crop)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 tmp0, x_crop = tmp_dict[i][j]
-                 x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                 opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                 tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-         se_mean1 /= n_patch
-         se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                 tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                 tmp_x3 = self.unet2.forward_b(tmp_x2)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                 tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                 tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-         se_mean1 /= n_patch
-         for i in range(0, h - 36, crop_size[0]):
-             opt_res_dict[i] = {}
-             for j in range(0, w - 36, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                 tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                 x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                 x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                 x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                 opt_res_dict[i][j] = x_crop
-         del tmp_dict
-         torch.cuda.empty_cache()
-         res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
-         if ("Half" in x.type()):
-             res = res.half()
-         for i in range(0, h - 36, crop_size[0]):
-             for j in range(0, w - 36, crop_size[1]):
-                 res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
-         del opt_res_dict
-         torch.cuda.empty_cache()
-         if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
-         return res  #
-
-
- class UpCunet3x(nn.Module):  # perfect tiling, lossless end to end
-     def __init__(self, in_channels=3, out_channels=3):
-         super(UpCunet3x, self).__init__()
-         self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
-         self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-     def forward(self, x, tile_mode):  # 1.7G
-         n, c, h0, w0 = x.shape
-         if (tile_mode == 0):  # no tiling
-             ph = ((h0 - 1) // 4 + 1) * 4
-             pw = ((w0 - 1) // 4 + 1) * 4
-             x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')  # must be divisible by 2
-             x = self.unet1.forward(x)
-             x0 = self.unet2.forward(x)
-             x1 = F.pad(x, (-20, -20, -20, -20))
-             x = torch.add(x0, x1)
-             if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
-             return x
-         elif (tile_mode == 1):  # halve the longer side
-             if (w0 >= h0):
-                 crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2  # must be divisible by 4 after halving, so round up to a multiple of 8 first
-                 crop_size_h = (h0 - 1) // 4 * 4 + 4  # divisible by 4
-             else:
-                 crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2  # must be divisible by 4 after halving, so round up to a multiple of 8 first
-                 crop_size_w = (w0 - 1) // 4 * 4 + 4  # divisible by 4
-             crop_size = (crop_size_h, crop_size_w)  # 6.6G
-         elif (tile_mode == 2):  # halve both h and w
-             crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2)  # 5.6G
-         elif (tile_mode == 3):  # one third of both h and w
-             crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3)  # 4.2G
-         elif (tile_mode == 4):  # one quarter of both h and w
-             crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4)  # 3.7G
-         ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-         pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-         x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
-         n, c, h, w = x.shape
-         se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         n_patch = 0
-         tmp_dict = {}
-         opt_res_dict = {}
-         for i in range(0, h - 28, crop_size[0]):
-             tmp_dict[i] = {}
-             for j in range(0, w - 28, crop_size[1]):
-                 x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
-                 n, c1, h1, w1 = x_crop.shape
-                 tmp0, x_crop = self.unet1.forward_a(x_crop)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 n_patch += 1
-                 tmp_dict[i][j] = (tmp0, x_crop)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 28, crop_size[0]):
-             for j in range(0, w - 28, crop_size[1]):
-                 tmp0, x_crop = tmp_dict[i][j]
-                 x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                 opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                 tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-         se_mean1 /= n_patch
-         se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         for i in range(0, h - 28, crop_size[0]):
-             for j in range(0, w - 28, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                 tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                 tmp_x3 = self.unet2.forward_b(tmp_x2)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 28, crop_size[0]):
-             for j in range(0, w - 28, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                 tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                 tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-         se_mean1 /= n_patch
-         for i in range(0, h - 28, crop_size[0]):
-             opt_res_dict[i] = {}
-             for j in range(0, w - 28, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                 tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                 x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                 x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                 x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                 opt_res_dict[i][j] = x_crop  #
-         del tmp_dict
-         torch.cuda.empty_cache()
-         res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
-         if ("Half" in x.type()):
-             res = res.half()
-         for i in range(0, h - 28, crop_size[0]):
-             for j in range(0, w - 28, crop_size[1]):
-                 res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
-         del opt_res_dict
-         torch.cuda.empty_cache()
-         if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
-         return res
-
-
- class UpCunet4x(nn.Module):  # perfect tiling, lossless end to end
-     def __init__(self, in_channels=3, out_channels=3):
-         super(UpCunet4x, self).__init__()
-         self.unet1 = UNet1(in_channels, 64, deconv=True)
-         self.unet2 = UNet2(64, 64, deconv=False)
-         self.ps = nn.PixelShuffle(2)
-         self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
-
-     def forward(self, x, tile_mode):
-         n, c, h0, w0 = x.shape
-         x00 = x
-         if (tile_mode == 0):  # no tiling
-             ph = ((h0 - 1) // 2 + 1) * 2
-             pw = ((w0 - 1) // 2 + 1) * 2
-             x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')  # must be divisible by 2
-             x = self.unet1.forward(x)
-             x0 = self.unet2.forward(x)
-             x1 = F.pad(x, (-20, -20, -20, -20))
-             x = torch.add(x0, x1)
-             x = self.conv_final(x)
-             x = F.pad(x, (-1, -1, -1, -1))
-             x = self.ps(x)
-             if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
-             x += F.interpolate(x00, scale_factor=4, mode='nearest')
-             return x
-         elif (tile_mode == 1):  # halve the longer side
-             if (w0 >= h0):
-                 crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so round up to a multiple of 4 first
-                 crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-             else:
-                 crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # must be divisible by 2 after halving, so round up to a multiple of 4 first
-                 crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-             crop_size = (crop_size_h, crop_size_w)  # 6.6G
-         elif (tile_mode == 2):  # halve both h and w
-             crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # 5.6G
-         elif (tile_mode == 3):  # one third of both h and w
-             crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # 4.1G
-         elif (tile_mode == 4):  # one quarter of both h and w
-             crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # 3.7G
-         ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
-         pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
-         x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
-         n, c, h, w = x.shape
-         se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         n_patch = 0
-         tmp_dict = {}
-         opt_res_dict = {}
-         for i in range(0, h - 38, crop_size[0]):
-             tmp_dict[i] = {}
-             for j in range(0, w - 38, crop_size[1]):
-                 x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
-                 n, c1, h1, w1 = x_crop.shape
-                 tmp0, x_crop = self.unet1.forward_a(x_crop)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 n_patch += 1
-                 tmp_dict[i][j] = (tmp0, x_crop)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 38, crop_size[0]):
-             for j in range(0, w - 38, crop_size[1]):
-                 tmp0, x_crop = tmp_dict[i][j]
-                 x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
-                 opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
-                 tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
-         se_mean1 /= n_patch
-         se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean0 = se_mean0.half()
-         for i in range(0, h - 38, crop_size[0]):
-             for j in range(0, w - 38, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
-                 tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
-                 tmp_x3 = self.unet2.forward_b(tmp_x2)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
-                 se_mean0 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
-         se_mean0 /= n_patch
-         se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device)  # 64#128#128#64
-         if ("Half" in x.type()):
-             se_mean1 = se_mean1.half()
-         for i in range(0, h - 38, crop_size[0]):
-             for j in range(0, w - 38, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
-                 tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
-                 tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
-                 if ("Half" in x.type()):  # torch.HalfTensor/torch.cuda.HalfTensor
-                     tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
-                 else:
-                     tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
-                 se_mean1 += tmp_se_mean
-                 tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
-         se_mean1 /= n_patch
-         for i in range(0, h - 38, crop_size[0]):
-             opt_res_dict[i] = {}
-             for j in range(0, w - 38, crop_size[1]):
-                 opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
-                 tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
-                 x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
-                 x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                 x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                 x_crop = self.conv_final(x_crop)
-                 x_crop = F.pad(x_crop, (-1, -1, -1, -1))
-                 x_crop = self.ps(x_crop)
-                 opt_res_dict[i][j] = x_crop
-         del tmp_dict
-         torch.cuda.empty_cache()
-         res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
-         if ("Half" in x.type()):
-             res = res.half()
-         for i in range(0, h - 38, crop_size[0]):
-             for j in range(0, w - 38, crop_size[1]):
-                 # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
-                 res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
-         del opt_res_dict
-         torch.cuda.empty_cache()
-         if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
-         res += F.interpolate(x00, scale_factor=4, mode='nearest')
-         return res  #
-
-
- class RealWaifuUpScaler(object):
-     def __init__(self, scale, weight_path, half, device):
-         weight = torch.load(weight_path, map_location="cpu")
-         self.model = eval("UpCunet%sx" % scale)()
-         if (half == True):
-             self.model = self.model.half().to(device)
-         else:
-             self.model = self.model.to(device)
-         self.model.load_state_dict(weight, strict=True)
-         self.model.eval()
-         self.half = half
-         self.device = device
-
-     def np2tensor(self, np_frame):
-         if (self.half == False):
-             return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
-         else:
-             return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
-     def tensor2np(self, tensor):
-         if (self.half == False):
-             return (
-                 np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
-         else:
-             return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
-                                  (1, 2, 0)))
-
-     def __call__(self, frame, tile_mode):
-         with torch.no_grad():
-             tensor = self.np2tensor(frame)
-             result = self.tensor2np(self.model(tensor, tile_mode))
-         return result
-
-
- if __name__ == "__main__":
-     ###########inference_img
-     import time, cv2, sys
-     from time import time as ttime
-
-     for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
-                                ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
-         for tile_mode in [0, 1, 2, 3, 4]:
-             upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
-             input_dir = "%s/input_dir1" % root_path
-             output_dir = "%s/opt-dir-all-test" % root_path
-             os.makedirs(output_dir, exist_ok=True)
-             for name in os.listdir(input_dir):
-                 print(name)
-                 tmp = name.split(".")
-                 inp_path = os.path.join(input_dir, name)
-                 suffix = tmp[-1]
-                 prefix = ".".join(tmp[:-1])
-                 tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                 print(inp_path, tmp_path)
-                 # supports Chinese (non-ASCII) paths
-                 # os.link(inp_path, tmp_path)  # use a hard link on Windows
-                 os.symlink(inp_path, tmp_path)  # use a symlink on Linux
-                 frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
-                 t0 = ttime()
-                 result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
-                 t1 = ttime()
-                 print(prefix, "done", t1 - t0)
-                 tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                 cv2.imwrite(tmp_opt_path, result)
-                 n = 0
-                 while (1):
-                     if (n == 0):
-                         suffix = "_%sx_tile%s.png" % (scale, tile_mode)
-                     else:
-                         suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n)  #
-                     if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
-                         break
-                     else:
-                         n += 1
-                 final_opt_path = os.path.join(output_dir, prefix + suffix)
-                 os.rename(tmp_opt_path, final_opt_path)
-                 os.remove(tmp_path)
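Outside the __main__ demo above, the upscaler is driven the same way; a minimal sketch (the file names are illustrative, and the weight path follows the naming used in the demo loop):

    import cv2

    upscaler = RealWaifuUpScaler(2, "weights_v3/up2x-latest-denoise3x.pth",
                                 half=False, device="cpu")  # half=False keeps it CPU-safe
    frame = cv2.imread("input.png")[:, :, [2, 1, 0]]         # BGR -> RGB (fancy indexing copies)
    result = upscaler(frame, tile_mode=2)[:, :, ::-1]        # RGB -> BGR for cv2.imwrite
    cv2.imwrite("output_2x.png", result)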
spaces/Banbri/zcvzcv/src/app/interface/top-menu/index.tsx DELETED
@@ -1,260 +0,0 @@
- "use client"
-
- import { useEffect, useState } from "react"
- import { useSearchParams } from "next/navigation"
- import Image from "next/image"
-
- import {
-   Select,
-   SelectContent,
-   SelectItem,
-   SelectTrigger,
-   SelectValue,
- } from "@/components/ui/select"
- import { Label } from "@/components/ui/label"
- import { cn } from "@/lib/utils"
- import { FontName, defaultFont } from "@/lib/fonts"
- import { Input } from "@/components/ui/input"
- import { PresetName, defaultPreset, nonRandomPresets, presets } from "@/app/engine/presets"
- import { useStore } from "@/app/store"
- import { Button } from "@/components/ui/button"
- import { LayoutName, allLayoutLabels, defaultLayout, nonRandomLayouts } from "@/app/layouts"
-
- import layoutPreview0 from "../../../../public/layouts/layout0.jpg"
- import layoutPreview1 from "../../../../public/layouts/layout1.jpg"
- import layoutPreview2 from "../../../../public/layouts/layout2.jpg"
- import layoutPreview3 from "../../../../public/layouts/layout3.jpg"
- import { StaticImageData } from "next/image"
- import { Switch } from "@/components/ui/switch"
-
- const layoutIcons: Partial<Record<LayoutName, StaticImageData>> = {
-   Layout0: layoutPreview0,
-   Layout1: layoutPreview1,
-   Layout2: layoutPreview2,
-   Layout3: layoutPreview3,
-   Layout4: undefined,
- }
-
- export function TopMenu() {
-   // const font = useStore(state => state.font)
-   // const setFont = useStore(state => state.setFont)
-   const preset = useStore(state => state.preset)
-   const prompt = useStore(state => state.prompt)
-   const layout = useStore(state => state.layout)
-   const setLayout = useStore(state => state.setLayout)
-
-   const setShowCaptions = useStore(state => state.setShowCaptions)
-   const showCaptions = useStore(state => state.showCaptions)
-
-   const generate = useStore(state => state.generate)
-
-   const isGeneratingStory = useStore(state => state.isGeneratingStory)
-   const atLeastOnePanelIsBusy = useStore(state => state.atLeastOnePanelIsBusy)
-   const isBusy = isGeneratingStory || atLeastOnePanelIsBusy
-
-   const searchParams = useSearchParams()
-
-   const requestedPreset = (searchParams.get('preset') as PresetName) || defaultPreset
-   const requestedFont = (searchParams.get('font') as FontName) || defaultFont
-   const requestedPrompt = (searchParams.get('prompt') as string) || ""
-   const requestedLayout = (searchParams.get('layout') as LayoutName) || defaultLayout
-
-   const [draftPrompt, setDraftPrompt] = useState(requestedPrompt)
-   const [draftPreset, setDraftPreset] = useState<PresetName>(requestedPreset)
-   const [draftLayout, setDraftLayout] = useState<LayoutName>(requestedLayout)
-
-   const handleSubmit = () => {
-     const promptChanged = draftPrompt.trim() !== prompt.trim()
-     const presetChanged = draftPreset !== preset.id
-     const layoutChanged = draftLayout !== layout
-     if (!isBusy && (promptChanged || presetChanged || layoutChanged)) {
-       generate(draftPrompt, draftPreset, draftLayout)
-     }
-   }
-
-   useEffect(() => {
-     const layoutChanged = draftLayout !== layout
-     if (layoutChanged && !isBusy) {
-       setLayout(draftLayout)
-     }
-   }, [layout, draftLayout, isBusy])
-
-   return (
-     <div className={cn(
-       `print:hidden`,
-       `z-10 fixed top-0 left-0 right-0`,
-       `flex flex-col md:flex-row w-full justify-between items-center`,
-       `backdrop-blur-xl`,
-       `transition-all duration-200 ease-in-out`,
-       `px-2 py-2 border-b-1 border-gray-50 dark:border-gray-50`,
-       `bg-stone-900/70 dark:bg-stone-900/70 text-gray-50 dark:text-gray-50`,
-       `space-y-2 md:space-y-0 md:space-x-3 lg:space-x-6`
-     )}>
-       <div className="flex flex-row space-x-2 md:space-x-3 w-full md:w-auto">
-         <div className={cn(
-           `transition-all duration-200 ease-in-out`,
-           `flex flex-row items-center justify-start space-x-3 font-mono`,
-           `flex-grow`
-         )}>
-
-           {/* <Label className="flex text-2xs md:text-sm md:w-24">Style:</Label> */}
-
-           <Select
-             defaultValue={defaultPreset}
-             onValueChange={(value) => { setDraftPreset(value as PresetName) }}
-             disabled={isBusy}
-           >
-             <SelectTrigger className="flex-grow">
-               <SelectValue className="text-2xs md:text-sm" placeholder="Style" />
-             </SelectTrigger>
-             <SelectContent>
-               {nonRandomPresets.map(key =>
-                 <SelectItem key={key} value={key}>{presets[key].label}</SelectItem>
-               )}
-             </SelectContent>
-           </Select>
-         </div>
-         <div className={cn(
-           `transition-all duration-200 ease-in-out`,
-           `flex flex-row items-center justify-start space-x-3 font-mono`,
-           `w-40`
-         )}>
-
-           {/* <Label className="flex text-2xs md:text-sm md:w-24">Style:</Label> */}
-
-           <Select
-             defaultValue={defaultLayout}
-             onValueChange={(value) => { setDraftLayout(value as LayoutName) }}
-             disabled={isBusy}
-           >
-             <SelectTrigger className="flex-grow">
-               <SelectValue className="text-2xs md:text-sm" placeholder="Layout" />
-             </SelectTrigger>
-             <SelectContent>
-               {nonRandomLayouts.map(key =>
-                 <SelectItem key={key} value={key} className="w-full">
-                   <div className="space-x-6 flex flex-row items-center justify-between font-mono">
-                     <div className="flex">{
-                       (allLayoutLabels as any)[key]
-                     }</div>
-
-                     {(layoutIcons as any)[key]
-                       ? <Image
-                           className="rounded-sm opacity-75"
-                           src={(layoutIcons as any)[key]}
-                           width={20}
-                           height={18}
-                           alt={key}
-                         /> : null}
-
-                   </div>
-                 </SelectItem>
-               )}
-             </SelectContent>
-           </Select>
-         </div>
-         <div className="flex flex-row items-center space-x-3">
-           <Switch
-             checked={showCaptions}
-             onCheckedChange={setShowCaptions}
-           />
-           <Label>
-             <span className="hidden md:inline">Caption</span>
-             <span className="inline md:hidden">Cap.</span>
-           </Label>
-         </div>
-         {/*
-         <div className={cn(
-           `transition-all duration-200 ease-in-out`,
-           `flex flex-row items-center space-x-3 font-mono w-1/2 md:w-auto md:hidden`
-         )}>
-           <Label className="flex text-2xs md:text-sm md:w-24">Font:</Label>
-           <Select
-             defaultValue={fontList.includes(preset.font) ? preset.font : "cartoonist"}
-             onValueChange={(value) => { setFont(value as FontName) }}
-             disabled={atLeastOnePanelIsBusy}
-           >
-             <SelectTrigger className="flex-grow">
-               <SelectValue className="text-2xs md:text-sm" placeholder="Type" />
-             </SelectTrigger>
-             <SelectContent>
-               {Object.keys(fonts)
-                 .map((font) =>
-                   <SelectItem
-                     key={font}
-                     value={font}>{
-                       font
-                     }</SelectItem>
-                 )}
-             </SelectContent>
-           </Select>
-         </div>
-         */}
-       </div>
-       <div className={cn(
-         `transition-all duration-200 ease-in-out`,
-         `flex flex-grow flex-col space-y-2 md:space-y-0 md:flex-row items-center md:space-x-3 font-mono w-full md:w-auto`
-       )}>
-         <div className="flex flex-row flex-grow w-full">
-           <Input
-             placeholder="Story"
-             className="w-full bg-neutral-300 text-neutral-800 dark:bg-neutral-300 dark:text-neutral-800 rounded-r-none"
-             // disabled={atLeastOnePanelIsBusy}
-             onChange={(e) => {
-               setDraftPrompt(e.target.value)
-             }}
-             onKeyDown={({ key }) => {
-               if (key === 'Enter') {
-                 handleSubmit()
-               }
-             }}
-             value={draftPrompt}
-           />
-           <Button
-             className={cn(
-               `rounded-l-none cursor-pointer`,
-               `transition-all duration-200 ease-in-out`,
-               `bg-[rgb(59,134,247)] hover:bg-[rgb(69,144,255)] disabled:bg-[rgb(59,134,247)]`
-             )}
-             onClick={() => {
-               handleSubmit()
-             }}
-             disabled={!draftPrompt?.trim().length || isBusy}
-           >
-             Go
-           </Button>
-         </div>
-       </div>
-       {/*
-       Let's add this feature later, because right now people
-       are confused about why they can't activate it
-       <div className={cn(
-         `transition-all duration-200 ease-in-out`,
-         `hidden md:flex flex-row items-center space-x-3 font-mono w-full md:w-auto`
-       )}>
-         <Label className="flex text-2xs md:text-sm w-24">Font:</Label>
-         <Select
-           defaultValue={fontList.includes(preset.font) ? preset.font : "actionman"}
-           onValueChange={(value) => { setFont(value as FontName) }}
-           // disabled={isBusy}
-           disabled={true}
-         >
-           <SelectTrigger className="flex-grow">
-             <SelectValue className="text-2xs md:text-sm" placeholder="Type" />
-           </SelectTrigger>
-           <SelectContent>
-             {Object.keys(fonts)
-               .map((font) =>
-                 <SelectItem
-                   key={font}
-                   value={font}>{
-                     font
-                   }</SelectItem>
-               )}
-           </SelectContent>
-         </Select>
-       </div>
-       */}
-     </div>
-   )
- }
spaces/BasToTheMax/TTS/Dockerfile DELETED
@@ -1,34 +0,0 @@
- # Python
- FROM python:3.9
-
- # update apt
- RUN apt-get update -y
-
- # Add apt packages
- RUN apt-get install libsndfile1 curl wget -y
-
- # Deps
- RUN apt install libsndfile1 espeak-ng -y
-
- # Set up a new user named "user" with user ID 1000
- RUN useradd -m -u 1000 user
-
- # Switch to the "user" user
- USER user
-
- # Set home to the user's home directory
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH
-
- # Set the working directory to the user's home directory
- WORKDIR $HOME/app
-
- RUN pip install --no-cache-dir --upgrade tts
-
- # Copy the current directory contents into the container at $HOME/app setting the owner to the user
- COPY --chown=user . $HOME/app
-
- # COPY . .
-
- # CMD ["tts-server", "--model_name", "tts_models/en/ljspeech/glow-tts", "--port", "7860"]
- CMD ["tts-server", "--model_name", "tts_models/en/ljspeech/vits", "--port", "7860"]
spaces/BetterAPI/BetterChat/src/app.d.ts DELETED
@@ -1,17 +0,0 @@
- /// <reference types="@sveltejs/kit" />
- /// <reference types="unplugin-icons/types/svelte" />
-
- // See https://kit.svelte.dev/docs/types#app
- // for information about these interfaces
- declare global {
-     namespace App {
-         // interface Error {}
-         interface Locals {
-             sessionId: string;
-         }
-         // interface PageData {}
-         // interface Platform {}
-     }
- }
-
- export {};
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/misc.py DELETED
@@ -1,730 +0,0 @@
1
- # The following comment should be removed at some point in the future.
2
- # mypy: strict-optional=False
3
-
4
- import contextlib
5
- import errno
6
- import getpass
7
- import hashlib
8
- import io
9
- import logging
10
- import os
11
- import posixpath
12
- import shutil
13
- import stat
14
- import sys
15
- import sysconfig
16
- import urllib.parse
17
- from io import StringIO
18
- from itertools import filterfalse, tee, zip_longest
19
- from types import TracebackType
20
- from typing import (
21
- Any,
22
- BinaryIO,
23
- Callable,
24
- ContextManager,
25
- Dict,
26
- Generator,
27
- Iterable,
28
- Iterator,
29
- List,
30
- Optional,
31
- TextIO,
32
- Tuple,
33
- Type,
34
- TypeVar,
35
- Union,
36
- cast,
37
- )
38
-
39
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
40
- from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
41
-
42
- from pip import __version__
43
- from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment
44
- from pip._internal.locations import get_major_minor_version
45
- from pip._internal.utils.compat import WINDOWS
46
- from pip._internal.utils.virtualenv import running_under_virtualenv
47
-
48
- __all__ = [
49
- "rmtree",
50
- "display_path",
51
- "backup_dir",
52
- "ask",
53
- "splitext",
54
- "format_size",
55
- "is_installable_dir",
56
- "normalize_path",
57
- "renames",
58
- "get_prog",
59
- "captured_stdout",
60
- "ensure_dir",
61
- "remove_auth_from_url",
62
- "check_externally_managed",
63
- "ConfiguredBuildBackendHookCaller",
64
- ]
65
-
66
- logger = logging.getLogger(__name__)
67
-
68
- T = TypeVar("T")
69
- ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
70
- VersionInfo = Tuple[int, int, int]
71
- NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]]
72
-
73
-
74
- def get_pip_version() -> str:
75
- pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..")
76
- pip_pkg_dir = os.path.abspath(pip_pkg_dir)
77
-
78
- return "pip {} from {} (python {})".format(
79
- __version__,
80
- pip_pkg_dir,
81
- get_major_minor_version(),
82
- )
83
-
84
-
85
- def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]:
86
- """
87
- Convert a tuple of ints representing a Python version to one of length
88
- three.
89
-
90
- :param py_version_info: a tuple of ints representing a Python version,
91
- or None to specify no version. The tuple can have any length.
92
-
93
- :return: a tuple of length three if `py_version_info` is non-None.
94
- Otherwise, return `py_version_info` unchanged (i.e. None).
95
- """
96
- if len(py_version_info) < 3:
97
- py_version_info += (3 - len(py_version_info)) * (0,)
98
- elif len(py_version_info) > 3:
99
- py_version_info = py_version_info[:3]
100
-
101
- return cast("VersionInfo", py_version_info)
102
-
103
-
104
- def ensure_dir(path: str) -> None:
105
- """os.path.makedirs without EEXIST."""
106
- try:
107
- os.makedirs(path)
108
- except OSError as e:
109
- # Windows can raise spurious ENOTEMPTY errors. See #6426.
110
- if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
111
- raise
112
-
113
-
114
- def get_prog() -> str:
115
- try:
116
- prog = os.path.basename(sys.argv[0])
117
- if prog in ("__main__.py", "-c"):
118
- return f"{sys.executable} -m pip"
119
- else:
120
- return prog
121
- except (AttributeError, TypeError, IndexError):
122
- pass
123
- return "pip"
124
-
125
-
126
- # Retry every half second for up to 3 seconds
127
- # Tenacity raises RetryError by default, explicitly raise the original exception
128
- @retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5))
129
- def rmtree(dir: str, ignore_errors: bool = False) -> None:
130
- shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler)
131
-
132
-
133
- def rmtree_errorhandler(func: Callable[..., Any], path: str, exc_info: ExcInfo) -> None:
134
- """On Windows, the files in .svn are read-only, so when rmtree() tries to
135
- remove them, an exception is thrown. We catch that here, remove the
136
- read-only attribute, and hopefully continue without problems."""
137
- try:
138
- has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
139
- except OSError:
140
- # it's equivalent to os.path.exists
141
- return
142
-
143
- if has_attr_readonly:
144
- # convert to read/write
145
- os.chmod(path, stat.S_IWRITE)
146
- # use the original function to repeat the operation
147
- func(path)
148
- return
149
- else:
150
- raise
151
-
152
-
153
- def display_path(path: str) -> str:
154
- """Gives the display value for a given path, making it relative to cwd
155
- if possible."""
156
- path = os.path.normcase(os.path.abspath(path))
157
- if path.startswith(os.getcwd() + os.path.sep):
158
- path = "." + path[len(os.getcwd()) :]
159
- return path
160
-
161
-
162
- def backup_dir(dir: str, ext: str = ".bak") -> str:
163
- """Figure out the name of a directory to back up the given dir to
164
- (adding .bak, .bak2, etc)"""
165
- n = 1
166
- extension = ext
167
- while os.path.exists(dir + extension):
168
- n += 1
169
- extension = ext + str(n)
170
- return dir + extension
171
-
172
-
173
- def ask_path_exists(message: str, options: Iterable[str]) -> str:
174
- for action in os.environ.get("PIP_EXISTS_ACTION", "").split():
175
- if action in options:
176
- return action
177
- return ask(message, options)
178
-
179
-
180
- def _check_no_input(message: str) -> None:
181
- """Raise an error if no input is allowed."""
182
- if os.environ.get("PIP_NO_INPUT"):
183
- raise Exception(
184
- f"No input was expected ($PIP_NO_INPUT set); question: {message}"
185
- )
186
-
187
-
188
- def ask(message: str, options: Iterable[str]) -> str:
189
- """Ask the message interactively, with the given possible responses"""
190
- while 1:
191
- _check_no_input(message)
192
- response = input(message)
193
- response = response.strip().lower()
194
- if response not in options:
195
- print(
196
- "Your response ({!r}) was not one of the expected responses: "
197
- "{}".format(response, ", ".join(options))
198
- )
199
- else:
200
- return response
201
-
202
-
203
- def ask_input(message: str) -> str:
204
- """Ask for input interactively."""
205
- _check_no_input(message)
206
- return input(message)
207
-
208
-
209
- def ask_password(message: str) -> str:
210
- """Ask for a password interactively."""
211
- _check_no_input(message)
212
- return getpass.getpass(message)
213
-
214
-
215
- def strtobool(val: str) -> int:
216
- """Convert a string representation of truth to true (1) or false (0).
217
-
218
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
219
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
220
- 'val' is anything else.
221
- """
222
- val = val.lower()
223
- if val in ("y", "yes", "t", "true", "on", "1"):
224
- return 1
225
- elif val in ("n", "no", "f", "false", "off", "0"):
226
- return 0
227
- else:
228
- raise ValueError(f"invalid truth value {val!r}")
229
-
230
-
231
- def format_size(bytes: float) -> str:
232
- if bytes > 1000 * 1000:
233
- return "{:.1f} MB".format(bytes / 1000.0 / 1000)
234
- elif bytes > 10 * 1000:
235
- return "{} kB".format(int(bytes / 1000))
236
- elif bytes > 1000:
237
- return "{:.1f} kB".format(bytes / 1000.0)
238
- else:
239
- return "{} bytes".format(int(bytes))
240
-
241
-
242
- def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]:
243
- """Return a list of formatted rows and a list of column sizes.
244
-
245
- For example::
246
-
247
- >>> tabulate([['foobar', 2000], [0xdeadbeef]])
248
- (['foobar 2000', '3735928559'], [10, 4])
249
- """
250
- rows = [tuple(map(str, row)) for row in rows]
251
- sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")]
252
- table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows]
253
- return table, sizes
254
-
255
-
256
- def is_installable_dir(path: str) -> bool:
257
- """Is path is a directory containing pyproject.toml or setup.py?
258
-
259
- If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for
260
- a legacy setuptools layout by identifying setup.py. We don't check for the
261
- setup.cfg because using it without setup.py is only available for PEP 517
262
- projects, which are already covered by the pyproject.toml check.
263
- """
264
- if not os.path.isdir(path):
265
- return False
266
- if os.path.isfile(os.path.join(path, "pyproject.toml")):
267
- return True
268
- if os.path.isfile(os.path.join(path, "setup.py")):
269
- return True
270
- return False
271
-
272
-
273
- def read_chunks(
274
- file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE
275
- ) -> Generator[bytes, None, None]:
276
- """Yield pieces of data from a file-like object until EOF."""
277
- while True:
278
- chunk = file.read(size)
279
- if not chunk:
280
- break
281
- yield chunk
282
-
283
-
284
- def normalize_path(path: str, resolve_symlinks: bool = True) -> str:
285
- """
286
- Convert a path to its canonical, case-normalized, absolute version.
287
-
288
- """
289
- path = os.path.expanduser(path)
290
- if resolve_symlinks:
291
- path = os.path.realpath(path)
292
- else:
293
- path = os.path.abspath(path)
294
- return os.path.normcase(path)
295
-
296
-
297
- def splitext(path: str) -> Tuple[str, str]:
298
- """Like os.path.splitext, but take off .tar too"""
299
- base, ext = posixpath.splitext(path)
300
- if base.lower().endswith(".tar"):
301
- ext = base[-4:] + ext
302
- base = base[:-4]
303
- return base, ext
304
-
305
-
306
- def renames(old: str, new: str) -> None:
307
- """Like os.renames(), but handles renaming across devices."""
308
- # Implementation borrowed from os.renames().
309
- head, tail = os.path.split(new)
310
- if head and tail and not os.path.exists(head):
311
- os.makedirs(head)
312
-
313
- shutil.move(old, new)
314
-
315
- head, tail = os.path.split(old)
316
- if head and tail:
317
- try:
318
- os.removedirs(head)
319
- except OSError:
320
- pass
321
-
322
-
323
- def is_local(path: str) -> bool:
324
- """
325
- Return True if path is within sys.prefix, if we're running in a virtualenv.
326
-
327
- If we're not in a virtualenv, all paths are considered "local."
328
-
329
- Caution: this function assumes the head of path has been normalized
330
- with normalize_path.
331
- """
332
- if not running_under_virtualenv():
333
- return True
334
- return path.startswith(normalize_path(sys.prefix))
335
-
336
-
337
- def write_output(msg: Any, *args: Any) -> None:
338
- logger.info(msg, *args)
339
-
340
-
341
- class StreamWrapper(StringIO):
342
- orig_stream: TextIO = None
343
-
344
- @classmethod
345
- def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper":
346
- cls.orig_stream = orig_stream
347
- return cls()
348
-
349
- # compileall.compile_dir() needs stdout.encoding to print to stdout
350
- # https://github.com/python/mypy/issues/4125
351
- @property
352
- def encoding(self): # type: ignore
353
- return self.orig_stream.encoding
354
-
355
-
356
- @contextlib.contextmanager
357
- def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]:
358
- """Return a context manager used by captured_stdout/stdin/stderr
359
- that temporarily replaces the sys stream *stream_name* with a StringIO.
360
-
361
- Taken from Lib/support/__init__.py in the CPython repo.
362
- """
363
- orig_stdout = getattr(sys, stream_name)
364
- setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
365
- try:
366
- yield getattr(sys, stream_name)
367
- finally:
368
- setattr(sys, stream_name, orig_stdout)
369
-
370
-
371
- def captured_stdout() -> ContextManager[StreamWrapper]:
372
- """Capture the output of sys.stdout:
373
-
374
- with captured_stdout() as stdout:
375
- print('hello')
376
- self.assertEqual(stdout.getvalue(), 'hello\n')
377
-
378
- Taken from Lib/support/__init__.py in the CPython repo.
379
- """
380
- return captured_output("stdout")
381
-
382
-
383
- def captured_stderr() -> ContextManager[StreamWrapper]:
384
- """
385
- See captured_stdout().
386
- """
387
- return captured_output("stderr")
388
-
389
-
390
- # Simulates an enum
391
- def enum(*sequential: Any, **named: Any) -> Type[Any]:
392
- enums = dict(zip(sequential, range(len(sequential))), **named)
393
- reverse = {value: key for key, value in enums.items()}
394
- enums["reverse_mapping"] = reverse
395
- return type("Enum", (), enums)
396
-
397
-
398
- def build_netloc(host: str, port: Optional[int]) -> str:
399
- """
400
- Build a netloc from a host-port pair
401
- """
402
- if port is None:
403
- return host
404
- if ":" in host:
405
- # Only wrap host with square brackets when it is IPv6
406
- host = f"[{host}]"
407
- return f"{host}:{port}"
408
-
409
-
410
- def build_url_from_netloc(netloc: str, scheme: str = "https") -> str:
411
- """
412
- Build a full URL from a netloc.
413
- """
414
- if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc:
415
- # It must be a bare IPv6 address, so wrap it with brackets.
416
- netloc = f"[{netloc}]"
417
- return f"{scheme}://{netloc}"
418
-
419
-
420
- def parse_netloc(netloc: str) -> Tuple[str, Optional[int]]:
421
- """
422
- Return the host-port pair from a netloc.
423
- """
424
- url = build_url_from_netloc(netloc)
425
- parsed = urllib.parse.urlparse(url)
426
- return parsed.hostname, parsed.port
427
-
-
- def split_auth_from_netloc(netloc: str) -> NetlocTuple:
-     """
-     Parse out and remove the auth information from a netloc.
-
-     Returns: (netloc, (username, password)).
-     """
-     if "@" not in netloc:
-         return netloc, (None, None)
-
-     # Split from the right because that's how urllib.parse.urlsplit()
-     # behaves if more than one @ is present (which can be checked using
-     # the password attribute of urlsplit()'s return value).
-     auth, netloc = netloc.rsplit("@", 1)
-     pw: Optional[str] = None
-     if ":" in auth:
-         # Split from the left because that's how urllib.parse.urlsplit()
-         # behaves if more than one : is present (which again can be checked
-         # using the password attribute of the return value)
-         user, pw = auth.split(":", 1)
-     else:
-         user, pw = auth, None
-
-     user = urllib.parse.unquote(user)
-     if pw is not None:
-         pw = urllib.parse.unquote(pw)
-
-     return netloc, (user, pw)
-
-
- def redact_netloc(netloc: str) -> str:
-     """
-     Replace the sensitive data in a netloc with "****", if it exists.
-
-     For example:
-         - "user:[email protected]" returns "user:****@example.com"
-         - "[email protected]" returns "****@example.com"
-     """
-     netloc, (user, password) = split_auth_from_netloc(netloc)
-     if user is None:
-         return netloc
-     if password is None:
-         user = "****"
-         password = ""
-     else:
-         user = urllib.parse.quote(user)
-         password = ":****"
-     return "{user}{password}@{netloc}".format(
-         user=user, password=password, netloc=netloc
-     )
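The auth-splitting and redaction behavior above, sketched with hypothetical credentials:

    assert split_auth_from_netloc("user:[email protected]") == (
        "example.com",
        ("user", "s3cret"),
    )
    assert split_auth_from_netloc("example.com") == ("example.com", (None, None))

    assert redact_netloc("user:[email protected]") == "user:****@example.com"
    assert redact_netloc("[email protected]") == "****@example.com"  # bare username is hidden too
    assert redact_netloc("example.com") == "example.com"             # nothing to redact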
-
-
- def _transform_url(
-     url: str, transform_netloc: Callable[[str], Tuple[Any, ...]]
- ) -> Tuple[str, NetlocTuple]:
-     """Transform and replace netloc in a url.
-
-     transform_netloc is a function taking the netloc and returning a
-     tuple. The first element of this tuple is the new netloc. The
-     entire tuple is returned.
-
-     Returns a tuple containing the transformed url as item 0 and the
-     original tuple returned by transform_netloc as item 1.
-     """
-     purl = urllib.parse.urlsplit(url)
-     netloc_tuple = transform_netloc(purl.netloc)
-     # stripped url
-     url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
-     surl = urllib.parse.urlunsplit(url_pieces)
-     return surl, cast("NetlocTuple", netloc_tuple)
-
-
- def _get_netloc(netloc: str) -> NetlocTuple:
-     return split_auth_from_netloc(netloc)
-
-
- def _redact_netloc(netloc: str) -> Tuple[str]:
-     return (redact_netloc(netloc),)
-
-
- def split_auth_netloc_from_url(url: str) -> Tuple[str, str, Tuple[str, str]]:
-     """
-     Parse a url into separate netloc, auth, and url with no auth.
-
-     Returns: (url_without_auth, netloc, (username, password))
-     """
-     url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
-     return url_without_auth, netloc, auth
-
-
- def remove_auth_from_url(url: str) -> str:
-     """Return a copy of url with 'username:password@' removed."""
-     # username/pass params are passed to subversion through flags
-     # and are not recognized in the url.
-     return _transform_url(url, _get_netloc)[0]
-
-
- def redact_auth_from_url(url: str) -> str:
-     """Replace the password in a given url with ****."""
-     return _transform_url(url, _redact_netloc)[0]
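The three URL-level wrappers above compose _transform_url with the netloc helpers — a sketch with a hypothetical index URL:

    url = "https://user:[email protected]/simple/"

    assert remove_auth_from_url(url) == "https://example.com/simple/"
    assert redact_auth_from_url(url) == "https://user:****@example.com/simple/"
    assert split_auth_netloc_from_url(url) == (
        "https://example.com/simple/",   # URL with auth stripped
        "example.com",                   # netloc without auth
        ("user", "s3cret"),              # the extracted credentials
    )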
-
-
- class HiddenText:
-     def __init__(self, secret: str, redacted: str) -> None:
-         self.secret = secret
-         self.redacted = redacted
-
-     def __repr__(self) -> str:
-         return "<HiddenText {!r}>".format(str(self))
-
-     def __str__(self) -> str:
-         return self.redacted
-
-     # This is useful for testing.
-     def __eq__(self, other: Any) -> bool:
-         if type(self) != type(other):
-             return False
-
-         # The string being used for redaction doesn't also have to match,
-         # just the raw, original string.
-         return self.secret == other.secret
-
-
- def hide_value(value: str) -> HiddenText:
-     return HiddenText(value, redacted="****")
-
-
- def hide_url(url: str) -> HiddenText:
-     redacted = redact_auth_from_url(url)
-     return HiddenText(url, redacted=redacted)
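HiddenText keeps the real value around while making str() and repr() safe to log — a sketch with a hypothetical URL:

    hidden = hide_url("https://user:[email protected]/simple/")

    print(hidden)  # https://user:****@example.com/simple/
    assert hidden.secret == "https://user:[email protected]/simple/"
    # Equality compares the underlying secrets, not the redacted forms:
    assert hidden == hide_url("https://user:[email protected]/simple/")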
-
-
- def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
-     """Protection of pip.exe from modification on Windows
-
-     On Windows, any operation modifying pip should be run as:
-         python -m pip ...
-     """
-     pip_names = [
-         "pip",
-         f"pip{sys.version_info.major}",
-         f"pip{sys.version_info.major}.{sys.version_info.minor}",
-     ]
-
-     # See https://github.com/pypa/pip/issues/1299 for more discussion
-     should_show_use_python_msg = (
-         modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
-     )
-
-     if should_show_use_python_msg:
-         new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
-         raise CommandError(
-             "To modify pip, please run the following command:\n{}".format(
-                 " ".join(new_command)
-             )
-         )
-
-
- def check_externally_managed() -> None:
-     """Check whether the current environment is externally managed.
-
-     If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
-     is considered externally managed, and an ExternallyManagedEnvironment is
-     raised.
-     """
-     if running_under_virtualenv():
-         return
-     marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
-     if not os.path.isfile(marker):
-         return
-     raise ExternallyManagedEnvironment.from_config(marker)
-
-
- def is_console_interactive() -> bool:
-     """Is this console interactive?"""
-     return sys.stdin is not None and sys.stdin.isatty()
-
-
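check_externally_managed() implements the PEP 668 check: outside a virtualenv, an EXTERNALLY-MANAGED file next to the standard library marks the environment as off-limits to pip. A standalone sketch of the same lookup, using only the standard library:

    import os
    import sys
    import sysconfig

    # Where the PEP 668 marker file is expected to live:
    marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
    print("externally managed:", os.path.isfile(marker))

    # The same check is_console_interactive() performs:
    print("interactive:", sys.stdin is not None and sys.stdin.isatty())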
- def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]:
-     """Return (hash, length) for path using hashlib.sha256()"""
-
-     h = hashlib.sha256()
-     length = 0
-     with open(path, "rb") as f:
-         for block in read_chunks(f, size=blocksize):
-             length += len(block)
-             h.update(block)
-     return h, length
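hash_file returns the live hashlib object plus the byte count, so the caller chooses the digest form — a sketch with a hypothetical path (read_chunks is another helper in the same module):

    h, length = hash_file("requirements.txt")  # hypothetical file
    print(h.hexdigest(), length)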
-
-
- def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]:
-     """
-     Return paired elements.
-
-     For example:
-         s -> (s0, s1), (s2, s3), (s4, s5), ...
-     """
-     iterable = iter(iterable)
-     return zip_longest(iterable, iterable)
-
-
- def partition(
-     pred: Callable[[T], bool],
-     iterable: Iterable[T],
- ) -> Tuple[Iterable[T], Iterable[T]]:
-     """
-     Use a predicate to partition entries into false entries and true entries,
-     like
-
-         partition(is_odd, range(10)) --> 0 2 4 6 8  and  1 3 5 7 9
-     """
-     t1, t2 = tee(iterable)
-     return filterfalse(pred, t1), filter(pred, t2)
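pairwise and partition in action — note that pairwise pads an odd-length tail with None via zip_longest:

    assert list(pairwise([1, 2, 3, 4, 5])) == [(1, 2), (3, 4), (5, None)]

    def is_odd(n: int) -> bool:
        return n % 2 == 1

    evens, odds = partition(is_odd, range(10))
    assert list(evens) == [0, 2, 4, 6, 8]  # entries where the predicate is false
    assert list(odds) == [1, 3, 5, 7, 9]   # entries where it is true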
-
-
- class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller):
-     def __init__(
-         self,
-         config_holder: Any,
-         source_dir: str,
-         build_backend: str,
-         backend_path: Optional[str] = None,
-         runner: Optional[Callable[..., None]] = None,
-         python_executable: Optional[str] = None,
-     ):
-         super().__init__(
-             source_dir, build_backend, backend_path, runner, python_executable
-         )
-         self.config_holder = config_holder
-
-     def build_wheel(
-         self,
-         wheel_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         metadata_directory: Optional[str] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_wheel(
-             wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-         )
-
-     def build_sdist(
-         self,
-         sdist_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_sdist(sdist_directory, config_settings=cs)
-
-     def build_editable(
-         self,
-         wheel_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         metadata_directory: Optional[str] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_editable(
-             wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-         )
-
-     def get_requires_for_build_wheel(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_wheel(config_settings=cs)
-
-     def get_requires_for_build_sdist(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_sdist(config_settings=cs)
-
-     def get_requires_for_build_editable(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_editable(config_settings=cs)
-
-     def prepare_metadata_for_build_wheel(
-         self,
-         metadata_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         _allow_fallback: bool = True,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().prepare_metadata_for_build_wheel(
-             metadata_directory=metadata_directory,
-             config_settings=cs,
-             _allow_fallback=_allow_fallback,
-         )
-
-     def prepare_metadata_for_build_editable(
-         self,
-         metadata_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         _allow_fallback: bool = True,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().prepare_metadata_for_build_editable(
-             metadata_directory=metadata_directory,
-             config_settings=cs,
-             _allow_fallback=_allow_fallback,
-         )
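ConfiguredBuildBackendHookCaller pins a fixed config_settings mapping onto every PEP 517 hook call. A construction sketch — the holder object, paths, and settings below are all hypothetical; any object exposing a config_settings attribute works:

    from types import SimpleNamespace

    # Hypothetical holder and settings:
    config = SimpleNamespace(config_settings={"--build-option": ["--quiet"]})

    hook_caller = ConfiguredBuildBackendHookCaller(
        config_holder=config,
        source_dir="/path/to/project",        # hypothetical path
        build_backend="setuptools.build_meta",
    )

    # Each hook now receives the held settings without the caller passing them:
    # wheel_name = hook_caller.build_wheel("/path/to/dist")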
spaces/Blessin/one-liners/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: One Liners
- emoji: 📈
- colorFrom: red
- colorTo: blue
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Brasd99/TTS-Voice-Cloner/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: TTS Voice Cloner
- emoji: 🚀
- colorFrom: purple
- colorTo: indigo
- sdk: streamlit
- sdk_version: 1.25.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVH-vn1210/make_hair/minigpt4/runners/__init__.py DELETED
@@ -1,10 +0,0 @@
- """
-  Copyright (c) 2022, salesforce.com, inc.
-  All rights reserved.
-  SPDX-License-Identifier: BSD-3-Clause
-  For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- """
-
- from minigpt4.runners.runner_base import RunnerBase
-
- __all__ = ["RunnerBase"]
spaces/CVPR/GFPGAN-example/tests/test_gfpgan_model.py DELETED
@@ -1,132 +0,0 @@
- import tempfile
- import torch
- import yaml
- from basicsr.archs.stylegan2_arch import StyleGAN2Discriminator
- from basicsr.data.paired_image_dataset import PairedImageDataset
- from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss
-
- from gfpgan.archs.arcface_arch import ResNetArcFace
- from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1
- from gfpgan.models.gfpgan_model import GFPGANModel
-
-
- def test_gfpgan_model():
-     with open('tests/data/test_gfpgan_model.yml', mode='r') as f:
-         opt = yaml.load(f, Loader=yaml.FullLoader)
-
-     # build model
-     model = GFPGANModel(opt)
-     # test attributes
-     assert model.__class__.__name__ == 'GFPGANModel'
-     assert isinstance(model.net_g, GFPGANv1)  # generator
-     assert isinstance(model.net_d, StyleGAN2Discriminator)  # discriminator
-     # facial component discriminators
-     assert isinstance(model.net_d_left_eye, FacialComponentDiscriminator)
-     assert isinstance(model.net_d_right_eye, FacialComponentDiscriminator)
-     assert isinstance(model.net_d_mouth, FacialComponentDiscriminator)
-     # identity network
-     assert isinstance(model.network_identity, ResNetArcFace)
-     # losses
-     assert isinstance(model.cri_pix, L1Loss)
-     assert isinstance(model.cri_perceptual, PerceptualLoss)
-     assert isinstance(model.cri_gan, GANLoss)
-     assert isinstance(model.cri_l1, L1Loss)
-     # optimizer
-     assert isinstance(model.optimizers[0], torch.optim.Adam)
-     assert isinstance(model.optimizers[1], torch.optim.Adam)
-
-     # prepare data
-     gt = torch.rand((1, 3, 512, 512), dtype=torch.float32)
-     lq = torch.rand((1, 3, 512, 512), dtype=torch.float32)
-     loc_left_eye = torch.rand((1, 4), dtype=torch.float32)
-     loc_right_eye = torch.rand((1, 4), dtype=torch.float32)
-     loc_mouth = torch.rand((1, 4), dtype=torch.float32)
-     data = dict(gt=gt, lq=lq, loc_left_eye=loc_left_eye, loc_right_eye=loc_right_eye, loc_mouth=loc_mouth)
-     model.feed_data(data)
-     # check data shape
-     assert model.lq.shape == (1, 3, 512, 512)
-     assert model.gt.shape == (1, 3, 512, 512)
-     assert model.loc_left_eyes.shape == (1, 4)
-     assert model.loc_right_eyes.shape == (1, 4)
-     assert model.loc_mouths.shape == (1, 4)
-
-     # ----------------- test optimize_parameters -------------------- #
-     model.feed_data(data)
-     model.optimize_parameters(1)
-     assert model.output.shape == (1, 3, 512, 512)
-     assert isinstance(model.log_dict, dict)
-     # check returned keys
-     expected_keys = [
-         'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
-         'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
-         'l_d_right_eye', 'l_d_mouth'
-     ]
-     assert set(expected_keys).issubset(set(model.log_dict.keys()))
-
-     # ----------------- remove pyramid_loss_weight -------------------- #
-     model.feed_data(data)
-     model.optimize_parameters(100000)  # larger than remove_pyramid_loss = 50000
-     assert model.output.shape == (1, 3, 512, 512)
-     assert isinstance(model.log_dict, dict)
-     # check returned keys
-     expected_keys = [
-         'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
-         'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
-         'l_d_right_eye', 'l_d_mouth'
-     ]
-     assert set(expected_keys).issubset(set(model.log_dict.keys()))
-
-     # ----------------- test save -------------------- #
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['path']['models'] = tmpdir
-         model.opt['path']['training_states'] = tmpdir
-         model.save(0, 1)
-
-     # ----------------- test the test function -------------------- #
-     model.test()
-     assert model.output.shape == (1, 3, 512, 512)
-     # delete net_g_ema
-     model.__delattr__('net_g_ema')
-     model.test()
-     assert model.output.shape == (1, 3, 512, 512)
-     assert model.net_g.training is True  # should go back to training mode after testing
-
-     # ----------------- test nondist_validation -------------------- #
-     # construct dataloader
-     dataset_opt = dict(
-         name='Demo',
-         dataroot_gt='tests/data/gt',
-         dataroot_lq='tests/data/gt',
-         io_backend=dict(type='disk'),
-         scale=4,
-         phase='val')
-     dataset = PairedImageDataset(dataset_opt)
-     dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
-     assert model.is_train is True
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['path']['visualization'] = tmpdir
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         assert model.is_train is True
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
-
-     # validation
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['is_train'] = False
-         model.opt['val']['suffix'] = 'test'
-         model.opt['path']['visualization'] = tmpdir
-         model.opt['val']['pbar'] = True
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
-
-         # if opt['val']['suffix'] is None
-         model.opt['val']['suffix'] = None
-         model.opt['name'] = 'demo'
-         model.opt['path']['visualization'] = tmpdir
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/sequence.h DELETED
@@ -1,23 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits sequence
- #include <thrust/system/cpp/detail/sequence.h>
-