parquet-converter committed
Commit faaec0e
1 Parent(s): 8f0abd7

Update parquet files (step 8 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/0x876/Yotta_Mix/app.py +0 -3
  2. spaces/0x90e/ESRGAN-MANGA/process_image.py +0 -31
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/Skyrim-Simpackdll.md +0 -88
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cle De Licence Malwarebytes Anti Malware Gratuit Tlchargez Et Installez Le Logiciel En Quelques Minutes.md +0 -113
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDIdle Pro v5.9.8.3 (precracked) free download The ultimate tool for DVD playback enhancement.md +0 -90
  6. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Spiderman Friend Or Foe Iso Pc [REPACK].md +0 -28
  7. spaces/1gistliPinn/ChatGPT4/Examples/Alertpay-Paypal Money Hack V4 - [full UPDATED Version].md +0 -8
  8. spaces/1gistliPinn/ChatGPT4/Examples/Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent.md +0 -8
  9. spaces/1gistliPinn/ChatGPT4/Examples/Avatar The Last Cockbender Full Version __FULL__.md +0 -6
  10. spaces/1gistliPinn/ChatGPT4/Examples/Descarga wifislax 4.3 torrent todo lo que necesitas saber sobre esta versin de Wifislax.md +0 -32
  11. spaces/1gistliPinn/ChatGPT4/Examples/Free Netflix Download Premium 9.2 Code File [WORK].md +0 -9
  12. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/12 Locks II A Puzzle Game with 3 Different Rooms and 12 Locks Each - APK Download.md +0 -120
  13. spaces/1phancelerku/anime-remove-background/Cricket League Full Mod APK Everything You Need to Know.md +0 -99
  14. spaces/1phancelerku/anime-remove-background/Download Airtel Payment Bank App and Enjoy Online Banking Services.md +0 -120
  15. spaces/1phancelerku/anime-remove-background/Download Nubank Fake APK for Android 2023 Explore the Features of the Famous App.md +0 -138
  16. spaces/1phancelerku/anime-remove-background/Download Onmyoji Arena APK for Android - Play Offline Strategy Game.md +0 -132
  17. spaces/20four60/Auto-GPT/README.md +0 -11
  18. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/verification.py +0 -407
  19. spaces/AI-Hobbyist/Hoyo-RVC/Changelog_KO.md +0 -91
  20. spaces/AIConsultant/MusicGen/audiocraft/grids/__init__.py +0 -6
  21. spaces/AIConsultant/MusicGen/docs/AUDIOGEN.md +0 -158
  22. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio_inpaint.py +0 -1081
  23. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/logout/+page.server.ts +0 -17
  24. spaces/Adr740/Hadith_AI_Explorer/data.py +0 -2
  25. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PreLayout.js +0 -15
  26. spaces/Ailexcoder/GPT4ALL1/app.py +0 -143
  27. spaces/AlexWang/lama/saicinpainting/training/data/__init__.py +0 -0
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/sd_text2img_k_diffusion.py +0 -475
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/text_inpainting.py +0 -302
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py +0 -335
  31. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_rpn_head.py +0 -171
  32. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py +0 -44
  33. spaces/Artificio/AdversarialArt/README.md +0 -12
  34. spaces/Artrajz/vits-simple-api/bert_vits2/text/tone_sandhi.py +0 -769
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/filesize.py +0 -89
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py +0 -8
  37. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py +0 -406
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_nms_rotated.py +0 -172
  39. spaces/Benson/text-generation/Examples/Com.p1.chomp Sms Pro Apk.md +0 -104
  40. spaces/Benson/text-generation/Examples/Descargar Canciones De M Kumaran Hijo De Mahalakshmi.md +0 -102
  41. spaces/BetterAPI/BetterChat/src/lib/utils/randomUuid.ts +0 -14
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py +0 -224
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/scripts.py +0 -437
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/wheel.py +0 -222
  45. spaces/BramVanroy/text-to-amr/README.md +0 -16
  46. spaces/C6AI/HDRL/README.md +0 -12
  47. spaces/CCaniggia/GPT/Dockerfile +0 -11
  48. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/visualizer.py +0 -1133
  49. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/plain_train_net.py +0 -231
  50. spaces/CVPR/LIVE/thrust/thrust/async/sort.h +0 -275
spaces/0x876/Yotta_Mix/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/CompVis/stable-diffusion-v1-4").launch()
spaces/0x90e/ESRGAN-MANGA/process_image.py DELETED
@@ -1,31 +0,0 @@
- import os
- import gradio as gr
- from run_cmd import run_cmd
- from PIL import Image
- import tempfile
- import uuid
- import numpy as np
-
- temp_path = tempfile.gettempdir()
-
- def inference(img, size, type):
-     if not img:
-         raise Exception("No image!")
-
-     OUTPUT_PATH = os.path.join(temp_path, f"{str(uuid.uuid4())[0:12]}_{size}.png")
-
-     img.save(OUTPUT_PATH)
-
-     if type == "Manga":
-         run_cmd(f"python inference_manga_v2.py {OUTPUT_PATH}")
-     else:
-         run_cmd(f"python inference.py {OUTPUT_PATH} {type}")
-
-     img_out = Image.open(OUTPUT_PATH)
-
-     if size == "x2":
-         img_out = img_out.resize((img_out.width // 2, img_out.height // 2), resample=Image.BICUBIC)
-
-     img_out = np.array(img_out)
-
-     return img_out, gr.File.update(value=OUTPUT_PATH)
spaces/1acneusushi/gradio-2dmoleculeeditor/Skyrim-Simpackdll.md DELETED
@@ -1,88 +0,0 @@
- ## Skyrim Simpackdll
-
-
-
-
-
- ![Skyrim Simpackdll](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcR3PTHDbn7juqcw8p6GQesIZvxzdiwnWWuAEkPfULQhrCJKwHINNl6rc5c)
-
-
-
-
-
- **LINK ===> [https://jinyurl.com/2tA0aO](https://jinyurl.com/2tA0aO)**
-
-
-
-
-
-
-
-
-
-
-
-
-
- # How to Fix Skyrim Simpackdll Error
-
-
-
- If you are trying to play Skyrim on your PC, you may encounter an error message that says "simpack.dll is missing" or "simpack.dll not found". This error means that your system does not have the simpack.dll file, which is a dynamic link library (DLL) file that is required by the Skyrim game. The simpack.dll file contains various functions and routines that are used by the game to perform simulations of mechanical systems, such as vehicle dynamics, suspension systems, and powertrain systems.
-
-
-
- The simpack.dll error can be caused by various reasons, such as a corrupt or missing DLL file, conflicts with other software, or malware infections. In this article, we will show you how to fix the Skyrim simpackdll error by following these steps:
-
-
-
- 1. Reinstall the Skyrim game. The easiest way to fix the simpack.dll error is to reinstall the Skyrim game on your PC. This will ensure that you have all the necessary files and components for the game to run properly. To reinstall the game, you need to uninstall it first from your Control Panel or Settings app, and then install it again from your original source, such as a CD/DVD or a digital download.
-
- 2. Download and restore the simpack.dll file. If reinstalling the game does not work, you can try to download and restore the simpack.dll file manually. You can get the file from a reliable source, such as [DLLme.com](https://www.dllme.com/dll/files/simpack), which offers free DLL downloads for various software applications. To download and restore the simpack.dll file, follow these steps:
-
- - Go to [DLLme.com](https://www.dllme.com/dll/files/simpack) and search for "simpack.dll".
-
- - Click on the "Download" button and save the file to your computer.
-
- - Copy and paste the file to the folder where Skyrim is installed. The default location is C:\Program Files (x86)\Steam\steamapps\common\Skyrim.
-
- - Restart your computer and launch Skyrim.
-
- 3. Scan your PC for malware. Sometimes, the simpack.dll error can be caused by malware infections that may damage or delete the DLL file. To scan your PC for malware, you need to use a reputable antivirus or anti-malware software, such as [Malwarebytes](https://www.malwarebytes.com/), which can detect and remove various types of malware threats from your system. To scan your PC for malware, follow these steps:
-
- - Download and install Malwarebytes from [Malwarebytes.com](https://www.malwarebytes.com/).
-
- - Launch Malwarebytes and click on the "Scan" button.
-
- - Wait for the scan to complete and review the results.
-
- - If any malware is detected, click on the "Quarantine" button to remove them.
-
- - Restart your computer and launch Skyrim.
-
- 4. Update your drivers and Windows. Another possible cause of the simpack.dll error is outdated or incompatible drivers or Windows updates. Drivers are software components that allow your hardware devices to communicate with your operating system. Windows updates are software patches that fix bugs and improve security and performance of your system. To update your drivers and Windows, follow these steps:
-
- - Go to Device Manager by pressing Windows + X keys and selecting Device Manager from the menu.
-
- - Expand each category of devices and look for any yellow exclamation marks or red crosses.
-
- - If you find any, right-click on them and select Update driver.
-
- - Follow the on-screen instructions to install the latest drivers for your devices.
-
- - Go to Settings by pressing Windows + I keys and selecting Settings from the menu.
-
- - Click on Update & Security and then on Windows Update.
-
- - Click on Check for updates and install any available updates for your system.
-
- <145887f19f
-
-
-
-
-
-
-
-
-
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cle De Licence Malwarebytes Anti Malware Gratuit Tlchargez Et Installez Le Logiciel En Quelques Minutes.md DELETED
@@ -1,113 +0,0 @@
- <br />
- <h1>Free Malwarebytes Anti Malware License Key</h1>
- <p>Are you looking for a way to protect your computer from viruses, malware, ransomware, and other online threats? Do you want to enjoy the full features of one of the best anti-malware software in the market without paying a dime? If yes, then you are in the right place. In this article, we will show you how to get a free license key for Malwarebytes Anti Malware, a powerful and reliable cyber security software that can scan and remove malware from your device in seconds. We will also explain what Malwarebytes Anti Malware is, why you need a license key for it, and what features it offers. So, let's get started!</p>
- <h2>Introduction</h2>
- <h3>What is Malwarebytes Anti Malware?</h3>
- <p>Malwarebytes Anti Malware is a cyber security software that protects your device from malware, viruses, ransomware, spyware, adware, trojans, and other online threats. It uses multiple layers of malware-crushing technology, including real-time protection, artificial intelligence, behavior analysis, and web filtering. It can detect and remove threats that other antivirus programs may miss or overlook. It can also work alongside your existing antivirus software to provide an extra layer of security.</p>
- <h2>Free Malwarebytes Anti Malware License Key</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://byltly.com/2uKwA2">https://byltly.com/2uKwA2</a></b></p><br /><br />
- <h3>Why do you need a license key for Malwarebytes Anti Malware?</h3>
- <p>Malwarebytes Anti Malware offers two versions: a free version and a premium version. The free version allows you to scan and remove malware from your device manually. However, it does not offer real-time protection, ransomware protection, web protection, or privacy protection. These features are only available in the premium version, which requires a license key to activate. A license key is a unique code that verifies your purchase and unlocks the full features of Malwarebytes Anti Malware. The premium version costs $39.99 per year for one device or $59.99 per year for three devices.</p>
- <h3>How to get a free license key for Malwarebytes Anti Malware?</h3>
- <p>If you don't want to pay for the premium version of Malwarebytes Anti Malware, but still want to enjoy its full features, there is a way to get a free license key for it. You can use one of the following methods:</p>
- <ul>
- <li>Use a trial version: You can download and install a 14-day trial version of Malwarebytes Anti Malware from its official website <a href="https://www.malwarebytes.com/mwb-download">here</a>. This will give you access to all the premium features for free for two weeks. After that, you can either buy a license key or uninstall the software.</li>
- <li>Use a giveaway: You can look for online giveaways that offer free license keys for Malwarebytes Anti Malware. These are usually sponsored by blogs, websites, or social media pages that promote the software. You may have to follow some instructions or enter some contests to participate in these giveaways. However, be careful not to fall for scams or fake giveaways that may infect your device with malware or steal your personal information.</li>
- <li>Use a crack: You can search for cracked versions of Malwarebytes Anti Malware that come with pre-activated license keys. These are usually uploaded by hackers or pirates who bypass the security measures of the software. You can download these cracked versions from torrent sites or file-sharing platforms. However, this method is illegal and risky. You may violate the terms and conditions of the software and face legal consequences. You may also expose your device to malware or viruses that may be hidden in these cracked versions.</li>
- </ul>
- <p>The best and safest way to get a free license key for Malwarebytes Anti Malware is to use the trial version. This will allow you to test the software and see if it suits your needs before buying it.</p>
- <h2>Features of Malwarebytes Anti Malware</h2>
- <h3>Virus and malware protection</h3>
- <p>Malwarebytes Anti Malware can scan your device for viruses and malware in seconds and remove them with ease. It uses advanced heuristics and artificial intelligence to detect and block threats that other antivirus programs may miss or ignore. It can also perform custom scans, scheduled scans, and hyper scans to suit your preferences and needs.</p>
- <h3>Ransomware protection</h3>
- <p>Malwarebytes Anti Malware can protect your device from ransomware attacks that may encrypt your files and demand money to restore them. It uses behavior-based technology to monitor your system for suspicious activity and stop ransomware before it can cause any damage. It can also recover your files from backup if they are affected by ransomware.</p>
- <p>How to get a free license key for Malwarebytes Anti Malware<br />
- Free Malwarebytes Anti Malware Premium with activation key<br />
- Download Malwarebytes Anti Malware with license key 2023<br />
- Valid and working Malwarebytes Anti Malware license key<br />
- Free lifetime Malwarebytes Anti Malware license key<br />
- Free, virus-free Malwarebytes Anti Malware activation code<br />
- Malwarebytes Anti Malware license key for Windows 10<br />
- Malwarebytes Anti Malware license key for Mac<br />
- Malwarebytes Anti Malware license key for Android<br />
- Malwarebytes Anti Malware license key for iOS<br />
- Malwarebytes Anti Malware license key for Linux<br />
- Malwarebytes Anti Malware license key for Chromebook<br />
- Malwarebytes Anti Malware license key for Firefox<br />
- Malwarebytes Anti Malware license key for Edge<br />
- Malwarebytes Anti Malware license key for Opera<br />
- Malwarebytes Anti Malware license key for Safari<br />
- Malwarebytes Anti Malware license key for Brave<br />
- Malwarebytes Anti Malware license key for Tor Browser<br />
- Malwarebytes Anti Malware license key for VPN<br />
- Malwarebytes Anti Malware license key for Ransomware Protection<br />
- Malwarebytes Anti Malware license key for AdwCleaner<br />
- Malwarebytes Anti Malware license key for Browser Guard<br />
- Malwarebytes Anti Malware license key for Privacy<br />
- Malwarebytes Anti Malware license key for Endpoint Protection<br />
- Malwarebytes Anti Malware license key for Endpoint Detection and Response<br />
- Malwarebytes Anti Malware license key for Incident Response<br />
- Malwarebytes Anti Malware license key for Cloud Platform<br />
- Malwarebytes Anti Malware license key for Nebula Platform<br />
- Malwarebytes Anti Malware license key for OneView Platform<br />
- Malwarebytes Anti Malware license key for MSP Premier Partner Program<br />
- Comparison of the best free anti-malware programs with a license key<br />
- Review and full test of the free anti-malware program with a license key<br />
- Tutorial and user guide for the free anti-malware program with a license key<br />
- Tips and advice to optimize the free anti-malware program with a license key<br />
- FAQ about the free anti-malware program with a license key<br />
- Forum and technical support for the free anti-malware program with a license key<br />
- Blog and news about the free anti-malware program with a license key<br />
- Video and demonstration of the free anti-malware program with a license key<br />
- Download and installation of the free anti-malware program with a license key<br />
- Update and renewal of the free anti-malware program with a license key<br />
- Uninstallation and deactivation of the free anti-malware program with a license key<br />
- Problems and solutions for the free anti-malware program with a license key<br />
- Pros and cons of the free anti-malware program with a license key<br />
- Alternatives and competitors of the free anti-malware program with a license key<br />
- Promotions and discounts on the free anti-malware program with a license key<br />
- Warranty and refund for the free anti-malware program with a license key<br />
- Contact and customer service for the free anti-malware program with a license key<br />
- Customer reviews and testimonials about the free anti-malware program with a license key</p>
- <h3>Web protection</h3>
- <p>Malwarebytes Anti Malware can protect your online browsing from malicious websites, ads, and downloads that may harm your device or compromise your privacy. It uses web filtering technology to block phishing sites, scam sites, fake news sites, and other dangerous sites that may try to steal your personal information or infect your device with malware. It can also prevent unwanted programs from installing on your device without your consent.</p>
- <h3>Privacy protection</h3>
- <p>Malwarebytes Anti Malware can protect your online privacy from hackers, trackers, and spies that may try to access your data or monitor your online activity. It uses VPN technology to encrypt your internet connection and hide your IP address and location from prying eyes. It also offers anti-tracking features that prevent websites from collecting your browsing history, cookies, or other data.</p>
- <h2>How to install and activate Malwarebytes Anti Malware with a free license key</h2>
- <h3>Download and install Malwarebytes Anti Malware</h3>
- <p>To download and install Malwarebytes Anti Malware on your device, follow these steps:</p>
- <ol>
- <li>Go to the official download page at <a href="https://www.malwarebytes.com/mwb-download">Malwarebytes.com</a> and download the setup file of Malwarebytes Anti Malware.</li>
- <li>Run the setup file and follow the instructions to install Malwarebytes Anti Malware on your device. You may have to agree to the terms and conditions and choose a destination folder for the installation.</li>
- <li>Once the installation is complete, Malwarebytes Anti Malware will launch automatically and start scanning your device for threats.</li>
- </ol>
- <h3>Enter the free license key</h3>
- <p>To activate the premium features of Malwarebytes Anti Malware with a free license key, follow these steps:</p>
- <ol>
- <li>Open Malwarebytes Anti Malware and click on the "Settings" icon in the top right corner.</li>
- <li>Click on the "Account" tab and then click on the "Activate License" button.</li>
- <li>Enter the free license key that you obtained from one of the methods mentioned above and click on "Activate License".</li>
- <li>You will see a confirmation message that your license key has been activated and your premium features have been unlocked.</li>
- </ol>
- <h3>Enjoy the full features of Malwarebytes Anti Malware</h3>
- <p>Now that you have activated the premium features of Malwarebytes Anti Malware with a free license key, you can enjoy the full benefits of this powerful and reliable cyber security software. You can scan and remove malware from your device in seconds, protect your device from ransomware attacks, block malicious websites and downloads, and secure your online privacy with VPN and anti-tracking features. You can also customize your settings, manage your devices, and access support and updates from Malwarebytes.</p>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>In this article, we have shown you how to get a free license key for Malwarebytes Anti Malware, a cyber security software that protects your device from malware, viruses, ransomware, spyware, adware, trojans, and other online threats. We have also explained what Malwarebytes Anti Malware is, why you need a license key for it, and what features it offers. We have also provided a step-by-step guide on how to download, install, and activate Malwarebytes Anti Malware with a free license key.</p>
- <h3>Call to action</h3>
- <p>If you want to protect your device from online threats and enjoy the full features of one of the best anti-malware software in the market without paying a dime, don't hesitate to get a free license key for Malwarebytes Anti Malware today. You can use one of the methods we have suggested above or visit <a href="https://www.malwarebytes.com/mwb-download">this link</a> to download and install a 14-day trial version of Malwarebytes Anti Malware. You will be amazed by how fast and effective this software is in scanning and removing malware from your device. Don't wait any longer and get your free license key for Malwarebytes Anti Malware now!</p>
- <h2>Frequently Asked Questions</h2>
- <ul>
- <li><b>Q: Is Malwarebytes Anti Malware safe to use?</b></li>
- <li>A: Yes, Malwarebytes Anti Malware is safe to use. It is a legitimate and reputable cyber security software that has been trusted and loved by millions of users worldwide. It does not contain any malware or viruses itself and does not harm your device or data in any way.</li>
- <li><b>Q: Is Malwarebytes Anti Malware compatible with other antivirus software?</b></li>
- <li>A: Yes, Malwarebytes Anti Malware is compatible with other antivirus software. It can work alongside your existing antivirus software to provide an extra layer of security. However, you may have to adjust some settings or disable some features to avoid conflicts or performance issues.</li>
- <li><b>Q: How long does the free license key for Malwarebytes Anti Malware last?</b></li>
- <li>A: The free license key for Malwarebytes Anti Malware lasts for different periods depending on the method you use to obtain it. If you use the trial version, it lasts for 14 days. If you use a giveaway, it may last for a few months or a year. If you use a crack, it may last indefinitely or until it is detected and blocked by Malwarebytes.</li>
- <li><b>Q: How can I renew or extend my free license key for Malwarebytes Anti Malware?</b></li>
- <li>A: You can renew or extend your free license key for Malwarebytes Anti Malware by using one of the methods we have suggested above. You can either download and install a new trial version, look for a new giveaway, or search for a new crack. However, we recommend that you buy a license key from the official website of Malwarebytes if you want to support the developers and enjoy the premium features without any hassle or risk.</li>
- <li><b>Q: How can I contact Malwarebytes if I have any questions or issues with Malwarebytes Anti Malware?</b></li>
- <li>A: You can contact Malwarebytes if you have any questions or issues with Malwarebytes Anti Malware by visiting their support page <a href="https://support.malwarebytes.com">here</a>. You can find answers to common questions, troubleshooting guides, user manuals, and forums. You can also submit a ticket or chat with a support agent if you need more help.</li>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDIdle Pro v5.9.8.3 (precracked) free download The ultimate tool for DVD playback enhancement.md DELETED
@@ -1,90 +0,0 @@
- <br />
- <h1>DVDIdle Pro v5.9.8.3 (precracked) free download</h1>
- <p>Do you love watching DVD movies on your PC but hate the annoying region codes and copy protections that prevent you from enjoying them fully? Do you want to extend the lifetime of your DVD drive by reducing its wear and tear? If you answered yes to any of these questions, then you need DVDIdle Pro, a powerful software that combines the functionality of DVD Region-Free and DVDIdle into one efficient program.</p>
- <h2>DVDIdle Pro v5.9.8.3 (precracked) free download</h2><br /><p><b><b>Download File</b> &#9734; <a href="https://byltly.com/2uKzKG">https://byltly.com/2uKzKG</a></b></p><br /><br />
- <h2>What is DVDIdle Pro and why do you need it?</h2>
- <p>DVDIdle Pro is a software that helps you to watch and copy any DVD movie on any DVD drive, regardless of the region code or the copy protection. It works in the background to provide a smart read-ahead cache for your DVD player, saving the DVD data that will be played soon onto your hard disk or RAM cache, and feeding it to your DVD player when needed. This way, your DVD drive can take a rest and work less, extending its lifetime by up to 800 percent.</p>
- <h3>DVDIdle Pro features</h3>
- <p>DVDIdle Pro has many features that make it a must-have software for any DVD lover. Some of these features are:</p>
- <ul>
- <li>It supports region-protected (RPC2) DVD drives, and does not require any firmware modifications.</li>
- <li>It works with all software DVD players, such as PowerDVD, WinDVD, etc.</li>
- <li>It works with all DVD copy software, such as DVDFab, DVD X Copy, InterVideo DVD Copy, etc.</li>
- <li>It removes user operation prohibitions (UOPs) from DVDs, allowing you to skip FBI warnings, trailers, etc.</li>
- <li>It removes CSS (Content Scrambling System) encryption from DVDs, allowing you to make backup copies of your DVDs.</li>
- <li>It removes region code enhancement (RCE) from DVDs, allowing you to watch DVDs from any region.</li>
- <li>It removes Sony ARccOS protection from DVDs, allowing you to copy DVDs with bad sectors.</li>
- <li>It removes Macrovision protection from DVDs, allowing you to connect your PC to your TV or projector.</li>
- </ul>
- <h3>DVDIdle Pro benefits</h3>
- <p>DVDIdle Pro has many benefits that make it worth downloading and installing on your PC. Some of these benefits are:</p>
- <ul>
- <li>It saves your money by allowing you to watch and copy any DVD movie without buying a region-free DVD player or a DVD decrypter.</li>
- <li>It saves your time by allowing you to skip unwanted parts of DVDs and access the main menu directly.</li>
- <li>It saves your disk space by allowing you to compress DVDs to fit on a single blank disc or a USB flash drive.</li>
- <li>It saves your battery power by allowing you to watch DVDs on your laptop without spinning the DVD drive constantly.</li>
- <li>It saves your eyesight by allowing you to adjust the brightness and contrast of DVDs according to your preference.</li>
- </ul>
- <h2>How to download DVDIdle Pro v5.9.8.3 (precracked) for free?</h2>
- <p>If you are convinced that DVDIdle Pro is the software that you need, then you might be wondering how to download it for free. Well, it's very easy and simple. Just follow these steps:</p>
- <h3>Step 1: Visit the developer's website</h3>
- <p>The first thing you need to do is visit the developer's website, where you can find more information about DVDIdle Pro and its features. You can also read some reviews and comments from other users who have tried it.</p>
- <h3>Step 2: Click on the download link</h3>
- <p>The next thing you need to do is click on the download link that is provided on the website. This will take you to another page where you can choose between two options: Download Now or Download Mirror. Either option will work fine, so just pick one and click on it.</p>
- <h3>Step 3: Install and run the program</h3>
- <p>The last thing you need to do is install and run the program on your PC. The installation process is very simple and straightforward, just follow the instructions on the screen. The best part is that this version of DVDIdle Pro is precracked, which means that you don't need to enter any serial number or activation code. Just run the program and enjoy its full features without any limitations.</p>
- <h2>How to use DVDIdle Pro v5.9.8.3 (precracked)?</h2>
- <p>Now that you have downloaded and installed DVDIdle Pro on your PC, you might be wondering how to use it effectively. Well, it's very easy and simple as well. Just follow these steps:</p>
- <h3>Step 1: Launch DVDIdle Pro</h3>
- <p>The first thing you need to do is launch DVDIdle Pro from your desktop or start menu. You will see a small icon in your system tray that indicates that the program is running in the background.</p>
- <p>How to get DVDIdle Pro v5.9.8.3 for free<br />
- DVDIdle Pro v5.9.8.3 cracked version download link<br />
- Best software to extend DVD drive lifetime<br />
- DVDIdle Pro v5.9.8.3 features and benefits<br />
- DVDIdle Pro v5.9.8.3 review and comparison<br />
- Where to find DVDIdle Pro v5.9.8.3 precracked<br />
- DVDIdle Pro v5.9.8.3 installation guide and tutorial<br />
- DVDIdle Pro v5.9.8.3 license key generator<br />
- DVDIdle Pro v5.9.8.3 alternative and similar software<br />
- DVDIdle Pro v5.9.8.3 discount and coupon code<br />
- DVDIdle Pro v5.9.8.3 system requirements and compatibility<br />
- DVDIdle Pro v5.9.8.3 customer support and feedback<br />
- DVDIdle Pro v5.9.8.3 update and upgrade<br />
- DVDIdle Pro v5.9.8.3 pros and cons<br />
- DVDIdle Pro v5.9.8.3 testimonials and ratings<br />
- How to uninstall DVDIdle Pro v5.9.8.3<br />
- DVDIdle Pro v5.9.8.3 troubleshooting and error fixing<br />
- How to use DVDIdle Pro v5.9.8.3 with other software<br />
- DVDIdle Pro v5.9.8.3 FAQs and tips<br />
- How to optimize DVD playback with DVDIdle Pro v5.9.8.<br />
- How to backup DVDs with DVDIdle Pro v5.<br />
- How to rip DVDs with DVDIdle Pro v5.<br />
- How to burn DVDs with DVDIdle Pro v5.<br />
- How to copy DVDs with DVDIdle Pro v5.<br />
- How to decrypt DVDs with DVDIdle Pro v5.<br />
- How to compress DVDs with DVDIdle Pro v5.<br />
- How to edit DVDs with DVDIdle Pro v5.<br />
- How to convert DVDs with DVDIdle Pro v5.<br />
- How to stream DVDs with DVDIdle Pro v5.<br />
- How to watch DVDs with DVDIdle Pro v5.<br />
- How to download DVDs with DVDIdle Pro v5.<br />
- How to create DVDs with DVDIdle Pro v5.<br />
- How to enhance DVDs with DVDIdle Pro v5.<br />
- How to repair DVDs with DVDIdle Pro v5.<br />
- How to clean DVDs with DVDIdle Pro v5.<br />
- How to organize DVDs with DVDIdle Pro v5.<br />
- How to protect DVDs with DVDIdle Pro v5.<br />
- How to share DVDs with DVDIdle Pro v5.<br />
- How to recover DVDs with DVDIdle Pro v5.<br />
- How to erase DVDs with DVDIdle Pro v5.<br />
- Is DVDIdle Pro v5 safe and legal?<br />
- Is DVDIdle Pro v5 worth it?<br />
- Is DVDIdle Pro v5 compatible with Windows 10?<br />
- Is DVDIdle Pro v5 the best DVD software?<br />
- Is DVDIdle Pro v5 virus-free?<br />
- Is DVDIdle Pro v5 a scam or legit?<br />
- Is DVDIdle Pro v5 free or paid?<br />
- Is DVDIdle Pro v5 easy or hard to use?<br />
- Is DVDIdle Pro v5 fast or slow?</p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Spiderman Friend Or Foe Iso Pc [REPACK].md DELETED
@@ -1,28 +0,0 @@
- <br />
- ```
- <h1>How to Download Spiderman Friend Or Foe Iso Pc</h1>
- <p>If you are a fan of Spiderman and want to play a game that lets you team up with his friends and foes, you might be interested in Spiderman Friend Or Foe. This is a 2007 action game that was inspired by the Spiderman film trilogy and the classic Spiderman comics. In this game, you can control Spiderman and one of his allies or enemies, such as Doc Ock, Green Goblin, Venom, and Sandman, and fight against a global threat. You can also play co-op mode with a friend who can join the game at any time and take control of your sidekick.</p>
- <h2>Download Spiderman Friend Or Foe Iso Pc</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://byltly.com/2uKyLl">https://byltly.com/2uKyLl</a></b></p><br /><br />
- <p>Spiderman Friend Or Foe is available for Windows PC, but you will need to download an ISO file of the game disc and mount it on your computer. You will also need to install a NoDVD patch to bypass the SafeDisc DRM that does not work on Windows Vista and later. Here are the steps to download and play Spiderman Friend Or Foe Iso Pc:</p>
- <ol>
- <li>Go to one of the websites that offer the ISO file of Spiderman Friend Or Foe, such as <a href="https://www.myabandonware.com/game/spider-man-friend-or-foe-htd">My Abandonware</a>, <a href="https://oldgamesdownload.com/spider-man-friend-or-foe/">Old Games Download</a>, or <a href="https://archive.org/details/spider-man-friend-or-foe-usa">Archive.org</a>.</li>
- <li>Download the ISO file of Spiderman Friend Or Foe. The file size is about 2 GB.</li>
- <li>Download a utility for mounting disc image files, such as WinCDEmu, UltraISO, Alcohol 52%/Alcohol 102%, or Daemon Tools Lite.</li>
- <li>Install the utility and mount the ISO file of Spiderman Friend Or Foe on your computer.</li>
- <li>Run the Setup.exe file from the mounted disc and install the game on your computer.</li>
- <li>Download the NoDVD patch for Spiderman Friend Or Foe from <a href="https://www.myabandonware.com/game/spider-man-friend-or-foe-htd">My Abandonware</a>.</li>
- <li>Extract the Game.exe file from the NoDVD patch and replace the original Game.exe file in the game directory.</li>
- <li>Launch the game and enjoy playing Spiderman Friend Or Foe Iso Pc.</li>
- </ol>
- <p>If you have any problems running the game, you can try changing the compatibility mode and running it as administrator. You can also install DirectX 9 from the disc if needed. You can find more information about the game on <a href="https://wiki.pcsx2.net/Spider-Man:_Friend_or_Foe">PCSX2 Wiki</a> or <a href="https://www.mobygames.com/game/windows/spider-man-friend-or-foe">MobyGames</a>.</p>
- ```
-
- ```
- <p>Spiderman Friend Or Foe is a game that offers a lot of fun and variety for Spiderman fans. You can choose from 13 different characters to play as your sidekick, each with their own unique abilities and combos. You can also switch between them at any time during the game. You can unlock more characters by defeating them in boss battles or by collecting meteor shards that are scattered around the levels.</p>
- <p></p>
- <p>The game has a total of 18 levels that are set in different locations around the world, such as Egypt, Tokyo, Nepal, Transylvania, and New York. Each level has its own enemies, puzzles, and secrets to discover. You can also replay any level you have completed to find more collectibles and improve your score. The game also has a challenge mode where you can test your skills against waves of enemies and bosses.</p>
- <p>The game has a simple and intuitive control scheme that makes it easy to play. You can use the keyboard and mouse or a gamepad to control your character. You can also play co-op mode with a friend on the same PC by using split-screen or LAN connection. The co-op mode allows you to work together and perform team combos to defeat your foes.</p>
- <p>Spiderman Friend Or Foe is a game that will appeal to Spiderman fans of all ages. It has a colorful and comic-like graphics style that matches the tone of the game. It also has a humorous and original story that features voice acting from some of the actors from the Spiderman movies, such as Tobey Maguire, James Franco, and Thomas Haden Church. The game also has a lot of references and easter eggs to the Spiderman comics and movies that fans will appreciate.</p>
- ```</p> cec2833e83<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Alertpay-Paypal Money Hack V4 - [full UPDATED Version].md DELETED
@@ -1,8 +0,0 @@
- <h2>Alertpay-Paypal Money Hack V4 - [Full Version]</h2><br /><p><b><b>Download</b> &mdash; <a href="https://imgfil.com/2uxYjY">https://imgfil.com/2uxYjY</a></b></p><br /><br />
-
- All you have to do is install this application on your device, which is very easy to install. You will be able to easily hack the alertpay account with ease, and without having to make the effort of getting access to any of alertpay. The application is a great success and it has a rating of 4.7 out of 5 on Google Play. It has, before you decide to go with this software, go through a few demos and read how to use the application very clearly. This hack will allow you to receive your money very fast as long as you have alertpay, paypal or any other online payment system. The application is 100% safe and does not carry any virus or malware in it. Your data will be kept secure as long as you do not use an infected device. The data you will be sharing with this hack will be completely private and no other person can access it. You do not have to worry about your credit card information as it is secured and encrypted by default. The application will be very simple to use, with only 3 steps you can use this hack on your device. The application will be sent as a apk file to your device when you have to receive your money on alertpay account. Don’t wait anymore and download the hack to get your money very fast.
-
- PHP &amp; Software Architecture Projects for $250 - $750. Our Price : $7.00 Get the Latest Version for Alertpay/Paypal Money Hack V4 :-Alertpay/Paypal Money Hack V4 is a revolutionary software, . All you have to do is install this application on your device, which is very easy to install. You will be able to easily hack the alertpay account with ease, and without having to make the effort of getting access to any of alertpay. The application is a great success and it has a rating of 4.7 out of 5 on Google Play. It has, before you decide to go with this software, go through a few demos and read how to use the application very clearly. This hack will allow you to receive your money very fast as long as you have alertpay, paypal or any other online payment system. The application is 100% safe and does not carry any virus or malware in it. Your data will be kept secure as long as you do not use an infected device. The data you will be sharing with this hack will be completely private and no other person can access it. You do not have to worry about your 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent.md DELETED
@@ -1,8 +0,0 @@
-
- <p>ipod lorem ipsum dolor. Is not very pretty with black vinyl. ipod iphone voltaren lavage. ipod iphone vaporware. alvin i vjeverice sinkronizirano na hrvatski torrent 3D.SK Where can I sell 3d simulations!. alvin i vjeverice sinkronizirano na hrvatski 2. alvin i vjeverice sinkronizirano na hrvatski 3. alvin i vjeverice sinkronizirano na hrvatski 4. 5. alvin i vjeverice sinkronizirano na hrvatski 6. alvin i vjeverice sinkronizirano na hrvatski 7. alvin i vjeverice sinkronizirano na hrvatski 8. </p>
- <p>alvin i vjeverice 2 sinkronizirano na hr torrent alvin i vjeverice 2 sinkronizirano na hr torrent 3D.SK Human Photo. on 2016-Jan-27 07:02:12. alvin i vjeverice 2 sinkronizirano na hrvatski Najvei popis web lokacija za prijavu u Velikoj. All help you need!. </p>
- <h2>alvin i vjeverice 2 sinkronizirano na hr torrent</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://imgfil.com/2uxZc8">https://imgfil.com/2uxZc8</a></b></p><br /><br />
- <p>alvin i vjeverice 2 sinkronizirano na hr torrent 4. alvin i vjeverice 2 sinkronizirano na hrvatski 5. alvin i vjeverice 2 sinkronizirano na hrvatski 6. alvin i vjeverice 2 sinkronizirano na hrvatski 7. alvin i vjeverice 2 sinkronizirano na hrvatski 8. alvin i vjeverice 2 sinkronizirano na hrvatski 9. alvin i vjeverice 2 sinkronizirano na hrvatski 10. alvin i vjeverice 2 sinkronizirano na hrvatski 11. alvin i vjeverice 2 sinkronizirano na hrvatski 12. alvin i vjeverice 2 sinkronizirano na hrvatski 13. alvin i vjeverice 2 sinkronizirano na hrvatski 14. alvin i vjeverice 2 sinkronizirano na hrvatski 15. alvin i vjeverice 2 sinkronizirano na hrvatski 16. alvin i vjeverice 2 sinkronizirano na hrvatski 17. alvin i vjeverice 2 sinkronizirano na hrvatski 18.</p>
- <p>Sonnets are not pornographic. go!!. A DVD of festival films waiting to receive a regular screening in the cinema where they were shown. In memory..com/simbolizzazione/alvin-i-vjeverice-2-sinkronizirano-na-hr-torrent/.alvin-i-vjeverice-2-sinkronizirano-na-hr-torrent/. by. L. download. 538a28228e, Alvin I Vjeverice 3 Sinkronizirano Na Hr Torrent 2.28.18 Hr. HD. Alvin I. Vjeverice. Good quality movie Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent 3D 1080p. Vjeverice 3 Sinkronizirano Na Hr Torrent 2. Watch Alvin I Vjeverice 3 Sinkronizirano Na Hr Torrent 2.18 Hr HD Movie Online Free Download. Alvin.i.Vjeverice.3.> Alvin.> alvin i vjeverice 2 sinkronizirano na hr torrent </p> 899543212b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Avatar The Last Cockbender Full Version __FULL__.md DELETED
@@ -1,6 +0,0 @@
- <h2>Avatar The Last Cockbender Full Version</h2><br /><p><b><b>Download Zip</b> &#9881;&#9881;&#9881; <a href="https://imgfil.com/2uxX4Z">https://imgfil.com/2uxX4Z</a></b></p><br /><br />
-
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Descarga wifislax 4.3 torrent todo lo que necesitas saber sobre esta versin de Wifislax.md DELETED
@@ -1,32 +0,0 @@
-
- <p>This 3.2 release of wifislax64 has focused on trying to optimize resources so that system consumption is as low as possible, without seriously impacting performance when running in live mode.</p>
- <h2>download wifislax 4.3 torrent</h2><br /><p><b><b>Download File</b> > <a href="https://imgfil.com/2uy0uF">https://imgfil.com/2uy0uF</a></b></p><br /><br />
- <p>If you want the best possible browsing and usage experience on this site, enable and accept our privacy and cookie policies. Otherwise, we recommend leaving the site and looking for the information elsewhere.<br />If you want to register to comment on posts at www.wifislax.com, you must accept the privacy policy and the terms of use.</p>
- <p>A distro for <strong>WiFi network security audits</strong>; one of the best distributions for this is Wifislax. This distribution is based on Slackware64 15 and ships with all the necessary tools preinstalled to carry out different attacks on WiFi networks, regardless of whether they use WEP, WPA or WPA2 encryption; it also includes all the tools to <strong>hack a WiFi network</strong> using the WPS (Wi-Fi Protected Setup) protocol of routers. The latest version, WifiSlax 3.0, is now available with many changes, and you can download it completely free.</p>
- <p>So many Wifislax ISOs have been published, and who knows who modifies them, that many are corrupt or simply don't work well. I think the Wifislax adventure is over and more than abandoned, but it was fun while it lasted. Now, which is better, 4.12 or 3.0, given that neither is updated? Do the download servers still work, or are they also abandoned or sold to the highest bidder? We would have to ask the number one of Wifislax, the one who had the idea. Switch to Kali, install it on your hard drive, and you will forget that Wifislax ever existed. Good night, hackers.</p>
- <p>3.0 doesn't boot; what a waste of time. Now I'm downloading 4.11.1, and if it doesn't boot I will forget Wifislax forever, until it is repaired by professionals, not by people who think they are hackers because they have watched tutorials.</p>
- <p>All versions of Wifislax are created by USUARIONUEVO from the seguridadwireless forum. No idea about their download servers; the elhacker.net ones hosted at ns2.elhacker.net work without ads.</p>
- <p></p>
- <p>Nowadays the use of P2P networks and torrent clients is commonplace in any environment, both business and personal. They are very useful when we download or share files and want to do it reliably and safely, especially with large files. For this, the BitTorrent protocol is used together with a client or program that offers us an interface to operate on these networks and work comfortably and easily with torrent files. In this case, one of the best programs is <strong>qBittorrent</strong>, so below we will show how to install the tool on our computer and how to configure it to download files with it.</p>
- <p>When we set out to search for specific torrent content, many people turn to one of the many websites available for downloading this kind of content. However, these sites are notorious for the amount of malware they contain, access problems, frequent outages, etc. Hence, the best option is to use a client like qBittorrent, which in this case also lets us search for torrents <strong>without having to visit any web page</strong>.</p>
- <p>To start using the client, the first thing to do is <strong>download and install</strong> it on our computer. Fortunately, qBittorrent is free, open-source, cross-platform software that we can download from its official website. Therefore, all we have to do to download the tool is follow the link above and choose the version of the program that matches our machine in terms of operating system and architecture.</p>
- <p>Once this is done, an .exe file will be downloaded to our hard drive, so all we have to do to install it is double-click it. The process is very simple: we only have to choose the installation language and the path where we want to install it, and in just a few moments we will see that the tool is ready to use.</p>
- <p>We launch qBittorrent on our computer and see that it has a very clean interface with almost all the most important options at hand. And we say "almost" because, although the client includes its own <strong>search engine</strong>, it is not enabled by default. Therefore, the first thing to do is enable it.</p>
- <p>The process may take a while, but we will quickly see the pop-up window list a lot of torrent trackers that we can use to search for content. We click <strong>OK</strong> to save the changes and close the window, and everything is ready to start using qBittorrent's search engine.</p>
- <p>From the tool's configuration options we can also find other interesting settings to configure qBittorrent to our liking. To do this, we just click on the gear icon at the top. This opens a window with a left-hand panel showing the menu options.</p>
- <p>Among them, the <strong>Downloads</strong> option stands out, where we can configure, among other things, the folder where we want all files downloaded from qBittorrent to be saved.</p>
- <p>Within the <strong>BitTorrent</strong> option we can configure active downloads and uploads, that is, the maximum number of file downloads and uploads we want active at the same time.</p>
- <p>Ideally, click on the header of the results column, <strong>Seeds</strong>, so that the results are sorted in descending order by seeds and we get better download speeds. In the list itself we can also see the file size and the search engine where it was found.</p>
- <p>Once we have chosen the file to download, we double-click it, which opens a new window where we can indicate the <strong>folder where we want the downloaded file to be saved</strong>. We click OK and the download process starts automatically.</p>
- <p>At that very moment, in the <strong>Transfers</strong> tab, we can already see information about the file's download: progress percentage, seeds, download status, peers, download and upload speed, approximate time remaining, etc.</p>
- <p>Besides searching for and downloading files from qBittorrent's own search engine, the tool offers us the possibility of <strong>using torrent files</strong> and <strong>magnet links</strong>. The process is actually similar, but in this case the first thing to do is search a torrent file site for the file we want to download.</p>
- <p>Once found, and always making sure it is a trustworthy site and file, we download the .torrent file to our computer. Next, we should associate opening files of this type with qBittorrent, since that way all we have to do to start the download is double-click the .torrent file.</p>
- <p>If not, or if we don't want to associate opening this file type with the client, then we will have to add the file manually from the program itself. To do this, we click on the <strong>File</strong> menu option and then select <strong>Add torrent file</strong>. We select the file we just downloaded, accept the message shown to add the new torrent, and the download process starts automatically.</p>
- <p>And if what we want is to download files through a magnet link, qBittorrent also gives us that option. All we have to do is go to <strong>File > Add torrent link</strong> and paste the magnet link into the text box of the window that opens next. Finally, we click Download and the download process begins automatically.</p>
- <p>Even if the application works correctly, the truth is that at some point we may find that files do not download or do so very slowly. In that case, there are several things to review to try to find the cause of the problem and the solution.</p>
- <p>One cause of downloads not happening is that the Windows <strong>firewall</strong>, or that of any other security tool, is blocking downloads through qBittorrent. We can therefore try temporarily disabling the security tool and check whether downloads then proceed normally.</p>
- <p>Another aspect to review is our <strong>router's ports</strong>, to verify that all those needed for qBittorrent's connections are open and correctly forwarded to the client. It is also important to check that our router's <strong>UPnP</strong> protocol is correctly enabled, since it will help us resolve certain connection problems.</p>
- <p>It is also advisable to scan our computer for any kind of <strong>virus and malware</strong>, to prevent any type of threat from using the network for other purposes or simply consuming our machine's resources and keeping them from going to downloads through qBittorrent.</p>
- <p>Last but not least, we must make sure that the files we are trying to download have <strong>enough seeds</strong> so that the download is as fast and smooth as possible. Even if you have found a torrent that looks perfect to you, if it does not have enough seeds there will be no way to download it at a decent speed. So do not get frustrated; look for an alternative and you will surely find one in less time than you think. Old files, or ones that are not popular, are usually harder to download, but keep trying and you will end up finding a solution.</p> aaccfb2cb3<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Free Netflix Download Premium 9.2 Code File [WORK].md DELETED
@@ -1,9 +0,0 @@
- <p><strong>Free Netflix Download Premium 9.2</strong> for PC is one of the most popular VPN apps available. With this app, you can access your favorite content on PCs, laptops, Chromebooks, and Macs that support Windows, Fire OS, Chrome, and Android. You can also use this app to protect your PC from computer viruses, hackers, DDoS (distributed denial of service) attacks, and more.</p>
- <h2>Free Netflix Download Premium 9.2 Code File</h2><br /><p><b><b>Download Zip</b> &#10027;&#10027;&#10027; <a href="https://imgfil.com/2uxWY4">https://imgfil.com/2uxWY4</a></b></p><br /><br />
- <p><strong>Free Netflix Download Premium 9.2</strong> has a free version that lets you access videos and shows. The premium version, however, allows you to stream in up to 4K quality at a faster speed than the free version. With this app, you'll get the most popular series, including The Walking Dead, House of Cards, and more. There are so many great series available, so if you're a fan, this is definitely the one for you.</p>
- <p><strong>Free Netflix Download Premium 9.2</strong> comes with a Hotspot Shield Pro license. If you're an Android user, you should also be a Hotspot Shield Premium user. It has paid apps available for both Apple and Android devices. It's super easy to download and use, and it is a strong VPN application for every operating system. The free version is aimed at users who want to bypass national censorship.</p>
- <p>Using the app, users can access their favourite programming all in one location, meaning that there is no more need to search for the right program on the web. If a user prefers to keep up with the news, this is one of the best places to do so with Netflix. Netflix can be the perfect companion for your screen. The streaming giant has introduced many recent features, including 4K support and the option to watch live TV shows and episodes. To satisfy the need for streaming content, <strong>Free Netflix Download Premium</strong> comes with many quality features. Aside from streaming, Netflix has a vast library of programming. You can even see all of your favourite shows as soon as they come out, allowing you to watch them whenever you want.</p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/12 Locks II A Puzzle Game with 3 Different Rooms and 12 Locks Each - APK Download.md DELETED
@@ -1,120 +0,0 @@
- <h1>12 Locks II: A Fun and Challenging Puzzle Game for Android</h1>
- <p>If you are looking for a puzzle game that will test your logic, creativity and patience, you might want to try 12 Locks II. This is a sequel to the popular 12 Locks game by RUD Present, a developer that specializes in creating unique and colorful games with plasticine graphics. In this article, we will tell you everything you need to know about 12 Locks II, including what it is, how to play it, why you should download it and how to download it.</p>
- <h2>What is 12 Locks II?</h2>
- <p>12 Locks II is a puzzle game that challenges you to find all the keys to unlock 12 doors in different rooms. Each room has its own theme and style, such as a kitchen, a bathroom, a spaceship, a pirate ship and more. You will have to explore the rooms, interact with various objects, solve mini-puzzles and collect clues to find the keys.</p>
- <h2>12 locks 2 apk</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://urlin.us/2uT0Vn">https://urlin.us/2uT0Vn</a></b></p><br /><br />
- <h3>The premise of the game</h3>
- <p>The game follows the adventures of a plasticine man who has a bad habit of locking all doors with 12 locks. As a result, he finds himself in awkward situations all the time. For example, he might get stuck in a toilet, a fridge or a washing machine. Your task is to help him escape from these predicaments by unlocking the doors.</p>
- <h3>The features of the game</h3>
- <p>Some of the features that make 12 Locks II stand out are:</p>
- <ul>
- <li>The game has colorful and detailed graphics made with plasticine. The rooms are full of funny and quirky details that add to the charm and humor of the game.</li>
- <li>The game has cheerful and catchy music that matches the mood of each room. The sound effects are also realistic and amusing.</li>
- <li>The game has simple and intuitive controls. You just need to tap on the screen to interact with objects and drag items to use them.</li>
- <li>The game has challenging and varied puzzles that require logic, creativity and attention to detail. Some puzzles are easy and straightforward, while others are more complex and tricky. You will have to use your brain and your imagination to solve them.</li>
- <li>The game has no time limit or penalties. You can play at your own pace and enjoy the process of finding solutions. You can also use hints if you get stuck.</li>
- </ul>
- <h2>How to play 12 Locks II?</h2>
- <p>Playing 12 Locks II is easy and fun. Here are some tips on how to play it:</p>
- <h3>The controls of the game</h3>
- <p>To play 12 Locks II, you just need to tap on the screen to interact with objects and drag items to use them. You can also zoom in or out by pinching the screen. To move between rooms, you can swipe left or right on the screen. To access the inventory or the menu, you can tap on the icons at the bottom of the screen.</p>
- <h3>The tips and tricks of the game</h3>
- <p>To solve the puzzles in 12 Locks II, you need to pay attention to everything in the rooms. Here are some tips and tricks that might help you:</p>
- <ul>
- <li>Look for clues and hints in the environment. For example, you might find codes, symbols, colors, shapes, patterns, directions, numbers or words that can help you unlock the locks.</li>
- <li>Use logic and common sense to figure out the connections between the clues and the locks. For example, you might have to match colors, shapes, numbers or words to the corresponding locks.</li>
- <li>Use trial and error to test your hypotheses. For example, you might have to try different combinations of codes, symbols or directions to find the right one.</li>
- <li>Use creativity and imagination to think outside the box. For example, you might have to use unconventional methods or items to solve some puzzles.</li>
- <li>Don't be afraid to experiment and explore. For example, you might have to tap on everything, move objects around, combine items or use items in unexpected ways.</li>
- </ul>
- <h2>Why should you download 12 Locks II?</h2>
- <p>12 Locks II is a game that will keep you entertained and challenged for hours. Here are some reasons why you should download it:</p>
- <h3>The benefits of playing 12 Locks II</h3>
- <p>Playing 12 Locks II can have many benefits for your brain and your mood. Some of them are:</p>
- <ul>
- <li>It can improve your cognitive skills, such as memory, attention, concentration, logic, problem-solving and creativity.</li>
- <li>It can stimulate your curiosity and imagination, as you discover new rooms and puzzles.</li>
- <li>It can provide you with a sense of achievement and satisfaction, as you unlock the doors and progress in the game.</li>
- <li>It can reduce your stress and anxiety, as you focus on the game and forget about your worries.</li>
- <li>It can make you laugh and smile, as you enjoy the humor and fun of the game.</li>
- </ul>
- <h3>The reviews and ratings of the game</h3>
- <p>12 Locks II is a game that has received positive reviews and ratings from players and critics alike. Some of them are:</p>
- <table>
- <tr><th>Name</th><th>Rating</th><th>Review</th></tr>
- <tr><td>Google Play Store</td><td>4.5/5 stars</td><td>"This game is awesome! It's challenging but not frustrating. The graphics are cute and funny. The music is catchy. I love it!"</td></tr>
- <tr><td>App Store</td><td>4.7/5 stars</td><td>"This game is amazing! It's so creative and original. The puzzles are clever and fun. The rooms are colorful and detailed. I recommend it!"</td></tr>
- <tr><td>New Scientist</td><td>8/10 points</td><td>"This game is a delight! It's a perfect blend of logic, creativity and humor. The plasticine graphics are charming and quirky. The puzzles are varied and engaging."</td></tr>
- <tr><td>The Sun</td><td>9/10 points</td><td>"This game is a blast! It's a great way to kill time and exercise your brain. The rooms are full of surprises and jokes. The puzzles are challenging but fair."</td></tr>
- </table>
- <h2>How to download 12 Locks II?</h2>
- <p>If you are interested in playing 12 Locks II, you can download it easily from your preferred app store. Here are some steps on how to do it:</p>
- <h3>The requirements of the game</h3>
- <p>To play 12 Locks II, you need to have an Android or iOS device that meets the following requirements:</p>
- <ul>
- <li>Android: version 4.4 or higher; 40 MB of free space; internet connection (optional)</li>
- <li>iOS: version 10.0 or higher; 64 MB of free space; internet connection (optional)</li>
- </ul>
- <h3>The steps to download the game</h3>
- <p>To download 12 Locks II from your app store, you need to follow these steps:</p>
- <ol>
- <li>Open your app store (Google Play Store or App Store) on your device.</li>
- <li>Type "12 Locks II" in the search bar and tap on the game icon.</li>
- <li>Tap on the "Install" or "Get" button and wait for the download to finish.</li>
- <li>Tap on the "Open" or "Play" button and enjoy the game!</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In conclusion, 12 Locks II is a fun and challenging puzzle game that will test your logic, creativity and patience. You will have to find all the keys to unlock 12 doors in different rooms with different themes and styles. You will have to explore the rooms, interact with objects, solve mini-puzzles and collect clues to find the keys. The game has colorful and detailed graphics made with plasticine, cheerful and catchy music, simple and intuitive controls, challenging and varied puzzles, no time limit or penalties, and hints if you get stuck. Playing 12 Locks II can improve your cognitive skills, stimulate your curiosity and imagination, provide you with a sense of achievement and satisfaction, reduce your stress and anxiety, and make you laugh and smile. You can download 12 Locks II from your preferred app store by following some simple steps. If you are looking for a puzzle game that will keep you entertained and challenged for hours, you should give 12 Locks II a try!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about 12 Locks II:</p>
- <ul>
- <li><b>Q: How many levels are there in 12 Locks II?</b></li>
- <li>A: There are 12 levels in 12 Locks II, each with a different room and theme.</li>
- <li><b>Q: How long does it take to finish the game?</b></li>
- <li>A: It depends on your skill and speed, but it can take anywhere from a few hours to a few days to finish the game.</li>
- <li><b>Q: Is the game suitable for children?</b></li>
- <li>A: Yes, the game is suitable for children of all ages. The game has no violence, gore or inappropriate content. The game is also educational and fun.</li>
- <li><b>Q: Is the game free to play?</b></li>
- <li>A: Yes, the game is free to play. However, the game contains ads that can be removed by purchasing the premium version of the game.</li>
- <li><b>Q: Can I play the game offline?</b></li>
- <li>A: Yes, you can play the game offline. However, you will need an internet connection to download the game and access some features such as hints or updates.</li>
- </ul>
spaces/1phancelerku/anime-remove-background/Cricket League Full Mod APK Everything You Need to Know.md DELETED
@@ -1,99 +0,0 @@
- <h1>Cricket League Full Mod APK Download: A Guide for Cricket Fans</h1>
- <p>If you are a fan of cricket, you might have heard of Cricket League, a popular mobile game developed by Miniclip. Cricket League is a realistic and immersive cricket simulation game that lets you play as your favorite teams and players in various modes and tournaments. You can customize your team, choose your batting and bowling style, and compete with other players online.</p>
- <p>However, if you want to enjoy the game to the fullest, you might want to download the mod apk version of Cricket League. A mod apk is a modified version of an original app that gives you access to features that are not available in the official version. In this article, we will tell you everything you need to know about Cricket League Full Mod APK, including its features, how to download and install it, and its pros and cons.</p>
- <h2>cricket league full mod apk download</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://jinyurl.com/2uNLVH">https://jinyurl.com/2uNLVH</a></b></p><br /><br />
- <h2>Features of Cricket League Full Mod APK</h2>
- <p>Cricket League Full Mod APK is a hacked version of Cricket League that gives you unlimited coins and gems, which are the in-game currencies. You can use these coins and gems to unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences.</p>
- <p>Another feature of Cricket League Full Mod APK is that it removes all the ads that might interrupt your gameplay. You can enjoy the game without any distractions or interruptions. Moreover, Cricket League Full Mod APK allows you to play all the modes that are available in the game, such as Quick Match, World Cup, T20 Blast, Super Over, and more. You can also play online with other players who have the mod apk version.</p>
- <p>Cricket League Full Mod APK is also easy to install and does not require root access or any other permissions. You just need to download the mod apk file from a trusted source and follow some simple steps to install it on your device.</p>
- <h2>How to Download and Install Cricket League Full Mod APK</h2>
- <p>If you want to download and install Cricket League Full Mod APK on your device, here are the steps you need to follow:</p>
- <ol>
- <li>Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.</li>
- <li>Download the mod apk file from a trusted source. You can search for "Cricket League Full Mod APK" on Google or use this link to download it directly. A checksum comparison, shown in the sketch after these steps, is one way to confirm the file you received is the one the source published.</li>
- <li>Locate and install the mod apk file. Once you have downloaded the file, go to your file manager and find the file. Tap on it and follow the instructions to install it on your device.</li>
- <li>Launch the game and enjoy. After installing the mod apk file, you can launch the game from your app drawer or home screen. You will see that you have unlimited coins and gems and all the features unlocked in the game.</li>
- </ol>
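- <p>As a concrete example of the "trusted source" advice in step 2: when a site publishes a checksum for its APK, you can verify the downloaded file before installing it. This is a minimal Python sketch of our own, not part of the original guide; the file name and expected hash are placeholders.</p>
- <pre><code>
- import hashlib
- 
- def sha256_of(path, chunk_size=1024 * 1024):
-     """Compute the SHA-256 hex digest of a file, reading it in chunks."""
-     digest = hashlib.sha256()
-     with open(path, "rb") as f:
-         for chunk in iter(lambda: f.read(chunk_size), b""):
-             digest.update(chunk)
-     return digest.hexdigest()
- 
- # Placeholders: use your real download path and the hash published by the source.
- expected = "0000000000000000000000000000000000000000000000000000000000000000"
- actual = sha256_of("cricket_league_mod.apk")
- print("OK" if actual == expected else "Hash mismatch, do not install")
- </code></pre>
- <p>If the digests do not match, the file was corrupted or tampered with in transit and should be deleted rather than installed.</p>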
- <h2>Pros and Cons of Cricket League Full Mod APK</h2>
- <p>Cricket League Full Mod APK has many advantages, but it also has some drawbacks. Here are some of them:</p>
- <h3>Pros</h3>
- <ul>
- <li>More fun. With unlimited coins and gems, you can unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences. You can also play online with other players who have the mod apk version. This makes the game more fun and exciting.</li>
- <li>More options. With Cricket League Full Mod APK, you can play all the modes that are available in the game, such as Quick Match, World Cup, T20 Blast, Super Over, and more. You can also choose from different teams and players from around the world. You can also switch between batting and bowling at any time.</li>
- <li>More customization. With Cricket League Full Mod APK, you can customize your team, choose your batting and bowling style, and adjust the difficulty level of the game. You can also change the camera angle, the pitch condition, and the weather in the game. You can also create your own tournaments and leagues with your own rules and settings.</li>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li>Risk of malware. Downloading and installing mod apk files from unknown sources can expose your device to malware and viruses that can harm your device or steal your personal information. You should always scan the mod apk file before installing it and use a reliable antivirus app on your device.</li>
- <li>Ban from official servers. Using mod apk files can violate the terms and conditions of the original app and result in a ban from the official servers. You might not be able to play online with other players who have the official version of the game or access the updates and features that are released by the developers.</li>
- <li>Compatibility issues. Mod apk files might not be compatible with all devices or versions of Android. You might experience crashes, glitches, or errors while playing the game or installing the mod apk file. You should always check the compatibility of the mod apk file before downloading and installing it.</li>
- </ul>
- <h2>Conclusion and FAQs</h2>
- <p>Cricket League is a great game for cricket fans who want to experience the thrill and excitement of playing cricket on their mobile devices. However, if you want to unlock all the features and enjoy the game to the fullest, you might want to download Cricket League Full Mod APK, which gives you unlimited coins and gems, all players and modes unlocked, no ads, and easy installation.</p>
- <p>At the same time, you should be aware of the risks and drawbacks of using mod apk files, such as malware, bans from official servers, and compatibility issues. You should always download mod apk files from trusted sources and scan them before installing them. You should also back up your data before using mod apk files and uninstall them if you encounter any problems.</p>
- <p>We hope this article has helped you understand everything you need to know about Cricket League Full Mod APK. If you have any questions or feedback, please feel free to leave a comment below. Here are some FAQs that might answer some of your queries:</p>
- <h3>FAQs</h3>
- <ol>
- <li>What is Cricket League?</li>
- <p>Cricket League is a realistic and immersive cricket simulation game developed by Miniclip. It lets you play as your favorite teams and players in various modes and tournaments. You can customize your team, choose your batting and bowling style, and compete with other players online.</p>
- <li>What is Cricket League Full Mod APK?</li>
- <p>Cricket League Full Mod APK is a hacked version of Cricket League that gives you unlimited coins and gems, which are the in-game currencies. You can use these coins and gems to unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences.</p>
- <li>How to download Cricket League Full Mod APK?</li>
- <p>To download Cricket League Full Mod APK, you need to enable unknown sources on your device, download the mod apk file from a trusted source, locate and install the mod apk file on your device, and launch the game.</p>
- <li>What are the pros and cons of Cricket League Full Mod APK?</li>
- <p>The pros of Cricket League Full Mod APK are more fun, more options, and more customization. The cons of Cricket League Full Mod APK are the risk of malware, a ban from official servers, and compatibility issues.</p>
- <li>Is Cricket League Full Mod APK safe to use?</li>
- <p>Cricket League Full Mod APK is not completely safe to use, as it can expose your device to malware and viruses that can harm your device or steal your personal information. It can also violate the terms and conditions of the original app and result in a ban from the official servers. It can also cause crashes, glitches, or errors on your device. You should always scan the mod apk file before installing it and use a reliable antivirus app on your device. You should also back up your data before using mod apk files and uninstall them if you encounter any problems.</p>
- </ol>
spaces/1phancelerku/anime-remove-background/Download Airtel Payment Bank App and Enjoy Online Banking Services.md DELETED
@@ -1,120 +0,0 @@
- <h1>How to Download Airtel Payment Bank</h1>
- <p>If you are looking for a convenient and secure way to manage your money, pay bills, shop online, and more, then you should consider downloading Airtel Payment Bank. Airtel Payment Bank is a digital banking platform that offers you a range of benefits and features that make your life easier. In this article, we will show you how to download the Airtel Payment Bank app and online debit card, and how to use them for various transactions. Let's get started!</p>
- <h2>download airtel payment bank</h2><br /><p><b><b>Download Zip</b> &raquo; <a href="https://jinyurl.com/2uNTu7">https://jinyurl.com/2uNTu7</a></b></p><br /><br />
- <h2>What is Airtel Payment Bank?</h2>
- <p>Airtel Payment Bank is a payment bank that operates under the license of the Reserve Bank of India (RBI). It is a subsidiary of Bharti Airtel, one of the leading telecom operators in India. Airtel Payment Bank allows you to open a savings account with up to Rs. 1 lakh balance, and get an interest rate of 4% per annum. You can also get a free online debit card that you can use for online payments, shopping, and ATM withdrawals. Some of the benefits of Airtel Payment Bank are:</p>
- <ul>
- <li>You can access your account anytime, anywhere through the Airtel Thanks app or the website.</li>
- <li>You can get cashback and discounts on various transactions such as recharges, bill payments, movie tickets, etc.</li>
- <li>You can transfer money to any bank account or mobile number using UPI, IMPS, or NEFT.</li>
- <li>You can withdraw cash from any of the over 5 lakh banking points across India.</li>
- <li>You can enjoy zero balance and zero maintenance charges on your account.</li>
- </ul>
- <h2>How to Download Airtel Payment Bank App?</h2>
- <p>To download the Airtel Payment Bank app, you need to follow these simple steps:</p>
- <ol>
- <li>Visit the official website of Airtel Payment Bank, or go to the app store of your device (Google Play Store or Apple App Store) and search for "Airtel Thanks".</li>
- <li>Enter your mobile number and click on "Get OTP". You will receive a one-time password (OTP) on your phone.</li>
- <li>Enter the OTP and click on "Verify". You will be redirected to the app download page.</li>
- <li>Click on "Install" and wait for the app to download and install on your device.</li>
- <li>Open the app and create your account by entering your personal details, Aadhaar number, PAN number, etc. You will also need to set a four-digit PIN for your account.</li>
- </ol>
- <h2>How to Download Airtel Payment Bank Online Debit Card?</h2>
- <p>To download the Airtel Payment Bank online debit card, you need to follow these steps:</p>
- <ol>
- <li>Log in to your Airtel Payment Bank account using the app or the website.</li>
- <li>Go to the online debit card section and click on "Generate Card". You will see your card details such as card number, expiry date, and CVV.</li>
- <li>Click on "Download Card" and save the PDF file on your device. You can also print the card if you want.</li>
- <li>You can use your online debit card for any online transactions that accept Visa cards. You can also link your card to any payment app such as Google Pay, PhonePe, Paytm, etc.</li>
- </ol>
- <h2>How to Use Airtel Payment Bank for Various Transactions?</h2>
- <p>Airtel Payment Bank offers you a variety of services and transactions that you can use with ease and convenience. Here are some of the common transactions that you can do with Airtel Payment Bank:</p>
- <h3>Recharges and Bill Payments</h3>
- <p>You can recharge your mobile, DTH, or broadband service using Airtel Payment Bank. You can also pay your electricity, water, gas, or postpaid bills using the app or the website. You can get cashback and discounts on some of these transactions. To recharge or pay bills, you need to:</p>
- <ol>
- <li>Log in to your Airtel Payment Bank account and select the service that you want to recharge or pay.</li>
- <li>Enter the amount and the details of the service provider.</li>
- <li>Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.</li>
- <li>You will receive a confirmation message and a receipt on your phone and email.</li>
- </ol>
- <h3>Shopping and Online Payments</h3>
- <p>You can shop online from various websites and apps that accept Airtel Payment Bank as a payment option. You can also make online payments for various services such as food delivery, cab booking, movie tickets, etc. using Airtel Payment Bank. You can get cashback and discounts on some of these transactions. To shop or pay online, you need to:</p>
- <ol>
- <li>Select Airtel Payment Bank as your payment option on the website or app that you are using.</li>
- <li>Enter your mobile number and OTP to verify your identity.</li>
- <li>Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.</li>
- <li>You will receive a confirmation message and a receipt on your phone and email.</li>
- </ol>
- <h3>Money Transfer and Cash Withdrawal</h3>
- <p>You can transfer money to any bank account or mobile number using Airtel Payment Bank. You can also withdraw cash from any of the over 5 lakh banking points across India using your mobile number and PIN. You can get cashback and discounts on some of these transactions. To transfer money or withdraw cash, you need to follow these steps (a short UPI link sketch follows the list):</p>
- <ol>
- <li>Log in to your Airtel Payment Bank account and select the option of money transfer or cash withdrawal.</li>
- <li>Enter the amount and the details of the recipient (bank account number, IFSC code, mobile number, etc.) or the banking point (name, location, etc.).</li>
- <li>Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.</li>
- <li>You will receive a confirmation message and a receipt on your phone and email.</li>
- </ol>
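- <p>For the UPI payment method mentioned in step 3, it can be useful to see what a UPI payment request looks like underneath. The sketch below is purely illustrative and is not an official Airtel example: it builds a standard UPI deep link (the upi://pay scheme with pa, pn, am, cu, and tn parameters), and the payee address and amount are made-up placeholders.</p>
- <pre><code>
- from urllib.parse import urlencode
- 
- def upi_payment_link(vpa, name, amount_inr, note=""):
-     """Build a upi://pay deep link that UPI apps can open to prefill a payment."""
-     params = {"pa": vpa, "pn": name, "am": f"{amount_inr:.2f}", "cu": "INR", "tn": note}
-     return "upi://pay?" + urlencode({k: v for k, v in params.items() if v})
- 
- # Placeholder payee address and amount, for illustration only.
- print(upi_payment_link("example@upi", "Test Merchant", 150.0, "Demo payment"))
- </code></pre>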
- <h2>Conclusion</h2>
- <p>Airtel Payment Bank is a great way to manage your money digitally and enjoy various benefits and features. It is easy to download the Airtel Payment Bank app and online debit card, and use them for various transactions. You can also save money by getting cashback and discounts on some of these transactions. So what are you waiting for? Download Airtel Payment Bank today and experience the convenience of digital banking!</p>
- <h2>FAQs</h2>
- <h4>Q1: What are the charges for using Airtel Payment Bank?</h4>
- <p>A1: There are no charges for opening an account, maintaining a zero balance, or getting an online debit card with Airtel Payment Bank. However, there may be some charges for certain transactions such as money transfer, cash withdrawal, ATM usage, etc., depending on the amount and frequency of the transaction. You can check the latest charges on the website or app of Airtel Payment Bank.</p>
- <h4>Q2: How can I check my balance and transaction history?</h4>
- <p>A2: You can check your balance and transaction history by logging in to your Airtel Payment Bank account using the app or the website. You can also dial *400# from your registered mobile number and follow the instructions to check your balance.</p>
- <h4>Q3: How can I contact customer care for any queries or issues?</h4>
- <p>A3: You can contact customer care for any queries or issues by calling 400 from your registered mobile number or calling 8800688006 from any other number. You can also email your query or issue to [email protected]. Alternatively, you can visit the nearest banking point and get assistance from the staff.</p>
- <h4>Q4: Is Airtel Payment Bank safe and secure?</h4>
- <p>A4: Yes, Airtel Payment Bank is safe and secure. It uses advanced encryption and security protocols to protect your data and transactions. It also complies with the RBI guidelines and regulations for payment banks. You can also safeguard your account by keeping your PIN confidential and changing it regularly.</p>
- <h4>Q5: What are the eligibility criteria for opening an Airtel Payment Bank account?</h4>
- <p>A5: To open an Airtel Payment Bank account, you need to be an Indian citizen above 18 years of age. You also need to have a valid Aadhaar number and PAN number. You can open only one account per mobile number with Airtel Payment Bank.</p>
spaces/1phancelerku/anime-remove-background/Download Nubank Fake APK for Android 2023 Explore the Features of the Famous App.md DELETED
@@ -1,138 +0,0 @@
- <h1>Nubank Fake APK Download 2023: What You Need to Know</h1>
- <p>If you are looking for a way to manage your money with freedom, convenience, and security, you might have heard of Nubank, the largest digital bank in Latin America. But before you download the Nubank app, you need to be aware of the risks of fake apps that can harm your device and steal your information. In this article, we will explain what Nubank is, why it is popular, what a fake APK is, how to spot and avoid it, and how to download and install the genuine Nubank app safely.</p>
- <h2>What is Nubank and why is it popular?</h2>
- <p>Nubank is a digital bank that helps customers in Brazil, Mexico, and Colombia manage their money with freedom. It is just one digital banking option in the Latin American market, offering a variety of services such as credit card applications, loans, life insurance, and business accounts. Nubank wants to improve the banking experience across Latin America by using proprietary technology to create streamlined, uncomplicated, and completely digital banking options for its customers.</p>
- <h2>nubank fake apk download 2023</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://jinyurl.com/2uNQ5l">https://jinyurl.com/2uNQ5l</a></b></p><br /><br />
- <h3>Nubank app features and benefits</h3>
- <p>The benefits of using a Nubank personal or business account include:</p>
- <ul>
- <li>No annuity or maintenance fees</li>
- <li>Free and unlimited transfers to any bank</li>
- <li>Real-time tracking of transactions</li>
- <li>Easy contact with customer support</li>
- <li>A transparent, simple, and fair experience</li>
- </ul>
- <p>The Nubank app also allows users to stay on top of payments, save money, and track spending from anywhere. Users can block their credit card, apply for a limit raise, or access rewards programs from their mobile device.</p>
- <h3>Nubank app availability and requirements</h3>
- <p>Nubank currently only operates in Brazil, Colombia, and Mexico. To use the Nubank app, you need to have a compatible device with Android 4.4 or higher or iOS 10 or higher. You also need to have an internet connection to access the app's features. To open an account with Nubank, you need to provide some personal information, such as your name, email address, phone number, date of birth, and tax identification number.</p>
- <h2>What is a fake APK and why is it dangerous?</h2>
- <p>An APK (Android Package Kit) is a file format that contains all the elements needed to install an app on an Android device. A fake APK is an app that imitates a legitimate one but instead carries out malicious activities. These activities include monitoring your activity, installing malware, showing annoying ads, or stealing your personal information.</p>
- <h3>How fake apps work and what they can do</h3>
- <p>Fake apps can be distributed in various ways. They can be hosted on third-party app stores or fake app stores. Cybercriminals can even use official app stores to distribute fake apps, despite the security measures in place. A cybercriminal can register as a developer on any app store, download a legitimate app, and rewrite it using malicious code. Then, they can upload their fake app to the app store.</p>
- <p>Once you download a fake app on your device, it can perform various actions without your consent or knowledge. For example, it can:</p>
- <ul>
- <li>Send premium SMS messages or make calls to charge you money</li>
- <li>Access your contacts, photos, messages, or other data</li>
- <li>Download more malware or adware on your device</li>
- <li>Redirect you to phishing websites or fake login pages</li>
- <li>Use your device as part of a botnet to launch cyberattacks</li>
- </ul>
- <p>These actions can compromise your device's performance, security, and privacy. You can lose money, data, or even your identity if you fall victim to a fake app.</p>
- <h3>How to spot and avoid fake apps</h3>
- <p>To protect yourself from fake apps, you need to be vigilant and careful when downloading apps. Here are some tips to help you spot and avoid fake apps (a small APK-inspection sketch follows the list):</p>
- <ul>
- <li>Check the app's name, developer, description, and reviews. Look for spelling errors, grammar mistakes, low ratings, or negative feedback.</li>
- <li>Compare the app with the official website of the service or company. Look for inconsistencies or discrepancies in the logo, design, or features.</li>
- <li>Avoid downloading apps from third-party app stores or unknown sources. Use only trusted and verified app stores, such as Google Play Store or Apple App Store.</li>
- <li>Check the app's permissions and settings. Avoid apps that ask for unnecessary or excessive permissions, such as access to your camera, microphone, location, or contacts.</li>
- <li>Use a reputable antivirus or security app on your device. Scan your device regularly and remove any suspicious or unwanted apps.</li>
- </ul>
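- <p>The permissions-and-contents check above can partly be done before installing anything: an APK is just a ZIP archive, so it can be opened and inspected on a computer. The Python sketch below is our illustration (the file name is a placeholder); a legitimately signed app should at least contain AndroidManifest.xml and signature files under META-INF.</p>
- <pre><code>
- import zipfile
- 
- def inspect_apk(path):
-     """List an APK's entries and flag obviously missing pieces."""
-     with zipfile.ZipFile(path) as apk:
-         names = apk.namelist()
-         print(len(names), "entries in the archive")
-         print("manifest present:", "AndroidManifest.xml" in names)
-         print("signature files:", [n for n in names if n.startswith("META-INF/")][:5])
- 
- inspect_apk("suspect_app.apk")  # placeholder path
- </code></pre>
- <p>This only shows the package's structure; it does not prove the app is safe, so treat it as one signal alongside the store-level checks above.</p>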
- <h2>How to download and install the genuine Nubank app safely</h2>
- <p>If you want to enjoy the benefits of Nubank without risking your device or data, you need to download and install the genuine Nubank app safely. Here are the steps to do so:</p>
- <h3>How to find and verify the official Nubank app</h3>
- <p>The official Nubank app is available on Google Play Store for Android devices and Apple App Store for iOS devices. To find and verify the official Nubank app, you can:</p>
- <ul>
- <li>Search for "Nubank" on the app store. Make sure the app's name is spelled correctly and matches the logo of Nubank.</li>
- <li>Check the app's developer name. The official Nubank app is developed by "Nu Pagamentos S.A." for Android devices and "Nubank" for iOS devices.</li>
- <li>Check the app's rating, reviews, and downloads. The official Nubank app has a high rating (4.5 stars or above), positive reviews, and millions of downloads.</li>
- <li>Check the app's description and screenshots. The official Nubank app has a clear and detailed description of its features and benefits, as well as screenshots that show its interface and functionality.</li>
- </ul>
- <h3>How to install and set up the Nubank app on your device</h3>
- <p>Once you have found and verified the official Nubank app, you can install it on your device by following these steps:</p>
- <ol>
- <li>Tap on the "Install" button on the app store. Wait for the app to download and install on your device.</li>
- <li>Open the app and tap on "Create account". Enter your personal information, such as your name, email address, phone number, date of birth, and tax identification number.</li>
- <li>Verify your identity by taking a selfie and uploading a photo of your ID document.</li>
- <li>Wait for Nubank to approve your account. This may take a few minutes or hours depending on their verification process.</li>
- <li>Once your account is approved, you can access the app's features and services. You can also request a physical credit card that will be delivered to your address.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Nubank is a digital bank that offers a convenient, secure, and transparent way to manage your money with freedom. However, you need to be careful of fake apps that can imitate Nubank and harm your device or data. To avoid fake apps, you need to check the app's name, developer, description, reviews, permissions, and settings before downloading it. You also need to use only trusted and verified app stores to download apps. To download and install the genuine Nubank app safely, you need to find and verify the official Nubank app on Google Play Store or Apple App Store, then follow the steps to install and set up the app on your device.</p>
- <p>We hope this article has helped you understand what Nubank is, why it is popular, what a fake APK is, how to spot and avoid it, and how to download and install the genuine Nubank app safely. If you have any questions or feedback, please feel free to contact us. We would love to hear from you!</p>
- <p><b>Disclaimer:</b> This article is for informational purposes only and does not constitute financial or legal advice. Please consult a professional before making any decisions regarding your money or data.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Nubank and fake apps:</p>
- <ol>
- <li><b>Is Nubank safe and reliable?</b></li>
- <p>Yes, Nubank is safe and reliable. Nubank is regulated by the Central Bank of Brazil, the National Monetary Council, and the Securities and Exchange Commission of Brazil. Nubank also uses advanced encryption and security protocols to protect your data and transactions. Nubank has over 40 million customers and has won several awards for its innovation and customer satisfaction.</p>
- <li><b>How can I contact Nubank customer support?</b></li>
- <p>You can contact Nubank customer support through the app, phone, email, or chat. You can also visit their website or social media pages for more information. Nubank customer support is available 24/7 and speaks Portuguese, Spanish, and English.</p>
- <li><b>What are the advantages of using a digital bank over a traditional bank?</b></li>
- <p>Some of the advantages of using a digital bank over a traditional bank are:</p>
- <ul>
- <li>You can access your account and services anytime, anywhere, from your mobile device.</li>
- <li>You can save money on fees, commissions, and interest rates.</li>
- <li>You can enjoy more flexibility, convenience, and transparency in your banking experience.</li>
- <li>You can benefit from innovative features, such as rewards programs, cashback, or personal finance tools.</li>
- </ul>
- <li><b>How can I update the Nubank app?</b></li>
- <p>You can update the Nubank app by following these steps:</p>
- <ol>
- <li>Open the app store on your device.</li>
- <li>Search for "Nubank" and tap on the app.</li>
- <li>If there is an update available, tap on the "Update" button.</li>
- <li>Wait for the app to download and install the update.</li>
- <li>Open the app and enjoy the new features and improvements.</li>
- </ol>
- <li><b>How can I uninstall the Nubank app?</b></li>
- <p>You can uninstall the Nubank app by following these steps:</p>
- <ol>
- <li>Open the settings on your device.</li>
- <li>Tap on "Apps" or "Applications".</li>
- <li>Find and tap on "Nubank".</li>
- <li>Tap on "Uninstall" or "Delete".</li>
- <li>Confirm your action and wait for the app to be removed from your device.</li>
- </ol>
- </ol>
spaces/1phancelerku/anime-remove-background/Download Onmyoji Arena APK for Android - Play Offline Strategy Game.md DELETED
@@ -1,132 +0,0 @@
- <h1>Onmyoji Arena APK Offline: How to Play the Game Without Internet Connection</h1>
- <p>Do you love playing Onmyoji Arena, the popular mobile MOBA game based on Japanese folklore and mythology? Do you wish you could play it anytime and anywhere, even without an internet connection? If so, you are in luck. In this article, we will show you how to download and install Onmyoji Arena APK offline, and how to play the game without internet connection. Read on to find out more.</p>
- <h2>onmyoji arena apk offline</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://jinyurl.com/2uNPAh">https://jinyurl.com/2uNPAh</a></b></p><br /><br />
- <h2>What is Onmyoji Arena?</h2>
- <p>Onmyoji Arena is a mobile game developed by NetEase Games, based on the hit RPG game Onmyoji. It is a 5v5 MOBA game that features stunning 3D graphics, elegant Japanese aesthetics, and a stellar voice cast. The game has over 70 characters, called shikigami, that you can choose from, each with their own unique skills and abilities. You can team up with your friends or other players online, and compete in various modes, such as ranked matches, casual matches, or special events. You can also customize your shikigami with different skins, accessories, and emotes.</p>
- <h2>Why would you want to play Onmyoji Arena offline?</h2>
- <h3>The benefits of playing the game without internet connection</h3>
- <p>Playing Onmyoji Arena offline has some advantages over playing it online. For instance:</p>
- <ul>
- <li>You can play the game anytime and anywhere, without worrying about your data usage or wifi availability.</li>
- <li>You can avoid lag, disconnects, or other network issues that might affect your gameplay or performance.</li>
- <li>You can practice your skills and strategies with different shikigami, without affecting your rank or reputation.</li>
- <li>You can enjoy the game at your own pace, without pressure or competition from other players.</li>
- </ul>
- <h3>The drawbacks of playing the game without internet connection</h3>
- <p>However, playing Onmyoji Arena offline also has some disadvantages over playing it online. For example:</p>
- <ul>
- <li>You will not be able to access some features or modes that require internet connection, such as ranked matches, casual matches, or special events.</li>
- <li>You will not be able to update your game or download new content that might be released by the developers.</li>
- <li>You will not be able to interact with other players or join a guild.</li>
- <li>You will not be able to earn rewards or achievements that are based on online activities.</li>
- </ul>
- <h2>How to download and install Onmyoji Arena APK offline?</h2>
- <h3>The steps to get the game on your Android device</h3>
- <p>If you want to play Onmyoji Arena offline, you will need to download and install the APK file of the game on your Android device. Here are the steps to do so (a small device-check sketch follows these steps):</p>
- <ol>
- <li>Go to a trusted website that offers Onmyoji Arena APK offline download, such as APKCombo or the Google Play Store.</li>
- <li>Choose the latest version of the game and click on the download button.</li>
- <li>Wait for the download to finish and locate the APK file on your device.</li>
- <li>Tap on the APK file and follow the instructions to install the game on your device.</li>
- <li>Launch the game and enjoy playing it offline.</li>
- </ol>
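- <p>Before side-loading, it can also help to confirm that the device meets the game's Android version requirement. The optional sketch below is our addition, not part of the original steps: it uses Python to call adb, the standard Android debugging tool, and assumes adb is installed and USB debugging is enabled on the phone. The API level threshold is an example value, not Onmyoji Arena's confirmed requirement.</p>
- <pre><code>
- import subprocess
- 
- def android_sdk_level():
-     """Ask the connected device for its API level via adb."""
-     out = subprocess.run(
-         ["adb", "shell", "getprop", "ro.build.version.sdk"],
-         capture_output=True, text=True, check=True,
-     )
-     return int(out.stdout.strip())
- 
- # Example threshold: API level 19 corresponds to Android 4.4.
- print("OK" if android_sdk_level() >= 19 else "Device Android version may be too old")
- </code></pre>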
83
- <h <h3>The precautions to take before installing the game</h3>
84
- <p>Before you install Onmyoji Arena APK offline on your device, you should take some precautions to ensure your safety and security. Here are some tips to follow:</p>
85
- <ul>
86
- <li>Make sure you have enough storage space on your device to install the game.</li>
87
- <li>Make sure you have a backup of your data and settings in case something goes wrong during the installation.</li>
88
- <li>Make sure you download the APK file from a reliable and reputable source, and scan it for viruses or malware before installing it.</li>
89
- <li>Make sure you enable the option to install apps from unknown sources on your device settings, and disable it after the installation is done.</li>
90
- <li>Make sure you agree to the terms and conditions of the game before installing it.</li>
91
- </ul>
92
- <h2>How to play Onmyoji Arena offline?</h2>
93
- <h3>The modes and options available in the offline mode</h3>
94
- <p>Once you have installed Onmyoji Arena APK offline on your device, you can play the game without internet connection. However, you will only be able to access some modes and options in the offline mode. Here are some of them:</p>
95
- <ul>
96
- <li>You can play the tutorial mode, where you can learn the basics of the game and practice with different shikigami.</li>
97
- <li>You can play the practice mode, where you can choose any shikigami and any map, and play against AI opponents or bots.</li>
98
- <li>You can play the custom mode, where you can create your own match settings, such as the number of players, the difficulty level, and the map.</li>
99
- <li>You can play the story mode, where you can follow the plot of the game and unlock new shikigami and skins.</li>
100
- <li>You can access the shikigami gallery, where you can view the details and stats of each shikigami, as well as their skins, accessories, and emotes.</li>
101
- </ul>
102
- <h3>The tips and tricks to enjoy the game offline</h3>
103
- <p>Playing Onmyoji Arena offline can be fun and rewarding, if you know how to make the most of it. Here are some tips and tricks to enjoy the game offline:</p>
104
- <ul>
105
- <li>Try different shikigami and find out which ones suit your playstyle and preferences.</li>
106
- <li>Experiment with different builds and items for each shikigami, and see how they affect their performance.</li>
107
- <li>Learn the strengths and weaknesses of each shikigami, and how to counter them effectively.</li>
108
- <li>Master the mechanics and strategies of each map, such as the objectives, the lanes, the jungle, and the bosses.</li>
109
- <li>Challenge yourself by increasing the difficulty level or changing the match settings in the custom mode.</li>
110
- </ul>
111
- <h2>Conclusion</h2>
112
- <p>Onmyoji Arena is a great game that you can play online or offline. To play it offline, download and install Onmyoji Arena APK offline on your Android device. In offline mode you can access the tutorial, practice, custom, and story modes, and you can keep the game fresh by trying different shikigami, builds, items, maps, and settings. However, you will miss everything that requires an internet connection: ranked matches, casual matches, and special events; game updates and new content from the developers; interacting with other players or joining a guild; and rewards or achievements based on online activities. Playing Onmyoji Arena offline therefore has its pros and cons, and you should decide which mode suits you better. If you are interested, follow the steps and tips we have provided in this article. We hope you have fun playing Onmyoji Arena offline!</p>
113
- <h2>FAQs</h2>
114
- <h3>Q: Is Onmyoji Arena APK offline safe to download and install?</h3>
115
- <p>A: Yes, as long as you download it from a trusted website that offers Onmyoji Arena APK offline download, such as [APKCombo] or [Google Play Store]. You should also scan it for viruses or malware before installing it on your device.</p>
116
- <h3>Q: Can I play Onmyoji Arena offline on iOS devices?</h3>
117
- <p>A: No, Onmyoji Arena APK offline is only compatible with Android devices. If you want to play Onmyoji Arena on iOS devices, you will need an internet connection.</p>
118
- <h3>Q: Can I switch between online and offline mode in Onmyoji Arena?</h3>
119
- <p>A: Yes, you can switch between online and offline mode in Onmyoji Arena, as long as you have an internet connection. You can do so by tapping on the settings icon on the top right corner of the main screen, and then choosing the online or offline option. However, you should note that some of your data or progress might not be synced or saved when you switch modes.</p>
120
- <h3>Q: What are the best shikigami to play offline in Onmyoji Arena?</h3>
121
- <p>A: The best shikigami to play offline in Onmyoji Arena depends on your personal preference and playstyle. However, some of the shikigami that are generally considered to be good for offline mode are:</p>
122
- <ul>
123
- <li>Yoto Hime: A powerful samurai who can deal massive damage and execute enemies with her ultimate skill.</li>
124
- <li>Yamakaze: A swift ninja who can dash and blink around the map, and assassinate enemies with his stealth and burst.</li>
125
- <li>Shuten Doji: A tanky ogre who can absorb damage and heal himself, and stun enemies with his drunken rage.</li>
126
- <li>Hana: A graceful healer who can support her allies and herself with her healing and shielding skills.</li>
127
- <li>Ootengu: A versatile mage who can cast spells from a distance, and unleash a devastating storm with his ultimate skill.</li>
128
- </ul>
129
- <h3>Q: How can I get more skins, accessories, and emotes for my shikigami in offline mode?</h3>
130
- <p>A: Unfortunately, you cannot get more skins, accessories, or emotes for your shikigami in offline mode. You will need to play online mode to earn rewards or purchase items that can unlock more customization options for your shikigami.</p>
131
- <br />
132
- <br />
spaces/20four60/Auto-GPT/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Zenml Server
3
- emoji: 🧘
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- app_port: 8080
9
- license: wtfpl
10
- duplicated_from: zenml/zenml
11
- ---
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/verification.py DELETED
@@ -1,407 +0,0 @@
1
- """Helper for evaluation on the Labeled Faces in the Wild dataset
2
- """
3
-
4
- # MIT License
5
- #
6
- # Copyright (c) 2016 David Sandberg
7
- #
8
- # Permission is hereby granted, free of charge, to any person obtaining a copy
9
- # of this software and associated documentation files (the "Software"), to deal
10
- # in the Software without restriction, including without limitation the rights
11
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
- # copies of the Software, and to permit persons to whom the Software is
13
- # furnished to do so, subject to the following conditions:
14
- #
15
- # The above copyright notice and this permission notice shall be included in all
16
- # copies or substantial portions of the Software.
17
- #
18
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
- # SOFTWARE.
25
-
26
-
27
- import datetime
28
- import os
29
- import pickle
30
-
31
- import mxnet as mx
32
- import numpy as np
33
- import sklearn
34
- import torch
35
- from mxnet import ndarray as nd
36
- from scipy import interpolate
37
- from sklearn.decomposition import PCA
38
- from sklearn.model_selection import KFold
39
-
40
-
41
- class LFold:
42
- def __init__(self, n_splits=2, shuffle=False):
43
- self.n_splits = n_splits
44
- if self.n_splits > 1:
45
- self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
46
-
47
- def split(self, indices):
48
- if self.n_splits > 1:
49
- return self.k_fold.split(indices)
50
- else:
51
- return [(indices, indices)]
52
-
53
-
54
- def calculate_roc(thresholds,
55
- embeddings1,
56
- embeddings2,
57
- actual_issame,
58
- nrof_folds=10,
59
- pca=0):
60
- assert (embeddings1.shape[0] == embeddings2.shape[0])
61
- assert (embeddings1.shape[1] == embeddings2.shape[1])
62
- nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
63
- nrof_thresholds = len(thresholds)
64
- k_fold = LFold(n_splits=nrof_folds, shuffle=False)
65
-
66
- tprs = np.zeros((nrof_folds, nrof_thresholds))
67
- fprs = np.zeros((nrof_folds, nrof_thresholds))
68
- accuracy = np.zeros((nrof_folds))
69
- indices = np.arange(nrof_pairs)
70
-
71
- if pca == 0:
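-         # squared Euclidean distance between the paired embeddings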
72
- diff = np.subtract(embeddings1, embeddings2)
73
- dist = np.sum(np.square(diff), 1)
74
-
75
- for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
76
- if pca > 0:
77
- print('doing pca on', fold_idx)
78
- embed1_train = embeddings1[train_set]
79
- embed2_train = embeddings2[train_set]
80
- _embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
81
- pca_model = PCA(n_components=pca)
82
- pca_model.fit(_embed_train)
83
- embed1 = pca_model.transform(embeddings1)
84
- embed2 = pca_model.transform(embeddings2)
85
- embed1 = sklearn.preprocessing.normalize(embed1)
86
- embed2 = sklearn.preprocessing.normalize(embed2)
87
- diff = np.subtract(embed1, embed2)
88
- dist = np.sum(np.square(diff), 1)
89
-
90
- # Find the best threshold for the fold
91
- acc_train = np.zeros((nrof_thresholds))
92
- for threshold_idx, threshold in enumerate(thresholds):
93
- _, _, acc_train[threshold_idx] = calculate_accuracy(
94
- threshold, dist[train_set], actual_issame[train_set])
95
- best_threshold_index = np.argmax(acc_train)
96
- for threshold_idx, threshold in enumerate(thresholds):
97
- tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
98
- threshold, dist[test_set],
99
- actual_issame[test_set])
100
- _, _, accuracy[fold_idx] = calculate_accuracy(
101
- thresholds[best_threshold_index], dist[test_set],
102
- actual_issame[test_set])
103
-
104
- tpr = np.mean(tprs, 0)
105
- fpr = np.mean(fprs, 0)
106
- return tpr, fpr, accuracy
107
-
108
-
109
- def calculate_accuracy(threshold, dist, actual_issame):
110
- predict_issame = np.less(dist, threshold)
111
- tp = np.sum(np.logical_and(predict_issame, actual_issame))
112
- fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
113
- tn = np.sum(
114
- np.logical_and(np.logical_not(predict_issame),
115
- np.logical_not(actual_issame)))
116
- fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
117
-
118
- tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
119
- fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
120
- acc = float(tp + tn) / dist.size
121
- return tpr, fpr, acc
122
-
123
-
124
- def calculate_val(thresholds,
125
- embeddings1,
126
- embeddings2,
127
- actual_issame,
128
- far_target,
129
- nrof_folds=10):
130
- assert (embeddings1.shape[0] == embeddings2.shape[0])
131
- assert (embeddings1.shape[1] == embeddings2.shape[1])
132
- nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
133
- nrof_thresholds = len(thresholds)
134
- k_fold = LFold(n_splits=nrof_folds, shuffle=False)
135
-
136
- val = np.zeros(nrof_folds)
137
- far = np.zeros(nrof_folds)
138
-
139
- diff = np.subtract(embeddings1, embeddings2)
140
- dist = np.sum(np.square(diff), 1)
141
- indices = np.arange(nrof_pairs)
142
-
143
- for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
144
-
145
- # Find the threshold that gives FAR = far_target
146
- far_train = np.zeros(nrof_thresholds)
147
- for threshold_idx, threshold in enumerate(thresholds):
148
- _, far_train[threshold_idx] = calculate_val_far(
149
- threshold, dist[train_set], actual_issame[train_set])
150
- if np.max(far_train) >= far_target:
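-             # FAR is non-decreasing in the threshold, so the relation can be
-             # inverted to interpolate the threshold at the target FAR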
151
- f = interpolate.interp1d(far_train, thresholds, kind='slinear')
152
- threshold = f(far_target)
153
- else:
154
- threshold = 0.0
155
-
156
- val[fold_idx], far[fold_idx] = calculate_val_far(
157
- threshold, dist[test_set], actual_issame[test_set])
158
-
159
- val_mean = np.mean(val)
160
- far_mean = np.mean(far)
161
- val_std = np.std(val)
162
- return val_mean, val_std, far_mean
163
-
164
-
165
- def calculate_val_far(threshold, dist, actual_issame):
166
- predict_issame = np.less(dist, threshold)
167
- true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
168
- false_accept = np.sum(
169
- np.logical_and(predict_issame, np.logical_not(actual_issame)))
170
- n_same = np.sum(actual_issame)
171
- n_diff = np.sum(np.logical_not(actual_issame))
172
- # print(true_accept, false_accept)
173
- # print(n_same, n_diff)
174
- val = float(true_accept) / float(n_same)
175
- far = float(false_accept) / float(n_diff)
176
- return val, far
177
-
178
-
179
- def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
180
- # Calculate evaluation metrics
181
- thresholds = np.arange(0, 4, 0.01)
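-     # pairs are stored interleaved: even indices hold the first image of
-     # each pair, odd indices hold the second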
182
- embeddings1 = embeddings[0::2]
183
- embeddings2 = embeddings[1::2]
184
- tpr, fpr, accuracy = calculate_roc(thresholds,
185
- embeddings1,
186
- embeddings2,
187
- np.asarray(actual_issame),
188
- nrof_folds=nrof_folds,
189
- pca=pca)
190
- thresholds = np.arange(0, 4, 0.001)
191
- val, val_std, far = calculate_val(thresholds,
192
- embeddings1,
193
- embeddings2,
194
- np.asarray(actual_issame),
195
- 1e-3,
196
- nrof_folds=nrof_folds)
197
- return tpr, fpr, accuracy, val, val_std, far
198
-
199
- @torch.no_grad()
200
- def load_bin(path, image_size):
201
- try:
202
- with open(path, 'rb') as f:
203
- bins, issame_list = pickle.load(f) # py2
204
- except UnicodeDecodeError as e:
205
- with open(path, 'rb') as f:
206
- bins, issame_list = pickle.load(f, encoding='bytes') # py3
207
- data_list = []
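-     # keep two tensors: index 0 holds the original images, index 1 their
-     # horizontally flipped copies, used for flip-augmented evaluation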
208
- for flip in [0, 1]:
209
- data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
210
- data_list.append(data)
211
- for idx in range(len(issame_list) * 2):
212
- _bin = bins[idx]
213
- img = mx.image.imdecode(_bin)
214
- if img.shape[1] != image_size[0]:
215
- img = mx.image.resize_short(img, image_size[0])
216
- img = nd.transpose(img, axes=(2, 0, 1))
217
- for flip in [0, 1]:
218
- if flip == 1:
219
- img = mx.ndarray.flip(data=img, axis=2)
220
- data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
221
- if idx % 1000 == 0:
222
- print('loading bin', idx)
223
- print(data_list[0].shape)
224
- return data_list, issame_list
225
-
226
- @torch.no_grad()
227
- def test(data_set, backbone, batch_size, nfolds=10):
228
- print('testing verification..')
229
- data_list = data_set[0]
230
- issame_list = data_set[1]
231
- embeddings_list = []
232
- time_consumed = 0.0
233
- for i in range(len(data_list)):
234
- data = data_list[i]
235
- embeddings = None
236
- ba = 0
237
- while ba < data.shape[0]:
238
- bb = min(ba + batch_size, data.shape[0])
239
- count = bb - ba
240
- _data = data[bb - batch_size: bb]
241
- time0 = datetime.datetime.now()
242
- img = ((_data / 255) - 0.5) / 0.5
243
- net_out: torch.Tensor = backbone(img)
244
- _embeddings = net_out.detach().cpu().numpy()
245
- time_now = datetime.datetime.now()
246
- diff = time_now - time0
247
- time_consumed += diff.total_seconds()
248
- if embeddings is None:
249
- embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
250
- embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
251
- ba = bb
252
- embeddings_list.append(embeddings)
253
-
254
- _xnorm = 0.0
255
- _xnorm_cnt = 0
256
- for embed in embeddings_list:
257
- for i in range(embed.shape[0]):
258
- _em = embed[i]
259
- _norm = np.linalg.norm(_em)
260
- _xnorm += _norm
261
- _xnorm_cnt += 1
262
- _xnorm /= _xnorm_cnt
263
-
264
- acc1 = 0.0
265
- std1 = 0.0
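-     # fuse the embeddings of the original and flipped images by summation
-     # before L2-normalization (reported as "Accuracy-Flip" in the CLI below)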
266
- embeddings = embeddings_list[0] + embeddings_list[1]
267
- embeddings = sklearn.preprocessing.normalize(embeddings)
268
- print(embeddings.shape)
269
- print('infer time', time_consumed)
270
- _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)
271
- acc2, std2 = np.mean(accuracy), np.std(accuracy)
272
- return acc1, std1, acc2, std2, _xnorm, embeddings_list
273
-
274
-
275
- def dumpR(data_set,
276
- backbone,
277
- batch_size,
278
- name='',
279
- data_extra=None,
280
- label_shape=None):
281
- print('dump verification embedding..')
282
- data_list = data_set[0]
283
- issame_list = data_set[1]
284
- embeddings_list = []
285
- time_consumed = 0.0
286
- for i in range(len(data_list)):
287
- data = data_list[i]
288
- embeddings = None
289
- ba = 0
290
- while ba < data.shape[0]:
291
- bb = min(ba + batch_size, data.shape[0])
292
- count = bb - ba
293
-
294
-             _data = data[bb - batch_size: bb]
295
-             time0 = datetime.datetime.now()
296
-             # NOTE: the original mxnet-style block here referenced the
297
-             # undefined names `_label`, `_data_extra` and `model`; run the
298
-             # torch `backbone` directly instead, mirroring `test` above
299
-             # (`data_extra` and `label_shape` are kept only for signature
300
-             # compatibility and are unused)
301
-             img = ((_data / 255) - 0.5) / 0.5
302
-             net_out: torch.Tensor = backbone(img)
303
-             _embeddings = net_out.detach().cpu().numpy()
304
- time_now = datetime.datetime.now()
305
- diff = time_now - time0
306
- time_consumed += diff.total_seconds()
307
- if embeddings is None:
308
- embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
309
- embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
310
- ba = bb
311
- embeddings_list.append(embeddings)
312
- embeddings = embeddings_list[0] + embeddings_list[1]
313
- embeddings = sklearn.preprocessing.normalize(embeddings)
314
- actual_issame = np.asarray(issame_list)
315
- outname = os.path.join('temp.bin')
316
- with open(outname, 'wb') as f:
317
- pickle.dump((embeddings, issame_list),
318
- f,
319
- protocol=pickle.HIGHEST_PROTOCOL)
320
-
321
-
322
- # if __name__ == '__main__':
323
- #
324
- # parser = argparse.ArgumentParser(description='do verification')
325
- # # general
326
- # parser.add_argument('--data-dir', default='', help='')
327
- # parser.add_argument('--model',
328
- # default='../model/softmax,50',
329
- # help='path to load model.')
330
- # parser.add_argument('--target',
331
- # default='lfw,cfp_ff,cfp_fp,agedb_30',
332
- # help='test targets.')
333
- # parser.add_argument('--gpu', default=0, type=int, help='gpu id')
334
- # parser.add_argument('--batch-size', default=32, type=int, help='')
335
- # parser.add_argument('--max', default='', type=str, help='')
336
- # parser.add_argument('--mode', default=0, type=int, help='')
337
- # parser.add_argument('--nfolds', default=10, type=int, help='')
338
- # args = parser.parse_args()
339
- # image_size = [112, 112]
340
- # print('image_size', image_size)
341
- # ctx = mx.gpu(args.gpu)
342
- # nets = []
343
- # vec = args.model.split(',')
344
- # prefix = args.model.split(',')[0]
345
- # epochs = []
346
- # if len(vec) == 1:
347
- # pdir = os.path.dirname(prefix)
348
- # for fname in os.listdir(pdir):
349
- # if not fname.endswith('.params'):
350
- # continue
351
- # _file = os.path.join(pdir, fname)
352
- # if _file.startswith(prefix):
353
- # epoch = int(fname.split('.')[0].split('-')[1])
354
- # epochs.append(epoch)
355
- # epochs = sorted(epochs, reverse=True)
356
- # if len(args.max) > 0:
357
- # _max = [int(x) for x in args.max.split(',')]
358
- # assert len(_max) == 2
359
- # if len(epochs) > _max[1]:
360
- # epochs = epochs[_max[0]:_max[1]]
361
- #
362
- # else:
363
- # epochs = [int(x) for x in vec[1].split('|')]
364
- # print('model number', len(epochs))
365
- # time0 = datetime.datetime.now()
366
- # for epoch in epochs:
367
- # print('loading', prefix, epoch)
368
- # sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
369
- # # arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
370
- # all_layers = sym.get_internals()
371
- # sym = all_layers['fc1_output']
372
- # model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
373
- # # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
374
- # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0],
375
- # image_size[1]))])
376
- # model.set_params(arg_params, aux_params)
377
- # nets.append(model)
378
- # time_now = datetime.datetime.now()
379
- # diff = time_now - time0
380
- # print('model loading time', diff.total_seconds())
381
- #
382
- # ver_list = []
383
- # ver_name_list = []
384
- # for name in args.target.split(','):
385
- # path = os.path.join(args.data_dir, name + ".bin")
386
- # if os.path.exists(path):
387
- # print('loading.. ', name)
388
- # data_set = load_bin(path, image_size)
389
- # ver_list.append(data_set)
390
- # ver_name_list.append(name)
391
- #
392
- # if args.mode == 0:
393
- # for i in range(len(ver_list)):
394
- # results = []
395
- # for model in nets:
396
- # acc1, std1, acc2, std2, xnorm, embeddings_list = test(
397
- # ver_list[i], model, args.batch_size, args.nfolds)
398
- # print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))
399
- # print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))
400
- # print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))
401
- # results.append(acc2)
402
- # print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))
403
- # elif args.mode == 1:
404
- # raise ValueError
405
- # else:
406
- # model = nets[0]
407
- # dumpR(ver_list[0], model, args.batch_size, args.target)
spaces/AI-Hobbyist/Hoyo-RVC/Changelog_KO.md DELETED
@@ -1,91 +0,0 @@
1
- ### 2023-06-18 Update
2
-
3
- - Added new 32k and 48k pretrained models for the v2 version.
4
- - Fixed inference errors in non-f0 models.
5
- - For training sets longer than 1 hour, use minibatch-kmeans when building the index, which greatly speeds up training.
6
- - vocal2guitar is now provided on [huggingface](https://huggingface.co/spaces/lj1995/vocal2guitar).
7
- - Automatically remove outliers during data processing.
8
- - Added an ONNX export options tab.
9
-
10
- Things that were tried but not included in this update:
11
-
12
- - Feature retrieval with an added temporal dimension: no meaningful effect.
13
- - Feature retrieval with added PCA dimensionality reduction: no meaningful effect.
14
- - Supporting ONNX inference failed, because nsf generation requires PyTorch.
15
- - Randomly augmenting the input's pitch, gender, EQ, and noise during training: no meaningful effect.
16
-
17
- Future update list:
18
-
19
- - Integrate Vocos-RVC (a small vocoder).
20
- - Support Crepe for pitch recognition during training.
21
- - Support synchronizing Crepe's precision with the REC-config.
22
- - Support an F0 editor.
23
-
24
- ### 2023-05-28 Update
25
-
26
- - Added a v2 jupyter notebook and a Korean changelog, and fixed some dependency modules.
27
- - Added voiceless consonant and breath protection mode.
28
- - Support crepe-full pitch detection.
29
- - UVR5 vocal separation: support de-reverb and de-echo models.
30
- - Added the experiment name and version to the index name.
31
- - When batch-converting voices and separating vocals with UVR5, users can manually choose the export format of the output audio.
32
- - Ended support for training 32k models.
33
-
34
- ### 2023-05-13 Update
35
-
36
- - Removed the redundant code (infer_pack and uvr5_pack) from the runtime of the old one-click package.
37
- - Fixed a pseudo-multiprocessing bug in training-set preprocessing.
38
- - Added median-filter radius adjustment for the Harvest pitch recognition algorithm.
39
- - Support post-processing resampling when exporting audio.
40
- - The multiprocessing "n_cpu" setting for training now covers "data preprocessing and f0 extraction" instead of just "f0 extraction".
41
- - Automatically detect index paths under the logs folder and provide them as a dropdown list.
42
- - Added a "Frequently Asked Questions" tab. (see the github RVC wiki)
43
- - During inference, cache the Harvest pitch when the same input audio path is used.
44
- (Note: with Harvest pitch extraction, the whole pipeline goes through a long, repetitive pitch-extraction process. Without caching, changing the timbre, index, or pitch median-filter radius after the first inference means an extremely long wait!)
45
-
46
- ### 2023-05-14 Update
47
-
48
- - Use the input's volume envelope to mix with or replace the output's volume envelope. (This can minimize problems with silent inputs or noisy outputs. If the input audio has loud background noise, it is better not to use this feature. It is disabled by default; a value of 1 means disabled.)
49
- - Support saving extracted small models at a specified frequency. (This is very useful if you want to see performance at different epochs but don't want to save every large checkpoint and manually extract a small model via ckpt processing each time.)
50
- - Resolved the "connection error" problem caused by the server's global proxy by setting environment variables.
51
- - Support pretrained v2 models. (Currently only the 40k version is publicly available for testing; the other two sample rates have not been fully trained yet and are on hold.)
52
- - Limit excessive volume exceeding 1 before inference.
53
- - Fine-tuned the data preprocessing parameters.
54
-
55
- ### 2023-04-09 Update
56
-
57
- - Modified training parameters to improve GPU utilization: A100 from 25% to about 90%, V100 from 50% to about 90%, 2060S from 60% to about 85%, P40 from 25% to about 95%.
58
- Training speed improved significantly.
59
- - Changed a parameter's meaning: total batch_size now means batch_size per GPU.
60
- - Changed total_epoch: the maximum limit increased from 100 to 1000, and the default increased from 10 to 20.
61
- - Fixed an issue where ckpt extraction misrecognized the pitch, causing abnormal inference.
62
- - Fixed an issue where every rank saved a ckpt during distributed training.
63
- - Applied NaN feature filtering during feature extraction.
64
- - Fixed an issue where silent input/output randomly generated noise. (Old models need to be retrained on a new dataset.)
65
-
66
- ### 2023-04-16 Update
67
-
68
- - Added a local real-time voice changing mini-GUI; start it by double-clicking go-realtime-gui.bat.
69
- - Applied filtering to frequency bands below 50Hz during training and inference.
70
- - Lowered the minimum pitch for pyworld pitch extraction from the default 80 to 50 for training and inference, so male low voices between 50-80Hz are no longer silenced.
71
- - Support changing the WebUI language based on the system locale. (Currently supports en_US, ja_JP, zh_CN, zh_HK, zh_SG, zh_TW; defaults to en_US if unsupported.)
72
- - Fixed recognition of some GPUs. (e.g., V100-16G recognition failure, P4 recognition failure)
73
-
74
- ### 2023-04-28 Update
75
-
76
- - Upgraded the Faiss index settings for faster speed and higher quality.
77
- - Removed the dependency on total_npy; sharing a model will no longer require a total_npy input.
78
- - Lifted the restrictions on 16-series GPUs and provided 4GB inference settings for GPUs with 4GB VRAM.
79
- - Fixed a bug in UVR5 vocal accompaniment separation for some audio formats.
80
- - The real-time voice changing mini-GUI now supports non-40k and non-lazy-pitch models.
81
-
82
- ### Future plans
83
-
84
- Features:
85
-
86
- - Support a multi-user training tab. (up to 4 users)
87
-
88
- Base model:
89
-
90
- - Add breath wav files to the training dataset, to fix the problem of vocal breathing being converted into noise.
91
- - Work on adding a base model for vocal training sets is underway and will be released in the future.
spaces/AIConsultant/MusicGen/audiocraft/grids/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- """Dora Grids."""
spaces/AIConsultant/MusicGen/docs/AUDIOGEN.md DELETED
@@ -1,158 +0,0 @@
1
- # AudioGen: Textually-guided audio generation
2
-
3
- AudioCraft provides the code and a model re-implementing AudioGen, a [textually-guided audio generation][audiogen_arxiv]
4
- model that performs text-to-sound generation.
5
-
6
- The provided AudioGen reimplementation follows the LM model architecture introduced in [MusicGen][musicgen_arxiv]
7
- and is a single stage auto-regressive Transformer model trained over a 16kHz
8
- <a href="https://github.com/facebookresearch/encodec">EnCodec tokenizer</a> with 4 codebooks sampled at 50 Hz.
9
- This model variant reaches audio quality similar to the original implementation introduced in the AudioGen publication
10
- while providing faster generation speed given the smaller frame rate.
11
-
12
- **Important note:** The provided models are NOT the original models used to report numbers in the
13
- [AudioGen publication][audiogen_arxiv]. Refer to the model card to learn more about architectural changes.
14
-
15
- Listen to samples from the **original AudioGen implementation** in our [sample page][audiogen_samples].
16
-
17
-
18
- ## Model Card
19
-
20
- See [the model card](../model_cards/AUDIOGEN_MODEL_CARD.md).
21
-
22
-
23
- ## Installation
24
-
25
- Please follow the AudioCraft installation instructions from the [README](../README.md).
26
-
27
- AudioCraft requires a GPU with at least 16 GB of memory for running inference with the medium-sized models (~1.5B parameters).
28
-
29
- ## API and usage
30
-
31
- We provide a simple API and a single pre-trained model for AudioGen:
32
-
33
- `facebook/audiogen-medium`: 1.5B model, text to sound - [🤗 Hub](https://huggingface.co/facebook/audiogen-medium)
34
-
35
- You can play with AudioGen by running the jupyter notebook at [`demos/audiogen_demo.ipynb`](../demos/audiogen_demo.ipynb) locally (if you have a GPU).
36
-
37
- See below for a quick example of using the API.
38
-
39
- ```python
40
- import torchaudio
41
- from audiocraft.models import AudioGen
42
- from audiocraft.data.audio import audio_write
43
-
44
- model = AudioGen.get_pretrained('facebook/audiogen-medium')
45
- model.set_generation_params(duration=5) # generate 5 seconds.
46
- descriptions = ['dog barking', 'siren of an emergency vehicle', 'footsteps in a corridor']
47
- wav = model.generate(descriptions) # generates 3 samples.
48
-
49
- for idx, one_wav in enumerate(wav):
50
- # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
51
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
52
- ```
53
-
54
- ## Training
55
-
56
- The [AudioGenSolver](../audiocraft/solvers/audiogen.py) implements the AudioGen's training pipeline
57
- used to develop the released model. Note that this may not fully reproduce the results presented in the paper.
58
- Similarly to MusicGen, it defines an autoregressive language modeling task over multiple streams of
59
- discrete tokens extracted from a pre-trained EnCodec model (see [EnCodec documentation](./ENCODEC.md)
60
- for more details on how to train such model) with dataset-specific changes for environmental sound
61
- processing.
62
-
63
- Note that **we do NOT provide any of the datasets** used for training AudioGen.
64
-
65
- ### Example configurations and grids
66
-
67
- We provide configurations to reproduce the released models and our research.
68
- AudioGen solvers configuration are available in [config/solver/audiogen](../config/solver/audiogen).
69
- The base training configuration used for the released models is the following:
70
- [`solver=audiogen/audiogen_base_16khz`](../config/solver/audiogen/audiogen_base_16khz.yaml)
71
-
72
- Please find some example grids to train AudioGen at
73
- [audiocraft/grids/audiogen](../audiocraft/grids/audiogen/).
74
-
75
- ```shell
76
- # text-to-sound
77
- dora grid audiogen.audiogen_base_16khz
78
- ```
79
-
80
- ### Sound dataset and metadata
81
-
82
- AudioGen's underlying dataset is an AudioDataset augmented with description metadata.
83
- The AudioGen dataset implementation expects the metadata to be available as `.json` files
84
- at the same location as the audio files or through specified external folder.
85
- Learn more in the [datasets section](./DATASETS.md).
86
-
87
- ### Evaluation stage
88
-
89
- By default, evaluation stage is also computing the cross-entropy and the perplexity over the
90
- evaluation dataset. Indeed the objective metrics used for evaluation can be costly to run
91
- or require some extra dependencies. Please refer to the [metrics documentation](./METRICS.md)
92
- for more details on the requirements for each metric.
93
-
94
- We provide an off-the-shelf configuration to enable running the objective metrics
95
- for audio generation in
96
- [config/solver/audiogen/evaluation/objective_eval](../config/solver/audiogen/evaluation/objective_eval.yaml).
97
-
98
- One can then activate evaluation the following way:
99
- ```shell
100
- # using the configuration
101
- dora run solver=audiogen/debug solver/audiogen/evaluation=objective_eval
102
- # specifying each of the fields, e.g. to activate KL computation
103
- dora run solver=audiogen/debug evaluate.metrics.kld=true
104
- ```
105
-
106
- See [an example evaluation grid](../audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py).
107
-
108
- ### Generation stage
109
-
110
- The generation stage allows to generate samples conditionally and/or unconditionally and to perform
111
- audio continuation (from a prompt). We currently support greedy sampling (argmax), sampling
112
- from softmax with a given temperature, top-K and top-P (nucleus) sampling. The number of samples
113
- generated and the batch size used are controlled by the `dataset.generate` configuration
114
- while the other generation parameters are defined in `generate.lm`.
115
-
116
- ```shell
117
- # control sampling parameters
118
- dora run solver=audiogen/debug generate.lm.gen_duration=5 generate.lm.use_sampling=true generate.lm.top_k=15
119
- ```
120
-
121
- ## More information
122
-
123
- Refer to [MusicGen's instructions](./MUSICGEN.md).
124
-
125
- ### Learn more
126
-
127
- Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md).
128
-
129
-
130
- ## Citation
131
-
132
- AudioGen
133
- ```
134
- @article{kreuk2022audiogen,
135
- title={Audiogen: Textually guided audio generation},
136
- author={Kreuk, Felix and Synnaeve, Gabriel and Polyak, Adam and Singer, Uriel and D{\'e}fossez, Alexandre and Copet, Jade and Parikh, Devi and Taigman, Yaniv and Adi, Yossi},
137
- journal={arXiv preprint arXiv:2209.15352},
138
- year={2022}
139
- }
140
- ```
141
-
142
- MusicGen
143
- ```
144
- @article{copet2023simple,
145
- title={Simple and Controllable Music Generation},
146
- author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
147
- year={2023},
148
- journal={arXiv preprint arXiv:2306.05284},
149
- }
150
- ```
151
-
152
- ## License
153
-
154
- See license information in the [model card](../model_cards/AUDIOGEN_MODEL_CARD.md).
155
-
156
- [audiogen_arxiv]: https://arxiv.org/abs/2209.15352
157
- [musicgen_arxiv]: https://arxiv.org/abs/2306.05284
158
- [audiogen_samples]: https://felixkreuk.github.io/audiogen/
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio_inpaint.py DELETED
@@ -1,1081 +0,0 @@
1
- """
2
- wild mixture of
3
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
- https://github.com/CompVis/taming-transformers
6
- -- merci
7
- """
8
- import os
9
- import torch
10
- import torch.nn as nn
11
- import numpy as np
12
- import pytorch_lightning as pl
13
- from torch.optim.lr_scheduler import LambdaLR
14
- from einops import rearrange, repeat
15
- from contextlib import contextmanager
16
- from functools import partial
17
- from tqdm import tqdm
18
- from torchvision.utils import make_grid
19
- from pytorch_lightning.utilities.distributed import rank_zero_only
20
-
21
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
22
- from ldm.modules.ema import LitEma
23
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
24
- from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
25
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
26
- from ldm.models.diffusion.ddim import DDIMSampler
27
- from ldm.models.diffusion.ddpm import DDPM, disabled_train
28
-
29
- __conditioning_keys__ = {'concat': 'c_concat',
30
- 'crossattn': 'c_crossattn',
31
- 'adm': 'y'}
32
-
33
- # add mel_dim and mel_length params to ensure correct shape
34
- class LatentDiffusion_audioinpaint(DDPM):
35
- """main class"""
36
- def __init__(self,
37
- first_stage_config,
38
- cond_stage_config,
39
- num_timesteps_cond=None,
40
- mel_dim=80,
41
- mel_length=848,
42
- cond_stage_key="image",
43
- cond_stage_trainable=False,
44
- concat_mode=True,
45
- cond_stage_forward=None,
46
- conditioning_key=None,
47
- scale_factor=1.0,
48
- scale_by_std=False,
49
- test_repeat=1,
50
- test_numsteps = None,
51
- *args, **kwargs):
52
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
53
- self.scale_by_std = scale_by_std
54
- assert self.num_timesteps_cond <= kwargs['timesteps']
55
- # for backwards compatibility after implementation of DiffusionWrapper
56
- if conditioning_key is None:
57
- conditioning_key = 'concat' if concat_mode else 'crossattn'
58
- if cond_stage_config == '__is_unconditional__':
59
- conditioning_key = None
60
- ckpt_path = kwargs.pop("ckpt_path", None)
61
- ignore_keys = kwargs.pop("ignore_keys", [])
62
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
63
- self.test_repeat = test_repeat
64
- if test_numsteps == None:
65
- self.test_numsteps = self.num_timesteps
66
- self.concat_mode = concat_mode
67
- self.mel_dim = mel_dim
68
- self.mel_length = mel_length
69
- self.cond_stage_trainable = cond_stage_trainable
70
- self.cond_stage_key = cond_stage_key
71
- try:
72
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
73
- except:
74
- self.num_downs = 0
75
- if not scale_by_std:
76
- self.scale_factor = scale_factor
77
- else:
78
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
79
- self.instantiate_first_stage(first_stage_config)
80
- self.instantiate_cond_stage(cond_stage_config)
81
- self.cond_stage_forward = cond_stage_forward
82
- self.clip_denoised = False
83
- self.bbox_tokenizer = None
84
-
85
- self.restarted_from_ckpt = False
86
- if ckpt_path is not None:
87
- self.init_from_ckpt(ckpt_path, ignore_keys)
88
- self.restarted_from_ckpt = True
89
-
90
- def make_cond_schedule(self, ):
91
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
92
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
93
- self.cond_ids[:self.num_timesteps_cond] = ids
94
-
95
- @rank_zero_only
96
- @torch.no_grad()
97
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
98
- # only for very first batch
99
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
100
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
101
- # set rescale weight to 1./std of encodings
102
- print("### USING STD-RESCALING ###")
103
- x = super().get_input(batch, self.first_stage_key)
104
- x = x.to(self.device)
105
- encoder_posterior = self.encode_first_stage(x)
106
- z = self.get_first_stage_encoding(encoder_posterior).detach()
107
- del self.scale_factor
108
- self.register_buffer('scale_factor', 1. / z.flatten().std())
109
- print(f"setting self.scale_factor to {self.scale_factor}")
110
- print("### USING STD-RESCALING ###")
111
-
112
- def register_schedule(self,
113
- given_betas=None, beta_schedule="linear", timesteps=1000,
114
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
115
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
116
-
117
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
118
- if self.shorten_cond_schedule:
119
- self.make_cond_schedule()
120
-
121
- def instantiate_first_stage(self, config):
122
- model = instantiate_from_config(config)
123
- self.first_stage_model = model.eval()
124
- self.first_stage_model.train = disabled_train
125
- for param in self.first_stage_model.parameters():
126
- param.requires_grad = False
127
-
128
- def instantiate_cond_stage(self, config):
129
- if not self.cond_stage_trainable:
130
- if config == "__is_first_stage__":# for no_text inpainting task
131
- print("Using first stage also as cond stage.")
132
- self.cond_stage_model = self.first_stage_model
133
- elif config == "__is_unconditional__":# for unconditional image generation such as human face、ImageNet
134
- print(f"Training {self.__class__.__name__} as an unconditional model.")
135
- self.cond_stage_model = None
136
- # self.be_unconditional = True
137
- else:
138
- model = instantiate_from_config(config)
139
- self.cond_stage_model = model.eval()
140
- self.cond_stage_model.train = disabled_train
141
- for param in self.cond_stage_model.parameters():
142
- param.requires_grad = False
143
- else:
144
- assert config != '__is_first_stage__'
145
- assert config != '__is_unconditional__'
146
- model = instantiate_from_config(config)
147
- self.cond_stage_model = model
148
-
149
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
150
- denoise_row = []
151
- for zd in tqdm(samples, desc=desc):
152
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
153
- force_not_quantize=force_no_decoder_quantization))
154
- n_imgs_per_row = len(denoise_row)
155
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
156
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
157
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
158
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
159
- return denoise_grid
160
-
161
- def get_first_stage_encoding(self, encoder_posterior):# encode_emb from autoencoder
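-         # sample (or pass through) the autoencoder latent, then rescale it by
-         # self.scale_factor so the latents are roughly unit-variance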
162
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
163
- z = encoder_posterior.sample()
164
- elif isinstance(encoder_posterior, torch.Tensor):
165
- z = encoder_posterior
166
- else:
167
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
168
- return self.scale_factor * z
169
-
170
- def get_learned_conditioning(self, c):
171
- if self.cond_stage_forward is None:
172
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
173
- c = self.cond_stage_model.encode(c)
174
- if isinstance(c, DiagonalGaussianDistribution):
175
- c = c.mode()
176
- else:
177
- c = self.cond_stage_model(c)
178
- else:
179
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
180
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
181
- return c
182
-
183
- def meshgrid(self, h, w):
184
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
185
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
186
-
187
- arr = torch.cat([y, x], dim=-1)
188
- return arr
189
-
190
- def delta_border(self, h, w):
191
- """
192
- :param h: height
193
- :param w: width
194
- :return: normalized distance to image border,
195
- with min distance = 0 at the border and max distance = 0.5 at the image center
196
- """
197
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
198
- arr = self.meshgrid(h, w) / lower_right_corner
199
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
200
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
201
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
202
- return edge_dist
203
-
204
- def get_weighting(self, h, w, Ly, Lx, device):
205
- weighting = self.delta_border(h, w)
206
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
207
- self.split_input_params["clip_max_weight"], )
208
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
209
-
210
- if self.split_input_params["tie_braker"]:
211
- L_weighting = self.delta_border(Ly, Lx)
212
- L_weighting = torch.clip(L_weighting,
213
- self.split_input_params["clip_min_tie_weight"],
214
- self.split_input_params["clip_max_tie_weight"])
215
-
216
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
217
- weighting = weighting * L_weighting
218
- return weighting
219
-
220
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
221
- """
222
- :param x: img of size (bs, c, h, w)
223
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
224
- """
225
- bs, nc, h, w = x.shape
226
-
227
- # number of crops in image
228
- Ly = (h - kernel_size[0]) // stride[0] + 1
229
- Lx = (w - kernel_size[1]) // stride[1] + 1
230
-
231
- if uf == 1 and df == 1:
232
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
233
- unfold = torch.nn.Unfold(**fold_params)
234
-
235
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
236
-
237
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
238
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
239
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
240
-
241
- elif uf > 1 and df == 1:
242
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
243
- unfold = torch.nn.Unfold(**fold_params)
244
-
245
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
246
- dilation=1, padding=0,
247
- stride=(stride[0] * uf, stride[1] * uf))
248
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
249
-
250
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
251
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
252
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
253
-
254
- elif df > 1 and uf == 1:
255
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
256
- unfold = torch.nn.Unfold(**fold_params)
257
-
258
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
259
- dilation=1, padding=0,
260
- stride=(stride[0] // df, stride[1] // df))
261
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
262
-
263
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
264
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
265
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
266
-
267
- else:
268
- raise NotImplementedError
269
-
270
- return fold, unfold, normalization, weighting
271
-
272
- @torch.no_grad()
273
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
274
- cond_key=None, return_original_cond=False, bs=None):
275
- x = super().get_input(batch, k)
276
- if bs is not None:
277
- x = x[:bs]
278
- x = x.to(self.device)
279
- encoder_posterior = self.encode_first_stage(x)
280
- z = self.get_first_stage_encoding(encoder_posterior).detach()
281
-
282
- if self.model.conditioning_key is not None:# 'crossattn' for txt2image, 'hybrid' for txt_inpaint
283
- if cond_key is None:
284
- cond_key = self.cond_stage_key # 'caption' for txt_inpaint
285
- if self.model.conditioning_key == 'hybrid':
286
- xc = {}
287
- assert cond_key == 'caption' # only txt_inpaint is implemented now
288
- assert 'masked_image' in batch.keys()
289
- assert 'mask' in batch.keys()
290
- masked_image = super().get_input(batch,'masked_image')
291
- mask = super().get_input(batch,'mask')
292
- if bs is not None:
293
- masked_image,mask = masked_image[:bs],mask[:bs]
294
- masked_image,mask = masked_image.to(self.device),mask.to(self.device)
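-                 # inpainting condition: encode the masked mel into the latent
-                 # space, resize the binary mask to the latent resolution, and
-                 # concatenate them channel-wise for the UNet's concat branch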
295
- masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
296
- resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
297
- xc['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
298
- xc[cond_key] = batch[cond_key]
299
- else:
300
- if cond_key != self.first_stage_key:
301
- if cond_key in ['caption', 'coordinates_bbox']:
302
- xc = batch[cond_key]
303
- elif cond_key == 'class_label':
304
- xc = batch
305
- else:
306
- xc = super().get_input(batch, cond_key).to(self.device)
307
- else:# cond_key == 'image'
308
- xc = x
309
- if not self.cond_stage_trainable or force_c_encode:# cond_stage_trainable is true for txt2img,force_c_encoder = True,when called in log_images
310
- if isinstance(xc, list):
311
- # import pudb; pudb.set_trace()
312
- c = self.get_learned_conditioning(xc)# log_images will call sample_log next, so the processed c must be computed in advance
313
- if isinstance(xc, dict):
314
- c = {}
315
- c['c_concat'] = xc['c_concat']
316
- c['c_crossattn'] = self.get_learned_conditioning(xc[cond_key])
317
- else:
318
- c = self.get_learned_conditioning(xc.to(self.device))
319
- else:
320
- c = xc
321
- if bs is not None:
322
- if isinstance(c,dict):
323
- for k in c.keys():
324
- c[k] = c[k][:bs]
325
- else:
326
- c = c[:bs]
327
-
328
- if self.use_positional_encodings:
329
- pos_x, pos_y = self.compute_latent_shifts(batch)
330
- ckey = __conditioning_keys__[self.model.conditioning_key]
331
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
332
-
333
- else:
334
- c = None
335
- xc = None
336
- if self.use_positional_encodings:
337
- pos_x, pos_y = self.compute_latent_shifts(batch)
338
- c = {'pos_x': pos_x, 'pos_y': pos_y}
339
- out = [z, c]
340
- if return_first_stage_outputs:
341
- xrec = self.decode_first_stage(z)
342
- out.extend([x, xrec])
343
- if return_original_cond:
344
- out.append(xc)
345
- return out
346
-
347
- @torch.no_grad()
348
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
349
- if predict_cids:
350
- if z.dim() == 4:
351
- z = torch.argmax(z.exp(), dim=1).long()
352
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
353
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
354
-
355
- z = 1. / self.scale_factor * z
356
-
357
- if hasattr(self, "split_input_params"):
358
- if self.split_input_params["patch_distributed_vq"]:
359
- ks = self.split_input_params["ks"] # eg. (128, 128)
360
- stride = self.split_input_params["stride"] # eg. (64, 64)
361
- uf = self.split_input_params["vqf"]
362
- bs, nc, h, w = z.shape
363
- if ks[0] > h or ks[1] > w:
364
- ks = (min(ks[0], h), min(ks[1], w))
365
- print("reducing Kernel")
366
-
367
- if stride[0] > h or stride[1] > w:
368
- stride = (min(stride[0], h), min(stride[1], w))
369
- print("reducing stride")
370
-
371
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
372
-
373
- z = unfold(z) # (bn, nc * prod(**ks), L)
374
- # 1. Reshape to img shape
375
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
376
-
377
- # 2. apply model loop over last dim
378
- if isinstance(self.first_stage_model, VQModelInterface):
379
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
380
- force_not_quantize=predict_cids or force_not_quantize)
381
- for i in range(z.shape[-1])]
382
- else:
383
-
384
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
385
- for i in range(z.shape[-1])]
386
-
387
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
388
- o = o * weighting
389
- # Reverse 1. reshape to img shape
390
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
391
- # stitch crops together
392
- decoded = fold(o)
393
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
394
- return decoded
395
- else:
396
- if isinstance(self.first_stage_model, VQModelInterface):
397
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
398
- else:
399
- return self.first_stage_model.decode(z)
400
-
401
- else:
402
- if isinstance(self.first_stage_model, VQModelInterface):
403
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
404
- else:
405
- return self.first_stage_model.decode(z)
406
-
407
- # same as above but without decorator
408
- def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
409
- if predict_cids:
410
- if z.dim() == 4:
411
- z = torch.argmax(z.exp(), dim=1).long()
412
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
413
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
414
-
415
- z = 1. / self.scale_factor * z
416
-
417
- if hasattr(self, "split_input_params"):
418
- if self.split_input_params["patch_distributed_vq"]:
419
- ks = self.split_input_params["ks"] # eg. (128, 128)
420
- stride = self.split_input_params["stride"] # eg. (64, 64)
421
- uf = self.split_input_params["vqf"]
422
- bs, nc, h, w = z.shape
423
- if ks[0] > h or ks[1] > w:
424
- ks = (min(ks[0], h), min(ks[1], w))
425
- print("reducing Kernel")
426
-
427
- if stride[0] > h or stride[1] > w:
428
- stride = (min(stride[0], h), min(stride[1], w))
429
- print("reducing stride")
430
-
431
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
432
-
433
- z = unfold(z) # (bn, nc * prod(**ks), L)
434
- # 1. Reshape to img shape
435
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
436
-
437
- # 2. apply model loop over last dim
438
- if isinstance(self.first_stage_model, VQModelInterface):
439
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
440
- force_not_quantize=predict_cids or force_not_quantize)
441
- for i in range(z.shape[-1])]
442
- else:
443
-
444
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
445
- for i in range(z.shape[-1])]
446
-
447
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
448
- o = o * weighting
449
- # Reverse 1. reshape to img shape
450
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
451
- # stitch crops together
452
- decoded = fold(o)
453
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
454
- return decoded
455
- else:
456
- if isinstance(self.first_stage_model, VQModelInterface):
457
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
458
- else:
459
- return self.first_stage_model.decode(z)
460
-
461
- else:
462
- if isinstance(self.first_stage_model, VQModelInterface):
463
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
464
- else:
465
- return self.first_stage_model.decode(z)
466
-
467
- @torch.no_grad()
468
- def encode_first_stage(self, x):
469
- if hasattr(self, "split_input_params"):
470
- if self.split_input_params["patch_distributed_vq"]:
471
- ks = self.split_input_params["ks"] # eg. (128, 128)
472
- stride = self.split_input_params["stride"] # eg. (64, 64)
473
- df = self.split_input_params["vqf"]
474
- self.split_input_params['original_image_size'] = x.shape[-2:]
475
- bs, nc, h, w = x.shape
476
- if ks[0] > h or ks[1] > w:
477
- ks = (min(ks[0], h), min(ks[1], w))
478
- print("reducing Kernel")
479
-
480
- if stride[0] > h or stride[1] > w:
481
- stride = (min(stride[0], h), min(stride[1], w))
482
- print("reducing stride")
483
-
484
- fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
485
- z = unfold(x) # (bn, nc * prod(**ks), L)
486
- # Reshape to img shape
487
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
488
-
489
- output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
490
- for i in range(z.shape[-1])]
491
-
492
- o = torch.stack(output_list, axis=-1)
493
- o = o * weighting
494
-
495
- # Reverse reshape to img shape
496
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
497
- # stitch crops together
498
- decoded = fold(o)
499
- decoded = decoded / normalization
500
- return decoded
501
-
502
- else:
503
- return self.first_stage_model.encode(x)
504
- else:
505
- return self.first_stage_model.encode(x)
506
-
507
- def shared_step(self, batch, **kwargs):
508
- x, c = self.get_input(batch, self.first_stage_key)# get latent and condition
509
- loss = self(x, c)
510
- return loss
511
-
512
- def test_step(self,batch,batch_idx):
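-         # inpainting inference: build the hybrid condition (text caption +
-         # masked-mel latent + resized mask), sample in latent space, decode
-         # back to a mel spectrogram, and save each result as a .npy file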
513
- # TODO make self.test_repeat work
514
- cond = {}
515
- cond[self.cond_stage_key] = batch[self.cond_stage_key]
516
- cond[self.cond_stage_key] = self.get_learned_conditioning(cond[self.cond_stage_key]) # c: string -> [B, T, Context_dim]
517
- cond['c_crossattn'] = cond.pop(self.cond_stage_key)
518
- masked_image = super().get_input(batch,'masked_image')
519
- mask = super().get_input(batch,'mask')
520
- masked_image,mask = masked_image.to(self.device),mask.to(self.device)
521
- masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
522
- resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
523
- cond['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
524
- batch_size = len(batch[self.cond_stage_key])
525
- # shape = [batch_size,self.channels,self.mel_dim,self.mel_length]
526
- enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps)
527
- xrec = self.decode_first_stage(enc_emb)
528
- reconstructions = (xrec + 1)/2 # to mel scale
529
- test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
530
- savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
531
- if not os.path.exists(savedir):
532
- os.makedirs(savedir)
533
-
534
- file_names = batch['f_name']
535
- nfiles = len(file_names)
536
- reconstructions = reconstructions.cpu().numpy().squeeze(1) # squeeze channel dim
537
- for k in range(reconstructions.shape[0]):
538
- b,repeat = k % nfiles, k // nfiles
539
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
540
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
541
- save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}_{repeat}.npy')# the num_th caption, the repeat_th repitition
542
- np.save(save_img_path,reconstructions[b])
543
-
544
- return None
545
-
546
-     def forward(self, x, c, *args, **kwargs):
-         t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
-         if self.model.conditioning_key is not None:
-             assert c is not None
-             if self.cond_stage_trainable:
-                 if isinstance(c, dict):
-                     c[self.cond_stage_key] = self.get_learned_conditioning(c[self.cond_stage_key])
-                     c['c_crossattn'] = c.pop(self.cond_stage_key)
-                 else:
-                     c = self.get_learned_conditioning(c)  # c: string -> [B, T, Context_dim]
-             if self.shorten_cond_schedule:  # TODO: drop this option
-                 tc = self.cond_ids[t].to(self.device)
-                 c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
-         return self.p_losses(x, c, t, *args, **kwargs)
-
-     def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
-         def rescale_bbox(bbox):
-             x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
-             y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
-             w = min(bbox[2] / crop_coordinates[2], 1 - x0)
-             h = min(bbox[3] / crop_coordinates[3], 1 - y0)
-             return x0, y0, w, h
-
-         return [rescale_bbox(b) for b in bboxes]
-
-     def apply_model(self, x_noisy, t, cond, return_ids=False):
-         # wrap values in lists to enable the concat operation in the model
-         if isinstance(cond, dict):
-             # hybrid case, cond is expected to be a dict (txt2inpaint)
-             cond_tmp = {}  # use cond_tmp to avoid in-place edits
-             for k, v in cond.items():
-                 if not isinstance(v, list):
-                     cond_tmp[k] = [cond[k]]
-                 else:
-                     cond_tmp[k] = cond[k]
-             cond = cond_tmp
-         else:
-             if not isinstance(cond, list):
-                 cond = [cond]
-             key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
-             cond = {key: cond}
-
-         if hasattr(self, "split_input_params"):
-             assert len(cond) == 1  # todo can only deal with one conditioning atm
-             assert not return_ids
-             ks = self.split_input_params["ks"]  # eg. (128, 128)
-             stride = self.split_input_params["stride"]  # eg. (64, 64)
-
-             h, w = x_noisy.shape[-2:]
-
-             fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
-
-             z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
-             # Reshape to img shape
-             z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
-             z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
-
-             if self.cond_stage_key in ["image", "LR_image", "segmentation",
-                                        'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
-                 c_key = next(iter(cond.keys()))  # get key
-                 c = next(iter(cond.values()))  # get value
-                 assert (len(c) == 1)  # todo extend to list with more than one elem
-                 c = c[0]  # get element
-
-                 c = unfold(c)
-                 c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
-
-                 cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
-
-             elif self.cond_stage_key == 'coordinates_bbox':
-                 assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
-
-                 # assuming padding of unfold is always 0 and its dilation is always 1
-                 n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
-                 full_img_h, full_img_w = self.split_input_params['original_image_size']
-                 # as we are operating on latents, we need the factor from the original image size to the
-                 # spatial latent size to properly rescale the crops for regenerating the bbox annotations
-                 num_downs = self.first_stage_model.encoder.num_resolutions - 1
-                 rescale_latent = 2 ** (num_downs)
-
-                 # get top-left positions of patches as expected by the bbox tokenizer; therefore we
-                 # need to rescale the tl patch coordinates to be in between (0, 1)
-                 tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
-                                          rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
-                                         for patch_nr in range(z.shape[-1])]
-
-                 # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
-                 patch_limits = [(x_tl, y_tl,
-                                  rescale_latent * ks[0] / full_img_w,
-                                  rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
-                 # patch_values = [(np.arange(x_tl, min(x_tl + ks, 1.)), np.arange(y_tl, min(y_tl + ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
-
-                 # tokenize crop coordinates for the bounding boxes of the respective patches
-                 patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
-                                       for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
-                 print(patch_limits_tknzd[0].shape)
-                 # cut tknzd crop position from conditioning
-                 assert isinstance(cond, dict), 'cond must be dict to be fed into model'
-                 cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
-                 print(cut_cond.shape)
-
-                 adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
-                 adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
-                 print(adapted_cond.shape)
-                 adapted_cond = self.get_learned_conditioning(adapted_cond)
-                 print(adapted_cond.shape)
-                 adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
-                 print(adapted_cond.shape)
-
-                 cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
-
-             else:
-                 cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient
-
-             # apply model by looping over crops
-             output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
-             assert not isinstance(output_list[0],
-                                   tuple)  # todo can't deal with multiple model outputs; check this never happens
-
-             o = torch.stack(output_list, axis=-1)
-             o = o * weighting
-             # Reverse reshape to img shape
-             o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
-             # stitch crops together
-             x_recon = fold(o) / normalization
-
-         else:
-             # x_noisy is a tensor with shape [b, c, mel_len, T]
-             # if the condition is a caption, cond['c_crossattn'] is a list; each item has shape [1, 77, 1280]
-             x_recon = self.model(x_noisy, t, **cond)  # tensor with shape [b, c, mel_len, T]
-
-         if isinstance(x_recon, tuple) and not return_ids:
-             return x_recon[0]
-         else:
-             return x_recon
-
-     def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
-         return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
-                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
-     def _prior_bpd(self, x_start):
-         """
-         Get the prior KL term for the variational lower-bound, measured in
-         bits-per-dim.
-         This term can't be optimized, as it only depends on the encoder.
-         :param x_start: the [N x C x ...] tensor of inputs.
-         :return: a batch of [N] KL values (in bits), one per batch element.
-         """
-         batch_size = x_start.shape[0]
-         t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
-         qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
-         kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
-         return mean_flat(kl_prior) / np.log(2.0)
-
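_prior_bpd measures how far q(x_T | x_0) is from the standard-normal prior, in bits per dimension. A hedged standalone sketch of the same computation, using the closed-form KL between diagonal Gaussians divided by ln 2 (the near-zero alpha_bar_T here is an illustrative stand-in for the model's schedule):

import torch

def normal_kl(mean1, logvar1, mean2, logvar2):
    # KL( N(mean1, exp(logvar1)) || N(mean2, exp(logvar2)) ), elementwise
    return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2)
                  + (mean1 - mean2) ** 2 * torch.exp(-logvar2))

x0 = torch.randn(4, 3, 16, 16)
alpha_bar_T = torch.tensor(1e-4)                      # illustrative near-zero value
qt_mean = alpha_bar_T.sqrt() * x0                     # mean of q(x_T | x_0)
qt_logvar = torch.log(1 - alpha_bar_T) * torch.ones_like(x0)

kl = normal_kl(qt_mean, qt_logvar, torch.zeros_like(x0), torch.zeros_like(x0))
bits_per_dim = kl.flatten(1).mean(1) / torch.log(torch.tensor(2.0))
print(bits_per_dim)                                   # ~0 when q(x_T | x_0) is close to N(0, I)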
-     def p_losses(self, x_start, cond, t, noise=None):
-         noise = default(noise, lambda: torch.randn_like(x_start))
-         x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-         model_output = self.apply_model(x_noisy, t, cond)
-
-         loss_dict = {}
-         prefix = 'train' if self.training else 'val'
-
-         if self.parameterization == "x0":
-             target = x_start
-         elif self.parameterization == "eps":
-             target = noise
-         else:
-             raise NotImplementedError()
-
-         loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
-         loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
-         logvar_t = self.logvar[t].to(self.device)
-         loss = loss_simple / torch.exp(logvar_t) + logvar_t
-         # loss = loss_simple / torch.exp(self.logvar) + self.logvar
-         if self.learn_logvar:
-             loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
-             loss_dict.update({'logvar': self.logvar.data.mean()})
-
-         loss = self.l_simple_weight * loss.mean()
-
-         loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
-         loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
-         loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
-         loss += (self.original_elbo_weight * loss_vlb)
-         loss_dict.update({f'{prefix}/loss': loss})
-
-         return loss, loss_dict
-
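With the "eps" parameterization, p_losses reduces to an MSE between the injected noise and the network's prediction, optionally reweighted by a learned per-timestep log-variance: per sample, loss = ||eps - eps_theta(x_t, t)||^2 / exp(logvar_t) + logvar_t. A toy sketch of that core, with a stand-in model and the forward-noising step elided:

import torch

num_timesteps = 1000
logvar = torch.zeros(num_timesteps)                   # learned if learn_logvar=True
model = lambda x, t: torch.zeros_like(x)              # stand-in for the conditioned UNet

x_start = torch.randn(8, 4, 10, 10)
t = torch.randint(0, num_timesteps, (8,))
noise = torch.randn_like(x_start)
x_noisy = x_start                                     # q_sample(x_start, t, noise) in the real code

pred = model(x_noisy, t)
loss_simple = ((noise - pred) ** 2).mean(dim=(1, 2, 3))
loss = (loss_simple / torch.exp(logvar[t]) + logvar[t]).mean()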
-     def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
-                         return_x0=False, score_corrector=None, corrector_kwargs=None):
-         t_in = t
-         model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
-         if score_corrector is not None:
-             assert self.parameterization == "eps"
-             model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
-         if return_codebook_ids:
-             model_out, logits = model_out
-
-         if self.parameterization == "eps":
-             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-         elif self.parameterization == "x0":
-             x_recon = model_out
-         else:
-             raise NotImplementedError()
-
-         if clip_denoised:
-             x_recon.clamp_(-1., 1.)
-         if quantize_denoised:
-             x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
-         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
-         if return_codebook_ids:
-             return model_mean, posterior_variance, posterior_log_variance, logits
-         elif return_x0:
-             return model_mean, posterior_variance, posterior_log_variance, x_recon
-         else:
-             return model_mean, posterior_variance, posterior_log_variance
-
-     @torch.no_grad()
-     def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
-                  return_codebook_ids=False, quantize_denoised=False, return_x0=False,
-                  temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
-         b, *_, device = *x.shape, x.device
-         outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
-                                        return_codebook_ids=return_codebook_ids,
-                                        quantize_denoised=quantize_denoised,
-                                        return_x0=return_x0,
-                                        score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
-         if return_codebook_ids:
-             raise DeprecationWarning("Support dropped.")
-             model_mean, _, model_log_variance, logits = outputs
-         elif return_x0:
-             model_mean, _, model_log_variance, x0 = outputs
-         else:
-             model_mean, _, model_log_variance = outputs
-
-         noise = noise_like(x.shape, device, repeat_noise) * temperature
-         if noise_dropout > 0.:
-             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-         # no noise when t == 0
-         nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
-         if return_codebook_ids:
-             return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
-         if return_x0:
-             return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
-         else:
-             return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
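Each p_sample call draws x_{t-1} ~ N(mu_theta(x_t, t), sigma_t^2 I), masking the noise out at t == 0. Stripped of the optional codebook and x0 returns, the update is just the following sketch (model_mean and model_log_variance are assumed to come from p_mean_variance):

import torch

def ancestral_step(model_mean, model_log_variance, t, temperature=1.0):
    # t is a (b,)-shaped tensor of integer timesteps
    noise = torch.randn_like(model_mean) * temperature
    nonzero_mask = (t != 0).float().reshape(-1, *([1] * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise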
-     @torch.no_grad()
-     def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
-                               img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
-                               score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
-                               log_every_t=None):
-         if not log_every_t:
-             log_every_t = self.log_every_t
-         timesteps = self.num_timesteps
-         if batch_size is not None:
-             b = batch_size if batch_size is not None else shape[0]
-             shape = [batch_size] + list(shape)
-         else:
-             b = batch_size = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=self.device)
-         else:
-             img = x_T
-         intermediates = []
-         if cond is not None:
-             if isinstance(cond, dict):
-                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                         list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
-             else:
-                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
-         if start_T is not None:
-             timesteps = min(timesteps, start_T)
-         iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
-                         total=timesteps) if verbose else reversed(
-             range(0, timesteps))
-         if type(temperature) == float:
-             temperature = [temperature] * timesteps
-
-         for i in iterator:
-             ts = torch.full((b,), i, device=self.device, dtype=torch.long)
-             if self.shorten_cond_schedule:
-                 assert self.model.conditioning_key != 'hybrid'
-                 tc = self.cond_ids[ts].to(cond.device)
-                 cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-             img, x0_partial = self.p_sample(img, cond, ts,
-                                             clip_denoised=self.clip_denoised,
-                                             quantize_denoised=quantize_denoised, return_x0=True,
-                                             temperature=temperature[i], noise_dropout=noise_dropout,
-                                             score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
-             if mask is not None:
-                 assert x0 is not None
-                 img_orig = self.q_sample(x0, ts)
-                 img = img_orig * mask + (1. - mask) * img
-
-             if i % log_every_t == 0 or i == timesteps - 1:
-                 intermediates.append(x0_partial)
-             if callback: callback(i)
-             if img_callback: img_callback(img, i)
-         return img, intermediates
-
-     @torch.no_grad()
-     def p_sample_loop(self, cond, shape, return_intermediates=False,
-                       x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
-                       mask=None, x0=None, img_callback=None, start_T=None,
-                       log_every_t=None):
-
-         if not log_every_t:
-             log_every_t = self.log_every_t
-         device = self.betas.device
-         b = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=device)
-         else:
-             img = x_T
-
-         intermediates = [img]
-         if timesteps is None:
-             timesteps = self.num_timesteps
-
-         if start_T is not None:
-             timesteps = min(timesteps, start_T)
-         iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
-             range(0, timesteps))
-
-         if mask is not None:
-             assert x0 is not None
-             assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
-
-         for i in iterator:
-             ts = torch.full((b,), i, device=device, dtype=torch.long)
-             if self.shorten_cond_schedule:
-                 assert self.model.conditioning_key != 'hybrid'
-                 tc = self.cond_ids[ts].to(cond.device)
-                 cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-             img = self.p_sample(img, cond, ts,
-                                 clip_denoised=self.clip_denoised,
-                                 quantize_denoised=quantize_denoised)
-             if mask is not None:
-                 img_orig = self.q_sample(x0, ts)
-                 img = img_orig * mask + (1. - mask) * img
-
-             if i % log_every_t == 0 or i == timesteps - 1:
-                 intermediates.append(img)
-             if callback: callback(i)
-             if img_callback: img_callback(img, i)
-
-         if return_intermediates:
-             return img, intermediates
-         return img
-
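The mask branch in the two loops above is the standard latent-blending inpainting trick: at every step the known region is reset to a freshly noised copy of x0, so only the masked-out region is actually generated. In isolation (q_sample stands in for the model's forward-noising function):

import torch

def blend_known_region(img, x0, mask, ts, q_sample):
    # mask == 1 marks pixels kept from x0; mask == 0 marks pixels being generated
    img_orig = q_sample(x0, ts)   # re-noise the ground truth to the current noise level
    return img_orig * mask + (1.0 - mask) * img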
-     @torch.no_grad()
-     def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
-                verbose=True, timesteps=None, quantize_denoised=False,
-                mask=None, x0=None, shape=None, **kwargs):
-         if shape is None:
-             shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
-         if cond is not None:
-             if isinstance(cond, dict):
-                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                         list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
-             else:
-                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-         return self.p_sample_loop(cond,
-                                   shape,
-                                   return_intermediates=return_intermediates, x_T=x_T,
-                                   verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
-                                   mask=mask, x0=x0)
-
-     @torch.no_grad()
-     def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
-         if ddim:
-             ddim_sampler = DDIMSampler(self)
-             shape = (self.channels, self.mel_dim, self.mel_length)
-             samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
-                                                          shape, cond, verbose=False, **kwargs)
-         else:
-             samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
-                                                  return_intermediates=True, **kwargs)
-
-         return samples, intermediates
-
-     @torch.no_grad()
-     def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
-                    quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
-                    plot_diffusion_rows=True, **kwargs):
-
-         use_ddim = ddim_steps is not None
-
-         log = dict()
-         z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
-                                            return_first_stage_outputs=True,
-                                            force_c_encode=True,
-                                            return_original_cond=True,
-                                            bs=N)
-
-         N = min(x.shape[0], N)
-         n_row = min(x.shape[0], n_row)
-         log["inputs"] = x  # original input image
-         log["reconstruction"] = xrec  # reconstructed image
-         if self.model.conditioning_key is not None:
-             if hasattr(self.cond_stage_model, "decode"):  # when cond_stage is first_stage (the bert embedder does not have decode)
-                 xc = self.cond_stage_model.decode(c)  # decoded masked image
-                 log["conditioning"] = xc  # decoded conditioning image
-             elif self.cond_stage_key in ["caption"]:
-                 xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
-                 log["conditioning"] = xc  # image containing the caption text
-                 if self.model.conditioning_key == 'hybrid':
-                     log["decoded_maskedimg"] = self.first_stage_model.decode(c['c_concat'][:, :self.first_stage_model.embed_dim])  # c_concat concatenates the masked-image latent and the resized mask; take the latent part here to decode
-             elif self.cond_stage_key == 'class_label':
-                 xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
-                 log['conditioning'] = xc  # image whose text is the class label
-             elif isimage(xc):
-                 log["conditioning"] = xc
-             if ismap(xc):
-                 log["original_conditioning"] = self.to_rgb(xc)
-
-         if plot_diffusion_rows:  # images at each diffusion step
-             # get diffusion row
-             diffusion_row = list()
-             z_start = z[:n_row]
-             for t in range(self.num_timesteps):
-                 if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                     t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                     t = t.to(self.device).long()
-                     noise = torch.randn_like(z_start)
-                     z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                     diffusion_row.append(self.decode_first_stage(z_noisy))
-
-             diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-             diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-             diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-             diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-             log["diffusion_row"] = diffusion_grid
-
-         if sample:
-             # get denoise row
-             with self.ema_scope("Plotting"):
-                 samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                          ddim_steps=ddim_steps, eta=ddim_eta)
-                 # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
-             x_samples = self.decode_first_stage(samples)
-             log["samples"] = x_samples
-             if plot_denoise_rows:
-                 denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                 log["denoise_row"] = denoise_grid
-
-             if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
-                     self.first_stage_model, IdentityFirstStage):
-                 # also display when quantizing x0 while sampling
-                 with self.ema_scope("Plotting Quantized Denoised"):
-                     samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                              ddim_steps=ddim_steps, eta=ddim_eta,
-                                                              quantize_denoised=True)
-                     # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
-                     #                                      quantize_denoised=True)
-                 x_samples = self.decode_first_stage(samples.to(self.device))
-                 log["samples_x0_quantized"] = x_samples
-
-             if inpaint:
-                 # make a simple center square
-                 b, h, w = z.shape[0], z.shape[2], z.shape[3]
-                 mask = torch.ones(N, h, w).to(self.device)
-                 # zeros will be filled in
-                 mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
-                 mask = mask[:, None, ...]  # N, 1, H, W
-                 with self.ema_scope("Plotting Inpaint"):
-                     samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                                  ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-                 x_samples = self.decode_first_stage(samples.to(self.device))
-                 log["samples_inpainting"] = x_samples
-                 log["mask"] = mask
-
-                 # outpaint
-                 with self.ema_scope("Plotting Outpaint"):
-                     samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                                  ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-                 x_samples = self.decode_first_stage(samples.to(self.device))
-                 log["samples_outpainting"] = x_samples
-
-         if plot_progressive_rows:
-             with self.ema_scope("Plotting Progressives"):
-                 img, progressives = self.progressive_denoising(c,
-                                                                shape=(self.channels, self.mel_dim, self.mel_length),
-                                                                batch_size=N)
-             prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
-             log["progressive_row"] = prog_row
-
-         if return_keys:
-             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
-                 return log
-             else:
-                 return {key: log[key] for key in return_keys}
-         return log
-
-     def configure_optimizers(self):
-         lr = self.learning_rate
-         params = list(self.model.parameters())
-         if self.cond_stage_trainable:
-             print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
-             params = params + list(self.cond_stage_model.parameters())
-         if self.learn_logvar:
-             print('Diffusion model optimizing logvar')
-             params.append(self.logvar)
-         opt = torch.optim.AdamW(params, lr=lr)
-         if self.use_scheduler:
-             assert 'target' in self.scheduler_config
-             scheduler = instantiate_from_config(self.scheduler_config)
-
-             print("Setting up LambdaLR scheduler...")
-             scheduler = [
-                 {
-                     'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
-                     'interval': 'step',
-                     'frequency': 1
-                 }]
-             return [opt], scheduler
-         return opt
-
-     @torch.no_grad()
-     def to_rgb(self, x):
-         x = x.float()
-         if not hasattr(self, "colorize"):
-             self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
-         x = nn.functional.conv2d(x, weight=self.colorize)
-         x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
-         return x
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/logout/+page.server.ts DELETED
@@ -1,17 +0,0 @@
- import { dev } from "$app/environment";
- import { base } from "$app/paths";
- import { COOKIE_NAME } from "$env/static/private";
- import { redirect } from "@sveltejs/kit";
-
- export const actions = {
-     default: async function ({ cookies }) {
-         cookies.delete(COOKIE_NAME, {
-             path: "/",
-             // So that it works inside the space's iframe
-             sameSite: dev ? "lax" : "none",
-             secure: !dev,
-             httpOnly: true,
-         });
-         throw redirect(303, `${base}/`);
-     },
- };
spaces/Adr740/Hadith_AI_Explorer/data.py DELETED
@@ -1,2 +0,0 @@
- import pandas as pd
- data = pd.read_pickle("pickle_ebd.pkl")
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PreLayout.js DELETED
@@ -1,15 +0,0 @@
- var PreLayout = function () {
-     this._childrenWidth = undefined;
-     this._childrenHeight = undefined;
-
-     var children = this.getChildrenSizers(),
-         child;
-     for (var i = 0, cnt = children.length; i < cnt; i++) {
-         child = children[i];
-         if (child.ignoreLayout) {
-             continue;
-         }
-         child.preLayout();
-     }
- }
- export default PreLayout;
spaces/Ailexcoder/GPT4ALL1/app.py DELETED
@@ -1,143 +0,0 @@
- from __future__ import annotations
- from typing import Iterable
- import gradio as gr
- from gradio.themes.base import Base
- from gradio.themes.utils import colors, fonts, sizes
-
- from llama_cpp import Llama
- from huggingface_hub import hf_hub_download
-
- hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
- llm = Llama(model_path="./ggjt-model.bin", n_threads=2)
-
-
- ins = '''### Instruction:
- {}
- ### Response:
- '''
-
- theme = gr.themes.Monochrome(
-     primary_hue="indigo",
-     secondary_hue="blue",
-     neutral_hue="slate",
-     radius_size=gr.themes.sizes.radius_sm,
-     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
- )
-
-
- # def generate(instruction):
- #     response = llm(ins.format(instruction))
- #     response = response['choices'][0]['text']
- #     result = ""
- #     for word in response.split(" "):
- #         result += word + " "
- #         yield result
-
- def generate(instruction):
-     result = ""
-     for x in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True):
-         result += x['choices'][0]['text']
-         yield result
-
-
- examples = [
-     "Instead of making a peanut butter and jelly sandwich, what else could I combine peanut butter with in a sandwich? Give five ideas",
-     "How do I make a campfire?",
-     "Explain to me the difference between nuclear fission and fusion.",
-     "I'm selling my Nikon D-750, write a short blurb for my ad."
- ]
-
- def process_example(args):
-     for x in generate(args):
-         pass
-     return x
-
- css = ".generating {visibility: hidden}"
-
- # Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo
- class SeafoamCustom(Base):
-     def __init__(
-         self,
-         *,
-         primary_hue: colors.Color | str = colors.emerald,
-         secondary_hue: colors.Color | str = colors.blue,
-         neutral_hue: colors.Color | str = colors.blue,
-         spacing_size: sizes.Size | str = sizes.spacing_md,
-         radius_size: sizes.Size | str = sizes.radius_md,
-         font: fonts.Font
-         | str
-         | Iterable[fonts.Font | str] = (
-             fonts.GoogleFont("Quicksand"),
-             "ui-sans-serif",
-             "sans-serif",
-         ),
-         font_mono: fonts.Font
-         | str
-         | Iterable[fonts.Font | str] = (
-             fonts.GoogleFont("IBM Plex Mono"),
-             "ui-monospace",
-             "monospace",
-         ),
-     ):
-         super().__init__(
-             primary_hue=primary_hue,
-             secondary_hue=secondary_hue,
-             neutral_hue=neutral_hue,
-             spacing_size=spacing_size,
-             radius_size=radius_size,
-             font=font,
-             font_mono=font_mono,
-         )
-         super().set(
-             button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
-             button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
-             button_primary_text_color="white",
-             button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
-             block_shadow="*shadow_drop_lg",
-             button_shadow="*shadow_drop_lg",
-             input_background_fill="zinc",
-             input_border_color="*secondary_300",
-             input_shadow="*shadow_drop",
-             input_shadow_focus="*shadow_drop_lg",
-         )
-
-
- seafoam = SeafoamCustom()
-
-
- with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
-     with gr.Column():
-         gr.Markdown(
-             """ ## GPT4ALL
-
-             An ecosystem of open-source chatbots trained on a massive collection of clean assistant data, including code, stories and dialogue
-
-             Type in the box below and click the button to generate answers to your most pressing questions!
-
-             """
-         )
-
-         with gr.Row():
-             with gr.Column(scale=3):
-                 instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
-
-                 with gr.Box():
-                     gr.Markdown("**Answer**")
-                     output = gr.Markdown(elem_id="q-output")
-                 submit = gr.Button("Generate", variant="primary")
-                 gr.Examples(
-                     examples=examples,
-                     inputs=[instruction],
-                     cache_examples=True,
-                     fn=process_example,
-                     outputs=[output],
-                 )
-
-     submit.click(generate, inputs=[instruction], outputs=[output])
-     instruction.submit(generate, inputs=[instruction], outputs=[output])
-
- demo.queue(concurrency_count=1).launch(debug=True)
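The generate() function above streams tokens from llama-cpp-python. A minimal command-line version of the same loop, outside Gradio, using the model path and prompt template this app itself downloads and defines:

from llama_cpp import Llama

llm = Llama(model_path="./ggjt-model.bin", n_threads=2)
prompt = "### Instruction:\nHow do I make a campfire?\n### Response:\n"

# each streamed chunk carries the next piece of text in choices[0]['text']
for chunk in llm(prompt, stop=["### Instruction:", "### End"], stream=True):
    print(chunk["choices"][0]["text"], end="", flush=True)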
spaces/AlexWang/lama/saicinpainting/training/data/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/sd_text2img_k_diffusion.py DELETED
@@ -1,475 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import importlib
- import warnings
- from typing import Callable, List, Optional, Union
-
- import torch
- from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
-
- from diffusers import DiffusionPipeline, LMSDiscreteScheduler
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.utils import is_accelerate_available, logging
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- class ModelWrapper:
-     def __init__(self, model, alphas_cumprod):
-         self.model = model
-         self.alphas_cumprod = alphas_cumprod
-
-     def apply_model(self, *args, **kwargs):
-         if len(args) == 3:
-             encoder_hidden_states = args[-1]
-             args = args[:2]
-         if kwargs.get("cond", None) is not None:
-             encoder_hidden_states = kwargs.pop("cond")
-         return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
-
-
- class StableDiffusionPipeline(DiffusionPipeline):
-     r"""
-     Pipeline for text-to-image generation using Stable Diffusion.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPImageProcessor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae,
-         text_encoder,
-         tokenizer,
-         unet,
-         scheduler,
-         safety_checker,
-         feature_extractor,
-     ):
-         super().__init__()
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         # get correct sigmas from LMS
-         scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-         model = ModelWrapper(unet, scheduler.alphas_cumprod)
-         if scheduler.config.prediction_type == "v_prediction":
-             self.k_diffusion_model = CompVisVDenoiser(model)
-         else:
-             self.k_diffusion_model = CompVisDenoiser(model)
-
-     def set_sampler(self, scheduler_type: str):
-         warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
-         return self.set_scheduler(scheduler_type)
-
-     def set_scheduler(self, scheduler_type: str):
-         library = importlib.import_module("k_diffusion")
-         sampling = getattr(library, "sampling")
-         self.sampler = getattr(sampling, scheduler_type)
-
-     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
-         r"""
-         Enable sliced attention computation.
-
-         When this option is enabled, the attention module will split the input tensor in slices, to compute attention
-         in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
-         Args:
-             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
-                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
-                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
-                 `attention_head_dim` must be a multiple of `slice_size`.
-         """
-         if slice_size == "auto":
-             # half the attention head size is usually a good trade-off between
-             # speed and memory
-             slice_size = self.unet.config.attention_head_dim // 2
-         self.unet.set_attention_slice(slice_size)
-
-     def disable_attention_slicing(self):
-         r"""
-         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
-         back to computing attention in one step.
-         """
-         # set slice_size = `None` to disable `attention slicing`
-         self.enable_attention_slicing(None)
-
-     def enable_sequential_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
-         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-         `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
-         """
-         if is_accelerate_available():
-             from accelerate import cpu_offload
-         else:
-             raise ImportError("Please install accelerate via `pip install accelerate`")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
-             if cpu_offloaded_model is not None:
-                 cpu_offload(cpu_offloaded_model, device)
-
-     @property
-     def _execution_device(self):
-         r"""
-         Returns the device on which the pipeline's models will be executed. After calling
-         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-         hooks.
-         """
-         if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
-             return self.device
-         for module in self.unet.modules():
-             if (
-                 hasattr(module, "_hf_hook")
-                 and hasattr(module._hf_hook, "execution_device")
-                 and module._hf_hook.execution_device is not None
-             ):
-                 return torch.device(module._hf_hook.execution_device)
-         return self.device
-
-     def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-
-         Args:
-             prompt (`str` or `list(int)`):
-                 prompt to be encoded
-             device: (`torch.device`):
-                 torch device
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier free guidance or not
-             negative_prompt (`str` or `List[str]`):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-         """
-         batch_size = len(prompt) if isinstance(prompt, list) else 1
-
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pt",
-         )
-         text_input_ids = text_inputs.input_ids
-         untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
-
-         if not torch.equal(text_input_ids, untruncated_ids):
-             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-
-         if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-             attention_mask = text_inputs.attention_mask.to(device)
-         else:
-             attention_mask = None
-
-         text_embeddings = self.text_encoder(
-             text_input_ids.to(device),
-             attention_mask=attention_mask,
-         )
-         text_embeddings = text_embeddings[0]
-
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         bs_embed, seq_len, _ = text_embeddings.shape
-         text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
-         text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             uncond_embeddings = self.text_encoder(
-                 uncond_input.input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             uncond_embeddings = uncond_embeddings[0]
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
-             uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
-         return text_embeddings
-
-     def run_safety_checker(self, image, device, dtype):
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
-             )
-         else:
-             has_nsfw_concept = None
-         return image, has_nsfw_concept
-
-     def decode_latents(self, latents):
-         latents = 1 / 0.18215 * latents
-         image = self.vae.decode(latents).sample
-         image = (image / 2 + 0.5).clamp(0, 1)
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-         return image
-
-     def check_inputs(self, prompt, height, width, callback_steps):
-         if not isinstance(prompt, str) and not isinstance(prompt, list):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-         shape = (batch_size, num_channels_latents, height // 8, width // 8)
-         if latents is None:
-             if device.type == "mps":
-                 # randn does not work reproducibly on mps
-                 latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
-             else:
-                 latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             if latents.shape != shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
-             latents = latents.to(device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         return latents
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[torch.Generator] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         **kwargs,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             height (`int`, *optional*, defaults to 512):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to 512):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`], will be ignored for others.
-             generator (`torch.Generator`, *optional*):
-                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                 deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that will be called every `callback_steps` steps during inference. The function will be
-                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function will be called. If not specified, the callback will be
-                 called at every step.
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
-             When returning a tuple, the first element is a list with the generated images, and the second element is a
-             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-             (nsfw) content, according to the `safety_checker`.
-         """
-
-         # 1. Check inputs. Raise error if not correct
-         self.check_inputs(prompt, height, width, callback_steps)
-
-         # 2. Define call parameters
-         batch_size = 1 if isinstance(prompt, str) else len(prompt)
-         device = self._execution_device
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = True
-         if guidance_scale <= 1.0:
-             raise ValueError("has to use guidance_scale")
-
-         # 3. Encode input prompt
-         text_embeddings = self._encode_prompt(
-             prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
-         )
-
-         # 4. Prepare timesteps
-         self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
-         sigmas = self.scheduler.sigmas
-         sigmas = sigmas.to(text_embeddings.dtype)
-
-         # 5. Prepare latent variables
-         num_channels_latents = self.unet.config.in_channels
-         latents = self.prepare_latents(
-             batch_size * num_images_per_prompt,
-             num_channels_latents,
-             height,
-             width,
-             text_embeddings.dtype,
-             device,
-             generator,
-             latents,
-         )
-         latents = latents * sigmas[0]
-         self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
-         self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
-
-         def model_fn(x, t):
-             latent_model_input = torch.cat([x] * 2)
-
-             noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
-
-             noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-             noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-             return noise_pred
-
-         latents = self.sampler(model_fn, latents, sigmas)
-
-         # 8. Post-processing
-         image = self.decode_latents(latents)
-
-         # 9. Run safety checker
-         image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
-
-         # 10. Convert to PIL
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image, has_nsfw_concept)
-
-         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
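model_fn above implements classifier-free guidance: one batched denoiser call over the [unconditional; conditional] pair, then extrapolation away from the unconditional prediction. The core of it, isolated as a sketch (eps_model is an illustrative stand-in for self.k_diffusion_model; the embeddings are assumed pre-concatenated as _encode_prompt does):

import torch

def cfg_noise_pred(eps_model, x, t, uncond_emb, text_emb, guidance_scale):
    latent_in = torch.cat([x, x])                 # two forward passes folded into one batch
    cond = torch.cat([uncond_emb, text_emb])
    eps_uncond, eps_text = eps_model(latent_in, t, cond=cond).chunk(2)
    # move the prediction away from the unconditional one, toward the text-conditioned one
    return eps_uncond + guidance_scale * (eps_text - eps_uncond)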
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/text_inpainting.py DELETED
@@ -1,302 +0,0 @@
- from typing import Callable, List, Optional, Union
-
- import PIL
- import torch
- from transformers import (
-     CLIPImageProcessor,
-     CLIPSegForImageSegmentation,
-     CLIPSegProcessor,
-     CLIPTextModel,
-     CLIPTokenizer,
- )
-
- from diffusers import DiffusionPipeline
- from diffusers.configuration_utils import FrozenDict
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
- from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
- from diffusers.utils import deprecate, is_accelerate_available, logging
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- class TextInpainting(DiffusionPipeline):
-     r"""
-     Pipeline for text-based inpainting using Stable Diffusion.
-     Uses CLIPSeg to get a mask from the given text, then calls the inpainting pipeline with the generated mask.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         segmentation_model ([`CLIPSegForImageSegmentation`]):
-             CLIPSeg model to generate a mask from the given text. Please refer to the [model card]() for details.
-         segmentation_processor ([`CLIPSegProcessor`]):
-             CLIPSeg processor to extract the image and text features used to generate the mask. Please refer to the
-             [model card](https://huggingface.co/docs/transformers/model_doc/clipseg) for details.
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPImageProcessor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-
-     def __init__(
-         self,
-         segmentation_model: CLIPSegForImageSegmentation,
-         segmentation_processor: CLIPSegProcessor,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} has not set the configuration"
-                 " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
-                 " sure to update the config accordingly, as not setting `skip_prk_steps` in the config might lead to"
-                 " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
-                 " Hub, it would be very nice if you could open a Pull request for the"
-                 " `scheduler/scheduler_config.json` file"
-             )
-             deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["skip_prk_steps"] = True
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
-                 " it only for use cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         self.register_modules(
-             segmentation_model=segmentation_model,
-             segmentation_processor=segmentation_processor,
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
-         r"""
-         Enable sliced attention computation.
-
-         When this option is enabled, the attention module will split the input tensor into slices to compute attention
-         in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
-         Args:
-             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
-                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
-                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
-                 `attention_head_dim` must be a multiple of `slice_size`.
-         """
-         if slice_size == "auto":
-             # half the attention head size is usually a good trade-off between
-             # speed and memory
-             slice_size = self.unet.config.attention_head_dim // 2
-         self.unet.set_attention_slice(slice_size)
-
-     def disable_attention_slicing(self):
-         r"""
-         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
-         back to computing attention in one step.
-         """
-         # set slice_size = `None` to disable `attention slicing`
-         self.enable_attention_slicing(None)
-
-     def enable_sequential_cpu_offload(self):
-         r"""
-         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the unet,
-         text_encoder, vae and safety checker have their state dicts saved to CPU and are then moved to
-         `torch.device('meta')`, loaded onto the GPU only when their specific submodule has its `forward` method called.
-         """
-         if is_accelerate_available():
-             from accelerate import cpu_offload
-         else:
-             raise ImportError("Please install accelerate via `pip install accelerate`")
-
-         device = torch.device("cuda")
-
-         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
-             if cpu_offloaded_model is not None:
-                 cpu_offload(cpu_offloaded_model, device)
-
-     @property
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
-     def _execution_device(self):
-         r"""
-         Returns the device on which the pipeline's models will be executed. After calling
-         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-         hooks.
-         """
-         if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
-             return self.device
-         for module in self.unet.modules():
-             if (
-                 hasattr(module, "_hf_hook")
-                 and hasattr(module._hf_hook, "execution_device")
-                 and module._hf_hook.execution_device is not None
-             ):
-                 return torch.device(module._hf_hook.execution_device)
-         return self.device
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         image: Union[torch.FloatTensor, PIL.Image.Image],
-         text: str,
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[torch.Generator] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         **kwargs,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             image (`PIL.Image.Image`):
-                 `Image`, or tensor representing an image batch, which will be inpainted, *i.e.* parts of the image will
-                 be masked out with `mask_image` and repainted according to `prompt`.
-             text (`str`):
-                 The text to use to generate the mask.
-             height (`int`, *optional*, defaults to 512):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to 512):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`]; will be ignored for others.
-             generator (`torch.Generator`, *optional*):
-                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
-                 deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that will be called every `callback_steps` steps during inference. The function will be
-                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function will be called. If not specified, the callback will be
-                 called at every step.
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
-             When returning a tuple, the first element is a list with the generated images, and the second element is a
-             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-             (nsfw) content, according to the `safety_checker`.
-         """
-
-         # We use the input text to generate the mask
-         inputs = self.segmentation_processor(
-             text=[text], images=[image], padding="max_length", return_tensors="pt"
-         ).to(self.device)
-         outputs = self.segmentation_model(**inputs)
-         mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
-         mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
-
-         # Run the inpainting pipeline with the generated mask
-         inpainting_pipeline = StableDiffusionInpaintPipeline(
-             vae=self.vae,
-             text_encoder=self.text_encoder,
-             tokenizer=self.tokenizer,
-             unet=self.unet,
-             scheduler=self.scheduler,
-             safety_checker=self.safety_checker,
-             feature_extractor=self.feature_extractor,
-         )
-         return inpainting_pipeline(
-             prompt=prompt,
-             image=image,
-             mask_image=mask_pil,
-             height=height,
-             width=width,
-             num_inference_steps=num_inference_steps,
-             guidance_scale=guidance_scale,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             eta=eta,
-             generator=generator,
-             latents=latents,
-             output_type=output_type,
-             return_dict=return_dict,
-             callback=callback,
-             callback_steps=callback_steps,
-         )
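
For orientation, here is a minimal usage sketch of the community pipeline removed above. It is a hedged sketch, not part of the diff: the custom_pipeline="text_inpainting" hook follows the diffusers community-pipeline convention, and the CLIPSeg checkpoint id and image URL are illustrative assumptions.

# Hypothetical usage sketch; checkpoint ids and the URL are illustrative assumptions.
import PIL.Image
import requests
import torch
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=segmentation_model,
    segmentation_processor=processor,
    torch_dtype=torch.float16,
).to("cuda")

url = "https://example.com/room.png"  # placeholder image URL
image = PIL.Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((512, 512))

# CLIPSeg masks the region described by `text`; that region is then repainted from `prompt`.
result = pipe(image=image, text="a glass cup", prompt="a cup of coffee").images[0]
result.save("inpainted.png")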
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py DELETED
@@ -1,335 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import unittest
17
-
18
- import numpy as np
19
-
20
- from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
21
- from diffusers.utils import torch_device
22
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
23
-
24
- from ..test_pipelines_common import PipelineTesterMixin
25
- from .test_kandinsky import Dummies
26
- from .test_kandinsky_img2img import Dummies as Img2ImgDummies
27
- from .test_kandinsky_inpaint import Dummies as InpaintDummies
28
- from .test_kandinsky_prior import Dummies as PriorDummies
29
-
30
-
31
- enable_full_determinism()
32
-
33
-
34
- class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
35
- pipeline_class = KandinskyCombinedPipeline
36
- params = [
37
- "prompt",
38
- ]
39
- batch_params = ["prompt", "negative_prompt"]
40
- required_optional_params = [
41
- "generator",
42
- "height",
43
- "width",
44
- "latents",
45
- "guidance_scale",
46
- "negative_prompt",
47
- "num_inference_steps",
48
- "return_dict",
49
- "guidance_scale",
50
- "num_images_per_prompt",
51
- "output_type",
52
- "return_dict",
53
- ]
54
- test_xformers_attention = False
55
-
56
- def get_dummy_components(self):
57
- dummy = Dummies()
58
- prior_dummy = PriorDummies()
59
- components = dummy.get_dummy_components()
60
-
61
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
62
- return components
63
-
64
- def get_dummy_inputs(self, device, seed=0):
65
- prior_dummy = PriorDummies()
66
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
67
- inputs.update(
68
- {
69
- "height": 64,
70
- "width": 64,
71
- }
72
- )
73
- return inputs
74
-
75
- def test_kandinsky(self):
76
- device = "cpu"
77
-
78
- components = self.get_dummy_components()
79
-
80
- pipe = self.pipeline_class(**components)
81
- pipe = pipe.to(device)
82
-
83
- pipe.set_progress_bar_config(disable=None)
84
-
85
- output = pipe(**self.get_dummy_inputs(device))
86
- image = output.images
87
-
88
- image_from_tuple = pipe(
89
- **self.get_dummy_inputs(device),
90
- return_dict=False,
91
- )[0]
92
-
93
- image_slice = image[0, -3:, -3:, -1]
94
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
95
-
96
- assert image.shape == (1, 64, 64, 3)
97
-
98
- expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068])
99
-
100
- assert (
101
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
102
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
103
- assert (
104
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
105
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
106
-
107
- @require_torch_gpu
108
- def test_offloads(self):
109
- pipes = []
110
- components = self.get_dummy_components()
111
- sd_pipe = self.pipeline_class(**components).to(torch_device)
112
- pipes.append(sd_pipe)
113
-
114
- components = self.get_dummy_components()
115
- sd_pipe = self.pipeline_class(**components)
116
- sd_pipe.enable_model_cpu_offload()
117
- pipes.append(sd_pipe)
118
-
119
- components = self.get_dummy_components()
120
- sd_pipe = self.pipeline_class(**components)
121
- sd_pipe.enable_sequential_cpu_offload()
122
- pipes.append(sd_pipe)
123
-
124
- image_slices = []
125
- for pipe in pipes:
126
- inputs = self.get_dummy_inputs(torch_device)
127
- image = pipe(**inputs).images
128
-
129
- image_slices.append(image[0, -3:, -3:, -1].flatten())
130
-
131
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
132
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
133
-
134
- def test_inference_batch_single_identical(self):
135
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
136
-
137
-
138
- class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
139
- pipeline_class = KandinskyImg2ImgCombinedPipeline
140
- params = ["prompt", "image"]
141
- batch_params = ["prompt", "negative_prompt", "image"]
142
- required_optional_params = [
143
- "generator",
144
- "height",
145
- "width",
146
- "latents",
147
- "guidance_scale",
148
- "negative_prompt",
149
- "num_inference_steps",
150
- "return_dict",
151
- "guidance_scale",
152
- "num_images_per_prompt",
153
- "output_type",
154
- "return_dict",
155
- ]
156
- test_xformers_attention = False
157
-
158
- def get_dummy_components(self):
159
- dummy = Img2ImgDummies()
160
- prior_dummy = PriorDummies()
161
- components = dummy.get_dummy_components()
162
-
163
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
164
- return components
165
-
166
- def get_dummy_inputs(self, device, seed=0):
167
- prior_dummy = PriorDummies()
168
- dummy = Img2ImgDummies()
169
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
170
- inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
171
- inputs.pop("image_embeds")
172
- inputs.pop("negative_image_embeds")
173
- return inputs
174
-
175
- def test_kandinsky(self):
176
- device = "cpu"
177
-
178
- components = self.get_dummy_components()
179
-
180
- pipe = self.pipeline_class(**components)
181
- pipe = pipe.to(device)
182
-
183
- pipe.set_progress_bar_config(disable=None)
184
-
185
- output = pipe(**self.get_dummy_inputs(device))
186
- image = output.images
187
-
188
- image_from_tuple = pipe(
189
- **self.get_dummy_inputs(device),
190
- return_dict=False,
191
- )[0]
192
-
193
- image_slice = image[0, -3:, -3:, -1]
194
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
195
-
196
- assert image.shape == (1, 64, 64, 3)
197
-
198
- expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053])
199
-
200
- assert (
201
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
202
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
203
- assert (
204
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
205
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
206
-
207
- @require_torch_gpu
208
- def test_offloads(self):
209
- pipes = []
210
- components = self.get_dummy_components()
211
- sd_pipe = self.pipeline_class(**components).to(torch_device)
212
- pipes.append(sd_pipe)
213
-
214
- components = self.get_dummy_components()
215
- sd_pipe = self.pipeline_class(**components)
216
- sd_pipe.enable_model_cpu_offload()
217
- pipes.append(sd_pipe)
218
-
219
- components = self.get_dummy_components()
220
- sd_pipe = self.pipeline_class(**components)
221
- sd_pipe.enable_sequential_cpu_offload()
222
- pipes.append(sd_pipe)
223
-
224
- image_slices = []
225
- for pipe in pipes:
226
- inputs = self.get_dummy_inputs(torch_device)
227
- image = pipe(**inputs).images
228
-
229
- image_slices.append(image[0, -3:, -3:, -1].flatten())
230
-
231
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
232
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
233
-
234
- def test_inference_batch_single_identical(self):
235
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
236
-
237
-
238
- class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
239
- pipeline_class = KandinskyInpaintCombinedPipeline
240
- params = ["prompt", "image", "mask_image"]
241
- batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
242
- required_optional_params = [
243
- "generator",
244
- "height",
245
- "width",
246
- "latents",
247
- "guidance_scale",
248
- "negative_prompt",
249
- "num_inference_steps",
250
- "return_dict",
251
- "guidance_scale",
252
- "num_images_per_prompt",
253
- "output_type",
254
- "return_dict",
255
- ]
256
- test_xformers_attention = False
257
-
258
- def get_dummy_components(self):
259
- dummy = InpaintDummies()
260
- prior_dummy = PriorDummies()
261
- components = dummy.get_dummy_components()
262
-
263
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
264
- return components
265
-
266
- def get_dummy_inputs(self, device, seed=0):
267
- prior_dummy = PriorDummies()
268
- dummy = InpaintDummies()
269
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
270
- inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
271
- inputs.pop("image_embeds")
272
- inputs.pop("negative_image_embeds")
273
- return inputs
274
-
275
- def test_kandinsky(self):
276
- device = "cpu"
277
-
278
- components = self.get_dummy_components()
279
-
280
- pipe = self.pipeline_class(**components)
281
- pipe = pipe.to(device)
282
-
283
- pipe.set_progress_bar_config(disable=None)
284
-
285
- output = pipe(**self.get_dummy_inputs(device))
286
- image = output.images
287
-
288
- image_from_tuple = pipe(
289
- **self.get_dummy_inputs(device),
290
- return_dict=False,
291
- )[0]
292
-
293
- image_slice = image[0, -3:, -3:, -1]
294
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
295
-
296
- assert image.shape == (1, 64, 64, 3)
297
-
298
- expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530])
299
-
300
- assert (
301
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
302
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
303
- assert (
304
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
305
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
306
-
307
- @require_torch_gpu
308
- def test_offloads(self):
309
- pipes = []
310
- components = self.get_dummy_components()
311
- sd_pipe = self.pipeline_class(**components).to(torch_device)
312
- pipes.append(sd_pipe)
313
-
314
- components = self.get_dummy_components()
315
- sd_pipe = self.pipeline_class(**components)
316
- sd_pipe.enable_model_cpu_offload()
317
- pipes.append(sd_pipe)
318
-
319
- components = self.get_dummy_components()
320
- sd_pipe = self.pipeline_class(**components)
321
- sd_pipe.enable_sequential_cpu_offload()
322
- pipes.append(sd_pipe)
323
-
324
- image_slices = []
325
- for pipe in pipes:
326
- inputs = self.get_dummy_inputs(torch_device)
327
- image = pipe(**inputs).images
328
-
329
- image_slices.append(image[0, -3:, -3:, -1].flatten())
330
-
331
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
332
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
333
-
334
- def test_inference_batch_single_identical(self):
335
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
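
For context, a hedged sketch of exercising the combined pipeline these fast tests cover; the `kandinsky-community/kandinsky-2-1` checkpoint id follows the Hub naming convention and is an assumption, not something stated in this diff.

# Hedged usage sketch; checkpoint id and the fp16 choice are assumptions.
import torch
from diffusers import KandinskyCombinedPipeline

pipe = KandinskyCombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # one of the offload paths compared in test_offloads
image = pipe("a portrait of a corgi, 4k photo", num_inference_steps=25).images[0]
image.save("corgi.png")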
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_rpn_head.py DELETED
@@ -1,171 +0,0 @@
- import copy
- import warnings
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv import ConfigDict
- from mmcv.cnn import normal_init
- from mmcv.ops import nms
-
- from ..builder import HEADS
- from .guided_anchor_head import GuidedAnchorHead
- from .rpn_test_mixin import RPNTestMixin
-
-
- @HEADS.register_module()
- class GARPNHead(RPNTestMixin, GuidedAnchorHead):
-     """Guided-Anchor-based RPN head."""
-
-     def __init__(self, in_channels, **kwargs):
-         super(GARPNHead, self).__init__(1, in_channels, **kwargs)
-
-     def _init_layers(self):
-         """Initialize layers of the head."""
-         self.rpn_conv = nn.Conv2d(
-             self.in_channels, self.feat_channels, 3, padding=1)
-         super(GARPNHead, self)._init_layers()
-
-     def init_weights(self):
-         """Initialize weights of the head."""
-         normal_init(self.rpn_conv, std=0.01)
-         super(GARPNHead, self).init_weights()
-
-     def forward_single(self, x):
-         """Forward feature of a single scale level."""
-
-         x = self.rpn_conv(x)
-         x = F.relu(x, inplace=True)
-         (cls_score, bbox_pred, shape_pred,
-          loc_pred) = super(GARPNHead, self).forward_single(x)
-         return cls_score, bbox_pred, shape_pred, loc_pred
-
-     def loss(self,
-              cls_scores,
-              bbox_preds,
-              shape_preds,
-              loc_preds,
-              gt_bboxes,
-              img_metas,
-              gt_bboxes_ignore=None):
-         losses = super(GARPNHead, self).loss(
-             cls_scores,
-             bbox_preds,
-             shape_preds,
-             loc_preds,
-             gt_bboxes,
-             None,
-             img_metas,
-             gt_bboxes_ignore=gt_bboxes_ignore)
-         return dict(
-             loss_rpn_cls=losses['loss_cls'],
-             loss_rpn_bbox=losses['loss_bbox'],
-             loss_anchor_shape=losses['loss_shape'],
-             loss_anchor_loc=losses['loss_loc'])
-
-     def _get_bboxes_single(self,
-                            cls_scores,
-                            bbox_preds,
-                            mlvl_anchors,
-                            mlvl_masks,
-                            img_shape,
-                            scale_factor,
-                            cfg,
-                            rescale=False):
-         cfg = self.test_cfg if cfg is None else cfg
-
-         cfg = copy.deepcopy(cfg)
-
-         # deprecated arguments warning
-         if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
-             warnings.warn(
-                 'In rpn_proposal or test_cfg, '
-                 'nms_thr has been moved into a dict named nms as '
-                 'iou_threshold, and max_num has been renamed max_per_img; '
-                 'the original argument names and the original way to specify '
-                 'the iou_threshold of NMS will be deprecated.')
-         if 'nms' not in cfg:
-             cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
-         if 'max_num' in cfg:
-             if 'max_per_img' in cfg:
-                 assert cfg.max_num == cfg.max_per_img, f'You ' \
-                     f'set max_num and max_per_img at the same time, ' \
-                     f'but got {cfg.max_num} ' \
-                     f'and {cfg.max_per_img} respectively. ' \
-                     'Please delete max_num, which will be deprecated.'
-             else:
-                 cfg.max_per_img = cfg.max_num
-         if 'nms_thr' in cfg:
-             assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
-                 f'iou_threshold in nms and ' \
-                 f'nms_thr at the same time, but got ' \
-                 f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
-                 f' respectively. Please delete ' \
-                 f'nms_thr, which will be deprecated.'
-
-         assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only supports ' \
-             'naive nms.'
-
-         mlvl_proposals = []
-         for idx in range(len(cls_scores)):
-             rpn_cls_score = cls_scores[idx]
-             rpn_bbox_pred = bbox_preds[idx]
-             anchors = mlvl_anchors[idx]
-             mask = mlvl_masks[idx]
-             assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
-             # if no location is kept, end.
-             if mask.sum() == 0:
-                 continue
-             rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
-             if self.use_sigmoid_cls:
-                 rpn_cls_score = rpn_cls_score.reshape(-1)
-                 scores = rpn_cls_score.sigmoid()
-             else:
-                 rpn_cls_score = rpn_cls_score.reshape(-1, 2)
-                 # remember that we set FG labels to [0, num_class-1]
-                 # since mmdet v2.0
-                 # BG cat_id: num_class
-                 scores = rpn_cls_score.softmax(dim=1)[:, :-1]
-             # filter scores, bbox_pred w.r.t. mask.
-             # anchors are filtered in get_anchors() beforehand.
-             scores = scores[mask]
-             rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
-                                                                    4)[mask, :]
-             if scores.dim() == 0:
-                 rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
-                 anchors = anchors.unsqueeze(0)
-                 scores = scores.unsqueeze(0)
-             # filter anchors, bbox_pred, scores w.r.t. scores
-             if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
-                 _, topk_inds = scores.topk(cfg.nms_pre)
-                 rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
-                 anchors = anchors[topk_inds, :]
-                 scores = scores[topk_inds]
-             # get proposals w.r.t. anchors and rpn_bbox_pred
-             proposals = self.bbox_coder.decode(
-                 anchors, rpn_bbox_pred, max_shape=img_shape)
-             # filter out too small bboxes
-             if cfg.min_bbox_size > 0:
-                 w = proposals[:, 2] - proposals[:, 0]
-                 h = proposals[:, 3] - proposals[:, 1]
-                 valid_inds = torch.nonzero(
-                     (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
-                     as_tuple=False).squeeze()
-                 proposals = proposals[valid_inds, :]
-                 scores = scores[valid_inds]
-             # NMS in current level
-             proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
-             proposals = proposals[:cfg.nms_post, :]
-             mlvl_proposals.append(proposals)
-         proposals = torch.cat(mlvl_proposals, 0)
-         if cfg.get('nms_across_levels', False):
-             # NMS across multiple levels
-             proposals, _ = nms(proposals[:, :4], proposals[:, -1],
-                                cfg.nms.iou_threshold)
-             proposals = proposals[:cfg.max_per_img, :]
-         else:
-             scores = proposals[:, 4]
-             num = min(cfg.max_per_img, proposals.shape[0])
-             _, topk_inds = scores.topk(num)
-             proposals = proposals[topk_inds, :]
-         return proposals
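
To make the migration described by the deprecation warnings above concrete, here is a small sketch of the old and new test-config shapes accepted by `_get_bboxes_single` (the numeric values are illustrative).

# Illustrative values only.
# Old style (deprecated): flat `nms_thr` / `max_num` keys, rewritten on the fly above.
old_test_cfg = dict(nms_pre=2000, nms_post=1000, max_num=300, nms_thr=0.7, min_bbox_size=0)

# New style expected going forward: a nested `nms` dict plus `max_per_img`.
new_test_cfg = dict(
    nms_pre=2000,
    nms_post=1000,
    max_per_img=300,
    nms=dict(type='nms', iou_threshold=0.7),
    min_bbox_size=0,
)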
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py DELETED
@@ -1,44 +0,0 @@
- from .builder import RUNNER_BUILDERS, RUNNERS
-
-
- @RUNNER_BUILDERS.register_module()
- class DefaultRunnerConstructor:
-     """Default constructor for runners.
-
-     Customize an existing `Runner`, such as `EpochBasedRunner`, through a `RunnerConstructor`.
-     For example, we can inject new properties and functions into a `Runner`.
-
-     Example:
-         >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner
-         >>> # Define a new RunnerConstructor
-         >>> @RUNNER_BUILDERS.register_module()
-         >>> class MyRunnerConstructor:
-         ...     def __init__(self, runner_cfg, default_args=None):
-         ...         if not isinstance(runner_cfg, dict):
-         ...             raise TypeError('runner_cfg should be a dict',
-         ...                             f'but got {type(runner_cfg)}')
-         ...         self.runner_cfg = runner_cfg
-         ...         self.default_args = default_args
-         ...
-         ...     def __call__(self):
-         ...         runner = RUNNERS.build(self.runner_cfg,
-         ...                                default_args=self.default_args)
-         ...         # Add new properties to the existing runner
-         ...         runner.my_name = 'my_runner'
-         ...         runner.my_function = lambda self: print(self.my_name)
-         ...         ...
-         >>> # build your runner
-         >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
-         ...                   constructor='MyRunnerConstructor')
-         >>> runner = build_runner(runner_cfg)
-     """
-
-     def __init__(self, runner_cfg, default_args=None):
-         if not isinstance(runner_cfg, dict):
-             raise TypeError('runner_cfg should be a dict',
-                             f'but got {type(runner_cfg)}')
-         self.runner_cfg = runner_cfg
-         self.default_args = default_args
-
-     def __call__(self):
-         return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
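
As a complement to the docstring example, a hedged sketch of the default path, where no `constructor` key is given and this class builds the runner; the `model`, `logger`, and work dir are assumptions, not part of the diff.

# Hedged sketch; `model` and `logger` are assumed to be defined elsewhere.
from annotator.uniformer.mmcv.runner import build_runner

runner = build_runner(
    dict(type="EpochBasedRunner", max_epochs=40),  # no `constructor` key -> DefaultRunnerConstructor
    default_args=dict(model=model, work_dir="./work_dir", logger=logger),
)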
 
spaces/Artificio/AdversarialArt/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: AdversarialArt
- emoji: 🏢
- colorFrom: blue
- colorTo: green
- sdk: gradio
- sdk_version: 3.1.6
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Artrajz/vits-simple-api/bert_vits2/text/tone_sandhi.py DELETED
@@ -1,769 +0,0 @@
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from typing import List
- from typing import Tuple
-
- import jieba
- from pypinyin import lazy_pinyin
- from pypinyin import Style
-
-
- class ToneSandhi:
-     def __init__(self):
-         self.must_neural_tone_words = {
-             "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", "馄饨",
-             "风筝", "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", "铃铛", "铁匠",
-             "钥匙", "里脊", "里头", "部分", "那么", "道士", "造化", "迷糊", "连累", "这么",
-             "这个", "运气", "过去", "软和", "转悠", "踏实", "跳蚤", "跟头", "趔趄", "财主",
-             "豆腐", "讲究", "记性", "记号", "认识", "规矩", "见识", "裁缝", "补丁", "衣裳",
-             "衣服", "衙门", "街坊", "行李", "行当", "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄",
-             "萝卜", "荸荠", "苗条", "苗头", "苍蝇", "芝麻", "舒服", "舒坦", "舌头", "自在",
-             "膏药", "脾气", "脑袋", "脊梁", "能耐", "胳膊", "胭脂", "胡萝", "胡琴", "胡同",
-             "聪明", "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆", "老头", "老太",
-             "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂", "精神",
-             "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", "笑话", "窟窿",
-             "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", "秀才", "福气", "祖宗",
-             "砚台", "码头", "石榴", "石头", "石匠", "知识", "眼睛", "眯缝", "眨巴", "眉毛",
-             "相声", "盘算", "白净", "痢疾", "痛快", "疟疾", "疙瘩", "疏忽", "畜生", "生意",
-             "甘蔗", "琵琶", "琢磨", "琉璃", "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务",
-             "牲口", "牙碜", "牌楼", "爽快", "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心",
-             "炊帚", "灯笼", "火候", "漂亮", "滑溜", "溜达", "温和", "清楚", "消息", "浪头",
-             "活泼", "比方", "正经", "欺负", "模糊", "槟榔", "棺材", "棒槌", "棉花", "核桃",
-             "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事", "木头", "木匠", "朋友",
-             "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾", "收成", "提防",
-             "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", "招牌", "招呼", "抬举",
-             "护士", "折腾", "扫帚", "打量", "打算", "打点", "打扮", "打听", "打发", "扎实",
-             "扁担", "戒指", "懒得", "意识", "意思", "情形", "悟性", "怪物", "思量", "怎么",
-             "念头", "念叨", "快活", "忙活", "志气", "心思", "得罪", "张罗", "弟兄", "开通",
-             "应酬", "庄稼", "干事", "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌",
-             "差事", "工夫", "岁数", "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头",
-             "对付", "寡妇", "家伙", "客气", "实在", "官司", "学问", "学生", "字号", "嫁妆",
-             "媳妇", "媒人", "婆家", "娘家", "委屈", "姑娘", "姐夫", "妯娌", "妥当", "妖精",
-             "奴才", "女婿", "头发", "太阳", "大爷", "大方", "大意", "大夫", "多少", "多么",
-             "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴", "嘱咐", "嘟囔", "嘀咕",
-             "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", "哈欠", "哆嗦", "咳嗽", "和尚",
-             "告诉", "告示", "含糊", "吓唬", "后头", "名字", "名堂", "合同", "吆喝", "叫唤",
-             "口袋", "厚道", "厉害", "千斤", "包袱", "包涵", "匀称", "勤快", "动静", "动弹",
-             "功夫", "力气", "前头", "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析",
-             "出息", "凑合", "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟",
-             "便宜", "使唤", "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么",
-             "人家", "亲戚", "亲家", "交情", "云彩", "事情", "买卖", "主意", "丫头", "丧气",
-             "两口", "东西", "东家", "世故", "不由", "不在", "下水", "下巴", "上头", "上司",
-             "丈夫", "丈人", "一辈", "那个", "菩萨", "父亲", "母亲", "咕噜", "邋遢", "费用",
-             "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", "幸福", "熟悉", "计划", "扑腾",
-             "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱", "凤凰", "拖沓", "寒碜",
-             "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱", "扫把", "惦记",
-         }
-         self.must_not_neural_tone_words = {
-             "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子",
-             "人人", "虎虎",
-         }
-         self.punc = ":,;。?!“”‘’':,;.?!"
-
-     # the meaning of jieba pos tags: https://blog.csdn.net/weixin_44174352/article/details/113731041
-     # e.g.
-     # word: "家里"
-     # pos: "s"
-     # finals: ['ia1', 'i3']
-     def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:
-         # reduplicated words for n., v. and a., e.g. 奶奶, 试试, 旺旺
-         for j, item in enumerate(word):
-             if (
-                 j - 1 >= 0
-                 and item == word[j - 1]
-                 and pos[0] in {"n", "v", "a"}
-                 and word not in self.must_not_neural_tone_words
-             ):
-                 finals[j] = finals[j][:-1] + "5"
-         ge_idx = word.find("个")
-         if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
-             finals[-1] = finals[-1][:-1] + "5"
-         elif len(word) >= 1 and word[-1] in "的地得":
-             finals[-1] = finals[-1][:-1] + "5"
-         # e.g. 走了, 看着, 去过
-         # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
-         #     finals[-1] = finals[-1][:-1] + "5"
-         elif (
-             len(word) > 1
-             and word[-1] in "们子"
-             and pos in {"r", "n"}
-             and word not in self.must_not_neural_tone_words
-         ):
-             finals[-1] = finals[-1][:-1] + "5"
-         # e.g. 桌上, 地下, 家里
-         elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
-             finals[-1] = finals[-1][:-1] + "5"
-         # e.g. 上来, 下去
-         elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
-             finals[-1] = finals[-1][:-1] + "5"
-         # "个" used as a measure word
-         elif (
-             ge_idx >= 1
-             and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in "几有两半多各整每做是")
-         ) or word == "个":
-             finals[ge_idx] = finals[ge_idx][:-1] + "5"
-         else:
-             if (
-                 word in self.must_neural_tone_words
-                 or word[-2:] in self.must_neural_tone_words
-             ):
-                 finals[-1] = finals[-1][:-1] + "5"
-
-         word_list = self._split_word(word)
-         finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
-         for i, word in enumerate(word_list):
-             # conventional neutral tone in Chinese
-             if (
-                 word in self.must_neural_tone_words
-                 or word[-2:] in self.must_neural_tone_words
-             ):
-                 finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
-         finals = sum(finals_list, [])
-         return finals
-
-     def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
-         # e.g. 看不懂
-         if len(word) == 3 and word[1] == "不":
-             finals[1] = finals[1][:-1] + "5"
-         else:
-             for i, char in enumerate(word):
-                 # "不" before tone 4 should be bu2, e.g. 不怕
-                 if char == "不" and i + 1 < len(word) and finals[i + 1][-1] == "4":
-                     finals[i] = finals[i][:-1] + "2"
-         return finals
-
-     def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
-         # "一" in number sequences, e.g. 一零零, 二一零
-         if word.find("一") != -1 and all(
-             [item.isnumeric() for item in word if item != "一"]
-         ):
-             return finals
-         # "一" between reduplicated words should be yi5, e.g. 看一看
-         elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
-             finals[1] = finals[1][:-1] + "5"
-         # when "一" is an ordinal word, it should be yi1
-         elif word.startswith("第一"):
-             finals[1] = finals[1][:-1] + "1"
-         else:
-             for i, char in enumerate(word):
-                 if char == "一" and i + 1 < len(word):
-                     # "一" before tone 4 should be yi2, e.g. 一段
-                     if finals[i + 1][-1] == "4":
-                         finals[i] = finals[i][:-1] + "2"
-                     # "一" before a non-tone-4 syllable should be yi4, e.g. 一天
-                     else:
-                         # if "一" is followed by punctuation, it still reads as tone 1
-                         if word[i + 1] not in self.punc:
-                             finals[i] = finals[i][:-1] + "4"
-         return finals
-
-     def _split_word(self, word: str) -> List[str]:
-         word_list = jieba.cut_for_search(word)
-         word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
-         first_subword = word_list[0]
-         first_begin_idx = word.find(first_subword)
-         if first_begin_idx == 0:
-             second_subword = word[len(first_subword) :]
-             new_word_list = [first_subword, second_subword]
-         else:
-             second_subword = word[: -len(first_subword)]
-             new_word_list = [second_subword, first_subword]
-         return new_word_list
-
-     def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
-         if len(word) == 2 and self._all_tone_three(finals):
-             finals[0] = finals[0][:-1] + "2"
-         elif len(word) == 3:
-             word_list = self._split_word(word)
-             if self._all_tone_three(finals):
-                 # disyllabic + monosyllabic, e.g. 蒙古/包
-                 if len(word_list[0]) == 2:
-                     finals[0] = finals[0][:-1] + "2"
-                     finals[1] = finals[1][:-1] + "2"
-                 # monosyllabic + disyllabic, e.g. 纸/老虎
-                 elif len(word_list[0]) == 1:
-                     finals[1] = finals[1][:-1] + "2"
-             else:
-                 finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
-                 if len(finals_list) == 2:
-                     for i, sub in enumerate(finals_list):
-                         # e.g. 所有/人
-                         if self._all_tone_three(sub) and len(sub) == 2:
-                             finals_list[i][0] = finals_list[i][0][:-1] + "2"
-                         # e.g. 好/喜欢
-                         elif (
-                             i == 1
-                             and not self._all_tone_three(sub)
-                             and finals_list[i][0][-1] == "3"
-                             and finals_list[0][-1][-1] == "3"
-                         ):
-                             finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
-                     finals = sum(finals_list, [])
-         # split an idiom into two two-character words
-         elif len(word) == 4:
-             finals_list = [finals[:2], finals[2:]]
-             finals = []
-             for sub in finals_list:
-                 if self._all_tone_three(sub):
-                     sub[0] = sub[0][:-1] + "2"
-                 finals += sub
-
-         return finals
-
-     def _all_tone_three(self, finals: List[str]) -> bool:
-         return all(x[-1] == "3" for x in finals)
-
-     # merge "不" and the word behind it
-     # if not merged, "不" sometimes appears alone according to jieba, which may cause sandhi errors
-     def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-         new_seg = []
-         last_word = ""
-         for word, pos in seg:
-             if last_word == "不":
-                 word = last_word + word
-             if word != "不":
-                 new_seg.append((word, pos))
-             last_word = word[:]
-         if last_word == "不":
-             new_seg.append((last_word, "d"))
-             last_word = ""
-         return new_seg
-
-     # function 1: merge "一" and the reduplicated words on its left and right, e.g. "听","一","听" -> "听一听"
-     # function 2: merge a single "一" and the word behind it
-     # if not merged, "一" sometimes appears alone according to jieba, which may cause sandhi errors
-     # e.g.
-     # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
-     # output seg: [['听一听', 'v']]
-     def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-         new_seg = []
-         # function 1
-         for i, (word, pos) in enumerate(seg):
-             if (
-                 i - 1 >= 0
-                 and word == "一"
-                 and i + 1 < len(seg)
-                 and seg[i - 1][0] == seg[i + 1][0]
-                 and seg[i - 1][1] == "v"
-             ):
-                 new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
-             else:
-                 if (
-                     i - 2 >= 0
-                     and seg[i - 1][0] == "一"
-                     and seg[i - 2][0] == word
-                     and pos == "v"
-                 ):
-                     continue
-                 else:
-                     new_seg.append([word, pos])
-         seg = new_seg
-         new_seg = []
-         # function 2
-         for i, (word, pos) in enumerate(seg):
-             if new_seg and new_seg[-1][0] == "一":
-                 new_seg[-1][0] = new_seg[-1][0] + word
-             else:
-                 new_seg.append([word, pos])
-         return new_seg
-
-     # the first and the second words are both all_tone_three
-     def _merge_continuous_three_tones(
-         self, seg: List[Tuple[str, str]]
-     ) -> List[Tuple[str, str]]:
-         new_seg = []
-         sub_finals_list = [
-             lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
-             for (word, pos) in seg
-         ]
-         assert len(sub_finals_list) == len(seg)
-         merge_last = [False] * len(seg)
-         for i, (word, pos) in enumerate(seg):
-             if (
-                 i - 1 >= 0
-                 and self._all_tone_three(sub_finals_list[i - 1])
-                 and self._all_tone_three(sub_finals_list[i])
-                 and not merge_last[i - 1]
-             ):
-                 # if the last word is a reduplication, don't merge, because reduplications need to go through _neural_sandhi
-                 if (
-                     not self._is_reduplication(seg[i - 1][0])
-                     and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
-                 ):
-                     new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
-                     merge_last[i] = True
-                 else:
-                     new_seg.append([word, pos])
-             else:
-                 new_seg.append([word, pos])
-
-         return new_seg
-
-     def _is_reduplication(self, word: str) -> bool:
-         return len(word) == 2 and word[0] == word[1]
-
-     # the last char of the first word and the first char of the second word are both tone three
-     def _merge_continuous_three_tones_2(
-         self, seg: List[Tuple[str, str]]
-     ) -> List[Tuple[str, str]]:
-         new_seg = []
-         sub_finals_list = [
-             lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
-             for (word, pos) in seg
-         ]
-         assert len(sub_finals_list) == len(seg)
-         merge_last = [False] * len(seg)
-         for i, (word, pos) in enumerate(seg):
-             if (
-                 i - 1 >= 0
-                 and sub_finals_list[i - 1][-1][-1] == "3"
-                 and sub_finals_list[i][0][-1] == "3"
-                 and not merge_last[i - 1]
-             ):
-                 # if the last word is a reduplication, don't merge, because reduplications need to go through _neural_sandhi
-                 if (
-                     not self._is_reduplication(seg[i - 1][0])
-                     and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
-                 ):
-                     new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
-                     merge_last[i] = True
-                 else:
-                     new_seg.append([word, pos])
-             else:
-                 new_seg.append([word, pos])
-         return new_seg
-
-     def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-         new_seg = []
-         for i, (word, pos) in enumerate(seg):
-             if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
-                 new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
-             else:
-                 new_seg.append([word, pos])
-         return new_seg
-
-     def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-         new_seg = []
-         for i, (word, pos) in enumerate(seg):
-             if new_seg and word == new_seg[-1][0]:
-                 new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
-             else:
-                 new_seg.append([word, pos])
-         return new_seg
-
-     def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-         seg = self._merge_bu(seg)
-         try:
-             seg = self._merge_yi(seg)
-         except Exception:
-             print("_merge_yi failed")
-         seg = self._merge_reduplication(seg)
-         seg = self._merge_continuous_three_tones(seg)
-         seg = self._merge_continuous_three_tones_2(seg)
-         seg = self._merge_er(seg)
-         return seg
-
-     def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:
-         finals = self._bu_sandhi(word, finals)
-         finals = self._yi_sandhi(word, finals)
-         finals = self._neural_sandhi(word, pos, finals)
-         finals = self._three_sandhi(word, finals)
-         return finals
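
To make the rule flow concrete, here is a minimal sketch of applying the class above; it assumes `ToneSandhi` is importable from this module, and the printed finals are illustrative.

# Minimal sketch; assumes ToneSandhi from the module above is in scope.
from pypinyin import lazy_pinyin, Style

sandhi = ToneSandhi()
word, pos = "老虎", "n"  # two third-tone syllables
finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
# _three_sandhi turns 3-3 into 2-3, so ['ao3', 'u3'] becomes ['ao2', 'u3'].
print(sandhi.modified_tone(word, pos, finals))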
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/filesize.py DELETED
@@ -1,89 +0,0 @@
- # coding: utf-8
- """Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
-
- The functions declared in this module should cover the different
- use cases needed to generate a string representation of a file size
- using several different units. Since there are many standards regarding
- file size units, three different functions have been implemented.
-
- See Also:
-     * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
-
- """
-
- __all__ = ["decimal"]
-
- from typing import Iterable, List, Optional, Tuple
-
-
- def _to_str(
-     size: int,
-     suffixes: Iterable[str],
-     base: int,
-     *,
-     precision: Optional[int] = 1,
-     separator: Optional[str] = " ",
- ) -> str:
-     if size == 1:
-         return "1 byte"
-     elif size < base:
-         return "{:,} bytes".format(size)
-
-     for i, suffix in enumerate(suffixes, 2):  # noqa: B007
-         unit = base**i
-         if size < unit:
-             break
-     return "{:,.{precision}f}{separator}{}".format(
-         (base * size / unit),
-         suffix,
-         precision=precision,
-         separator=separator,
-     )
-
-
- def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
-     """Pick a suffix and base for the given size."""
-     for i, suffix in enumerate(suffixes):
-         unit = base**i
-         if size < unit * base:
-             break
-     return unit, suffix
-
-
- def decimal(
-     size: int,
-     *,
-     precision: Optional[int] = 1,
-     separator: Optional[str] = " ",
- ) -> str:
-     """Convert a filesize into a string (powers of 1000, SI prefixes).
-
-     In this convention, ``1000 B = 1 kB``.
-
-     This is typically the format used to advertise the storage
-     capacity of USB flash drives and the like (*256 MB* meaning
-     actually a storage capacity of more than *256 000 000 B*),
-     or used by **Mac OS X** since v10.6 to report file sizes.
-
-     Arguments:
-         int (size): A file size.
-         int (precision): The number of decimal places to include (default = 1).
-         str (separator): The string to separate the value from the units (default = " ").
-
-     Returns:
-         `str`: A string containing an abbreviated file size and units.
-
-     Example:
-         >>> filesize.decimal(30000)
-         '30.0 kB'
-         >>> filesize.decimal(30000, precision=2, separator="")
-         '30.00kB'
-
-     """
-     return _to_str(
-         size,
-         ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
-         1000,
-         precision=precision,
-         separator=separator,
-     )
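
A few illustrative calls against the helpers above; the first two outputs follow the doctest in `decimal`, and importing the vendored path directly is shown only for illustration (in application code you would normally use the upstream `rich` package).

# Illustrative only.
from pip._vendor.rich import filesize

print(filesize.decimal(1))      # '1 byte'
print(filesize.decimal(30000))  # '30.0 kB'
# 2_000_000 falls into the MB bucket: unit 10**6, suffix 'MB'.
print(filesize.pick_unit_and_suffix(2_000_000, ["bytes", "kB", "MB"], 1000))  # (1000000, 'MB')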
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py DELETED
@@ -1,8 +0,0 @@
- from ..common.optim import SGD as optimizer
- from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
- from ..common.data.coco import dataloader
- from ..common.models.mask_rcnn_fpn import model
- from ..common.train import train
-
- model.backbone.bottom_up.freeze_at = 2
- train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
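
As a hedged sketch, a LazyConfig file like the one above can be loaded and instantiated with detectron2's standard loader; the relative path below is illustrative and assumes a detectron2-style checkout layout.

# Hedged sketch; the config path is an assumption.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")
model = instantiate(cfg.model)       # build the Mask R-CNN FPN model from the config node
cfg.optimizer.params.model = model   # the SGD node needs the model to collect parameters
optimizer = instantiate(cfg.optimizer)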
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py DELETED
@@ -1,406 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- import os
- import sys
- import tempfile
- from contextlib import ExitStack, contextmanager
- from copy import deepcopy
- from unittest import mock
- import torch
- from torch import nn
-
- # need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
- import detectron2  # noqa F401
- from detectron2.structures import Boxes, Instances
- from detectron2.utils.env import _import_file
-
- _counter = 0
-
-
- def _clear_jit_cache():
-     from torch.jit._recursive import concrete_type_store
-     from torch.jit._state import _jit_caching_layer
-
-     concrete_type_store.type_store.clear()  # for modules
-     _jit_caching_layer.clear()  # for free functions
-
-
- def _add_instances_conversion_methods(newInstances):
-     """
-     Add from_instances methods to the scripted Instances class.
-     """
-     cls_name = newInstances.__name__
-
-     @torch.jit.unused
-     def from_instances(instances: Instances):
-         """
-         Create scripted Instances from original Instances
-         """
-         fields = instances.get_fields()
-         image_size = instances.image_size
-         ret = newInstances(image_size)
-         for name, val in fields.items():
-             assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
-             setattr(ret, name, deepcopy(val))
-         return ret
-
-     newInstances.from_instances = from_instances
-
-
- @contextmanager
- def patch_instances(fields):
-     """
-     A contextmanager, under which the Instances class in detectron2 is replaced
-     by a statically-typed scriptable class, defined by `fields`.
-     See more in `scripting_with_instances`.
-     """
-
-     with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
-         mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
-     ) as f:
-         try:
-             # Objects that use Instances should not reuse previously-compiled
-             # results in cache, because `Instances` could be a new class each time.
-             _clear_jit_cache()
-
-             cls_name, s = _gen_instance_module(fields)
-             f.write(s)
-             f.flush()
-             f.close()
-
-             module = _import(f.name)
-             new_instances = getattr(module, cls_name)
-             _ = torch.jit.script(new_instances)
-             # let torchscript think Instances was scripted already
-             Instances.__torch_script_class__ = True
-             # let torchscript find new_instances when looking for the jit type of Instances
-             Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
-
-             _add_instances_conversion_methods(new_instances)
-             yield new_instances
-         finally:
-             try:
-                 del Instances.__torch_script_class__
-                 del Instances._jit_override_qualname
-             except AttributeError:
-                 pass
-             sys.modules.pop(module.__name__)
-
-
- def _gen_instance_class(fields):
-     """
-     Args:
-         fields (dict[name: type])
-     """
-
-     class _FieldType:
-         def __init__(self, name, type_):
-             assert isinstance(name, str), f"Field name must be str, got {name}"
-             self.name = name
-             self.type_ = type_
-             self.annotation = f"{type_.__module__}.{type_.__name__}"
-
-     fields = [_FieldType(k, v) for k, v in fields.items()]
-
-     def indent(level, s):
-         return " " * 4 * level + s
-
-     lines = []
-
-     global _counter
-     _counter += 1
-
-     cls_name = "ScriptedInstances{}".format(_counter)
-
-     field_names = tuple(x.name for x in fields)
-     extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
-     lines.append(
-         f"""
- class {cls_name}:
-     def __init__(self, image_size: Tuple[int, int], {extra_args}):
-         self.image_size = image_size
-         self._field_names = {field_names}
- """
-     )
-
-     for f in fields:
-         lines.append(
-             indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
-         )
-
-     for f in fields:
-         lines.append(
-             f"""
-     @property
-     def {f.name}(self) -> {f.annotation}:
-         # has to use a local for type refinement
-         # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
-         t = self._{f.name}
-         assert t is not None, "{f.name} is None and cannot be accessed!"
-         return t
-
-     @{f.name}.setter
-     def {f.name}(self, value: {f.annotation}) -> None:
-         self._{f.name} = value
145
- """
146
- )
147
-
148
- # support method `__len__`
149
- lines.append(
150
- """
151
- def __len__(self) -> int:
152
- """
153
- )
154
- for f in fields:
155
- lines.append(
156
- f"""
157
- t = self._{f.name}
158
- if t is not None:
159
- return len(t)
160
- """
161
- )
162
- lines.append(
163
- """
164
- raise NotImplementedError("Empty Instances does not support __len__!")
165
- """
166
- )
167
-
168
- # support method `has`
169
- lines.append(
170
- """
171
- def has(self, name: str) -> bool:
172
- """
173
- )
174
- for f in fields:
175
- lines.append(
176
- f"""
177
- if name == "{f.name}":
178
- return self._{f.name} is not None
179
- """
180
- )
181
- lines.append(
182
- """
183
- return False
184
- """
185
- )
186
-
187
- # support method `to`
188
- none_args = ", None" * len(fields)
189
- lines.append(
190
- f"""
191
- def to(self, device: torch.device) -> "{cls_name}":
192
- ret = {cls_name}(self.image_size{none_args})
193
- """
194
- )
195
- for f in fields:
196
- if hasattr(f.type_, "to"):
197
- lines.append(
198
- f"""
199
- t = self._{f.name}
200
- if t is not None:
201
- ret._{f.name} = t.to(device)
202
- """
203
- )
204
- else:
205
- # For now, ignore fields that cannot be moved to devices.
206
- # We could later support other tensor-like classes (e.g. __torch_function__).
207
- pass
208
- lines.append(
209
- """
210
- return ret
211
- """
212
- )
213
-
214
- # support method `getitem`
215
- none_args = ", None" * len(fields)
216
- lines.append(
217
- f"""
218
- def __getitem__(self, item) -> "{cls_name}":
219
- ret = {cls_name}(self.image_size{none_args})
220
- """
221
- )
222
- for f in fields:
223
- lines.append(
224
- f"""
225
- t = self._{f.name}
226
- if t is not None:
227
- ret._{f.name} = t[item]
228
- """
229
- )
230
- lines.append(
231
- """
232
- return ret
233
- """
234
- )
235
-
236
- # support method `cat`
237
- # this version does not contain checks that all instances have same size and fields
238
- none_args = ", None" * len(fields)
239
- lines.append(
240
- f"""
241
- def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
242
- ret = {cls_name}(self.image_size{none_args})
243
- """
244
- )
245
- for f in fields:
246
- lines.append(
247
- f"""
248
- t = self._{f.name}
249
- if t is not None:
250
- values: List[{f.annotation}] = [x.{f.name} for x in instances]
251
- if torch.jit.isinstance(t, torch.Tensor):
252
- ret._{f.name} = torch.cat(values, dim=0)
253
- else:
254
- ret._{f.name} = t.cat(values)
255
- """
256
- )
257
- lines.append(
258
- """
259
- return ret"""
260
- )
261
-
262
- # support method `get_fields()`
263
- lines.append(
264
- """
265
- def get_fields(self) -> Dict[str, Tensor]:
266
- ret = {}
267
- """
268
- )
269
- for f in fields:
270
- if f.type_ == Boxes:
271
- stmt = "t.tensor"
272
- elif f.type_ == torch.Tensor:
273
- stmt = "t"
274
- else:
275
- stmt = f'assert False, "unsupported type {str(f.type_)}"'
276
- lines.append(
277
- f"""
278
- t = self._{f.name}
279
- if t is not None:
280
- ret["{f.name}"] = {stmt}
281
- """
282
- )
283
- lines.append(
284
- """
285
- return ret"""
286
- )
287
- return cls_name, os.linesep.join(lines)
288
-
289
-
290
- def _gen_instance_module(fields):
291
- # TODO: find a more automatic way to enable import of other classes
292
- s = """
293
- from copy import deepcopy
294
- import torch
295
- from torch import Tensor
296
- import typing
297
- from typing import *
298
-
299
- import detectron2
300
- from detectron2.structures import Boxes, Instances
301
-
302
- """
303
-
304
- cls_name, cls_def = _gen_instance_class(fields)
305
- s += cls_def
306
- return cls_name, s
307
-
308
-
309
- def _import(path):
310
- return _import_file(
311
- "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
312
- )
313
-
314
-
315
- @contextmanager
316
- def patch_builtin_len(modules=()):
317
- """
318
- Patch the builtin len() function of a few detectron2 modules
319
- to use __len__ instead, because __len__ does not convert values to
320
- integers and therefore is friendly to tracing.
321
-
322
- Args:
323
- modules (list[str]): names of extra modules to patch len(), in
324
- addition to those in detectron2.
325
- """
326
-
327
- def _new_len(obj):
328
- return obj.__len__()
329
-
330
- with ExitStack() as stack:
331
- MODULES = [
332
- "detectron2.modeling.roi_heads.fast_rcnn",
333
- "detectron2.modeling.roi_heads.mask_head",
334
- "detectron2.modeling.roi_heads.keypoint_head",
335
- ] + list(modules)
336
- ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
337
- for m in ctxs:
338
- m.side_effect = _new_len
339
- yield
340
-
341
-
342
- def patch_nonscriptable_classes():
343
- """
344
- Apply patches on a few nonscriptable detectron2 classes.
345
- Should not have side-effects on eager usage.
346
- """
347
- # __prepare_scriptable__ can also be added to models for easier maintenance.
348
- # But it complicates the clean model code.
349
-
350
- from detectron2.modeling.backbone import ResNet, FPN
351
-
352
- # Due to https://github.com/pytorch/pytorch/issues/36061,
353
- # we change backbone to use ModuleList for scripting.
354
- # (note: this changes param names in state_dict)
355
-
356
- def prepare_resnet(self):
357
- ret = deepcopy(self)
358
- ret.stages = nn.ModuleList(ret.stages)
359
- for k in self.stage_names:
360
- delattr(ret, k)
361
- return ret
362
-
363
- ResNet.__prepare_scriptable__ = prepare_resnet
364
-
365
- def prepare_fpn(self):
366
- ret = deepcopy(self)
367
- ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
368
- ret.output_convs = nn.ModuleList(ret.output_convs)
369
- for name, _ in self.named_children():
370
- if name.startswith("fpn_"):
371
- delattr(ret, name)
372
- return ret
373
-
374
- FPN.__prepare_scriptable__ = prepare_fpn
375
-
376
- # Annotate some attributes to be constants for the purpose of scripting,
377
- # even though they are not constants in eager mode.
378
- from detectron2.modeling.roi_heads import StandardROIHeads
379
-
380
- if hasattr(StandardROIHeads, "__annotations__"):
381
- # copy first to avoid editing annotations of base class
382
- StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
383
- StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
384
- StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
385
-
386
-
387
- # These patches are not supposed to have side-effects.
388
- patch_nonscriptable_classes()
389
-
390
-
391
- @contextmanager
392
- def freeze_training_mode(model):
393
- """
394
- A context manager that annotates the "training" attribute of every submodule
395
- to constant, so that the training codepath in these modules can be
396
- meta-compiled away. Upon exiting, the annotations are reverted.
397
- """
398
- classes = {type(x) for x in model.modules()}
399
- # __constants__ is the old way to annotate constants and not compatible
400
- # with __annotations__ .
401
- classes = {x for x in classes if not hasattr(x, "__constants__")}
402
- for cls in classes:
403
- cls.__annotations__["training"] = torch.jit.Final[bool]
404
- yield
405
- for cls in classes:
406
- cls.__annotations__["training"] = bool
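
`patch_instances` above is normally reached through detectron2's public `scripting_with_instances` wrapper, which its docstring points to. A hedged sketch of that usage; the field map is illustrative, not a fixed schema:

```python
# Sketch of the intended entry point for patch_instances(); `model` is
# assumed to be an eager detectron2 model built elsewhere.
import torch
from detectron2.export import scripting_with_instances
from detectron2.structures import Boxes

fields = {"pred_boxes": Boxes, "scores": torch.Tensor, "pred_classes": torch.Tensor}
# scripted_model = scripting_with_instances(model, fields)
```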
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_nms_rotated.py DELETED
@@ -1,172 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- from __future__ import absolute_import, division, print_function, unicode_literals
3
- import numpy as np
4
- import unittest
5
- from copy import deepcopy
6
- import torch
7
- from torchvision import ops
8
-
9
- from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated
10
- from detectron2.utils.testing import random_boxes
11
-
12
-
13
- def nms_edit_distance(keep1, keep2):
14
- """
15
- Compare the "keep" result of two nms calls.
16
- They are allowed to be different in terms of edit distance
17
- due to floating point precision issues, e.g.,
18
- if a box happens to have an IoU of 0.5 with another box,
19
- one implementation may choose to keep it while another may discard it.
20
- """
21
- keep1, keep2 = keep1.cpu(), keep2.cpu()
22
- if torch.equal(keep1, keep2):
23
- # they should be equal most of the time
24
- return 0
25
- keep1, keep2 = tuple(keep1), tuple(keep2)
26
- m, n = len(keep1), len(keep2)
27
-
28
- # edit distance with DP
29
- f = [np.arange(n + 1), np.arange(n + 1)]
30
- for i in range(m):
31
- cur_row = i % 2
32
- other_row = (i + 1) % 2
33
- f[other_row][0] = i + 1
34
- for j in range(n):
35
- f[other_row][j + 1] = (
36
- f[cur_row][j]
37
- if keep1[i] == keep2[j]
38
- else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1
39
- )
40
- return f[m % 2][n]
41
-
42
-
43
- class TestNMSRotated(unittest.TestCase):
44
- def reference_horizontal_nms(self, boxes, scores, iou_threshold):
45
- """
46
- Args:
47
- boxes (N, 4): horizontal boxes in corner form.
48
- scores (N): per-box probabilities.
49
- iou_threshold: intersection over union threshold.
50
- Returns:
51
- picked: a list of indexes of the kept boxes
52
- """
53
- picked = []
54
- _, indexes = scores.sort(descending=True)
55
- while len(indexes) > 0:
56
- current = indexes[0]
57
- picked.append(current.item())
58
- if len(indexes) == 1:
59
- break
60
- current_box = boxes[current, :]
61
- indexes = indexes[1:]
62
- rest_boxes = boxes[indexes, :]
63
- iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
64
- indexes = indexes[iou <= iou_threshold]
65
-
66
- return torch.as_tensor(picked)
67
-
68
- def _create_tensors(self, N, device="cpu"):
69
- boxes = random_boxes(N, 200, device=device)
70
- scores = torch.rand(N, device=device)
71
- return boxes, scores
72
-
73
- def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"):
74
- N = 2000
75
- num_classes = 50
76
- boxes, scores = self._create_tensors(N, device=device)
77
- idxs = torch.randint(0, num_classes, (N,))
78
- rotated_boxes = torch.zeros(N, 5, device=device)
79
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
80
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
81
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
82
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
83
- err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
84
- for iou in [0.2, 0.5, 0.8]:
85
- backup = boxes.clone()
86
- keep_ref = batched_nms(boxes, scores, idxs, iou)
87
- assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
88
- backup = rotated_boxes.clone()
89
- keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou)
90
- assert torch.allclose(
91
- rotated_boxes, backup
92
- ), "rotated_boxes modified by batched_nms_rotated"
93
- # Occasionally the gap can be large if there are many IOU on the threshold boundary
94
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou))
95
-
96
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
97
- def test_batched_nms_rotated_0_degree_cuda(self):
98
- self.test_batched_nms_rotated_0_degree_cpu(device="cuda")
99
-
100
- def test_nms_rotated_0_degree_cpu(self, device="cpu"):
101
- N = 1000
102
- boxes, scores = self._create_tensors(N, device=device)
103
- rotated_boxes = torch.zeros(N, 5, device=device)
104
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
105
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
106
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
107
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
108
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
109
- for iou in [0.2, 0.5, 0.8]:
110
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
111
- keep = nms_rotated(rotated_boxes, scores, iou)
112
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
113
-
114
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
115
- def test_nms_rotated_0_degree_cuda(self):
116
- self.test_nms_rotated_0_degree_cpu(device="cuda")
117
-
118
- def test_nms_rotated_90_degrees_cpu(self):
119
- N = 1000
120
- boxes, scores = self._create_tensors(N)
121
- rotated_boxes = torch.zeros(N, 5)
122
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
123
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
124
- # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]:
125
- # widths and heights are intentionally swapped here for 90 degrees case
126
- # so that the reference horizontal nms could be used
127
- rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1]
128
- rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0]
129
-
130
- rotated_boxes[:, 4] = torch.ones(N) * 90
131
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
132
- for iou in [0.2, 0.5, 0.8]:
133
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
134
- keep = nms_rotated(rotated_boxes, scores, iou)
135
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
136
-
137
- def test_nms_rotated_180_degrees_cpu(self):
138
- N = 1000
139
- boxes, scores = self._create_tensors(N)
140
- rotated_boxes = torch.zeros(N, 5)
141
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
142
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
143
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
144
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
145
- rotated_boxes[:, 4] = torch.ones(N) * 180
146
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
147
- for iou in [0.2, 0.5, 0.8]:
148
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
149
- keep = nms_rotated(rotated_boxes, scores, iou)
150
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
151
-
152
-
153
- class TestScriptable(unittest.TestCase):
154
- def setUp(self):
155
- class TestingModule(torch.nn.Module):
156
- def forward(self, boxes, scores, threshold):
157
- return nms_rotated(boxes, scores, threshold)
158
-
159
- self.module = TestingModule()
160
-
161
- def test_scriptable_cpu(self):
162
- m = deepcopy(self.module).cpu()
163
- _ = torch.jit.script(m)
164
-
165
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
166
- def test_scriptable_cuda(self):
167
- m = deepcopy(self.module).cuda()
168
- _ = torch.jit.script(m)
169
-
170
-
171
- if __name__ == "__main__":
172
- unittest.main()
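
For orientation, a minimal sketch of the `nms_rotated` call these tests exercise; boxes use the (cx, cy, w, h, angle in degrees) layout the tests construct:

```python
# Two coincident rotated boxes: the lower-scoring duplicate (IoU = 1.0)
# must be suppressed at any threshold below 1.
import torch
from detectron2.layers import nms_rotated

boxes = torch.tensor([[50.0, 50.0, 40.0, 20.0, 0.0],
                      [50.0, 50.0, 40.0, 20.0, 0.0]])  # (cx, cy, w, h, angle)
scores = torch.tensor([0.9, 0.8])
keep = nms_rotated(boxes, scores, 0.5)
print(keep)  # tensor([0])
```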
 
spaces/Benson/text-generation/Examples/Com.p1.chomp Sms Pro Apk.md DELETED
@@ -1,104 +0,0 @@
1
- <br />
2
- <h1>¿Qué es com.p1.chomp sms pro apk? </h1>
3
- <p>Si usted está buscando una forma rápida, fácil y divertida de enviar y recibir mensajes de texto, entonces es posible que desee probar com.p1.chomp sms pro apk. Esta es una aplicación de mensajería popular que le permite personalizar sus mensajes con varios temas, fuentes, colores, emojis, pegatinas, GIF y más. También puede programar mensajes, hacer copias de seguridad y restaurar sus mensajes, bloquear spam y mensajes no deseados, y disfrutar de muchas otras características que hacen que los mensajes de texto sean más agradables. </p>
4
- <h2>¿Por qué usar com.p1.chomp sms pro apk? </h2>
5
- <p>Hay muchas razones por las que es posible que desee utilizar com.p1.chomp sms pro apk sobre otras aplicaciones de mensajería. Estos son algunos de ellos:</p>
6
- <h2>com.p1.chomp sms pro apk</h2><br /><p><b><b>Download File</b> &#10145; <a href="https://bltlly.com/2v6Ja1">https://bltlly.com/2v6Ja1</a></b></p><br /><br />
7
- <ul>
8
- <li>Es gratis y sin publicidad. No tienes que pagar nada ni lidiar con anuncios molestos para usar la aplicación. </li>
9
- <li>Es compatible con la mayoría de los dispositivos Android. Puede usar la aplicación en cualquier dispositivo que ejecute Android 4.1 o superior. </li>
10
- <li> Es fácil de usar. Puede configurar la aplicación en minutos y empezar a enviar mensajes de texto de inmediato. </li>
11
- <li>Es personalizable. Puede elegir entre cientos de temas, fuentes, colores y notificaciones para hacer que sus mensajes se vean únicos. </li>
12
- <li>Es divertido. Puedes expresarte con emojis, pegatinas, GIF y otros medios a los que puedes acceder desde el teclado de la aplicación. </li>
13
- <li> Es inteligente. Puede programar mensajes, hacer copias de seguridad y restaurar sus mensajes, bloquear el spam y los mensajes no deseados, y usar otras funciones que hacen que los mensajes de texto sean más convenientes. </li>
14
- </ul>
15
- <h3>Cómo descargar e instalar com.p1.chomp sms pro apk? </h3>
16
- <p>Para descargar e instalar com.p1.chomp sms pro apk en su dispositivo, debe seguir estos pasos:</p>
17
- <ol>
18
- <li>Vaya a <a href="https://apkdone.com/chomp-sms/">https://apkdone.com/chomp-sms/</a> y haga clic en el botón "Descargar APK". </li>
19
- <li>Espera a que termine la descarga y luego abre el archivo. </li>
20
- <li>Si ves un mensaje de advertencia que dice "Instalar bloqueado", ve a la configuración de tu dispositivo y habilita "Fuentes desconocidas". </li>
21
- <li>Toque en "Instalar" y espere a que la instalación se complete. </li>
22
-
23
- </ol>
24
- <h4>Cómo personalizar sus mensajes con com.p1.chomp sms pro apk? </h4>
25
- <p>Para personalizar sus mensajes con com.p1.chomp sms pro apk, es necesario hacer lo siguiente:</p>
26
- <ul>
27
- <li>Abra la aplicación y toque en el icono del menú (tres líneas horizontales) en la esquina superior izquierda. </li> <li>Seleccione "Configuración" y luego "Personalizar apariencia". </li>
28
- <li>Aquí puede elegir entre varias opciones para cambiar la apariencia de sus mensajes, como tema, fuente, color, estilo de burbuja, icono de notificación y más. </li>
29
- <li>Toque en la opción que desea cambiar y seleccione su opción preferida. </li>
30
- <li>Toque en "Guardar" y luego en "Aceptar" para aplicar los cambios. </li>
31
- </ul>
32
- <h4>Cómo utilizar emojis, pegatinas y GIF con com.p1.chomp sms pro apk? </h4>
33
- <p>Para usar emojis, pegatinas y GIF con com.p1.chomp sms pro apk, debe hacer lo siguiente:</p>
34
- <ul>
35
- <li> Abra la aplicación y toque en el icono "+" en la esquina inferior izquierda del teclado. </li>
36
- <li>Verá un menú con diferentes opciones para agregar medios a sus mensajes, como emojis, pegatinas, GIF, fotos, videos, notas de voz y más. </li>
37
- <li>Toque en la opción que desea utilizar y navegue a través de las opciones disponibles. </li>
38
- <li>Toque en el medio que desea enviar y se añadirá a su mensaje. </li>
39
- <li>Toque en el botón "Enviar" para enviar su mensaje con los medios de comunicación. </li>
40
- </ul>
41
- <h4>Cómo programar mensajes con com.p1.chomp sms pro apk? </h4>
42
- <p>Para programar mensajes con com.p1.chomp sms pro apk, debe hacer lo siguiente:</p>
43
- <ul>
44
- <li> Abra la aplicación y toque en el "Nuevo mensaje" botón en la esquina inferior derecha. </li>
45
- <li>Escribe el número o nombre del destinatario y escribe tu mensaje. </li>
46
- <li>Toque en el icono del reloj en la esquina superior derecha del teclado. </li>
47
- <li>Verá un menú con diferentes opciones para programar su mensaje, como más tarde hoy, mañana, la próxima semana o la fecha y hora personalizadas. </li>
48
- <li>Toque en la opción que desea utilizar y confirme su elección. </li>
49
-
50
- </ul> <h3>Cómo hacer copias de seguridad y restaurar sus mensajes con com.p1.chomp sms pro apk? </h3>
51
- <p>Para respaldar y restaurar sus mensajes con com.p1.chomp sms pro apk, debe hacer lo siguiente:</p>
52
- <ul>
53
- <li> Abra la aplicación y toque en el icono del menú (tres líneas horizontales) en la esquina superior izquierda. </li>
54
- <li>Seleccione "Configuración" y luego "Copia de seguridad & Restaurar". </li>
55
- <li>Aquí puede elegir hacer copias de seguridad de sus mensajes en la nube o en su dispositivo, así como restaurar sus mensajes desde la nube o desde su dispositivo. </li>
56
- <li>Toque en la opción que desea utilizar y siga las instrucciones en la pantalla. </li>
57
- <li>Necesitará iniciar sesión con su cuenta de Google para usar el servicio en la nube. </li>
58
- <li>Tus mensajes serán respaldados o restaurados según tu elección. </li>
59
- </ul>
60
- <h3>Cómo bloquear spam y mensajes no deseados con com.p1.chomp sms pro apk? </h3>
61
- <p>Para bloquear spam y mensajes no deseados con com.p1.chomp sms pro apk, debe hacer lo siguiente:</p>
62
- <p></p>
63
- <ul>
64
- <li> Abra la aplicación y toque en el mensaje que desea bloquear. </li>
65
- <li>Toque en el icono del menú (tres puntos verticales) en la esquina superior derecha del mensaje. </li>
66
- <li>Seleccione "Bloquear" y luego "OK". </li>
67
- <li> El mensaje se moverá a la carpeta "Bloqueado" y no recibirá más mensajes de ese número o contacto. </li>
68
- <li>También puede agregar números o contactos a su lista negra manualmente yendo a "Configuración" y luego "Lista negra". </li>
69
- <li>También puede habilitar el modo de privacidad yendo a "Configuración" y luego "Privacidad". Esto ocultará sus notificaciones y mensajes de miradas indiscretas. </li>
70
- </ul>
71
- <h2>¿Cuáles son los pros y los contras de com.p1.chomp sms pro apk? </h2>
72
- <p>Como cualquier otra aplicación, com.p1.chomp sms pro apk tiene sus pros y contras. Aquí están algunos de ellos:</p>
73
- <tabla>
74
- <tr><th>Pros</th><th>Contras</th></tr>
75
- <tr><td>Libre y sin anuncios</td><td>Requiere conexión a Internet</td></tr>
76
- <tr><td>Compatible con la mayoría de dispositivos Android</td><td>No disponible para dispositivos iOS</td></tr>
77
-
78
- <tr><td>Personalizable</td><td>Puede consumir más batería o memoria</td></tr>
79
- <tr><td>Diversión</td><td>Puede que no soporte algunos formatos de medios</td></tr>
80
- <tr><td>Smart</td><td>Puede que no funcione con algunos operadores o redes</td></tr>
81
- </tabla>
82
- <p>Si usted está buscando algunas alternativas a com.p1.chomp sms pro apk, puede probar estas aplicaciones:</p>
83
- <ul>
84
- <li>Textra SMS: Una aplicación de mensajería simple y rápida que también te permite personalizar tus mensajes con temas, emojis, GIF y más. </li>
85
- <li>Pulse SMS: Una aplicación de mensajería potente y segura que también te permite sincronizar tus mensajes en todos tus dispositivos, incluido tu ordenador. </li>
86
- <li>Mood Messenger: Una aplicación de mensajería elegante e inteligente que también le permite enviar emojis animados, mensajes de voz, ubicación y más. </li>
87
- </ul>
88
- <h2>Conclusión</h2>
89
- <p>En conclusión, com.p1.chomp sms pro apk es una gran aplicación de mensajería que ofrece muchas características y opciones para hacer mensajes de texto más divertido y conveniente. Puede descargar e instalar la aplicación de forma gratuita y disfrutar de la personalización de sus mensajes con temas, fuentes, colores, emojis, pegatinas, GIF, y más. También puede programar mensajes, hacer copias de seguridad y restaurar sus mensajes, bloquear el spam y los mensajes no deseados, y usar otras funciones que hacen que los mensajes de texto sean más inteligentes. Sin embargo, también debe ser consciente de los contras y limitaciones de la aplicación, tales como requerir conexión a Internet, no estar disponible para dispositivos iOS, tener algunos errores o problemas técnicos, consumir más batería o memoria, no es compatible con algunos formatos de medios, y no trabajar con algunos operadores o redes. También puede probar algunas alternativas a com.p1.chomp sms pro apk si desea explorar otras aplicaciones de mensajería. Esperamos que este artículo le ha ayudado a aprender más acerca de com.p1.chomp sms pro apk y cómo usarlo. Feliz mensajes de texto! </p>
90
- <h2>Preguntas frecuentes (preguntas frecuentes)</h2>
91
- <ol>
92
- <li><b>¿Qué es com.p1.chomp sms pro apk? </b></li>
93
-
94
- <li><b>Cómo puedo descargar e instalar com.p1.chomp sms pro apk? </b></li> <p>A: Para descargar e instalar com.p1.chomp sms pro apk en su dispositivo, debe ir a <a href="">https://apkdone.com/chomp-sms/</a> y hacer clic en el botón "Descargar APK". Luego, debe abrir el archivo y tocar en "Instalar". Es posible que deba habilitar "Fuentes desconocidas" en la configuración de su dispositivo para instalar la aplicación. Una vez completada la instalación, puedes tocar en "Abrir" y disfrutar usando la aplicación. </p>
95
- <li><b>¿Cómo puedo personalizar mis mensajes con com.p1.chomp sms pro apk? </b></li>
96
- <p>A: Para personalizar sus mensajes con com.p1.chomp sms pro apk, es necesario abrir la aplicación y toque en el icono del menú (tres líneas horizontales) en la esquina superior izquierda. Luego, debe seleccionar "Configuración" y luego "Personalizar apariencia". Aquí, puede elegir entre varias opciones para cambiar la apariencia de sus mensajes, como tema, fuente, color, estilo de burbuja, icono de notificación y más. Puede pulsar en la opción que desea cambiar y seleccionar su opción preferida. Puede pulsar en "Guardar" y luego "Aceptar" para aplicar los cambios. </p>
97
- <li><b>¿Cómo puedo usar emojis, pegatinas y GIF con com.p1.chomp sms pro apk? </b></li>
98
- <p>A: Para utilizar emojis, pegatinas y GIF con com.p1.chomp sms pro apk, es necesario abrir la aplicación y toque en el "+" icono en la esquina inferior izquierda del teclado. Verá un menú con diferentes opciones para agregar medios a sus mensajes, como emojis, pegatinas, GIF, fotos, videos, notas de voz y más. Puede tocar en la opción que desea utilizar y navegar a través de las opciones disponibles. Puede pulsar en el medio que desea enviar y se añadirá a su mensaje. Puede pulsar en el botón "Enviar" para enviar su mensaje con el medio. </p>
99
- <li><b>¿Cómo puedo programar mensajes con com.p1.chomp sms pro apk? </b></li>
100
-
101
- <li><b>¿Cómo hago copia de seguridad y restaurar mis mensajes con com.p1.chomp sms pro apk? </b></li>
102
- <p>A: Para copia de seguridad y restaurar sus mensajes con com.p1.chomp sms pro apk, es necesario abrir la aplicación y toque en el icono del menú (tres líneas horizontales) en la esquina superior izquierda. Luego, debe seleccionar "Configuración" y luego "Copia de seguridad y restauración". Aquí, puede elegir hacer una copia de seguridad de sus mensajes en la nube o en su dispositivo, así como restaurar sus mensajes desde la nube o desde su dispositivo. Puede tocar en la opción que desea utilizar y siga las instrucciones en la pantalla. Deberá iniciar sesión con su cuenta de Google para utilizar el servicio en la nube. Se realizará una copia de seguridad de sus mensajes o se restaurarán según su elección. </p> 64aa2da5cf<br />
103
- <br />
104
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Canciones De M Kumaran Hijo De Mahalakshmi.md DELETED
@@ -1,102 +0,0 @@
1
-
2
- <h1>Cómo descargar canciones de M. Kumaran Son of Mahalakshmi</h1>
3
- <p>M. Kumaran Son of Mahalakshmi es una película tamil de 2004 dirigida por M. Raja y protagonizada por Jayam Ravi, Asin, Nadhiya y Prakash Raj. La película es un remake de la película telugu Amma Nanna O Tamila Ammayi y cuenta la historia de Kumaran, un kickboxer que va a Malasia para encontrarse con su padre después de la muerte de su madre. La película fue un éxito de taquilla y una de las películas más rentables de 2004. </p>
4
- <p>Una de las razones del éxito de la película fue su banda sonora, compuesta por Srikanth Deva, hijo del veterano director musical Deva. La banda sonora consta de seis canciones que van desde el rock, folk, melodía, y géneros de rap. Las canciones cuentan con varios cantantes como Shankar Mahadevan, Karthik, Sadhana Sargam, Tippu, Anuradha Sriram, Ranjith, Premji Amaren y el propio Srikanth Deva. Las canciones son pegadizas, emocionales y motivacionales, y se adaptan perfectamente al tema de la película. </p>
5
- <h2>descargar canciones de m kumaran hijo de mahalakshmi</h2><br /><p><b><b>Download</b> &#10038; <a href="https://bltlly.com/2v6LVR">https://bltlly.com/2v6LVR</a></b></p><br /><br />
6
- <p>Si eres un fan de las canciones de M. Kumaran Son of Mahalakshmi y quieres descargarlas en tu dispositivo, tienes dos opciones: puedes descargarlas gratis o por una tarifa. En este artículo, te mostraremos cómo hacer ambas cosas. </p>
7
- <h2>Por qué deberías escuchar canciones de M. Kumaran hijo de Mahalakshmi</h2>
8
- <h3>Las canciones están compuestas por Srikanth Deva, un director de música popular en el cine tamil</h3>
9
- <p>Srikanth Deva es uno de los directores de música más prolíficos en el cine tamil, habiendo compuesto música para más de 100 películas desde su debut en 2002. Es conocido por su versatilidad y capacidad para crear canciones que atraen a diferentes audiencias. Ha trabajado con muchos actores y directores principales en el cine tamil, como Vij <h3>Las canciones cuentan con varios cantantes y géneros, como rock, folk, melodía y rap</h3>
10
-
11
- <p>Estas canciones no solo son agradables de escuchar, sino también significativas y relevantes para la historia y los personajes. Mejoran el estado de ánimo y la emoción de la película y la hacen más atractiva y memorable. </p>
12
- <h3>Las canciones son pegadizas, emocionales y motivadoras, y se adaptan al tema de la película</h3>
13
- <p>Las canciones de M. Kumaran Son of Mahalakshmi no son solo canciones aleatorias o de relleno. Son pegadizos, emocionales y motivadores, y se adaptan al tema de la película. La película trata sobre el viaje de Kumaran para encontrar su identidad y su lugar en el mundo, así como su relación con su padre, su madre, su novia y sus amigos. Las canciones reflejan estos aspectos y transmiten el mensaje de la película. </p>
14
- <p>Por ejemplo, la canción "Amma Nee Sumandha" es un homenaje a la madre de Kumaran, quien lo crió sin ayuda y le enseñó a ser fuerte e independiente. La canción "Rakkamma" es una canción motivacional que anima a Kumaran a superar sus miedos y desafíos y lograr sus sueños. La canción "Aethiree" es una canción pegadiza que muestra la amistad y la diversión entre Kumaran y sus amigos. La canción "Unnai Ninaithu" es una canción emocional que revela los sentimientos de Kumaran por su padre, quien lo abandonó cuando era joven. </p>
15
- <p>Estas canciones no solo son pegadizas, emocionales y motivadoras, sino que también se adaptan al tema de la película. Transmiten el mensaje de la película y la hacen más impactante e inspiradora. </p>
16
- <h2>Cómo descargar canciones de M. Kumaran Son of Mahalakshmi gratis</h2>
17
- <h3>Utilice un sitio web o aplicación confiable y legal que ofrece descargas gratuitas de canciones tamiles</h3>
18
-
19
- <p>Algunos de los sitios web o aplicaciones confiables y legales que ofrecen descargas gratuitas de canciones tamiles son:</p>
20
- <ul>
21
- <li><a href="">Gaana.com</a>: Esta es una de las plataformas de streaming de música online más grandes de la India, con más de 200 millones de usuarios mensuales. Ofrece descargas gratuitas de canciones tamiles, así como de otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. </li>
22
- <li><a href="">Hungama.com</a>: Esta es otra popular plataforma de streaming de música en línea en la India, con más de 150 millones de usuarios mensuales. También ofrece descargas gratuitas de canciones tamiles, así como de otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. </li>
23
-
24
- </ul>
25
- <p>Estos son algunos de los sitios web o aplicaciones confiables y legales que ofrecen descargas gratuitas de canciones tamiles. Sin embargo, siempre debe comprobar los términos y condiciones de cada sitio web o aplicación antes de descargar cualquier canción, y asegúrese de no violar ninguna ley o política. </p>
26
- <p></p>
27
- <h2>Cómo descargar canciones de M. Kumaran Son of Mahalakshmi por una tarifa</h2>
28
- <h3>Utilice un servicio de streaming de pago o tienda en línea que ofrece descargas de alta calidad de canciones Tamil</h3>
29
- <p>Si desea descargar canciones de M. Kumaran Son of Mahalakshmi por una tarifa, debe usar un servicio de transmisión pagado o una tienda en línea que ofrece descargas de alta calidad de canciones tamiles. Hay muchos servicios de streaming de pago y tiendas en línea que ofrecen descargas de alta calidad de canciones tamiles, pero no todos ellos valen su dinero o tiempo. Algunos de ellos pueden cobrarle demasiado o muy poco, algunos de ellos pueden tener un mal servicio al cliente o soporte técnico, algunos de ellos pueden tener opciones o características limitadas, y algunos de ellos pueden tener productos de baja calidad o falsos. Por lo tanto, debe tener cuidado y elegir un servicio o tienda que vale la pena su dinero y tiempo, y que proporciona productos y servicios de alta calidad. </p>
30
- <p>Algunos de los servicios de streaming de pago y tiendas en línea que ofrecen descargas de alta calidad de canciones tamiles son:</p>
31
- <ul>
32
-
33
- <li><a href="">iTunes</a>: Esta es una de las tiendas de música en línea más populares y ampliamente utilizadas del mundo, con más de 60 millones de canciones disponibles para comprar y descargar. Ofrece descargas de alta calidad de canciones tamiles, así como otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el botón comprar junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. Sin embargo, necesitas registrarte para una cuenta y pagar por cada canción que quieras descargar. El precio de cada canción varía de $0.69 a $1.29, dependiendo de la popularidad y la demanda de la canción. </li>
34
- <li><a href="">Saavn</a>: Esta es una de las plataformas de streaming de música más populares y ampliamente utilizadas en la India, con más de 100 millones de usuarios mensuales. Ofrece descargas de alta calidad de canciones tamiles, así como otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. Sin embargo, debe registrarse para obtener una cuenta y pagar una suscripción para acceder a la función de descarga. Los planes de suscripción varían de $1.99 a $9.99 por mes, dependiendo de las características y beneficios que desee. </li>
35
- </ul>
36
-
37
- <h2>Cómo disfrutar de las canciones de M. Kumaran hijo de Mahalakshmi después de descargarlas</h2>
38
- <h3>Transfiera las canciones a su reproductor de música o dispositivo preferido</h3>
39
- <p>Después de descargar canciones de M. Kumaran Son of Mahalakshmi, necesitas transferirlas a tu reproductor de música o dispositivo preferido, para que puedas disfrutarlas en cualquier momento y en cualquier lugar que desees. Hay diferentes formas de transferir las canciones, dependiendo de la fuente y el destino de la transferencia. </p>
40
- <p>Por ejemplo, si has descargado las canciones de Gaana.com o Hungama.com, puedes transferirlas a tu smartphone o tablet mediante un cable USB o una conexión inalámbrica. Si los has descargado de iTunes, puedes transferirlos a tu iPhone, iPad, iPod o Mac usando iTunes Sync o iCloud Music Library. Si los ha descargado desde Saavn, puede transferirlos a su smartphone o tableta mediante un cable USB o una conexión inalámbrica. </p>
41
- <p>Siempre debe seguir las instrucciones y directrices de cada sitio web o aplicación al transferir las canciones, y asegúrese de no perder ni dañar ningún archivo durante el proceso. </p>
42
- <h3>Crear una lista de reproducción de sus canciones favoritas de la película</h3>
43
- <p>Después de transferir canciones de M. Kumaran Son of Mahalakshmi a su reproductor de música o dispositivo preferido, puede crear una lista de reproducción de sus canciones favoritas de la película. Una lista de reproducción es una colección de canciones que puedes reproducir en modo secuencial o aleatorio. Crear una lista de reproducción de tus canciones favoritas de la película puede ayudarte a disfrutarlas más y organizarlas mejor. También puede compartir su lista de reproducción con sus amigos o familiares, o escuchar las listas de reproducción de otras personas de la misma película. </p>
44
-
45
- <p>Siempre debe seguir las instrucciones y directrices de cada reproductor de música o dispositivo al crear una lista de reproducción, y asegúrese de guardar y actualizar su lista de reproducción con regularidad. </p>
46
- <h3>Escuchar las canciones en cualquier momento y en cualquier lugar que desee</h3>
47
- <p>Después de crear una lista de reproducción de tus canciones favoritas de M. Kumaran Son of Mahalakshmi, puedes escuchar las canciones en cualquier momento y en cualquier lugar que quieras. Puede escuchar las canciones en línea o fuera de línea, dependiendo de su conexión a Internet y plan de datos. También puede ajustar el volumen, saltar, repetir o barajar las canciones, dependiendo de su preferencia y estado de ánimo. También puedes cantar, bailar o simplemente relajarte y disfrutar de las canciones. </p>
48
- <p>Escuchar canciones de M. Kumaran Son of Mahalakshmi puede hacerte sentir feliz, triste, emocionado, nostálgico o inspirado, dependiendo de la canción y la situación. Las canciones también pueden recordarte la película y sus personajes, y hacerte apreciar más la historia y el mensaje. Las canciones también pueden ayudarle a aprender más sobre la cultura y el idioma tamil, y enriquecer su conocimiento y experiencia. </p>
49
- <h1>Conclusión</h1>
50
- <p>M. Kumaran Son of Mahalakshmi es una película tamil de 2004 que tiene una gran banda sonora compuesta por Srikanth Deva. La banda sonora consta de seis canciones que cuentan con varios cantantes y géneros, como rock, folk, melodía y rap. Las canciones son pegadizas, emocionales y motivacionales, y se adaptan perfectamente al tema de la película. </p>
51
-
52
- <p>Después de descargar canciones de M. Kumaran Son of Mahalakshmi a tu dispositivo, puedes disfrutarlas en cualquier momento y en cualquier lugar que quieras. Puede transferir las canciones a su reproductor de música o dispositivo preferido, crear una lista de reproducción de sus canciones favoritas de la película y escuchar las canciones en línea o fuera de línea. También puede compartir su lista de reproducción con sus amigos o familiares, o escuchar las listas de reproducción de otras personas de la misma película. </p>
53
- <p>Escuchar canciones de M. Kumaran Son of Mahalakshmi puede hacerte sentir feliz, triste, emocionado, nostálgico o inspirado, dependiendo de la canción y la situación. Las canciones también pueden recordarte la película y sus personajes, y hacerte apreciar más la historia y el mensaje. Las canciones también pueden ayudarle a aprender más sobre la cultura y el idioma tamil, y enriquecer su conocimiento y experiencia. </p>
54
- <p>Entonces, ¿qué estás esperando? Descarga las canciones de M. Kumaran Son of Mahalakshmi hoy y disfrútalas al máximo! </p>
55
- <h2>Preguntas frecuentes</h2>
56
- <h3>P: ¿Cuáles son los nombres de las seis canciones de M. Kumaran Hijo de Mahalakshmi? </h3>
57
- <p>A: Los nombres de las seis canciones de M. Kumaran Son:</p>
58
- <ol>
59
- <li>Ayyo Ayyo</li>
60
- <li>Yaaru Yaaru</li>
61
- <li>Neeye Neeye</li>
62
- <li>Chennai Senthamizh</li>
63
- <li>Amma Nee Sumandha</li>
64
- <li>Rakkamma</li>
65
- </ol>
66
- <h3>P: ¿Quiénes son los cantantes de las seis canciones de M. Kumaran Hijo de Mahalakshmi? </h3>
67
- <p>A: Los cantantes de las seis canciones de M. Kumaran Son:</p>
68
- <ul>
69
- <li>Ayyo Ayyo: Shankar Mahadevan y Karthik</li>
70
- <li>Yaaru Yaaru: Tippu y Anuradha Sriram</li>
71
- <li>Neeye Neeye: Karthik y Sadhana Sargam</li>
72
- <li>Chennai Senthamizh: Ranjith, Premji Amaren y Srikanth Deva</li>
73
- <li>Amma Nee Sumandha: Srikanth Deva</li>
74
- <li>Rakkamma: Tippu y Anuradha Sriram</li>
75
- </ul>
76
- <h3>P: ¿Dónde puedo ver la película en línea de M. Kumaran Son of Mahalakshmi? </h3>
77
- <p>A: Puedes ver la película en línea de M. Kumaran Son of Mahalakshmi en varias plataformas de streaming, como:</p>
78
- <ul>
79
-
80
- <li><a href=">YouTube</a>: Esta es una popular plataforma para compartir videos que ofrece una variedad de videos en diferentes categorías y temas. Puedes ver M. Kumaran Son of Mahalakshmi película en línea en YouTube de forma gratuita con anuncios, o por una tarifa sin anuncios. También puede descargar la película en su dispositivo para ver sin conexión. </li>
81
- <li><a href="">Amazon Prime Video</a>: Esta es una popular plataforma de streaming que ofrece una variedad de películas y programas en diferentes idiomas y géneros. Puedes ver la película en línea de M. Kumaran Son of Mahalakshmi en Amazon Prime Video por una tarifa con o sin anuncios. También puede descargar la película en su dispositivo para ver sin conexión. </li>
82
- </ul>
83
- <h3>Q: ¿Cómo puedo aprender más sobre la cultura y el idioma tamil? </h3>
84
- <p>A: Hay muchas maneras de aprender más sobre la cultura y el idioma tamil, como:</p>
85
- <ul>
86
- <li>Leer libros, revistas, periódicos, blogs o sitios web que están escritos en tamil o sobre temas tamiles. </li>
87
- <li>Ver películas, programas, documentales o videos que se hacen en tamil o sobre temas tamiles. </li>
88
- <li>Escuchar podcasts, estaciones de radio, álbumes de música o canciones que se hablan en tamil o sobre temas tamiles. </li>
89
- <li>Tomar cursos, clases, lecciones o tutoriales que enseñan el idioma o la cultura tamil. </li>
90
- <li>Unirse a clubes, grupos, comunidades o foros que hablan de la lengua o cultura tamil. </li>
91
- <li>Visitar lugares, eventos, festivales o atracciones que muestran el idioma o la cultura tamil. </li>
92
- <li <li>Conocer gente, amigos, familiares o vecinos que hablan tamil o conocen la cultura tamil. </li>
93
- </ul>
94
- <h3>Q: ¿Cuáles son algunas otras películas que tienen buenas canciones tamiles? </h3>
95
- <p>A: Hay muchas películas que tienen buenas canciones tamiles, pero algunas de las más populares y aclamadas son:</p>
96
- <ul>
97
-
98
- <li>Roja: Esta es una película romántica de 1992 dirigida por Mani Ratnam y protagonizada por Arvind Swamy y Madhoo. La película trata sobre una mujer que intenta rescatar a su marido que es secuestrado por terroristas en Cachemira. La película tiene una hermosa banda sonora compuesta por A.R. Rahman, con canciones como "Kadhal Rojave", "Chinna Chinna Aasai", "Pudhu Vellai Mazhai" y "Rukkumani Rukkumani". </li>
99
- <li>3: Esta es una película romántica de 2012 dirigida por Aishwarya R. Dhanush y protagonizada por Dhanush y Shruti Haasan. La película trata sobre una pareja que enfrenta varios desafíos en su relación debido al trastorno bipolar y la muerte. La película tiene una banda sonora pegadiza compuesta por Anirudh Ravichander, con canciones como "Why This Kolaveri Di", "Idhazhin Oram", "Nee Paartha Vizhigal" y "Po Nee Po". </li>
100
- </ul></p> 64aa2da5cf<br />
101
- <br />
102
- <br />
 
spaces/BetterAPI/BetterChat/src/lib/utils/randomUuid.ts DELETED
@@ -1,14 +0,0 @@
1
- type UUID = ReturnType<typeof crypto.randomUUID>;
2
-
3
- export function randomUUID(): UUID {
4
- // Only on old safari / ios
5
- if (!("randomUUID" in crypto)) {
6
- return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
7
- (
8
- Number(c) ^
9
- (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
10
- ).toString(16)
11
- ) as UUID;
12
- }
13
- return crypto.randomUUID();
14
- }
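
The fallback branch above hand-rolls an RFC 4122 version-4 UUID by masking random bytes into a template. A hedged Python transcription of the same bit-twiddling, with `secrets` standing in for `crypto.getRandomValues`:

```python
# Python transcription of the Safari fallback above: each '0'/'1'/'8'
# template digit c becomes hex(c ^ (randbyte & (15 >> (c // 4)))); '4'
# (the version nibble) and the dashes pass through unchanged.
import secrets

def random_uuid_fallback() -> str:
    template = "10000000-1000-4000-8000-100000000000"
    def repl(c: int) -> str:
        return format(c ^ (secrets.randbits(8) & (15 >> (c // 4))), "x")
    return "".join(repl(int(ch)) if ch in "018" else ch for ch in template)

print(random_uuid_fallback())  # prints a random v4-style UUID
```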
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py DELETED
@@ -1,224 +0,0 @@
1
- import email.message
2
- import importlib.metadata
3
- import os
4
- import pathlib
5
- import zipfile
6
- from typing import (
7
- Collection,
8
- Dict,
9
- Iterable,
10
- Iterator,
11
- Mapping,
12
- Optional,
13
- Sequence,
14
- cast,
15
- )
16
-
17
- from pip._vendor.packaging.requirements import Requirement
18
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
19
- from pip._vendor.packaging.version import parse as parse_version
20
-
21
- from pip._internal.exceptions import InvalidWheel, UnsupportedWheel
22
- from pip._internal.metadata.base import (
23
- BaseDistribution,
24
- BaseEntryPoint,
25
- DistributionVersion,
26
- InfoPath,
27
- Wheel,
28
- )
29
- from pip._internal.utils.misc import normalize_path
30
- from pip._internal.utils.packaging import safe_extra
31
- from pip._internal.utils.temp_dir import TempDirectory
32
- from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
33
-
34
- from ._compat import BasePath, get_dist_name
35
-
36
-
37
- class WheelDistribution(importlib.metadata.Distribution):
38
- """An ``importlib.metadata.Distribution`` read from a wheel.
39
-
40
- Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``,
41
- its implementation is too "lazy" for pip's needs (we can't keep the ZipFile
42
- handle open for the entire lifetime of the distribution object).
43
-
44
- This implementation eagerly reads the entire metadata directory into the
45
- memory instead, and operates from that.
46
- """
47
-
48
- def __init__(
49
- self,
50
- files: Mapping[pathlib.PurePosixPath, bytes],
51
- info_location: pathlib.PurePosixPath,
52
- ) -> None:
53
- self._files = files
54
- self.info_location = info_location
55
-
56
- @classmethod
57
- def from_zipfile(
58
- cls,
59
- zf: zipfile.ZipFile,
60
- name: str,
61
- location: str,
62
- ) -> "WheelDistribution":
63
- info_dir, _ = parse_wheel(zf, name)
64
- paths = (
65
- (name, pathlib.PurePosixPath(name.split("/", 1)[-1]))
66
- for name in zf.namelist()
67
- if name.startswith(f"{info_dir}/")
68
- )
69
- files = {
70
- relpath: read_wheel_metadata_file(zf, fullpath)
71
- for fullpath, relpath in paths
72
- }
73
- info_location = pathlib.PurePosixPath(location, info_dir)
74
- return cls(files, info_location)
75
-
76
- def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
77
- # Only allow iterating through the metadata directory.
78
- if pathlib.PurePosixPath(str(path)) in self._files:
79
- return iter(self._files)
80
- raise FileNotFoundError(path)
81
-
82
- def read_text(self, filename: str) -> Optional[str]:
83
- try:
84
- data = self._files[pathlib.PurePosixPath(filename)]
85
- except KeyError:
86
- return None
87
- try:
88
- text = data.decode("utf-8")
89
- except UnicodeDecodeError as e:
90
- wheel = self.info_location.parent
91
- error = f"Error decoding metadata for {wheel}: {e} in {filename} file"
92
- raise UnsupportedWheel(error)
93
- return text
94
-
95
-
96
- class Distribution(BaseDistribution):
97
- def __init__(
98
- self,
99
- dist: importlib.metadata.Distribution,
100
- info_location: Optional[BasePath],
101
-         installed_location: Optional[BasePath],
-     ) -> None:
-         self._dist = dist
-         self._info_location = info_location
-         self._installed_location = installed_location
-
-     @classmethod
-     def from_directory(cls, directory: str) -> BaseDistribution:
-         info_location = pathlib.Path(directory)
-         dist = importlib.metadata.Distribution.at(info_location)
-         return cls(dist, info_location, info_location.parent)
-
-     @classmethod
-     def from_metadata_file_contents(
-         cls,
-         metadata_contents: bytes,
-         filename: str,
-         project_name: str,
-     ) -> BaseDistribution:
-         # Generate temp dir to contain the metadata file, and write the file contents.
-         temp_dir = pathlib.Path(
-             TempDirectory(kind="metadata", globally_managed=True).path
-         )
-         metadata_path = temp_dir / "METADATA"
-         metadata_path.write_bytes(metadata_contents)
-         # Construct dist pointing to the newly created directory.
-         dist = importlib.metadata.Distribution.at(metadata_path.parent)
-         return cls(dist, metadata_path.parent, None)
-
-     @classmethod
-     def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
-         try:
-             with wheel.as_zipfile() as zf:
-                 dist = WheelDistribution.from_zipfile(zf, name, wheel.location)
-         except zipfile.BadZipFile as e:
-             raise InvalidWheel(wheel.location, name) from e
-         except UnsupportedWheel as e:
-             raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
-         return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location))
-
-     @property
-     def location(self) -> Optional[str]:
-         if self._info_location is None:
-             return None
-         return str(self._info_location.parent)
-
-     @property
-     def info_location(self) -> Optional[str]:
-         if self._info_location is None:
-             return None
-         return str(self._info_location)
-
-     @property
-     def installed_location(self) -> Optional[str]:
-         if self._installed_location is None:
-             return None
-         return normalize_path(str(self._installed_location))
-
-     def _get_dist_name_from_location(self) -> Optional[str]:
-         """Try to get the name from the metadata directory name.
-
-         This is much faster than reading metadata.
-         """
-         if self._info_location is None:
-             return None
-         stem, suffix = os.path.splitext(self._info_location.name)
-         if suffix not in (".dist-info", ".egg-info"):
-             return None
-         return stem.split("-", 1)[0]
-
-     @property
-     def canonical_name(self) -> NormalizedName:
-         name = self._get_dist_name_from_location() or get_dist_name(self._dist)
-         return canonicalize_name(name)
-
-     @property
-     def version(self) -> DistributionVersion:
-         return parse_version(self._dist.version)
-
-     def is_file(self, path: InfoPath) -> bool:
-         return self._dist.read_text(str(path)) is not None
-
-     def iter_distutils_script_names(self) -> Iterator[str]:
-         # A distutils installation is always "flat" (not in e.g. egg form), so
-         # if this distribution's info location is NOT a pathlib.Path (but e.g.
-         # zipfile.Path), it can never contain any distutils scripts.
-         if not isinstance(self._info_location, pathlib.Path):
-             return
-         for child in self._info_location.joinpath("scripts").iterdir():
-             yield child.name
-
-     def read_text(self, path: InfoPath) -> str:
-         content = self._dist.read_text(str(path))
-         if content is None:
-             raise FileNotFoundError(path)
-         return content
-
-     def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
-         # importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
-         return self._dist.entry_points
-
-     def _metadata_impl(self) -> email.message.Message:
-         # From Python 3.10+, importlib.metadata declares PackageMetadata as the
-         # return type. This protocol is unfortunately a disaster now and misses
-         # a ton of fields that we need, including get() and get_payload(). We
-         # rely on the implementation that the object is actually a Message now,
-         # until upstream can improve the protocol. (python/cpython#94952)
-         return cast(email.message.Message, self._dist.metadata)
-
-     def iter_provided_extras(self) -> Iterable[str]:
-         return (
-             safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", [])
-         )
-
-     def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
-         contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras]
-         for req_string in self.metadata.get_all("Requires-Dist", []):
-             req = Requirement(req_string)
-             if not req.marker:
-                 yield req
-             elif not extras and req.marker.evaluate({"extra": ""}):
-                 yield req
-             elif any(req.marker.evaluate(context) for context in contexts):
-                 yield req
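
The iter_dependencies method above is where the Requires-Dist filtering happens: requirements without an environment marker always apply, and marked requirements are evaluated once per requested extra. A minimal, self-contained sketch of the same pattern, assuming only the packaging library (the requirement strings and the "test" extra below are hypothetical):

from packaging.requirements import Requirement

requires_dist = [
    'typing-extensions; python_version < "3.8"',
    'colorama; sys_platform == "win32"',
    'pytest; extra == "test"',
]
extras = {"test"}
contexts = [{"extra": e} for e in extras]

for req_string in requires_dist:
    req = Requirement(req_string)
    if not req.marker:
        print(req)  # unmarked requirements always apply
    elif not extras and req.marker.evaluate({"extra": ""}):
        print(req)  # no extras requested: evaluate with an empty extra
    elif any(req.marker.evaluate(context) for context in contexts):
        print(req)  # applies under at least one requested extra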
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/scripts.py DELETED
@@ -1,437 +0,0 @@
- # -*- coding: utf-8 -*-
- #
- # Copyright (C) 2013-2015 Vinay Sajip.
- # Licensed to the Python Software Foundation under a contributor agreement.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- from io import BytesIO
- import logging
- import os
- import re
- import struct
- import sys
- import time
- from zipfile import ZipInfo
-
- from .compat import sysconfig, detect_encoding, ZipFile
- from .resources import finder
- from .util import (FileOperator, get_export_entry, convert_path,
-                    get_executable, get_platform, in_venv)
-
- logger = logging.getLogger(__name__)
-
- _DEFAULT_MANIFEST = '''
- <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
- <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
-  <assemblyIdentity version="1.0.0.0"
-  processorArchitecture="X86"
-  name="%s"
-  type="win32"/>
-
-  <!-- Identify the application security requirements. -->
-  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
-  <security>
-  <requestedPrivileges>
-  <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
-  </requestedPrivileges>
-  </security>
-  </trustInfo>
- </assembly>'''.strip()
-
- # check if Python is called on the first line with this expression
- FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
- SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
- import re
- import sys
- from %(module)s import %(import_name)s
- if __name__ == '__main__':
-     sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-     sys.exit(%(func)s())
- '''
-
-
- def enquote_executable(executable):
-     if ' ' in executable:
-         # make sure we quote only the executable in case of env
-         # for example /usr/bin/env "/dir with spaces/bin/jython"
-         # instead of "/usr/bin/env /dir with spaces/bin/jython"
-         # otherwise the whole command line would be quoted
-         if executable.startswith('/usr/bin/env '):
-             env, _executable = executable.split(' ', 1)
-             if ' ' in _executable and not _executable.startswith('"'):
-                 executable = '%s "%s"' % (env, _executable)
-         else:
-             if not executable.startswith('"'):
-                 executable = '"%s"' % executable
-     return executable
-
- # Keep the old name around (for now), as there is at least one project using it!
- _enquote_executable = enquote_executable
-
-
- class ScriptMaker(object):
-     """
-     A class to copy or create scripts from source scripts or callable
-     specifications.
-     """
-     script_template = SCRIPT_TEMPLATE
-
-     executable = None  # for shebangs
-
-     def __init__(self, source_dir, target_dir, add_launchers=True,
-                  dry_run=False, fileop=None):
-         self.source_dir = source_dir
-         self.target_dir = target_dir
-         self.add_launchers = add_launchers
-         self.force = False
-         self.clobber = False
-         # It only makes sense to set mode bits on POSIX.
-         self.set_mode = (os.name == 'posix') or (os.name == 'java' and
-                                                  os._name == 'posix')
-         self.variants = set(('', 'X.Y'))
-         self._fileop = fileop or FileOperator(dry_run)
-
-         self._is_nt = os.name == 'nt' or (
-             os.name == 'java' and os._name == 'nt')
-         self.version_info = sys.version_info
-
-     def _get_alternate_executable(self, executable, options):
-         if options.get('gui', False) and self._is_nt:  # pragma: no cover
-             dn, fn = os.path.split(executable)
-             fn = fn.replace('python', 'pythonw')
-             executable = os.path.join(dn, fn)
-         return executable
-
-     if sys.platform.startswith('java'):  # pragma: no cover
-         def _is_shell(self, executable):
-             """
-             Determine if the specified executable is a script
-             (contains a #! line)
-             """
-             try:
-                 with open(executable) as fp:
-                     return fp.read(2) == '#!'
-             except (OSError, IOError):
-                 logger.warning('Failed to open %s', executable)
-                 return False
-
-         def _fix_jython_executable(self, executable):
-             if self._is_shell(executable):
-                 # The Jython workaround is not needed on Linux systems.
-                 import java
-
-                 if java.lang.System.getProperty('os.name') == 'Linux':
-                     return executable
-             elif executable.lower().endswith('jython.exe'):
-                 # Use wrapper exe for Jython on Windows
-                 return executable
-             return '/usr/bin/env %s' % executable
-
-     def _build_shebang(self, executable, post_interp):
-         """
-         Build a shebang line. In the simple case (on Windows, or a shebang line
-         which is not too long and contains no spaces) use a simple formulation
-         for the shebang. Otherwise, use /bin/sh as the executable, with a
-         contrived shebang which allows the script to run either under Python or
-         sh, using suitable quoting. Thanks to Harald Nordgren for his input.
-
-         See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
-                   https://hg.mozilla.org/mozilla-central/file/tip/mach
-         """
-         if os.name != 'posix':
-             simple_shebang = True
-         else:
-             # Add 3 for '#!' prefix and newline suffix.
-             shebang_length = len(executable) + len(post_interp) + 3
-             if sys.platform == 'darwin':
-                 max_shebang_length = 512
-             else:
-                 max_shebang_length = 127
-             simple_shebang = ((b' ' not in executable) and
-                               (shebang_length <= max_shebang_length))
-
-         if simple_shebang:
-             result = b'#!' + executable + post_interp + b'\n'
-         else:
-             result = b'#!/bin/sh\n'
-             result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
-             result += b"' '''"
-         return result
-
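
To make the two branches of _build_shebang concrete, here is a small worked illustration with a hypothetical interpreter path containing a space; the /bin/sh form is simultaneously a valid sh script (sh executes the 'exec' line) and a valid Python file (the same lines parse as a '''...''' string literal before any code runs):

executable = b'/opt/my python/bin/python3'  # hypothetical path with a space
post_interp = b''
simple = b'#!' + executable + post_interp + b'\n'
contrived = (b'#!/bin/sh\n'
             + b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
             + b"' '''")
print(simple.decode('utf-8'))     # plain shebang; would fail here due to the space
print(contrived.decode('utf-8'))  # the dual sh/Python form actually emitted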
-     def _get_shebang(self, encoding, post_interp=b'', options=None):
-         enquote = True
-         if self.executable:
-             executable = self.executable
-             enquote = False  # assume this will be taken care of
-         elif not sysconfig.is_python_build():
-             executable = get_executable()
-         elif in_venv():  # pragma: no cover
-             executable = os.path.join(sysconfig.get_path('scripts'),
-                                       'python%s' % sysconfig.get_config_var('EXE'))
-         else:  # pragma: no cover
-             executable = os.path.join(
-                 sysconfig.get_config_var('BINDIR'),
-                 'python%s%s' % (sysconfig.get_config_var('VERSION'),
-                                 sysconfig.get_config_var('EXE')))
-             if not os.path.isfile(executable):
-                 # for Python builds from source on Windows, no Python executables with
-                 # a version suffix are created, so we use python.exe
-                 executable = os.path.join(sysconfig.get_config_var('BINDIR'),
-                                           'python%s' % (sysconfig.get_config_var('EXE')))
-         if options:
-             executable = self._get_alternate_executable(executable, options)
-
-         if sys.platform.startswith('java'):  # pragma: no cover
-             executable = self._fix_jython_executable(executable)
-
-         # Normalise case for Windows - COMMENTED OUT
-         # executable = os.path.normcase(executable)
-         # N.B. The normalising operation above has been commented out: See
-         # issue #124. Although paths in Windows are generally case-insensitive,
-         # they aren't always. For example, a path containing a ẞ (which is a
-         # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
-         # LATIN SMALL LETTER SHARP S - U+00DF). The two are not considered by
-         # Windows as equivalent in path names.
-
-         # If the user didn't specify an executable, it may be necessary to
-         # cater for executable paths with spaces (not uncommon on Windows)
-         if enquote:
-             executable = enquote_executable(executable)
-         # Issue #51: don't use fsencode, since we later try to
-         # check that the shebang is decodable using utf-8.
-         executable = executable.encode('utf-8')
-         # in case of IronPython, play safe and enable frames support
-         if (sys.platform == 'cli' and '-X:Frames' not in post_interp
-                 and '-X:FullFrames' not in post_interp):  # pragma: no cover
-             post_interp += b' -X:Frames'
-         shebang = self._build_shebang(executable, post_interp)
-         # Python parser starts to read a script using UTF-8 until
-         # it gets a #coding:xxx cookie. The shebang has to be the
-         # first line of a file, the #coding:xxx cookie cannot be
-         # written before. So the shebang has to be decodable from
-         # UTF-8.
-         try:
-             shebang.decode('utf-8')
-         except UnicodeDecodeError:  # pragma: no cover
-             raise ValueError(
-                 'The shebang (%r) is not decodable from utf-8' % shebang)
-         # If the script is encoded to a custom encoding (use a
-         # #coding:xxx cookie), the shebang has to be decodable from
-         # the script encoding too.
-         if encoding != 'utf-8':
-             try:
-                 shebang.decode(encoding)
-             except UnicodeDecodeError:  # pragma: no cover
-                 raise ValueError(
-                     'The shebang (%r) is not decodable '
-                     'from the script encoding (%r)' % (shebang, encoding))
-         return shebang
-
-     def _get_script_text(self, entry):
-         return self.script_template % dict(module=entry.prefix,
-                                            import_name=entry.suffix.split('.')[0],
-                                            func=entry.suffix)
-
-     manifest = _DEFAULT_MANIFEST
-
-     def get_manifest(self, exename):
-         base = os.path.basename(exename)
-         return self.manifest % base
-
-     def _write_script(self, names, shebang, script_bytes, filenames, ext):
-         use_launcher = self.add_launchers and self._is_nt
-         linesep = os.linesep.encode('utf-8')
-         if not shebang.endswith(linesep):
-             shebang += linesep
-         if not use_launcher:
-             script_bytes = shebang + script_bytes
-         else:  # pragma: no cover
-             if ext == 'py':
-                 launcher = self._get_launcher('t')
-             else:
-                 launcher = self._get_launcher('w')
-             stream = BytesIO()
-             with ZipFile(stream, 'w') as zf:
-                 source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
-                 if source_date_epoch:
-                     date_time = time.gmtime(int(source_date_epoch))[:6]
-                     zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
-                     zf.writestr(zinfo, script_bytes)
-                 else:
-                     zf.writestr('__main__.py', script_bytes)
-             zip_data = stream.getvalue()
-             script_bytes = launcher + shebang + zip_data
-         for name in names:
-             outname = os.path.join(self.target_dir, name)
-             if use_launcher:  # pragma: no cover
-                 n, e = os.path.splitext(outname)
-                 if e.startswith('.py'):
-                     outname = n
-                 outname = '%s.exe' % outname
-                 try:
-                     self._fileop.write_binary_file(outname, script_bytes)
-                 except Exception:
-                     # Failed writing an executable - it might be in use.
-                     logger.warning('Failed to write executable - trying to '
-                                    'use .deleteme logic')
-                     dfname = '%s.deleteme' % outname
-                     if os.path.exists(dfname):
-                         os.remove(dfname)       # Not allowed to fail here
-                     os.rename(outname, dfname)  # nor here
-                     self._fileop.write_binary_file(outname, script_bytes)
-                     logger.debug('Able to replace executable using '
-                                  '.deleteme logic')
-                     try:
-                         os.remove(dfname)
-                     except Exception:
-                         pass  # still in use - ignore error
-             else:
-                 if self._is_nt and not outname.endswith('.' + ext):  # pragma: no cover
-                     outname = '%s.%s' % (outname, ext)
-                 if os.path.exists(outname) and not self.clobber:
-                     logger.warning('Skipping existing file %s', outname)
-                     continue
-                 self._fileop.write_binary_file(outname, script_bytes)
-             if self.set_mode:
-                 self._fileop.set_executable_mode([outname])
-             filenames.append(outname)
-
-     variant_separator = '-'
-
-     def get_script_filenames(self, name):
-         result = set()
-         if '' in self.variants:
-             result.add(name)
-         if 'X' in self.variants:
-             result.add('%s%s' % (name, self.version_info[0]))
-         if 'X.Y' in self.variants:
-             result.add('%s%s%s.%s' % (name, self.variant_separator,
-                                       self.version_info[0], self.version_info[1]))
-         return result
-
-     def _make_script(self, entry, filenames, options=None):
-         post_interp = b''
-         if options:
-             args = options.get('interpreter_args', [])
-             if args:
-                 args = ' %s' % ' '.join(args)
-                 post_interp = args.encode('utf-8')
-         shebang = self._get_shebang('utf-8', post_interp, options=options)
-         script = self._get_script_text(entry).encode('utf-8')
-         scriptnames = self.get_script_filenames(entry.name)
-         if options and options.get('gui', False):
-             ext = 'pyw'
-         else:
-             ext = 'py'
-         self._write_script(scriptnames, shebang, script, filenames, ext)
-
-     def _copy_script(self, script, filenames):
-         adjust = False
-         script = os.path.join(self.source_dir, convert_path(script))
-         outname = os.path.join(self.target_dir, os.path.basename(script))
-         if not self.force and not self._fileop.newer(script, outname):
-             logger.debug('not copying %s (up-to-date)', script)
-             return
-
-         # Always open the file, but ignore failures in dry-run mode --
-         # that way, we'll get accurate feedback if we can read the
-         # script.
-         try:
-             f = open(script, 'rb')
-         except IOError:  # pragma: no cover
-             if not self.dry_run:
-                 raise
-             f = None
-         else:
-             first_line = f.readline()
-             if not first_line:  # pragma: no cover
-                 logger.warning('%s is an empty file (skipping)', script)
-                 return
-
-             match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
-             if match:
-                 adjust = True
-                 post_interp = match.group(1) or b''
-
-         if not adjust:
-             if f:
-                 f.close()
-             self._fileop.copy_file(script, outname)
-             if self.set_mode:
-                 self._fileop.set_executable_mode([outname])
-             filenames.append(outname)
-         else:
-             logger.info('copying and adjusting %s -> %s', script,
-                         self.target_dir)
-             if not self._fileop.dry_run:
-                 encoding, lines = detect_encoding(f.readline)
-                 f.seek(0)
-                 shebang = self._get_shebang(encoding, post_interp)
-                 if b'pythonw' in first_line:  # pragma: no cover
-                     ext = 'pyw'
-                 else:
-                     ext = 'py'
-                 n = os.path.basename(outname)
-                 self._write_script([n], shebang, f.read(), filenames, ext)
-             if f:
-                 f.close()
-
-     @property
-     def dry_run(self):
-         return self._fileop.dry_run
-
-     @dry_run.setter
-     def dry_run(self, value):
-         self._fileop.dry_run = value
-
-     if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):  # pragma: no cover
-         # Executable launcher support.
-         # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
-
-         def _get_launcher(self, kind):
-             if struct.calcsize('P') == 8:  # 64-bit
-                 bits = '64'
-             else:
-                 bits = '32'
-             platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
-             name = '%s%s%s.exe' % (kind, bits, platform_suffix)
-             # Issue 31: don't hardcode an absolute package name, but
-             # determine it relative to the current package
-             distlib_package = __name__.rsplit('.', 1)[0]
-             resource = finder(distlib_package).find(name)
-             if not resource:
-                 msg = ('Unable to find resource %s in package %s' % (name,
-                        distlib_package))
-                 raise ValueError(msg)
-             return resource.bytes
-
-     # Public API follows
-
-     def make(self, specification, options=None):
-         """
-         Make a script.
-
-         :param specification: The specification, which is either a valid export
-                               entry specification (to make a script from a
-                               callable) or a filename (to make a script by
-                               copying from a source location).
-         :param options: A dictionary of options controlling script generation.
-         :return: A list of all absolute pathnames written to.
-         """
-         filenames = []
-         entry = get_export_entry(specification)
-         if entry is None:
-             self._copy_script(specification, filenames)
-         else:
-             self._make_script(entry, filenames, options=options)
-         return filenames
-
-     def make_multiple(self, specifications, options=None):
-         """
-         Take a list of specifications and make scripts from them.
-
-         :param specifications: A list of specifications.
-         :return: A list of all absolute pathnames written to.
-         """
-         filenames = []
-         for specification in specifications:
-             filenames.extend(self.make(specification, options))
-         return filenames
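
For reference, a minimal sketch of driving the public make() API above; the target directory and the export entry "hello = hello.cli:main" are hypothetical:

from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir="build/bin", add_launchers=False)
maker.clobber = True   # overwrite existing files instead of skipping them
maker.variants = {""}  # generate only "hello", not the "hello-X.Y" variant
written = maker.make("hello = hello.cli:main")
print(written)         # absolute paths of the generated wrapper script(s)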
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/wheel.py DELETED
@@ -1,222 +0,0 @@
- """Wheels support."""
-
- import email
- import itertools
- import os
- import posixpath
- import re
- import zipfile
- import contextlib
-
- from distutils.util import get_platform
-
- import pkg_resources
- import setuptools
- from pkg_resources import parse_version
- from setuptools.extern.packaging.tags import sys_tags
- from setuptools.extern.packaging.utils import canonicalize_name
- from setuptools.command.egg_info import write_requirements
- from setuptools.archive_util import _unpack_zipfile_obj
-
-
- WHEEL_NAME = re.compile(
-     r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
-     ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
-     )\.whl$""",
-     re.VERBOSE).match
-
- NAMESPACE_PACKAGE_INIT = \
-     "__import__('pkg_resources').declare_namespace(__name__)\n"
-
-
- def unpack(src_dir, dst_dir):
-     '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
-     for dirpath, dirnames, filenames in os.walk(src_dir):
-         subdir = os.path.relpath(dirpath, src_dir)
-         for f in filenames:
-             src = os.path.join(dirpath, f)
-             dst = os.path.join(dst_dir, subdir, f)
-             os.renames(src, dst)
-         for n, d in reversed(list(enumerate(dirnames))):
-             src = os.path.join(dirpath, d)
-             dst = os.path.join(dst_dir, subdir, d)
-             if not os.path.exists(dst):
-                 # Directory does not exist in destination,
-                 # rename it and prune it from os.walk list.
-                 os.renames(src, dst)
-                 del dirnames[n]
-     # Cleanup.
-     for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
-         assert not filenames
-         os.rmdir(dirpath)
-
-
- @contextlib.contextmanager
- def disable_info_traces():
-     """
-     Temporarily disable info traces.
-     """
-     from distutils import log
-     saved = log.set_threshold(log.WARN)
-     try:
-         yield
-     finally:
-         log.set_threshold(saved)
-
-
- class Wheel:
-
-     def __init__(self, filename):
-         match = WHEEL_NAME(os.path.basename(filename))
-         if match is None:
-             raise ValueError('invalid wheel name: %r' % filename)
-         self.filename = filename
-         for k, v in match.groupdict().items():
-             setattr(self, k, v)
-
-     def tags(self):
-         '''List tags (py_version, abi, platform) supported by this wheel.'''
-         return itertools.product(
-             self.py_version.split('.'),
-             self.abi.split('.'),
-             self.platform.split('.'),
-         )
-
-     def is_compatible(self):
-         '''Is the wheel compatible with the current platform?'''
-         supported_tags = set(
-             (t.interpreter, t.abi, t.platform) for t in sys_tags())
-         return next((True for t in self.tags() if t in supported_tags), False)
-
-     def egg_name(self):
-         return pkg_resources.Distribution(
-             project_name=self.project_name, version=self.version,
-             platform=(None if self.platform == 'any' else get_platform()),
-         ).egg_name() + '.egg'
-
-     def get_dist_info(self, zf):
-         # find the correct name of the .dist-info dir in the wheel file
-         for member in zf.namelist():
-             dirname = posixpath.dirname(member)
-             if (dirname.endswith('.dist-info') and
-                     canonicalize_name(dirname).startswith(
-                         canonicalize_name(self.project_name))):
-                 return dirname
-         raise ValueError("unsupported wheel format. .dist-info not found")
-
-     def install_as_egg(self, destination_eggdir):
-         '''Install wheel as an egg directory.'''
-         with zipfile.ZipFile(self.filename) as zf:
-             self._install_as_egg(destination_eggdir, zf)
-
-     def _install_as_egg(self, destination_eggdir, zf):
-         dist_basename = '%s-%s' % (self.project_name, self.version)
-         dist_info = self.get_dist_info(zf)
-         dist_data = '%s.data' % dist_basename
-         egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
-
-         self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
-         self._move_data_entries(destination_eggdir, dist_data)
-         self._fix_namespace_packages(egg_info, destination_eggdir)
-
-     @staticmethod
-     def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
-         def get_metadata(name):
-             with zf.open(posixpath.join(dist_info, name)) as fp:
-                 value = fp.read().decode('utf-8')
-                 return email.parser.Parser().parsestr(value)
-
-         wheel_metadata = get_metadata('WHEEL')
-         # Check wheel format version is supported.
-         wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
-         wheel_v1 = (
-             parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
-         )
-         if not wheel_v1:
-             raise ValueError(
-                 'unsupported wheel format version: %s' % wheel_version)
-         # Extract to target directory.
-         _unpack_zipfile_obj(zf, destination_eggdir)
-         # Convert metadata.
-         dist_info = os.path.join(destination_eggdir, dist_info)
-         dist = pkg_resources.Distribution.from_location(
-             destination_eggdir, dist_info,
-             metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
-         )
-
-         # Note: Evaluate and strip markers now,
-         # as it's difficult to convert back from the syntax:
-         # foobar; "linux" in sys_platform and extra == 'test'
-         def raw_req(req):
-             req.marker = None
-             return str(req)
-         install_requires = list(map(raw_req, dist.requires()))
-         extras_require = {
-             extra: [
-                 req
-                 for req in map(raw_req, dist.requires((extra,)))
-                 if req not in install_requires
-             ]
-             for extra in dist.extras
-         }
-         os.rename(dist_info, egg_info)
-         os.rename(
-             os.path.join(egg_info, 'METADATA'),
-             os.path.join(egg_info, 'PKG-INFO'),
-         )
-         setup_dist = setuptools.Distribution(
-             attrs=dict(
-                 install_requires=install_requires,
-                 extras_require=extras_require,
-             ),
-         )
-         with disable_info_traces():
-             write_requirements(
-                 setup_dist.get_command_obj('egg_info'),
-                 None,
-                 os.path.join(egg_info, 'requires.txt'),
-             )
-
-     @staticmethod
-     def _move_data_entries(destination_eggdir, dist_data):
-         """Move data entries to their correct location."""
-         dist_data = os.path.join(destination_eggdir, dist_data)
-         dist_data_scripts = os.path.join(dist_data, 'scripts')
-         if os.path.exists(dist_data_scripts):
-             egg_info_scripts = os.path.join(
-                 destination_eggdir, 'EGG-INFO', 'scripts')
-             os.mkdir(egg_info_scripts)
-             for entry in os.listdir(dist_data_scripts):
-                 # Remove bytecode, as it's not properly handled
-                 # during easy_install scripts install phase.
-                 if entry.endswith('.pyc'):
-                     os.unlink(os.path.join(dist_data_scripts, entry))
-                 else:
-                     os.rename(
-                         os.path.join(dist_data_scripts, entry),
-                         os.path.join(egg_info_scripts, entry),
-                     )
-             os.rmdir(dist_data_scripts)
-         for subdir in filter(os.path.exists, (
-                 os.path.join(dist_data, d)
-                 for d in ('data', 'headers', 'purelib', 'platlib')
-         )):
-             unpack(subdir, destination_eggdir)
-         if os.path.exists(dist_data):
-             os.rmdir(dist_data)
-
-     @staticmethod
-     def _fix_namespace_packages(egg_info, destination_eggdir):
-         namespace_packages = os.path.join(
-             egg_info, 'namespace_packages.txt')
-         if os.path.exists(namespace_packages):
-             with open(namespace_packages) as fp:
-                 namespace_packages = fp.read().split()
-             for mod in namespace_packages:
-                 mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
-                 mod_init = os.path.join(mod_dir, '__init__.py')
-                 if not os.path.exists(mod_dir):
-                     os.mkdir(mod_dir)
-                 if not os.path.exists(mod_init):
-                     with open(mod_init, 'w') as fp:
-                         fp.write(NAMESPACE_PACKAGE_INIT)
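
A minimal sketch of the Wheel API above; the wheel filename is hypothetical, and the file would need to exist on disk for install_as_egg to do anything:

from setuptools.wheel import Wheel

whl = Wheel("example_pkg-1.0-py3-none-any.whl")
print(whl.project_name, whl.version)  # parsed from the filename by WHEEL_NAME
print(list(whl.tags()))               # [('py3', 'none', 'any')]
if whl.is_compatible():
    whl.install_as_egg("build/" + whl.egg_name())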
spaces/BramVanroy/text-to-amr/README.md DELETED
@@ -1,16 +0,0 @@
- ---
- title: Text To AMR
- emoji: 👩‍💻
- colorFrom: yellow
- colorTo: gray
- sdk: docker
- app_port: 8501
- app_file: app.py
- pinned: true
- license: gpl-3.0
- tags:
- - natural language processing
- - semantic parsing
- - abstract meaning representation
- - amr
- ---
spaces/C6AI/HDRL/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Livebook
- emoji: 📓
- colorFrom: pink
- colorTo: purple
- sdk: docker
- fullWidth: true
- duplicated_from: livebook-dev/livebook
- license: mit
- ---
-
- You can install and run [Livebook](https://livebook.dev/) inside a Hugging Face Space. Here's [a tutorial](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook) on how to do that.
spaces/CCaniggia/GPT/Dockerfile DELETED
@@ -1,11 +0,0 @@
- FROM golang:alpine AS builder
- RUN apk --no-cache add git
- RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
- WORKDIR /workspace/app
- RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
- FROM alpine
- WORKDIR /workspace/app
- COPY --from=builder /workspace/app/go-proxy-bingai .
- ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtx5rG6bE3fZ4iO"
- EXPOSE 8080
- CMD ["/workspace/app/go-proxy-bingai"]
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/visualizer.py DELETED
@@ -1,1133 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import colorsys
- import logging
- import math
- import numpy as np
- from enum import Enum, unique
- import cv2
- import matplotlib as mpl
- import matplotlib.colors as mplc
- import matplotlib.figure as mplfigure
- import pycocotools.mask as mask_util
- import torch
- from matplotlib.backends.backend_agg import FigureCanvasAgg
-
- from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
-
- from .colormap import random_color
-
- logger = logging.getLogger(__name__)
-
- __all__ = ["ColorMode", "VisImage", "Visualizer"]
-
-
- _SMALL_OBJECT_AREA_THRESH = 1000
- _LARGE_MASK_AREA_THRESH = 120000
- _OFF_WHITE = (1.0, 1.0, 240.0 / 255)
- _BLACK = (0, 0, 0)
- _RED = (1.0, 0, 0)
-
- _KEYPOINT_THRESHOLD = 0.05
-
-
- @unique
- class ColorMode(Enum):
-     """
-     Enum of different color modes to use for instance visualizations.
-
-     Attributes:
-         IMAGE: Picks a random color for every instance and overlay segmentations with low opacity.
-         SEGMENTATION: Let instances of the same category have similar colors
-             (from metadata.thing_colors), and overlay them with
-             high opacity. This provides more attention on the quality of segmentation.
-         IMAGE_BW: same as IMAGE, but convert all areas without masks to gray-scale.
-             Only available for drawing per-instance mask predictions.
-     """
-
-     IMAGE = 0
-     SEGMENTATION = 1
-     IMAGE_BW = 2
-
-
- class GenericMask:
-     """
-     Attribute:
-         polygons (list[ndarray]): polygons for this mask.
-             Each ndarray has format [x, y, x, y, ...]
-         mask (ndarray): a binary mask
-     """
-
-     def __init__(self, mask_or_polygons, height, width):
-         self._mask = self._polygons = self._has_holes = None
-         self.height = height
-         self.width = width
-
-         m = mask_or_polygons
-         if isinstance(m, dict):
-             # RLEs
-             assert "counts" in m and "size" in m
-             if isinstance(m["counts"], list):  # uncompressed RLEs
-                 h, w = m["size"]
-                 assert h == height and w == width
-                 m = mask_util.frPyObjects(m, h, w)
-             self._mask = mask_util.decode(m)[:, :]
-             return
-
-         if isinstance(m, list):  # list[ndarray]
-             self._polygons = [np.asarray(x).reshape(-1) for x in m]
-             return
-
-         if isinstance(m, np.ndarray):  # assumed to be a binary mask
-             assert m.shape[1] != 2, m.shape
-             assert m.shape == (height, width), m.shape
-             self._mask = m.astype("uint8")
-             return
-
-         raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
-
-     @property
-     def mask(self):
-         if self._mask is None:
-             self._mask = self.polygons_to_mask(self._polygons)
-         return self._mask
-
-     @property
-     def polygons(self):
-         if self._polygons is None:
-             self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
-         return self._polygons
-
-     @property
-     def has_holes(self):
-         if self._has_holes is None:
-             if self._mask is not None:
-                 self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
-             else:
-                 self._has_holes = False  # if original format is polygon, does not have holes
-         return self._has_holes
-
-     def mask_to_polygons(self, mask):
-         # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
-         # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
-         # Internal contours (holes) are placed in hierarchy-2.
-         # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
-         mask = np.ascontiguousarray(mask)  # some versions of cv2 do not support non-contiguous arr
-         res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
-         hierarchy = res[-1]
-         if hierarchy is None:  # empty mask
-             return [], False
-         has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
-         res = res[-2]
-         res = [x.flatten() for x in res]
-         res = [x for x in res if len(x) >= 6]
-         return res, has_holes
-
-     def polygons_to_mask(self, polygons):
-         rle = mask_util.frPyObjects(polygons, self.height, self.width)
-         rle = mask_util.merge(rle)
-         return mask_util.decode(rle)[:, :]
-
-     def area(self):
-         return self.mask.sum()
-
-     def bbox(self):
-         p = mask_util.frPyObjects(self.polygons, self.height, self.width)
-         p = mask_util.merge(p)
-         bbox = mask_util.toBbox(p)
-         bbox[2] += bbox[0]
-         bbox[3] += bbox[1]
-         return bbox
-
-
- class _PanopticPrediction:
-     def __init__(self, panoptic_seg, segments_info):
-         self._seg = panoptic_seg
-
-         self._sinfo = {s["id"]: s for s in segments_info}  # seg id -> seg info
-         segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
-         areas = areas.numpy()
-         sorted_idxs = np.argsort(-areas)
-         self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
-         self._seg_ids = self._seg_ids.tolist()
-         for sid, area in zip(self._seg_ids, self._seg_areas):
-             if sid in self._sinfo:
-                 self._sinfo[sid]["area"] = float(area)
-
-     def non_empty_mask(self):
-         """
-         Returns:
-             (H, W) array, a mask for all pixels that have a prediction
-         """
-         empty_ids = []
-         for id in self._seg_ids:
-             if id not in self._sinfo:
-                 empty_ids.append(id)
-         if len(empty_ids) == 0:
-             return np.zeros(self._seg.shape, dtype=np.uint8)
-         assert (
-             len(empty_ids) == 1
-         ), ">1 ids corresponds to no labels. This is currently not supported"
-         return (self._seg != empty_ids[0]).numpy().astype(np.bool)
-
-     def semantic_masks(self):
-         for sid in self._seg_ids:
-             sinfo = self._sinfo.get(sid)
-             if sinfo is None or sinfo["isthing"]:
-                 # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
-                 continue
-             yield (self._seg == sid).numpy().astype(np.bool), sinfo
-
-     def instance_masks(self):
-         for sid in self._seg_ids:
-             sinfo = self._sinfo.get(sid)
-             if sinfo is None or not sinfo["isthing"]:
-                 continue
-             mask = (self._seg == sid).numpy().astype(np.bool)
-             if mask.sum() > 0:
-                 yield mask, sinfo
-
-
- def _create_text_labels(classes, scores, class_names):
-     """
-     Args:
-         classes (list[int] or None):
-         scores (list[float] or None):
-         class_names (list[str] or None):
-
-     Returns:
-         list[str] or None
-     """
-     labels = None
-     if classes is not None and class_names is not None and len(class_names) > 1:
-         labels = [class_names[i] for i in classes]
-     if scores is not None:
-         if labels is None:
-             labels = ["{:.0f}%".format(s * 100) for s in scores]
-         else:
-             labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
-     return labels
-
-
- class VisImage:
-     def __init__(self, img, scale=1.0):
-         """
-         Args:
-             img (ndarray): an RGB image of shape (H, W, 3).
-             scale (float): scale the input image
-         """
-         self.img = img
-         self.scale = scale
-         self.width, self.height = img.shape[1], img.shape[0]
-         self._setup_figure(img)
-
-     def _setup_figure(self, img):
-         """
-         Args:
-             Same as in :meth:`__init__()`.
-
-         Returns:
-             fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
-             ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
-         """
-         fig = mplfigure.Figure(frameon=False)
-         self.dpi = fig.get_dpi()
-         # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
-         # (https://github.com/matplotlib/matplotlib/issues/15363)
-         fig.set_size_inches(
-             (self.width * self.scale + 1e-2) / self.dpi,
-             (self.height * self.scale + 1e-2) / self.dpi,
-         )
-         self.canvas = FigureCanvasAgg(fig)
-         # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
-         ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
-         ax.axis("off")
-         ax.set_xlim(0.0, self.width)
-         ax.set_ylim(self.height)
-
-         self.fig = fig
-         self.ax = ax
-
-     def save(self, filepath):
-         """
-         Args:
-             filepath (str): a string that contains the absolute path, including the file name, where
-                 the visualized image will be saved.
-         """
-         if filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"):
-             # faster than matplotlib's imshow
-             cv2.imwrite(filepath, self.get_image()[:, :, ::-1])
-         else:
-             # support general formats (e.g. pdf)
-             self.ax.imshow(self.img, interpolation="nearest")
-             self.fig.savefig(filepath)
-
-     def get_image(self):
-         """
-         Returns:
-             ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type.
-                 The shape is scaled w.r.t the input image using the given `scale` argument.
-         """
-         canvas = self.canvas
-         s, (width, height) = canvas.print_to_buffer()
-         if (self.width, self.height) != (width, height):
-             img = cv2.resize(self.img, (width, height))
-         else:
-             img = self.img
-
-         # buf = io.BytesIO()  # works for cairo backend
-         # canvas.print_rgba(buf)
-         # width, height = self.width, self.height
-         # s = buf.getvalue()
-
-         buffer = np.frombuffer(s, dtype="uint8")
-
-         # imshow is slow. blend manually (still quite slow)
-         img_rgba = buffer.reshape(height, width, 4)
-         rgb, alpha = np.split(img_rgba, [3], axis=2)
-
-         try:
-             import numexpr as ne  # fuse them with numexpr
-
-             visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
-         except ImportError:
-             alpha = alpha.astype("float32") / 255.0
-             visualized_image = img * (1 - alpha) + rgb * alpha
-
-         visualized_image = visualized_image.astype("uint8")
-
-         return visualized_image
-
-
- class Visualizer:
-     def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
-         """
-         Args:
-             img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
-                 the height and width of the image respectively. C is the number of
-                 color channels. The image is required to be in RGB format since that
-                 is a requirement of the Matplotlib library. The image is also expected
-                 to be in the range [0, 255].
-             metadata (MetadataCatalog): image metadata.
-         """
-         self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
-         self.metadata = metadata
-         self.output = VisImage(self.img, scale=scale)
-         self.cpu_device = torch.device("cpu")
-
-         # too small texts are useless, therefore clamp to 9
-         self._default_font_size = max(
-             np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
-         )
-         self._instance_mode = instance_mode
-
-     def draw_instance_predictions(self, predictions):
-         """
-         Draw instance-level prediction results on an image.
-
-         Args:
-             predictions (Instances): the output of an instance detection/segmentation
-                 model. Following fields will be used to draw:
-                 "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
-         scores = predictions.scores if predictions.has("scores") else None
-         classes = predictions.pred_classes if predictions.has("pred_classes") else None
-         labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
-         keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
-
-         if predictions.has("pred_masks"):
-             masks = np.asarray(predictions.pred_masks)
-             masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
-         else:
-             masks = None
-
-         if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
-             colors = [
-                 self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
-             ]
-             alpha = 0.8
-         else:
-             colors = None
-             alpha = 0.5
-
-         if self._instance_mode == ColorMode.IMAGE_BW:
-             assert predictions.has("pred_masks"), "ColorMode.IMAGE_BW requires segmentations"
-             self.output.img = self._create_grayscale_image(
-                 (predictions.pred_masks.any(dim=0) > 0).numpy()
-             )
-             alpha = 0.3
-
-         self.overlay_instances(
-             masks=masks,
-             boxes=boxes,
-             labels=labels,
-             keypoints=keypoints,
-             assigned_colors=colors,
-             alpha=alpha,
-         )
-         return self.output
-
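
A minimal sketch of how draw_instance_predictions is typically driven; the config name and input image path are hypothetical, and the surrounding calls follow detectron2's documented model_zoo/DefaultPredictor API:

import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)

img_bgr = cv2.imread("input.jpg")                 # hypothetical input image
outputs = predictor(img_bgr)                      # predictor consumes BGR
vis = Visualizer(img_bgr[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]))
out = vis.draw_instance_predictions(outputs["instances"].to("cpu"))
out.save("output.jpg")                            # VisImage.save, defined above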
-     def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
-         """
-         Draw semantic segmentation predictions/labels.
-
-         Args:
-             sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
-             area_threshold (int): segments with less than `area_threshold` are not drawn.
-             alpha (float): the larger it is, the more opaque the segmentations are.
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         if isinstance(sem_seg, torch.Tensor):
-             sem_seg = sem_seg.numpy()
-         labels, areas = np.unique(sem_seg, return_counts=True)
-         sorted_idxs = np.argsort(-areas).tolist()
-         labels = labels[sorted_idxs]
-         for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
-             try:
-                 mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
-             except (AttributeError, IndexError):
-                 mask_color = None
-
-             binary_mask = (sem_seg == label).astype(np.uint8)
-             text = self.metadata.stuff_classes[label]
-             self.draw_binary_mask(
-                 binary_mask,
-                 color=mask_color,
-                 edge_color=_OFF_WHITE,
-                 text=text,
-                 alpha=alpha,
-                 area_threshold=area_threshold,
-             )
-         return self.output
-
-     def draw_panoptic_seg_predictions(
-         self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7
-     ):
-         """
-         Draw panoptic prediction results on an image.
-
-         Args:
-             panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
-                 segment.
-             segments_info (list[dict]): Describe each segment in `panoptic_seg`.
-                 Each dict contains keys "id", "category_id", "isthing".
-             area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         pred = _PanopticPrediction(panoptic_seg, segments_info)
-
-         if self._instance_mode == ColorMode.IMAGE_BW:
-             self.output.img = self._create_grayscale_image(pred.non_empty_mask())
-
-         # draw mask for all semantic segments first i.e. "stuff"
-         for mask, sinfo in pred.semantic_masks():
-             category_idx = sinfo["category_id"]
-             try:
-                 mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
-             except AttributeError:
-                 mask_color = None
-
-             text = self.metadata.stuff_classes[category_idx]
-             self.draw_binary_mask(
-                 mask,
-                 color=mask_color,
-                 edge_color=_OFF_WHITE,
-                 text=text,
-                 alpha=alpha,
-                 area_threshold=area_threshold,
-             )
-
-         # draw mask for all instances second
-         all_instances = list(pred.instance_masks())
-         if len(all_instances) == 0:
-             return self.output
-         masks, sinfo = list(zip(*all_instances))
-         category_ids = [x["category_id"] for x in sinfo]
-
-         try:
-             scores = [x["score"] for x in sinfo]
-         except KeyError:
-             scores = None
-         labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)
-
-         try:
-             colors = [random_color(rgb=True, maximum=1) for k in category_ids]
-         except AttributeError:
-             colors = None
-         self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
-
-         return self.output
-
-     def draw_dataset_dict(self, dic):
-         """
-         Draw annotations/segmentations in Detectron2 Dataset format.
-
-         Args:
-             dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         annos = dic.get("annotations", None)
-         if annos:
-             if "segmentation" in annos[0]:
-                 masks = [x["segmentation"] for x in annos]
-             else:
-                 masks = None
-             if "keypoints" in annos[0]:
-                 keypts = [x["keypoints"] for x in annos]
-                 keypts = np.array(keypts).reshape(len(annos), -1, 3)
-             else:
-                 keypts = None
-
-             boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos]
-
-             labels = [x["category_id"] for x in annos]
-             colors = None
-             if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
-                 colors = [
-                     self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in labels
-                 ]
-             names = self.metadata.get("thing_classes", None)
-             if names:
-                 labels = [names[i] for i in labels]
-             labels = [
-                 "{}".format(i) + ("|crowd" if a.get("iscrowd", 0) else "")
-                 for i, a in zip(labels, annos)
-             ]
-             self.overlay_instances(
-                 labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
-             )
-
-         sem_seg = dic.get("sem_seg", None)
-         if sem_seg is None and "sem_seg_file_name" in dic:
-             sem_seg = cv2.imread(dic["sem_seg_file_name"], cv2.IMREAD_GRAYSCALE)
-         if sem_seg is not None:
-             self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
-         return self.output
-
-     def overlay_instances(
-         self,
-         *,
-         boxes=None,
-         labels=None,
-         masks=None,
-         keypoints=None,
-         assigned_colors=None,
-         alpha=0.5
-     ):
-         """
-         Args:
-             boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
-                 or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
-                 or a :class:`RotatedBoxes`,
-                 or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
-                 for the N objects in a single image,
-             labels (list[str]): the text to be displayed for each instance.
-             masks (masks-like object): Supported types are:
-
-                 * `structures.masks.PolygonMasks`, `structures.masks.BitMasks`.
-                 * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
-                   The first level of the list corresponds to individual instances. The second
-                   level to all the polygons that compose the instance, and the third level
-                   to the polygon coordinates. The third level should have the format of
-                   [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
-                 * list[ndarray]: each ndarray is a binary mask of shape (H, W).
-                 * list[dict]: each dict is a COCO-style RLE.
-             keypoints (Keypoint or array-like): an array-like object of shape (N, K, 3),
-                 where N is the number of instances and K is the number of keypoints.
-                 The last dimension corresponds to (x, y, visibility or score).
-             assigned_colors (list[matplotlib.colors]): a list of colors, where each color
-                 corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
-                 for full list of formats that the colors are accepted in.
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         num_instances = None
-         if boxes is not None:
-             boxes = self._convert_boxes(boxes)
-             num_instances = len(boxes)
-         if masks is not None:
-             masks = self._convert_masks(masks)
-             if num_instances:
-                 assert len(masks) == num_instances
-             else:
-                 num_instances = len(masks)
-         if keypoints is not None:
-             if num_instances:
-                 assert len(keypoints) == num_instances
-             else:
-                 num_instances = len(keypoints)
-             keypoints = self._convert_keypoints(keypoints)
-         if labels is not None:
-             assert len(labels) == num_instances
-         if assigned_colors is None:
-             assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
-         if num_instances == 0:
-             return self.output
-         if boxes is not None and boxes.shape[1] == 5:
-             return self.overlay_rotated_instances(
-                 boxes=boxes, labels=labels, assigned_colors=assigned_colors
-             )
-
-         # Display in largest to smallest order to reduce occlusion.
-         areas = None
-         if boxes is not None:
-             areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
-         elif masks is not None:
-             areas = np.asarray([x.area() for x in masks])
-
-         if areas is not None:
-             sorted_idxs = np.argsort(-areas).tolist()
-             # Re-order overlapped instances in descending order.
-             boxes = boxes[sorted_idxs] if boxes is not None else None
-             labels = [labels[k] for k in sorted_idxs] if labels is not None else None
-             masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
-             assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
-             keypoints = keypoints[sorted_idxs] if keypoints is not None else None
-
-         for i in range(num_instances):
-             color = assigned_colors[i]
-             if boxes is not None:
-                 self.draw_box(boxes[i], edge_color=color)
-
-             if masks is not None:
-                 for segment in masks[i].polygons:
-                     self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
-
-             if labels is not None:
-                 # first get a box
-                 if boxes is not None:
-                     x0, y0, x1, y1 = boxes[i]
-                     text_pos = (x0, y0)  # if drawing boxes, put text on the box corner.
-                     horiz_align = "left"
-                 elif masks is not None:
-                     x0, y0, x1, y1 = masks[i].bbox()
-
-                     # draw text in the center (defined by median) when box is not drawn
-                     # median is less sensitive to outliers.
-                     text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
-                     horiz_align = "center"
-                 else:
-                     continue  # drawing the box confidence for keypoints isn't very useful.
-                 # for small objects, draw text at the side to avoid occlusion
-                 instance_area = (y1 - y0) * (x1 - x0)
-                 if (
-                     instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
-                     or y1 - y0 < 40 * self.output.scale
-                 ):
-                     if y1 >= self.output.height - 5:
-                         text_pos = (x1, y0)
-                     else:
-                         text_pos = (x0, y1)
-
-                 height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
-                 lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
-                 font_size = (
-                     np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
-                     * 0.5
-                     * self._default_font_size
-                 )
-                 self.draw_text(
-                     labels[i],
-                     text_pos,
-                     color=lighter_color,
-                     horizontal_alignment=horiz_align,
-                     font_size=font_size,
-                 )
-
-         # draw keypoints
-         if keypoints is not None:
-             for keypoints_per_instance in keypoints:
-                 self.draw_and_connect_keypoints(keypoints_per_instance)
-
-         return self.output
-
-     def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
-         """
-         Args:
-             boxes (ndarray): an Nx5 numpy array of
-                 (x_center, y_center, width, height, angle_degrees) format
-                 for the N objects in a single image.
-             labels (list[str]): the text to be displayed for each instance.
-             assigned_colors (list[matplotlib.colors]): a list of colors, where each color
-                 corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
-                 for full list of formats that the colors are accepted in.
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-
-         num_instances = len(boxes)
-
-         if assigned_colors is None:
-             assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
-         if num_instances == 0:
-             return self.output
-
-         # Display in largest to smallest order to reduce occlusion.
-         if boxes is not None:
-             areas = boxes[:, 2] * boxes[:, 3]
-
-         sorted_idxs = np.argsort(-areas).tolist()
-         # Re-order overlapped instances in descending order.
-         boxes = boxes[sorted_idxs]
-         labels = [labels[k] for k in sorted_idxs] if labels is not None else None
-         colors = [assigned_colors[idx] for idx in sorted_idxs]
-
-         for i in range(num_instances):
-             self.draw_rotated_box_with_label(
-                 boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
-             )
-
-         return self.output
-
-     def draw_and_connect_keypoints(self, keypoints):
-         """
-         Draws keypoints of an instance and follows the rules for keypoint connections
-         to draw lines between appropriate keypoints. This follows color heuristics for
-         line color.
-
-         Args:
-             keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
-                 and the last dimension corresponds to (x, y, probability).
-
-         Returns:
-             output (VisImage): image object with visualizations.
-         """
-         visible = {}
-         keypoint_names = self.metadata.get("keypoint_names")
-         for idx, keypoint in enumerate(keypoints):
-             # draw keypoint
-             x, y, prob = keypoint
-             if prob > _KEYPOINT_THRESHOLD:
-                 self.draw_circle((x, y), color=_RED)
-                 if keypoint_names:
-                     keypoint_name = keypoint_names[idx]
-                     visible[keypoint_name] = (x, y)
-
-         if self.metadata.get("keypoint_connection_rules"):
-             for kp0, kp1, color in self.metadata.keypoint_connection_rules:
-                 if kp0 in visible and kp1 in visible:
-                     x0, y0 = visible[kp0]
-                     x1, y1 = visible[kp1]
-                     color = tuple(x / 255.0 for x in color)
-                     self.draw_line([x0, x1], [y0, y1], color=color)
-
-         # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
-         # Note that this strategy is specific to person keypoints.
-         # For other keypoints, it should just do nothing
-         try:
-             ls_x, ls_y = visible["left_shoulder"]
-             rs_x, rs_y = visible["right_shoulder"]
-             mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
-         except KeyError:
-             pass
-         else:
-             # draw line from nose to mid-shoulder
-             nose_x, nose_y = visible.get("nose", (None, None))
-             if nose_x is not None:
-                 self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
-
-             try:
-                 # draw line from mid-shoulder to mid-hip
-                 lh_x, lh_y = visible["left_hip"]
-                 rh_x, rh_y = visible["right_hip"]
-             except KeyError:
-                 pass
-             else:
-                 mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
-                 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
-         return self.output
-
-     """
-     Primitive drawing functions:
-     """
-
-     def draw_text(
-         self,
-         text,
-         position,
-         *,
-         font_size=None,
-         color="g",
-         horizontal_alignment="center",
-         rotation=0
-     ):
-         """
-         Args:
-             text (str): class label
-             position (tuple): a tuple of the x and y coordinates to place text on image.
-             font_size (int, optional): font of the text. If not provided, a font size
-                 proportional to the image width is calculated and used.
-             color: color of the text. Refer to `matplotlib.colors` for full list
-                 of formats that are accepted.
-             horizontal_alignment (str): see `matplotlib.text.Text`
-             rotation: rotation angle in degrees CCW
-
-         Returns:
-             output (VisImage): image object with text drawn.
-         """
-         if not font_size:
-             font_size = self._default_font_size
-
-         # since the text background is dark, we don't want the text to be dark
-         color = np.maximum(list(mplc.to_rgb(color)), 0.2)
-         color[np.argmax(color)] = max(0.8, np.max(color))
-
-         x, y = position
-         self.output.ax.text(
-             x,
-             y,
-             text,
-             size=font_size * self.output.scale,
-             family="sans-serif",
-             bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
-             verticalalignment="top",
-             horizontalalignment=horizontal_alignment,
-             color=color,
-             zorder=10,
-             rotation=rotation,
-         )
-         return self.output
-
-     def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
-         """
-         Args:
-             box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
-                 are the coordinates of the image's top left corner. x1 and y1 are the
-                 coordinates of the image's bottom right corner.
-             alpha (float): blending coefficient. Smaller values lead to more transparent masks.
-             edge_color: color of the outline of the box. Refer to `matplotlib.colors`
-                 for full list of formats that are accepted.
-             line_style (string): the string to use to create the outline of the boxes.
-
-         Returns:
-             output (VisImage): image object with box drawn.
-         """
-         x0, y0, x1, y1 = box_coord
-         width = x1 - x0
-         height = y1 - y0
-
-         linewidth = max(self._default_font_size / 4, 1)
-
-         self.output.ax.add_patch(
-             mpl.patches.Rectangle(
-                 (x0, y0),
-                 width,
-                 height,
-                 fill=False,
-                 edgecolor=edge_color,
-                 linewidth=linewidth * self.output.scale,
-                 alpha=alpha,
-                 linestyle=line_style,
-             )
-         )
-         return self.output
-
-     def draw_rotated_box_with_label(
-         self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
-     ):
-         """
-         Args:
-             rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
-                 where cnt_x and cnt_y are the center coordinates of the box.
-                 w and h are the width and height of the box. angle represents how
-                 many degrees the box is rotated CCW with regard to the 0-degree box.
-             alpha (float): blending coefficient. Smaller values lead to more transparent masks.
-             edge_color: color of the outline of the box. Refer to `matplotlib.colors`
-                 for full list of formats that are accepted.
-             line_style (string): the string to use to create the outline of the boxes.
-             label (string): label for rotated box. It will not be rendered when set to None.
-
-         Returns:
-             output (VisImage): image object with box drawn.
-         """
-         cnt_x, cnt_y, w, h, angle = rotated_box
-         area = w * h
-         # use thinner lines when the box is small
-         linewidth = self._default_font_size / (
-             6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
-         )
-
-         theta = angle * math.pi / 180.0
-         c = math.cos(theta)
-         s = math.sin(theta)
-         rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
-         # x: left->right ; y: top->down
-         rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
-         for k in range(4):
-             j = (k + 1) % 4
-             self.draw_line(
-                 [rotated_rect[k][0], rotated_rect[j][0]],
-                 [rotated_rect[k][1], rotated_rect[j][1]],
-                 color=edge_color,
-                 linestyle="--" if k == 1 else line_style,
-                 linewidth=linewidth,
-             )
-
-         if label is not None:
-             text_pos = rotated_rect[1]  # topleft corner
-
-             height_ratio = h / np.sqrt(self.output.height * self.output.width)
881
- label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
882
- font_size = (
883
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
884
- )
885
- self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
886
-
887
- return self.output
888
-
889
- def draw_circle(self, circle_coord, color, radius=3):
890
- """
891
- Args:
892
- circle_coord (list(int) or tuple(int)): contains the x and y coordinates
893
- of the center of the circle.
894
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
895
- formats that are accepted.
896
- radius (int): radius of the circle.
897
-
898
- Returns:
899
- output (VisImage): image object with box drawn.
900
- """
901
- x, y = circle_coord
902
- self.output.ax.add_patch(
903
- mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
904
- )
905
- return self.output
906
-
907
- def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
908
- """
909
- Args:
910
- x_data (list[int]): a list containing x values of all the points being drawn.
911
- Length of list should match the length of y_data.
912
- y_data (list[int]): a list containing y values of all the points being drawn.
913
- Length of list should match the length of x_data.
914
- color: color of the line. Refer to `matplotlib.colors` for a full list of
915
- formats that are accepted.
916
- linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
917
- for a full list of formats that are accepted.
918
- linewidth (float or None): width of the line. When it's None,
919
- a default value will be computed and used.
920
-
921
- Returns:
922
- output (VisImage): image object with line drawn.
923
- """
924
- if linewidth is None:
925
- linewidth = self._default_font_size / 3
926
- linewidth = max(linewidth, 1)
927
- self.output.ax.add_line(
928
- mpl.lines.Line2D(
929
- x_data,
930
- y_data,
931
- linewidth=linewidth * self.output.scale,
932
- color=color,
933
- linestyle=linestyle,
934
- )
935
- )
936
- return self.output
937
-
938
- def draw_binary_mask(
939
- self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096
940
- ):
941
- """
942
- Args:
943
- binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
944
- W is the image width. Each value in the array is either a 0 or 1 value of uint8
945
- type.
946
- color: color of the mask. Refer to `matplotlib.colors` for a full list of
947
- formats that are accepted. If None, will pick a random color.
948
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
949
- full list of formats that are accepted.
950
- text (str): if None, will be drawn in the object's center of mass.
951
- alpha (float): blending efficient. Smaller values lead to more transparent masks.
952
- area_threshold (float): a connected component small than this will not be shown.
953
-
954
- Returns:
955
- output (VisImage): image object with mask drawn.
956
- """
957
- if color is None:
958
- color = random_color(rgb=True, maximum=1)
959
- if area_threshold is None:
960
- area_threshold = 4096
961
-
962
- has_valid_segment = False
963
- binary_mask = binary_mask.astype("uint8") # opencv needs uint8
964
- mask = GenericMask(binary_mask, self.output.height, self.output.width)
965
- shape2d = (binary_mask.shape[0], binary_mask.shape[1])
966
-
967
- if not mask.has_holes:
968
- # draw polygons for regular masks
969
- for segment in mask.polygons:
970
- area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
971
- if area < area_threshold:
972
- continue
973
- has_valid_segment = True
974
- segment = segment.reshape(-1, 2)
975
- self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
976
- else:
977
- rgba = np.zeros(shape2d + (4,), dtype="float32")
978
- rgba[:, :, :3] = color
979
- rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
980
- has_valid_segment = True
981
- self.output.ax.imshow(rgba)
982
-
983
- if text is not None and has_valid_segment:
984
- # TODO sometimes drawn on wrong objects. the heuristics here can improve.
985
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
986
- _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
987
- largest_component_id = np.argmax(stats[1:, -1]) + 1
988
-
989
- # draw text on the largest component, as well as other very large components.
990
- for cid in range(1, _num_cc):
991
- if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
992
- # median is more stable than centroid
993
- # center = centroids[largest_component_id]
994
- center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
995
- self.draw_text(text, center, color=lighter_color)
996
- return self.output
997
-
998
- def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
999
- """
1000
- Args:
1001
- segment: numpy array of shape Nx2, containing all the points in the polygon.
1002
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1003
- formats that are accepted.
1004
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
1005
- full list of formats that are accepted. If not provided, a darker shade
1006
- of the polygon color will be used instead.
1007
- alpha (float): blending efficient. Smaller values lead to more transparent masks.
1008
-
1009
- Returns:
1010
- output (VisImage): image object with polygon drawn.
1011
- """
1012
- if edge_color is None:
1013
- # make edge color darker than the polygon color
1014
- if alpha > 0.8:
1015
- edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
1016
- else:
1017
- edge_color = color
1018
- edge_color = mplc.to_rgb(edge_color) + (1,)
1019
-
1020
- polygon = mpl.patches.Polygon(
1021
- segment,
1022
- fill=True,
1023
- facecolor=mplc.to_rgb(color) + (alpha,),
1024
- edgecolor=edge_color,
1025
- linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
1026
- )
1027
- self.output.ax.add_patch(polygon)
1028
- return self.output
1029
-
1030
- """
1031
- Internal methods:
1032
- """
1033
-
1034
- def _jitter(self, color):
1035
- """
1036
- Randomly modifies given color to produce a slightly different color than the color given.
1037
-
1038
- Args:
1039
- color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
1040
- picked. The values in the list are in the [0.0, 1.0] range.
1041
-
1042
- Returns:
1043
- jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
1044
- color after being jittered. The values in the list are in the [0.0, 1.0] range.
1045
- """
1046
- color = mplc.to_rgb(color)
1047
- vec = np.random.rand(3)
1048
- # better to do it in another color space
1049
- vec = vec / np.linalg.norm(vec) * 0.5
1050
- res = np.clip(vec + color, 0, 1)
1051
- return tuple(res)
1052
-
1053
- def _create_grayscale_image(self, mask=None):
1054
- """
1055
- Create a grayscale version of the original image.
1056
- The colors in masked area, if given, will be kept.
1057
- """
1058
- img_bw = self.img.astype("f4").mean(axis=2)
1059
- img_bw = np.stack([img_bw] * 3, axis=2)
1060
- if mask is not None:
1061
- img_bw[mask] = self.img[mask]
1062
- return img_bw
1063
-
1064
- def _change_color_brightness(self, color, brightness_factor):
1065
- """
1066
- Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
1067
- less or more saturation than the original color.
1068
-
1069
- Args:
1070
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1071
- formats that are accepted.
1072
- brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
1073
- 0 will correspond to no change, a factor in [-1.0, 0) range will result in
1074
- a darker color and a factor in (0, 1.0] range will result in a lighter color.
1075
-
1076
- Returns:
1077
- modified_color (tuple[double]): a tuple containing the RGB values of the
1078
- modified color. Each value in the tuple is in the [0.0, 1.0] range.
1079
- """
1080
- assert brightness_factor >= -1.0 and brightness_factor <= 1.0
1081
- color = mplc.to_rgb(color)
1082
- polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
1083
- modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
1084
- modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
1085
- modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
1086
- modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
1087
- return modified_color
1088
-
1089
- def _convert_boxes(self, boxes):
1090
- """
1091
- Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
1092
- """
1093
- if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
1094
- return boxes.tensor.numpy()
1095
- else:
1096
- return np.asarray(boxes)
1097
-
1098
- def _convert_masks(self, masks_or_polygons):
1099
- """
1100
- Convert different format of masks or polygons to a tuple of masks and polygons.
1101
-
1102
- Returns:
1103
- list[GenericMask]:
1104
- """
1105
-
1106
- m = masks_or_polygons
1107
- if isinstance(m, PolygonMasks):
1108
- m = m.polygons
1109
- if isinstance(m, BitMasks):
1110
- m = m.tensor.numpy()
1111
- if isinstance(m, torch.Tensor):
1112
- m = m.numpy()
1113
- ret = []
1114
- for x in m:
1115
- if isinstance(x, GenericMask):
1116
- ret.append(x)
1117
- else:
1118
- ret.append(GenericMask(x, self.output.height, self.output.width))
1119
- return ret
1120
-
1121
- def _convert_keypoints(self, keypoints):
1122
- if isinstance(keypoints, Keypoints):
1123
- keypoints = keypoints.tensor
1124
- keypoints = np.asarray(keypoints)
1125
- return keypoints
1126
-
1127
- def get_output(self):
1128
- """
1129
- Returns:
1130
- output (VisImage): the image output containing the visualizations added
1131
- to the image.
1132
- """
1133
- return self.output
 
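The primitive drawing functions deleted above are all methods on a Visualizer bound to an image; each returns the underlying VisImage, so calls can be chained and the final rendering pulled out with get_output(). A minimal usage sketch, assuming detectron2 is importable; the image and coordinates below are illustrative values, not anything from the deleted file, and passing metadata=None is only safe here because none of the primitive calls shown touch dataset metadata:

    import numpy as np
    from detectron2.utils.visualizer import Visualizer

    # Illustrative input: a blank 480x640 RGB image.
    image = np.zeros((480, 640, 3), dtype=np.uint8)

    v = Visualizer(image, metadata=None, scale=1.0)
    v.draw_box((40, 40, 200, 160), edge_color="g")   # (x0, y0, x1, y1)
    v.draw_text("person", (40, 40), color="g")       # label at the box corner
    v.draw_circle((120, 100), color="r")             # e.g. one keypoint
    result = v.get_output().get_image()              # HxWx3 uint8 array
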
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/plain_train_net.py DELETED
@@ -1,231 +0,0 @@
- #!/usr/bin/env python
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- """
- Detectron2 training script with a plain training loop.
-
- This script reads a given config file and runs training or evaluation.
- It is an entry point that is able to train standard models in detectron2.
-
- In order to let one script support training of many models,
- this script contains logic that is specific to these built-in models and therefore
- may not be suitable for your own project.
- For example, your research project perhaps only needs a single "evaluator".
-
- Therefore, we recommend using detectron2 as a library and taking
- this file as an example of how to use the library.
- You may want to write your own script with your datasets and other customizations.
-
- Compared to "train_net.py", this script supports fewer default features.
- It also includes fewer abstractions and is therefore easier to add custom logic to.
- """
-
- import logging
- import os
- from collections import OrderedDict
- import torch
- from torch.nn.parallel import DistributedDataParallel
-
- import detectron2.utils.comm as comm
- from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
- from detectron2.config import get_cfg
- from detectron2.data import (
-     MetadataCatalog,
-     build_detection_test_loader,
-     build_detection_train_loader,
- )
- from detectron2.engine import default_argument_parser, default_setup, launch
- from detectron2.evaluation import (
-     CityscapesEvaluator,
-     COCOEvaluator,
-     COCOPanopticEvaluator,
-     DatasetEvaluators,
-     LVISEvaluator,
-     PascalVOCDetectionEvaluator,
-     SemSegEvaluator,
-     inference_on_dataset,
-     print_csv_format,
- )
- from detectron2.modeling import build_model
- from detectron2.solver import build_lr_scheduler, build_optimizer
- from detectron2.utils.events import (
-     CommonMetricPrinter,
-     EventStorage,
-     JSONWriter,
-     TensorboardXWriter,
- )
-
- logger = logging.getLogger("detectron2")
-
-
- def get_evaluator(cfg, dataset_name, output_folder=None):
-     """
-     Create evaluator(s) for a given dataset.
-     This uses the special metadata "evaluator_type" associated with each builtin dataset.
-     For your own dataset, you can simply create an evaluator manually in your
-     script and do not have to worry about the hacky if-else logic here.
-     """
-     if output_folder is None:
-         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
-     evaluator_list = []
-     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
-     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
-         evaluator_list.append(
-             SemSegEvaluator(
-                 dataset_name,
-                 distributed=True,
-                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
-                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
-                 output_dir=output_folder,
-             )
-         )
-     if evaluator_type in ["coco", "coco_panoptic_seg"]:
-         evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
-     if evaluator_type == "coco_panoptic_seg":
-         evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
-     if evaluator_type == "cityscapes":
-         assert (
-             torch.cuda.device_count() >= comm.get_rank()
-         ), "CityscapesEvaluator currently does not work with multiple machines."
-         return CityscapesEvaluator(dataset_name)
-     if evaluator_type == "pascal_voc":
-         return PascalVOCDetectionEvaluator(dataset_name)
-     if evaluator_type == "lvis":
-         return LVISEvaluator(dataset_name, cfg, True, output_folder)
-     if len(evaluator_list) == 0:
-         raise NotImplementedError(
-             "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
-         )
-     if len(evaluator_list) == 1:
-         return evaluator_list[0]
-     return DatasetEvaluators(evaluator_list)
-
-
- def do_test(cfg, model):
-     results = OrderedDict()
-     for dataset_name in cfg.DATASETS.TEST:
-         data_loader = build_detection_test_loader(cfg, dataset_name)
-         evaluator = get_evaluator(
-             cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
-         )
-         results_i = inference_on_dataset(model, data_loader, evaluator)
-         results[dataset_name] = results_i
-         if comm.is_main_process():
-             logger.info("Evaluation results for {} in csv format:".format(dataset_name))
-             print_csv_format(results_i)
-     if len(results) == 1:
-         results = list(results.values())[0]
-     return results
-
-
- def do_train(cfg, model, resume=False):
-     model.train()
-     optimizer = build_optimizer(cfg, model)
-     scheduler = build_lr_scheduler(cfg, optimizer)
-
-     checkpointer = DetectionCheckpointer(
-         model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
-     )
-     start_iter = (
-         checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
-     )
-     max_iter = cfg.SOLVER.MAX_ITER
-
-     periodic_checkpointer = PeriodicCheckpointer(
-         checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
-     )
-
-     writers = (
-         [
-             CommonMetricPrinter(max_iter),
-             JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
-             TensorboardXWriter(cfg.OUTPUT_DIR),
-         ]
-         if comm.is_main_process()
-         else []
-     )
-
-     # compared to "train_net.py", we do not support accurate timing and
-     # precise BN here, because they are not trivial to implement
-     data_loader = build_detection_train_loader(cfg)
-     logger.info("Starting training from iteration {}".format(start_iter))
-     with EventStorage(start_iter) as storage:
-         for data, iteration in zip(data_loader, range(start_iter, max_iter)):
-             iteration = iteration + 1
-             storage.step()
-
-             loss_dict = model(data)
-             losses = sum(loss_dict.values())
-             assert torch.isfinite(losses).all(), loss_dict
-
-             loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
-             losses_reduced = sum(loss for loss in loss_dict_reduced.values())
-             if comm.is_main_process():
-                 storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
-
-             optimizer.zero_grad()
-             losses.backward()
-             optimizer.step()
-             storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
-             scheduler.step()
-
-             if (
-                 cfg.TEST.EVAL_PERIOD > 0
-                 and iteration % cfg.TEST.EVAL_PERIOD == 0
-                 and iteration != max_iter
-             ):
-                 do_test(cfg, model)
-                 # Compared to "train_net.py", the test results are not dumped to EventStorage
-                 comm.synchronize()
-
-             if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
-                 for writer in writers:
-                     writer.write()
-             periodic_checkpointer.step(iteration)
-
-
- def setup(args):
-     """
-     Create configs and perform basic setups.
-     """
-     cfg = get_cfg()
-     cfg.merge_from_file(args.config_file)
-     cfg.merge_from_list(args.opts)
-     cfg.freeze()
-     default_setup(
-         cfg, args
-     )  # if you don't like any of the default setup, write your own setup code
-     return cfg
-
-
- def main(args):
-     cfg = setup(args)
-
-     model = build_model(cfg)
-     logger.info("Model:\n{}".format(model))
-     if args.eval_only:
-         DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
-             cfg.MODEL.WEIGHTS, resume=args.resume
-         )
-         return do_test(cfg, model)
-
-     distributed = comm.get_world_size() > 1
-     if distributed:
-         model = DistributedDataParallel(
-             model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
-         )
-
-     do_train(cfg, model)
-     return do_test(cfg, model)
-
-
- if __name__ == "__main__":
-     args = default_argument_parser().parse_args()
-     print("Command Line Args:", args)
-     launch(
-         main,
-         args.num_gpus,
-         num_machines=args.num_machines,
-         machine_rank=args.machine_rank,
-         dist_url=args.dist_url,
-         args=(args,),
-     )
 
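Stripped of checkpointing, event writers, and distributed setup, the do_train loop above is a conventional PyTorch loop over an infinite data stream, with the iteration count bounded by zipping the loader against a range. A minimal sketch of that skeleton on synthetic data; every name below is illustrative, nothing comes from the deleted script:

    import torch
    from torch import nn

    model = nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50)

    def infinite_loader():
        # detectron2's train loader is likewise an endless stream of batches
        while True:
            x = torch.randn(32, 10)
            yield x, x.sum(dim=1, keepdim=True)

    start_iter, max_iter = 0, 100
    model.train()
    for (x, y), iteration in zip(infinite_loader(), range(start_iter, max_iter)):
        loss = nn.functional.mse_loss(model(x), y)  # stands in for sum(loss_dict.values())
        assert torch.isfinite(loss).all()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # stepped every iteration, as in do_train
        if (iteration + 1) % 20 == 0:
            print(f"iter {iteration + 1}: loss = {loss.item():.4f}")
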
spaces/CVPR/LIVE/thrust/thrust/async/sort.h DELETED
@@ -1,275 +0,0 @@
- /*
-  * Copyright 2008-2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /*! \file async/sort.h
-  *  \brief Functions for asynchronously sorting a range.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/cpp14_required.h>
-
- #if THRUST_CPP_DIALECT >= 2014
-
- #include <thrust/detail/static_assert.h>
- #include <thrust/detail/select_system.h>
- #include <thrust/type_traits/logical_metafunctions.h>
- #include <thrust/type_traits/remove_cvref.h>
- #include <thrust/type_traits/is_execution_policy.h>
- #include <thrust/system/detail/adl/async/sort.h>
-
- #include <thrust/event.h>
-
- namespace thrust
- {
-
- namespace async
- {
-
- namespace unimplemented
- {
-
- template <
-   typename DerivedPolicy
- , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
- >
- __host__
- event<DerivedPolicy>
- async_stable_sort(
-   thrust::execution_policy<DerivedPolicy>&
- , ForwardIt, Sentinel, StrictWeakOrdering
- )
- {
-   THRUST_STATIC_ASSERT_MSG(
-     (thrust::detail::depend_on_instantiation<ForwardIt, false>::value)
-   , "this algorithm is not implemented for the specified system"
-   );
-   return {};
- }
-
- } // namespace unimplemented
-
- namespace stable_sort_detail
- {
-
- using thrust::async::unimplemented::async_stable_sort;
-
- struct stable_sort_fn final
- {
-   template <
-     typename DerivedPolicy
-   , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
-   >
-   __host__
-   static auto call(
-     thrust::detail::execution_policy_base<DerivedPolicy> const& exec
-   , ForwardIt&& first, Sentinel&& last
-   , StrictWeakOrdering&& comp
-   )
-   // ADL dispatch.
-   THRUST_RETURNS(
-     async_stable_sort(
-       thrust::detail::derived_cast(thrust::detail::strip_const(exec))
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(comp)
-     )
-   )
-
-   template <
-     typename DerivedPolicy
-   , typename ForwardIt, typename Sentinel
-   >
-   __host__
-   static auto call(
-     thrust::detail::execution_policy_base<DerivedPolicy> const& exec
-   , ForwardIt&& first, Sentinel&& last
-   )
-   // ADL dispatch.
-   THRUST_RETURNS(
-     async_stable_sort(
-       thrust::detail::derived_cast(thrust::detail::strip_const(exec))
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , thrust::less<
-         typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
-       >{}
-     )
-   )
-
-   template <typename ForwardIt, typename Sentinel, typename StrictWeakOrdering>
-   __host__
-   static auto call(ForwardIt&& first, Sentinel&& last, StrictWeakOrdering&& comp)
-   THRUST_RETURNS(
-     stable_sort_fn::call(
-       thrust::detail::select_system(
-         typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
-       )
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(comp)
-     )
-   )
-
-   template <typename ForwardIt, typename Sentinel>
-   __host__
-   static auto call(ForwardIt&& first, Sentinel&& last)
-   THRUST_RETURNS(
-     stable_sort_fn::call(
-       THRUST_FWD(first), THRUST_FWD(last)
-     , thrust::less<
-         typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
-       >{}
-     )
-   )
-
-   template <typename... Args>
-   THRUST_NODISCARD __host__
-   auto operator()(Args&&... args) const
-   THRUST_RETURNS(
-     call(THRUST_FWD(args)...)
-   )
- };
-
- } // namespace stable_sort_detail
-
- THRUST_INLINE_CONSTANT stable_sort_detail::stable_sort_fn stable_sort{};
-
- namespace fallback
- {
-
- template <
-   typename DerivedPolicy
- , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
- >
- __host__
- event<DerivedPolicy>
- async_sort(
-   thrust::execution_policy<DerivedPolicy>& exec
- , ForwardIt&& first, Sentinel&& last, StrictWeakOrdering&& comp
- )
- {
-   return async_stable_sort(
-     thrust::detail::derived_cast(exec)
-   , THRUST_FWD(first), THRUST_FWD(last), THRUST_FWD(comp)
-   );
- }
-
- } // namespace fallback
-
- namespace sort_detail
- {
-
- using thrust::async::fallback::async_sort;
-
- struct sort_fn final
- {
-   template <
-     typename DerivedPolicy
-   , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
-   >
-   __host__
-   static auto call(
-     thrust::detail::execution_policy_base<DerivedPolicy> const& exec
-   , ForwardIt&& first, Sentinel&& last
-   , StrictWeakOrdering&& comp
-   )
-   // ADL dispatch.
-   THRUST_RETURNS(
-     async_sort(
-       thrust::detail::derived_cast(thrust::detail::strip_const(exec))
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(comp)
-     )
-   )
-
-   template <
-     typename DerivedPolicy
-   , typename ForwardIt, typename Sentinel
-   >
-   __host__
-   static auto call3(
-     thrust::detail::execution_policy_base<DerivedPolicy> const& exec
-   , ForwardIt&& first, Sentinel&& last
-   , thrust::true_type
-   )
-   THRUST_RETURNS(
-     sort_fn::call(
-       exec
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , thrust::less<
-         typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
-       >{}
-     )
-   )
-
-   template <typename ForwardIt, typename Sentinel, typename StrictWeakOrdering>
-   __host__
-   static auto call3(ForwardIt&& first, Sentinel&& last,
-                     StrictWeakOrdering&& comp,
-                     thrust::false_type)
-   THRUST_RETURNS(
-     sort_fn::call(
-       thrust::detail::select_system(
-         typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
-       )
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , THRUST_FWD(comp)
-     )
-   )
-
-   // MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect
-   // if T1 is an execution_policy by using SFINAE. Switching to a static
-   // dispatch pattern to prevent this.
-   template <typename T1, typename T2, typename T3>
-   __host__
-   static auto call(T1&& t1, T2&& t2, T3&& t3)
-   THRUST_RETURNS(
-     sort_fn::call3(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3),
-                    thrust::is_execution_policy<thrust::remove_cvref_t<T1>>{})
-   )
-
-   template <typename ForwardIt, typename Sentinel>
-   __host__
-   static auto call(ForwardIt&& first, Sentinel&& last)
-   THRUST_RETURNS(
-     sort_fn::call(
-       thrust::detail::select_system(
-         typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
-       )
-     , THRUST_FWD(first), THRUST_FWD(last)
-     , thrust::less<
-         typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
-       >{}
-     )
-   )
-
-   template <typename... Args>
-   THRUST_NODISCARD __host__
-   auto operator()(Args&&... args) const
-   THRUST_RETURNS(
-     call(THRUST_FWD(args)...)
-   )
- };
-
- } // namespace sort_detail
-
- THRUST_INLINE_CONSTANT sort_detail::sort_fn sort{};
-
- } // namespace async
-
- } // end namespace thrust
-
- #endif
-