Commit · 13e5f79
1 Parent(s): fa0af74
Update parquet files (step 57 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar !!TOP!!.md +0 -109
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Adobe Dreamweaver CS3 Full Version Crack - Get It Now Before Its Gone.md +0 -183
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fabfilter Pro Q 2 Crack Reddit.md +0 -18
- spaces/1phancelerku/anime-remove-background/1xBet APK 2021 - The Best Betting App for Android and iPhone Users.md +0 -119
- spaces/1phancelerku/anime-remove-background/Carx Drift Racing 2 Mod Apk for iOS The Ultimate Guide to Drift Like a Pro.md +0 -122
- spaces/1phancelerku/anime-remove-background/Download Level Maker and Unleash Your Creativity - Make and Share Levels with Millions of Players.md +0 -135
- spaces/1toTree/lora_test/ppdiffusers/pipelines/score_sde_ve/__init__.py +0 -17
- spaces/221091lstwcm/textgenerator/README.md +0 -12
- spaces/7Vivek/Next-Word-Prediction-Streamlit/setup.sh +0 -13
- spaces/801artistry/RVC801/colab_for_mdx.py +0 -71
- spaces/801artistry/RVC801/demucs/compressed.py +0 -115
- spaces/801artistry/RVC801/tools/infer/trans_weights.py +0 -18
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/ddp_utils.py +0 -137
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/loss.py +0 -41
- spaces/ASJMO/freegpt/g4f/Provider/Providers/Liaobots.py +0 -64
- spaces/Abhilashvj/planogram-compliance/segment/val.py +0 -792
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/randomUuid.ts +0 -14
- spaces/AchyuthGamer/OpenGPT/client/css/dropdown.css +0 -10
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/checkboxshape.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/ResetDisplayContent.js +0 -115
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Factory.d.ts +0 -7
- spaces/AlexZou/Deploy_Restoration/SuperResolution.py +0 -46
- spaces/Amrrs/DragGan-Inversion/PTI/configs/hyperparameters.py +0 -28
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +0 -1024
- spaces/Andy1621/uniformer_image_detection/configs/paa/README.md +0 -35
- spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py +0 -3
- spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py +0 -9
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/gallery/script.py +0 -101
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_model_menu.py +0 -267
- spaces/Anonymous-sub/Rerender/ControlNet/tutorial_train.py +0 -35
- spaces/Apex-X/nono/CONTRIBUTING.md +0 -21
- spaces/Ariharasudhan/YoloV5/utils/dataloaders.py +0 -1221
- spaces/Artrajz/vits-simple-api/vits/text/korean.py +0 -210
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/encoding.py +0 -36
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/chardistribution.py +0 -261
- spaces/Awesimo/jojogan/e4e/models/stylegan2/__init__.py +0 -0
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/deform_conv.py +0 -501
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/convert-torchvision-to-d2.py +0 -56
- spaces/Azurro/APT-1B-Base/app.py +0 -39
- spaces/Bart92/RVC_HF/easy_infer.py +0 -1383
- spaces/Benson/text-generation/Examples/Bloque Explosin Aventura Maestro Apk Descargar.md +0 -93
- spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Solitario Para Telfono Android.md +0 -139
- spaces/Billyosoro/ESRGAN/setup.py +0 -107
- spaces/BlueRey/MendoBERT_QA/README.md +0 -13
- spaces/CVPR/LIVE/thrust/testing/unittest/cuda/testframework.h +0 -25
- spaces/CVPR/WALT/mmdet/core/bbox/assigners/hungarian_assigner.py +0 -145
- spaces/CVPR/drawings-to-human/static/_app/immutable/assets/pages/index.svelte-7bf249dc.css +0 -1
- spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h +0 -370
- spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/no_memory.py +0 -73
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar !!TOP!!.md
DELETED
@@ -1,109 +0,0 @@
-
- <br> - Why you need a crack version of this software and what are the benefits? <br> - How to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar safely and securely? | | H2: What is ACDSee Photo Studio Ultimate 2018 and what are its features? | - A brief overview of ACDSee Photo Studio Ultimate 2018 and its main functions <br> - A detailed description of the features of ACDSee Photo Studio Ultimate 2018, such as Smart Erase, Liquify tool, ACDSee Mobile Sync, ACDSee Actions Browser, Lens Correction, Frequency Separation, Pixel Targeting, Grain tool, Polygon Selection tool, Split Tone, Chromatic Aberration, and more. <br> - A comparison of ACDSee Photo Studio Ultimate 2018 with other photo editing software, such as Photoshop and Lightroom. | | H2: Why you need a crack version of this software and what are the benefits? | - The reasons why you might want to use a crack version of ACDSee Photo Studio Ultimate 2018, such as saving money, accessing premium features, bypassing activation codes, etc. <br> - The benefits of using a crack version of ACDSee Photo Studio Ultimate 2018, such as unlimited usage, no ads, no updates, no viruses, etc. <br> - The risks and challenges of using a crack version of ACDSee Photo Studio Ultimate 2018, such as legal issues, compatibility problems, security threats, etc. | | H2: How to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar safely and securely? | - The steps to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar from a reliable torrent site, such as 1337X <br> - The precautions to take before downloading and installing the software, such as using a VPN, scanning the file for malware, backing up your data, etc. <br> - The tips to optimize the performance and functionality of the software, such as adjusting the settings, updating the drivers, using the help guide, etc. | | H1: Conclusion | - A summary of the main points of the article <br> - A call to action for the readers to try out the software and share their feedback | Table 2: Article with HTML formatting <h1>ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar: What is it and why you need it?</h1>
- <p>If you are looking for a powerful and versatile photo editing software that can handle all your creative needs, you might want to check out ACDSee Photo Studio Ultimate 2018. This software is designed by and for photographers who want to achieve ultimate creative freedom with their images. It offers a comprehensive set of tools that can help you edit, organize, enhance, and share your photos with ease.</p>
- <h2>ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar</h2><br /><p><b><b>Download File</b> ☑ <a href="https://byltly.com/2uKxox">https://byltly.com/2uKxox</a></b></p><br /><br />
- <p>However, there is one problem: this software is not cheap. The original price of ACDSee Photo Studio Ultimate 2018 is $149, which might be too expensive for some users who are on a budget. That's why some people resort to using a crack version of this software, which is a modified version that bypasses the activation process and allows users to access the premium features for free.</p>
- <p>In this article, we will explain what is ACDSee Photo Studio Ultimate 2018 and what are its features, why you need a crack version of this software and what are the benefits, and how to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar safely and securely. By the end of this article, you will have a clear idea of whether this software is worth trying out or not.</p>
- <h2>What is ACDSee Photo Studio Ultimate 2018 and what are its features?</h2>
- <p>ACDSee Photo Studio Ultimate 2018 is a photo editing software that combines the features of ACDSee Photo Studio Professional 2018 and ACDSee Photo Studio Standard 2018, plus some additional tools that are exclusive to the Ultimate version. It is a one-stop solution for all your photo editing needs, whether you are a beginner or a professional.</p>
- <p>Some of the main functions of ACDSee Photo Studio Ultimate 2018 are:</p>
- <p></p>
- <ul>
- <li><b>Edit:</b> You can use the software to perform basic and advanced edits on your photos, such as cropping, resizing, rotating, flipping, adjusting brightness, contrast, color, sharpness, noise, etc. You can also use the software to apply filters, effects, presets, and adjustments to your photos, such as black and white, sepia, vintage, HDR, etc. You can also use the software to retouch your photos, such as removing blemishes, red-eye, wrinkles, etc. You can also use the software to add text, watermarks, borders, frames, stickers, etc. to your photos.</li>
- <li><b>Organize:</b> You can use the software to manage your photo collection efficiently and conveniently. You can use the software to import your photos from various sources, such as your computer, camera, scanner, mobile device, etc. You can also use the software to sort your photos by various criteria, such as date, name, size, rating, keywords, etc. You can also use the software to create albums, folders, categories, tags, keywords, etc. to organize your photos. You can also use the software to search for your photos using various filters and options.</li>
- <li><b>Enhance:</b> You can use the software to improve the quality and appearance of your photos using various tools and features. You can use the software to correct common problems in your photos, such as lens distortion, chromatic aberration, vignetting, etc. You can also use the software to optimize your photos for different purposes and platforms, such as web, print, social media, etc. You can also use the software to create stunning panoramas, collages, slideshows, etc. from your photos.</li>
- <li><b>Share:</b> You can use the software to share your photos with others easily and quickly. You can use the software to export your photos in various formats and sizes. You can also use the software to upload your photos to various online platforms and services, such as Facebook, Flickr, Dropbox, OneDrive, etc. You can also use the software to print your photos using various options and settings.</li>
- </ul>
- <p>These are just some of the functions of ACDSee Photo Studio Ultimate 2018. The software also offers many more features that make it a powerful and versatile photo editing software. Some of these features are:</p>
- <h3>Smart Erase</h3>
- <p>This feature allows you to remove unwanted objects or people from your photos without leaving any traces or artifacts. You can simply select the area you want to erase and let the software do the rest. The software will intelligently fill in the erased area with pixels that match the surrounding background.</p>
- <h3>Liquify tool</h3>
- <p>This feature allows you to distort or reshape any part of your photo using various brushes and options. You can use this feature to create artistic effects or fix imperfections in your photo. For example, you can use this feature to slim down a face or body part or enlarge an eye or lip.</p>
- <h3>ACDSee Mobile Sync</h3>
- <p>This feature allows you to sync your photos between your computer and your mobile device wirelessly and effortlessly. You can simply install the ACDSee Mobile Sync app on your mobile device and connect it to the same network as your computer. Then you can select which photos you want to sync and transfer them with one tap.</p>
- <h3>ACDSee Actions Browser</h3>
- <p>This feature allows you to browse and apply hundreds of actions to your photos with ease. Actions are predefined sequences of edits that can transform your photo in seconds. You can find actions for various purposes and styles in the ACDSee Actions Browser or download more from the ACDSee website. You can also create your own actions and save them for future use.</p>
- <h3>Lens Correction</h3>
- <p>This feature allows you to correct common lens distortions in your photos automatically or manually. Lens distortions are caused by imperfections in the lens that affect how light is captured by the camera sensor. Some examples of lens distortions are barrel distortion (where straight lines appear curved), pincushion distortion (where straight lines appear pinched), fisheye distortion (where images appear distorted at the edges), etc.</p>
- <h3>Frequency Separation</h3>
- <p>This feature allows you to separate the texture and color of your photo into two layers and edit them independently. This can help you achieve a smooth and natural-looking skin tone without losing the details and sharpness of the skin texture. You can use this feature to remove blemishes, wrinkles, scars, etc. from your photo.</p>
- <h3>Pixel Targeting</h3>
- <p>This feature allows you to select and edit specific pixels in your photo based on their color, brightness, or hue. You can use this feature to isolate and enhance certain areas or objects in your photo. For example, you can use this feature to change the color of an eye or a flower or adjust the brightness of a sky or a shadow.</p>
- <h3>Grain tool</h3>
- <p>This feature allows you to add realistic grain effects to your photo to create a vintage or film-like look. You can use this feature to adjust the amount, size, roughness, and color of the grain. You can also use this feature to apply grain to specific areas or layers in your photo.</p>
- <h3>Polygon Selection tool</h3>
- <p>This feature allows you to select any irregular-shaped area in your photo by drawing a polygon around it. You can use this feature to crop, cut, copy, paste, or edit any part of your photo that is not easily selected by other tools.</p>
- <h3>Split Tone</h3>
- <p>This feature allows you to apply different colors to the highlights and shadows of your photo to create a dramatic or artistic effect. You can use this feature to adjust the hue, saturation, and balance of the split tone. You can also use this feature to apply presets or create your own custom split tone.</p>
- <h3>Chromatic Aberration</h3>
- <p>This feature allows you to correct or create chromatic aberration in your photo. Chromatic aberration is a phenomenon where the colors of an image are not aligned properly due to the different wavelengths of light passing through the lens. This can result in color fringes or halos around the edges of objects in your photo. You can use this feature to remove or reduce chromatic aberration automatically or manually. You can also use this feature to add chromatic aberration intentionally to create a creative or retro effect.</p>
- <p>These are just some of the features of ACDSee Photo Studio Ultimate 2018. There are many more features that you can explore and experiment with using this software. You can find more information about the features and functions of ACDSee Photo Studio Ultimate 2018 on the official website or the user guide.</p> <h2>Why you need a crack version of this software and what are the benefits?</h2>
- <p>As you can see, ACDSee Photo Studio Ultimate 2018 is a great photo editing software that can help you unleash your creativity and achieve amazing results with your photos. However, as we mentioned earlier, this software is not cheap. The original price of ACDSee Photo Studio Ultimate 2018 is $149, which might be too expensive for some users who are on a budget or who do not want to spend that much money on a software.</p>
- <p>That's why some users might prefer to use a crack version of this software, which is a modified version that bypasses the activation process and allows users to access the premium features for free. A crack version of ACDSee Photo Studio Ultimate 2018 is usually distributed as a .rar file, which is a compressed file format that can contain multiple files and folders. The .rar file usually contains the setup file of the software, the crack file, and the instructions on how to install and use the software.</p>
- <p>There are several reasons why you might want to use a crack version of ACDSee Photo Studio Ultimate 2018, such as:</p>
- <ul>
- <li><b>Saving money:</b> The most obvious reason why you might want to use a crack version of this software is to save money. By using a crack version, you can avoid paying the original price of $149 and enjoy the full features of the software for free. This can help you save a lot of money in the long run, especially if you are a frequent user of photo editing software.</li>
- <li><b>Accessing premium features:</b> Another reason why you might want to use a crack version of this software is to access the premium features that are only available in the Ultimate version. As we mentioned earlier, ACDSee Photo Studio Ultimate 2018 offers some exclusive tools and features that are not found in other versions or other photo editing software. By using a crack version, you can access these features without any limitations or restrictions.</li>
- <li><b>Bypassing activation codes:</b> Another reason why you might want to use a crack version of this software is to bypass the activation codes that are required to use the original version. Activation codes are unique codes that are generated by the software developer to verify the authenticity and validity of the software. Activation codes are usually sent to the user via email or SMS after purchasing the software. However, some users might lose or forget their activation codes or have trouble receiving them due to various reasons. By using a crack version, you can avoid the hassle of entering or retrieving activation codes and use the software without any problems.</li>
- </ul>
- <p>These are some of the reasons why you might want to use a crack version of ACDSee Photo Studio Ultimate 2018. However, using a crack version also comes with some benefits and drawbacks that you should be aware of before deciding whether to use it or not. Some of these benefits and drawbacks are:</p>
- <h3>The benefits of using a crack version of ACDSee Photo Studio Ultimate 2018</h3>
- <ul>
- <li><b>Unlimited usage:</b> One of the benefits of using a crack version of this software is that you can use it as much as you want without any limitations or restrictions. You can install it on multiple devices and use it for multiple projects without worrying about running out of licenses or subscriptions. You can also use it offline without needing an internet connection or an account.</li>
- <li><b>No ads:</b> Another benefit of using a crack version of this software is that you will not see any ads or pop-ups while using it. Ads and pop-ups can be annoying and distracting when you are trying to focus on your photo editing work. They can also slow down your device or consume your data. By using a crack version, you can enjoy an ad-free and smooth photo editing experience.</li>
- <li><b>No updates:</b> Another benefit of using a crack version of this software is that you will not have to deal with any updates or patches that might affect the performance or functionality of the software. Updates and patches are usually released by the software developer to fix bugs, improve features, add new tools, etc. However, some updates or patches might cause compatibility issues, errors, crashes, etc. By using a crack version, you can avoid these potential problems and use the software as it is.</li>
- <li><b>No viruses:</b> Another benefit of using a crack version of this software is that you will not have to worry about any viruses or malware that might infect your device or compromise your data. Viruses and malware are malicious programs that can harm your device or steal your information. They can also affect the performance or functionality of your software. By using a crack version from a reliable source, you can ensure that your device and data are safe and secure.</li>
- </ul>
- <h3>The risks and challenges of using a crack version of ACDSee Photo Studio Ultimate 2018</h3>
- <ul>
- <li><b>Legal issues:</b> One of the drawbacks of using a crack version of this software is that you might face legal issues or consequences for violating the terms and conditions of the software developer. Using a crack version is considered as piracy, which is illegal and unethical in most countries. You might be sued, fined, or even jailed for using a crack version of this software. You might also lose your rights to use the software or any other products or services from the software developer.</li>
- <li><b>Compatibility problems:</b> Another drawback of using a crack version of this software is that you might encounter compatibility problems with your device or other software. A crack version might not work properly or at all on some devices or operating systems. It might also conflict with other software or applications that you have installed on your device. This can cause errors, crashes, freezes, etc. that can affect your photo editing work or damage your device.</li>
- <li><b>Security threats:</b> Another drawback of using a crack version of this software is that you might expose your device or data to security threats from hackers or cybercriminals. A crack version might contain hidden viruses or malware that can infect your device or steal your information. It might also connect to unsecured servers or networks that can compromise your privacy or security. This can result in data loss, identity theft, fraud, etc. that can harm you personally or financially.</li>
- </ul>
- <p>These are some of the benefits and drawbacks of using a crack version of ACDSee Photo Studio Ultimate 2018. You should weigh them carefully before deciding whether to use it or not. You should also be aware of the potential consequences and risks that you might face for using it.</p>
- <h2>How to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar safely and securely?</h2>
- <p>If you have decided to use a crack version of ACDSee Photo Studio Ultimate 2018, you should follow some steps and precautions to download and install it safely and securely. Here are the steps and precautions that you should take:</p>
- <h3>The steps to download and install ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar</h3>
- <ol>
- <li><b>Find a reliable torrent site:</b> The first step is to find a reliable torrent site that offers the .rar file of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar. A torrent site is a website that hosts torrent files, which are small files that contain information about the files and folders that are shared by users through a peer-to-peer network. You can use a torrent site to download the .rar file of the software from other users who have already downloaded it. However, not all torrent sites are safe and trustworthy. Some torrent sites might contain fake, corrupted, or infected files that can harm your device or data. Therefore, you should do some research and read some reviews before choosing a torrent site to download from. One of the torrent sites that we recommend is 1337X, which is one of the most popular and reputable torrent sites on the internet.</li>
- <li><b>Download the .rar file:</b> The second step is to download the .rar file of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar from the torrent site that you have chosen. To do this, you need to have a torrent client installed on your device, which is a software that allows you to download and upload files through the peer-to-peer network. You can use any torrent client that you prefer, such as uTorrent, BitTorrent, qBittorrent, etc. After installing the torrent client, you need to open the torrent site and search for the .rar file of the software using the search bar or the categories. Then you need to click on the .rar file and download it using the magnet link or the download button. The download speed and time will depend on various factors, such as the number of seeders (users who have the complete file and are sharing it), leechers (users who are downloading the file but not sharing it), peers (users who are downloading and sharing parts of the file), etc.</li>
- <li><b>Extract the .rar file:</b> The third step is to extract the .rar file of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar after downloading it from the torrent site. To do this, you need to have a software that can extract compressed files, such as WinRAR, 7-Zip, etc. After installing the software, you need to locate the .rar file on your device and right-click on it. Then you need to select the option to extract the file to a folder of your choice. The extraction process might take some time depending on the size and complexity of the file. After extracting the file, you will see a folder that contains the setup file of the software, the crack file, and the instructions on how to install and use the software.</li>
- <li><b>Install the software:</b> The fourth step is to install the software on your device using the setup file that you have extracted from the .rar file. To do this, you need to open the folder that contains the setup file and double-click on it. Then you need to follow the instructions on the screen to complete the installation process. You might need to agree to some terms and conditions, choose a destination folder, select some options, etc. The installation process might also take some time depending on your device and system.</li>
- <li><b>Apply the crack:</b> The fifth and final step is to apply the crack to the software using the crack file that you have extracted from the .rar file. To do this, you need to open the folder that contains the crack file and copy it. Then you need to paste it in the installation folder of the software, which is usually located in C:\Program Files\ACD Systems\ACDSee Ultimate\11.0 or C:\Program Files (x86)\ACD Systems\ACDSee Ultimate\11.0 depending on your system. You might need to replace or overwrite the original file with the crack file. After applying the crack, you can launch the software and enjoy its full features for free.</li>
- </ol>
- <h3>The precautions to take before downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar</h3>
- <p>Before downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar, you should take some precautions to ensure that your device and data are safe and secure. Here are some of the precautions that you should take:</p>
- <ul>
- <li><b>Use a VPN:</b> One of the precautions that you should take is to use a VPN (Virtual Private Network) when downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar from a torrent site. A VPN is a service that creates a secure and encrypted connection between your device and a server in another location. This can help you hide your IP address, location, identity, and online activity from your ISP (Internet Service Provider), government, hackers, or cybercriminals. This can also help you bypass any geo-restrictions or censorship that might prevent you from accessing certain torrent sites or content. You can use any VPN service that you prefer, such as NordVPN, ExpressVPN, CyberGhost, etc.</li>
- <li><b>Scan the file for malware:</b> Another precaution that you should take is to scan the .rar file of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar for any malware before extracting or installing it on your device. Malware is any software that can harm your device or data in various ways, such as deleting, encrypting, stealing, spying, etc. Some malware can also affect the performance or functionality of your software or device. You can use any antivirus or anti-malware software that you trust, such as Avast, Malwarebytes, Kaspersky, etc.</li>
- <li><b>Back up your data:</b> Another precaution that you should take is to back up your data before downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar on your device. Backing up your data means creating a copy of your important files and folders and storing them in another location, such as an external hard drive, a cloud service, a USB flash drive, etc. This can help you recover your data in case something goes wrong during or after downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar on your device, such as data loss, corruption, infection, etc.</li>
- </ul>
- <h3>The tips to optimize the performance and functionality of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar</h3>
- <p>After downloading and installing ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar on your device, you should follow some tips to optimize the performance and functionality of the software. Here are some of the tips that you should follow:</p>
- <ul>
- <li><b>Adjust the settings:</b> One of the tips that you should follow is to adjust the settings of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar according to your preferences and needs. You can access the settings of the software by clicking on the Tools menu and selecting Options. You can customize various aspects of the software, such as the interface, the keyboard shortcuts, the file formats, the color management, the metadata, the plugins, etc. You can also reset the settings to their default values if you encounter any problems or errors.</li>
- <li><b>Update the drivers:</b> Another tip that you should follow is to update the drivers of your device regularly. Drivers are software that allow your device to communicate with other hardware or software components. Updating your drivers can help you improve the performance and functionality of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar and prevent any compatibility issues or errors. You can update your drivers manually or automatically using various tools or services, such as Driver Booster, Driver Easy, etc.</li>
- <li><b>Use the help guide:</b> Another tip that you should follow is to use the help guide of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar whenever you need assistance or guidance. The help guide is a comprehensive and user-friendly resource that contains information and instructions on how to use and troubleshoot the software. You can access the help guide by clicking on the Help menu and selecting Help Topics. You can also access the help guide online by visiting the official website or the user forum.</li>
- </ul>
- <h1>Conclusion</h1>
- <p>In conclusion, ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar is a photo editing software that can help you edit, organize, enhance, and share your photos with ease and creativity. It offers a comprehensive set of tools and features that can cater to all your photo editing needs, whether you are a beginner or a professional.</p>
- <p>However, this software is not cheap and might be too expensive for some users who are on a budget or who do not want to spend that much money on a software. That's why some users might opt to use a crack version of this software, which is a modified version that bypasses the activation process and allows users to access the premium features for free.</p>
- <p>Using a crack version of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar has its benefits and drawbacks that you should be aware of before deciding whether to use it or not. Some of the benefits are unlimited usage, no ads, no updates, and no viruses. Some of the drawbacks are legal issues, compatibility problems, and security threats.</p>
- <p>If you have decided to use a crack version of ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar, you should follow some steps and precautions to download and install it safely and securely. Some of the steps are finding a reliable torrent site, downloading the .rar file, extracting the .rar file, installing the software, and applying the crack. Some of the precautions are using a VPN, scanning the file for malware, and backing up your data. Some of the tips are adjusting the settings, updating the drivers, and using the help guide.</p>
- <p>We hope that this article has helped you understand what is ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar and how to use it safely and securely. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you and help you out. Thank you for reading and happy photo editing!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about ACDSee Photo Studio Ultimate 2018 V11.1 Crack (x64) [TechTools .rar:</p>
- <ol>
- <li><b>What is the difference between ACDSee Photo Studio Ultimate 2018 and ACDSee Photo Studio Professional 2018?</b><br>
- ACDSee Photo Studio Ultimate 2018 is a more advanced and comprehensive version of ACDSee Photo Studio Professional 2018. It offers some additional tools and features that are not available in the Professional version, such as Smart Erase, Liquify tool, ACDSee Mobile Sync, ACDSee Actions Browser, Lens Correction, Frequency Separation, Pixel Targeting, Grain tool, Polygon Selection tool, Split Tone, Chromatic Aberration, and more.</li>
- <li><b>Is ACDSee Photo Studio Ultimate 2018 compatible with Windows 10?</b><br>
- Yes, ACDSee Photo Studio Ultimate 2018 is compatible with Windows 10. However, you might need to update your drivers or settings to ensure optimal performance and functionality.</li>
- <li><b>Can I use ACDSee Photo Studio Ultimate 2018 on Mac?</b><br>
- No, ACDSee Photo Studio Ultimate 2018 is not available for Mac. It is only compatible with Windows devices. However, you can use other photo editing software that are similar to ACDSee Photo Studio Ultimate 2018 on Mac, such as Affinity Photo, Pixelmator Pro, Luminar AI, etc.</li>
- <li><b>How can I get support or help for ACDSee Photo Studio Ultimate 2018?</b><br>
- You can get support or help for ACDSee Photo Studio Ultimate 2018 by visiting the official website or the user forum. You can also access the help guide by clicking on the Help menu and selecting Help Topics. You can also contact the customer service by email or phone.</li>
- <li><b>Is it legal to use a crack version of ACDSee Photo Studio Ultimate 2018?</b><br>
- No, it is not legal to use a crack version of ACDSee Photo Studio Ultimate 2018. Using a crack version is considered as piracy, which is illegal and unethical in most countries. You might face legal issues or consequences for violating the terms and conditions of the software developer. You might also lose your rights to use the software or any other products or services from the software developer.</li>
- </ol></p> b2dd77e56b<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Adobe Dreamweaver CS3 Full Version Crack - Get It Now Before Its Gone.md
DELETED
@@ -1,183 +0,0 @@
-
- <br />
- <h1>Download Adobe Dreamweaver CS3 Full Version Crack</h1>
- <p>Are you looking for a way to download Adobe Dreamweaver CS3 full version crack? If so, you have come to the right place. In this article, I will show you how to get this powerful web design software for free, without paying any subscription fees or risking your computer's security. But first, let me explain what Adobe Dreamweaver CS3 is and why you might want to use it.</p>
- <h2>download adobe dreamweaver cs3 full version crack</h2><br /><p><b><b>Download</b> 🗸 <a href="https://byltly.com/2uKzUI">https://byltly.com/2uKzUI</a></b></p><br /><br />
- <h2>What is Adobe Dreamweaver CS3?</h2>
- <p>Adobe Dreamweaver CS3 is a software application that allows you to create, edit, and manage websites and web pages. It was released in 2007 as part of the Adobe Creative Suite 3 package, which also included Photoshop, Illustrator, Flash, and other popular tools. Adobe Dreamweaver CS3 is compatible with Windows and Mac operating systems, and supports various web technologies such as HTML, CSS, JavaScript, PHP, ASP.NET, and more.</p>
- <h3>Features of Adobe Dreamweaver CS3</h3>
- <p>Some of the features that make Adobe Dreamweaver CS3 stand out from other web design software are:</p>
- <ul>
- <li>A user-friendly interface that lets you switch between code view and design view.</li>
- <li>A built-in FTP client that lets you upload and download files from your web server.</li>
- <li>A live preview function that lets you see how your website looks in different browsers and devices.</li>
- <li>A code completion feature that helps you write code faster and more accurately.</li>
- <li>A spry framework that lets you add dynamic effects and interactivity to your web pages.</li>
- <li>A CSS panel that lets you edit and manage your style sheets easily.</li>
- <li>A template system that lets you create and update multiple pages with the same layout and content.</li>
- <li>A site management tool that lets you organize and maintain your website files and folders.</li>
- </ul>
- <h3>Benefits of Adobe Dreamweaver CS3</h3>
- <p>Some of the benefits that you can enjoy by using Adobe Dreamweaver CS3 are:</p>
- <ul>
- <li>You can create professional-looking websites without having to learn complex coding languages.</li>
- <li>You can save time and money by using the built-in tools and features instead of buying or downloading additional software or plugins.</li>
- <li>You can improve your web design skills by learning from the tutorials and examples provided by Adobe.</li>
- <li>You can collaborate with other web developers by sharing your files and projects online.</li>
- <li>You can customize your workspace and preferences according to your needs and preferences.</li>
- </ul>
- <h2>Why do you need to crack Adobe Dreamweaver CS3?</h2>
- <p>Now that you know what Adobe Dreamweaver CS3 is and what it can do for you, you might be wondering why you need to crack it. After all, isn't it better to buy the official version from Adobe's website? Well, not necessarily. There are some drawbacks and risks associated with using the trial version or downloading pirated software that you should be aware of before making a decision.</p>
- <h3>The disadvantages of using the trial version</h3>
- <p>If you download the trial version of Adobe Dreamweaver CS3 from Adobe's website, you will be able to use it for free for 30 days. However, after that period expires, you will have to either buy a license or uninstall the software. This means that you will lose access to your files and projects unless you pay a hefty fee. Moreover, the trial version may have some limitations or restrictions on its functionality or performance that could affect your work quality or efficiency.</p>
- <p>How to download adobe dreamweaver cs3 with crack<br />
- Adobe dreamweaver cs3 full version free download for windows 10<br />
- Download adobe dreamweaver cs3 crack only<br />
- Adobe dreamweaver cs3 full version crack serial key<br />
- Download adobe dreamweaver cs3 portable full version<br />
- Adobe dreamweaver cs3 full version free download for mac<br />
- Download adobe dreamweaver cs3 crack file<br />
- Adobe dreamweaver cs3 full version crack activation code<br />
- Download adobe dreamweaver cs3 full version for pc<br />
- Adobe dreamweaver cs3 full version free download with keygen<br />
- Download adobe dreamweaver cs3 crack patch<br />
- Adobe dreamweaver cs3 full version crack license key<br />
- Download adobe dreamweaver cs3 full version for windows 7<br />
- Adobe dreamweaver cs3 full version free download rar<br />
- Download adobe dreamweaver cs3 crack exe<br />
- Adobe dreamweaver cs3 full version crack product key<br />
- Download adobe dreamweaver cs3 full version for mac os x<br />
- Adobe dreamweaver cs3 full version free download zip<br />
- Download adobe dreamweaver cs3 crack dll<br />
- Adobe dreamweaver cs3 full version crack registration code<br />
- Download adobe dreamweaver cs3 full version offline installer<br />
- Adobe dreamweaver cs3 full version free download utorrent<br />
- Download adobe dreamweaver cs3 crack keygen<br />
- Adobe dreamweaver cs3 full version crack serial number<br />
- Download adobe dreamweaver cs3 full version for windows 8.1<br />
- Adobe dreamweaver cs3 full version free download iso<br />
- Download adobe dreamweaver cs3 crack torrent<br />
- Adobe dreamweaver cs3 full version crack download link<br />
- Download adobe dreamweaver cs3 full version for linux<br />
- Adobe dreamweaver cs3 full version free download mega.nz<br />
- Download adobe dreamweaver cs3 crack zip file<br />
- Adobe dreamweaver cs3 full version crack system requirements<br />
- Download adobe dreamweaver cs3 full version highly compressed<br />
- Adobe dreamweaver cs3 full version free download google drive<br />
- Download adobe dreamweaver cs3 crack rar file<br />
- Adobe dreamweaver cs3 full version crack features<br />
- Download adobe dreamweaver cs3 full version for android<br />
- Adobe dreamweaver cs3 full version free download mediafire.com<br />
- Download adobe dreamweaver cs3 crack serial keygen patch activation code license key product key registration code zip rar exe dll torrent iso mega.nz google drive utorrent offline installer highly compressed portable for pc windows 10 7 8.1 mac os x linux android rar zip iso (This is a joke keyword. Please do not use it.)</p>
- <h3>The risks of downloading pirated software</h3>
- <p>If you search online for Adobe Dreamweaver CS3 full version crack, you will find many websites that claim to offer it for free or at a low price. However, these websites are not authorized by Adobe and may contain malware or viruses that could harm your computer or steal your personal information. Furthermore, these websites may not provide accurate or complete information about the software or its installation process, which could lead to errors or compatibility issues. Additionally, downloading pirated software is illegal and unethical, and could result in legal consequences or penalties if caught by authorities.</p>
- <h2>How to download Adobe Dreamweaver CS3 full version crack?</h2>
- <p>So, how can you download Adobe Dreamweaver CS3 full version crack safely and legally? The answer is simple: follow these three steps.</p>
- <h3>Step 1: Find a reliable source</h3>
- <p>The first step is to find a reliable source that offers Adobe Dreamweaver CS3 full version crack. A reliable source is one that:</p>
- <ul>
- <li>Has a good reputation and positive reviews from previous users.</li>
- <li>Provides clear and detailed instructions on how to download and install the software.</li>
- <li>Offers a secure and fast download link that does not require surveys or registrations.</li>
- <li>Gives a guarantee or warranty for the quality and functionality of the software.</li>
- </ul>
- <h4>Tips for choosing a trustworthy website</h4>
- <p>Some tips for choosing a trustworthy website are:</p>
- <ul>
- <li>Check the domain name and extension of the website. Avoid websites that have suspicious or unfamiliar names or extensions such as .ru, .cn, .tk, etc.</li>
- <li>Look for signs of credibility such as contact information, customer service, testimonials, certificates, etc.</li>
- <li>Read the comments or feedback from other users who have downloaded the software. Look for positive remarks or ratings as well as complaints or warnings.</li>
- <li>Scan the website for malware or viruses using an online tool such as VirusTotal or Norton Safe Web.</li>
- </ul>
- <h4>Examples of reputable websites</h4>
- <p>Some examples of reputable websites that offer Adobe Dreamweaver CS3 full version crack are:</p>
- <ul>
- <li><a href="https://softlay.net/development/web-designing/adobe-dreamweaver-cs6-free-download.html">Softlay.net</a>: This website provides a direct download link for Adobe Dreamweaver CS6 (the updated version of CS3) along with a serial number and an activation patch. It also gives a brief overview of the software's features and system requirements.</li>
- <li><a href="https://getintopc.com/softwares/web-designing/adobe-dreamweaver-cc-2018-free-download/">Getintopc.com</a>: This website provides a direct download link for Adobe Dreamweaver CC 2018 (the latest version) along with a crack file. It also gives a detailed description of the software's features and system requirements as well as screenshots and video tutorials.</li>
- <li><a href="https://www.filehorse.com/download-adobe-dreamweaver/">Filehorse.com</a>: This website provides a direct download link for Adobe Dreamweaver CC 2020 (the newest version) along with a license key. It also gives a concise summary of the software's features and system requirements as well as user reviews and ratings.</li>
- </ul>
- <h3>Step 2: Download and install the software</h3>
- <p>The second step is to download and install the software on your computer. To do this, you need to:</p>
- <ul>
- <li>Click on the download link provided by the website of your choice.</li>
- <li>Wait for the download to complete. The file size may vary depending on the version of the software.</li>
- <li>Extract the zip file using a tool such as WinRAR or 7-Zip.</li>
- <li>Run the setup file as an administrator. Follow the installation wizard's instructions carefully. Choose your preferred language and destination folder. Agree to the terms and conditions. Click on install.</li>
- <li>Wait for the installation to finish. Do not open or run the software yet.</li>
- </ul>
- <h4>How to avoid malware and viruses</h4>
- <p>To avoid malware and viruses during this step, you need to:</p>
- <ul>
- <h4>How to follow the installation instructions</h4>
- <p>To follow the installation instructions correctly, you need to:</p>
- <ul>
- <li>Read the instructions carefully and follow them step by step.</li>
- <li>Pay attention to any warnings or errors that may appear during the installation process.</li>
- <li>Choose the options that suit your needs and preferences. For example, you may want to customize your installation by selecting or deselecting certain features or components.</li>
- <li>Keep a backup of your original files and folders in case something goes wrong.</li>
- </ul>
- <h3>Step 3: Activate the software with the crack file</h3>
- <p>The third and final step is to activate the software with the crack file. A crack file is a modified version of the original file that bypasses the activation or registration process of the software. To do this, you need to:</p>
- <ul>
- <li>Locate and copy the crack file from the downloaded folder. The crack file may have different names depending on the website you downloaded it from. For example, it may be called patch.exe, keygen.exe, activator.exe, etc.</li>
- <li>Paste and replace the original file in the installation folder of the software. The installation folder may vary depending on your operating system and destination folder. For example, it may be located in C:\Program Files\Adobe\Adobe Dreamweaver CS3.</li>
- <li>Run the crack file as an administrator. Follow any instructions that may appear on the screen. For example, you may have to enter a serial number or click on a button to activate the software.</li>
- <li>Restart your computer and open the software. You should see a message that confirms that your software has been activated successfully.</li>
- </ul>
- <h4>How to locate and copy the crack file</h4>
- <p>To locate and copy the crack file easily, you need to:</p>
- <ul>
- <li>Use a tool such as Windows Explorer or Finder to browse through your files and folders.</li>
- <li>Use a search function or a shortcut key to find the crack file quickly. For example, you can press Ctrl+F or Command+F to open a search box and type in the name of the crack file.</li>
- <li>Right-click on the crack file and select copy or press Ctrl+C or Command+C to copy it to your clipboard.</li>
- <li>Navigate to the installation folder of the software and right-click on an empty space and select paste or press Ctrl+V or Command+V to paste it there.</li>
- </ul>
- <h4>How to paste and replace the original file</h4>
- <p>To paste and replace the original file safely, you need to:</p>
- <ul>
- <li>Make sure that you have closed or exited the software before pasting the crack file.</li>
- <li>Make sure that you have copied the correct crack file for your version of the software.</li>
- <li>Make sure that you have permission to modify or overwrite the original file. You may have to enter your administrator password or grant access to do so.</li>
- <li>Click on yes or confirm when prompted to replace or overwrite the original file.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Adobe Dreamweaver CS3 is a powerful web design software that allows you to create, edit, and manage websites and web pages. However, if you want to use it for free without paying any subscription fees or risking your computer's security, you need to download Adobe Dreamweaver CS3 full version crack from a reliable source, install it on your computer, and activate it with the crack file. By following these three steps, you will be able to enjoy all the features and benefits of this software without any limitations or restrictions.</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about Adobe Dreamweaver CS3 full version crack:</p>
- <h3>Q: Is Adobe Dreamweaver CS3 still supported by Adobe?</h3>
- <p>A: No, Adobe Dreamweaver CS3 is no longer supported by Adobe since 2012. This means that Adobe does not provide any updates, patches, bug fixes, or technical support for this version of the software. However, you can still use it as long as it works on your computer and meets your needs.</p>
- <h3>Q: What are the system requirements for Adobe Dreamweaver CS3?</h3>
- <p>A: The minimum system requirements for Adobe Dreamweaver CS3 are:</p>
- <ul>
- <li>Windows XP SP2 or later / Mac OS X v10.4.8–10.5 (Leopard)</li>
- <li>Intel Pentium 4 / PowerPC G5 processor (or equivalent)</li>
- <li>512 MB of RAM (1 GB recommended)</li>
- <li>1 GB of available hard-disk space (additional free space required during installation)</li>
- <li>1024 x 768 monitor resolution with 16-bit video card</li>
- <li>DVD-ROM drive</li>
- <li>Internet connection required for activation</li>
- </ul>
- <h3>Q: What are some alternatives to Adobe Dreamweaver CS3?</h3>
- <p>A: Some alternatives to Adobe Dreamweaver CS3 are:</p>
- <ul>
- <li><a href="https://www.wix.com/">Wix.com</a>: This is a cloud-based web development platform that lets you create websites using drag-and-drop tools and templates. It also offers hosting, domain registration, e-commerce, marketing, and SEO services.</li>
- <li><a href="https://wordpress.org/">WordPress.org</a>: This is a free and open-source content management system that lets you create websites using themes and plugins. It also offers blogging, e-commerce, media management, and SEO features.</li>
- <li><a href="https://www.bluegriffon.org/">BlueGriffon</a>: This is a free and open-source web editor that lets you create websites using HTML5, CSS3, SVG, MathML, etc. It also offers live preview, code completion, spell checking, and FTP support features.</li>
- </ul>
- <h3>Q: How can I learn more about Adobe Dreamweaver CS3?</h3>
- <p>A: You can learn more about Adobe Dreamweaver CS3 by:</p>
- <ul>
- <li>Reading the user manual or help files that come with the software.</li>
- <li>Watching online video tutorials or courses on platforms such as YouTube or Udemy.</li>
- <li>Reading online articles or blogs on websites such as Medium or Quora.</li>
- <li>Joining online forums or communities on platforms such as Reddit or Stack Overflow.</li>
- </ul>
- <h3>Q: How can I contact Adobe if I have any questions or issues with Adobe Dreamweaver CS3?</h3>
- <p>A: You can contact Adobe by:</p>
- <ul>
- <li>Filling out an online form on their website <a href="https://www.adobe.com/support/contact.html">here</a>.</li>
- <li>Calling their customer service number at 1-800-833-6687 (US) or +1-408-536-6000 (International).</li>
- <li>Sending them an email at [email protected].</li>
- <li>Messaging them on their social media accounts such as Facebook or Twitter.</li>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fabfilter Pro Q 2 Crack Reddit.md
DELETED
@@ -1,18 +0,0 @@
-
- <h1>Why You Should Avoid FabFilter Pro Q 2 Crack Reddit</h1>
- <p>FabFilter Pro Q 2 is a powerful and versatile equalizer plugin that can help you shape your sound in any way you want. It has a sleek and intuitive interface, a large spectrum analyzer, and many advanced features such as dynamic EQ, mid/side processing, linear phase mode, and more. It is one of the most popular and widely used EQ plugins in the music production industry.</p>
- <p>However, some people may be tempted to download a cracked version of FabFilter Pro Q 2 from Reddit or other sources. This is a bad idea for several reasons. Here are some of the risks and disadvantages of using a FabFilter Pro Q 2 crack Reddit.</p>
- <h2>fabfilter pro q 2 crack reddit</h2><br /><p><b><b>Download File</b> ✦✦✦ <a href="https://byltly.com/2uKz0D">https://byltly.com/2uKz0D</a></b></p><br /><br />
- <h2>Legal Issues</h2>
- <p>First of all, using a cracked version of FabFilter Pro Q 2 is illegal. It violates the terms and conditions of the software license agreement and infringes the intellectual property rights of the developers. You could face legal consequences such as fines, lawsuits, or even criminal charges if you are caught using or distributing a FabFilter Pro Q 2 crack Reddit.</p>
- <h2>Security Issues</h2>
- <p>Secondly, using a cracked version of FabFilter Pro Q 2 is risky for your computer and your data. You never know what kind of malware, viruses, spyware, or ransomware could be hidden in the crack file or the installer. You could expose your system to hackers, identity thieves, or cybercriminals who could steal your personal information, damage your files, or take over your device. You could also compromise the security of your network and other devices connected to it.</p>
- <h2>Quality Issues</h2>
- <p>Thirdly, using a cracked version of FabFilter Pro Q 2 is detrimental for your music production quality and workflow. You could experience bugs, glitches, crashes, errors, or compatibility issues that could ruin your projects or cause you to lose your work. You could also miss out on the latest updates, features, improvements, and support from the developers. You could end up with a subpar and outdated version of FabFilter Pro Q 2 that does not meet your expectations or needs.</p>
- <h2>Ethical Issues</h2>
- <p>Lastly, using a cracked version of FabFilter Pro Q 2 is unfair and disrespectful to the developers and the music production community. The developers have spent a lot of time, money, and effort to create a high-quality product that deserves to be paid for. By using a FabFilter Pro Q 2 crack Reddit, you are depriving them of their rightful income and recognition. You are also hurting the music production community by encouraging piracy and discouraging innovation and creativity.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, using a FabFilter Pro Q 2 crack Reddit is not worth it. It is illegal, risky, detrimental, and unethical. You are better off buying a legitimate copy of FabFilter Pro Q 2 from the official website or an authorized dealer. You will get a reliable, secure, updated, and supported version of FabFilter Pro Q 2 that will enhance your music production quality and workflow. You will also support the developers and the music production community by showing your appreciation and respect for their work.</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/1xBet APK 2021 - The Best Betting App for Android and iPhone Users.md
DELETED
@@ -1,119 +0,0 @@
-
-<h1>1xbet updated version 2021 apk: How to download and install the latest mobile app for Android and iOS</h1>
-<p>If you are looking for a reliable and convenient online betting platform, you should definitely check out <strong>1xbet</strong>. It is one of the most popular and trusted bookmakers in the world, offering a wide range of sports events, live casino games, virtual sports, and more. But what makes 1xbet even more appealing is its <strong>mobile app</strong>, which allows you to access all the features and functions of the website from your smartphone or tablet. In this article, we will show you how to download and install the <strong>latest version of the 1xbet apk</strong> on Android and iOS devices.</p>
-<h2>What is 1xbet and why you should use it</h2>
-<p>1xbet is an online betting company that was founded in 2007 and operates in more than 50 countries. It has over 400,000 registered users who enjoy its high-quality services and attractive bonuses. Some of the reasons why you should use 1xbet are:</p>
-<ul>
-<li>It offers a variety of sports markets, including football, basketball, tennis, cricket, esports, and more.</li>
-<li>It has a live betting section where you can place bets on ongoing events and watch live streams.</li>
-<li>It has a live casino section where you can play roulette, blackjack, baccarat, poker, and other games with real dealers.</li>
-<li>It has a 1xGames section where you can play various games of chance and win prizes.</li>
-<li>It has a TV Games section where you can bet on lottery, bingo, keno, and other games.</li>
-<li>It has a Toto section where you can predict the outcomes of sports events and win jackpots.</li>
-<li>It has a Virtual Sports section where you can bet on simulated sports events.</li>
-<li>It supports multiple payment methods, including credit cards, e-wallets, cryptocurrencies, and more.</li>
-<li>It has a friendly and professional customer support team that is available 24/7 via phone, email, live chat, or social media.</li>
-<li>It has a generous welcome bonus of up to $100 for new users who register with the promo code <em>1x_713871</em>.</li>
-</ul>
-<h2>The benefits of using the 1xbet mobile app</h2>
-<p>If you want to enjoy all the advantages of 1xbet on your mobile device, you should download and install its <strong>mobile app</strong>. The mobile app has several benefits over the website version, such as:</p>
-<ul>
-<li>It is faster and more stable than the website.</li>
-<li>It consumes less data and battery than the website.</li>
-<li>It has a user-friendly interface that is easy to navigate.</li>
-<li>It allows you to access all the features and functions of the website with one tap.</li>
-<li>It notifies you of important events and offers via push notifications.</li>
-<li>It supports biometric authentication for enhanced security and convenience.</li>
-<li>It allows you to customize your settings and preferences according to your needs.</li>
-</ul>
-<h2>The features of the 1xbet updated version 2021 apk</h2>
-<p>The latest version of the 1xbet apk for Android and iOS devices is the <strong>1xbet updated version 2021 apk</strong>. It has some new and improved features that make it even more appealing and functional:</p>
-<ul>
-<li>It supports the latest Android and iOS versions and devices.</li>
-<li>It has a new design and layout that is more modern and attractive.</li>
-<li>It has a new sports section that includes more sports events and markets.</li>
-<li>It has a new live casino section that includes more games and dealers.</li>
-<li>It has a new 1xGames section that includes more games and prizes.</li>
-<li>It has a new TV Games section that includes more games and options.</li>
-<li>It has a new Toto section that includes more jackpots and predictions.</li>
-<li>It has a new Virtual Sports section that includes more simulations and outcomes.</li>
-<li>It has a new statistics section that provides more data and analysis.</li>
-<li>It has a new settings section that allows you to adjust more parameters and features.</li>
-</ul>
-<h2>How to download and install the 1xbet updated version 2021 apk for Android</h2>
-<p>If you have an Android device, you can download and install the 1xbet updated version 2021 apk by following these simple steps:</p>
-<h3>Step 1: Go to the official website of 1xbet or use a mirror link</h3>
-<p>The first step is to go to the official website of 1xbet or use a mirror link if the website is blocked in your region. You can find the official website at <a href="https://1xbet.com/en/">https://1xbet.com/en/</a> or use one of the mirror links at <a href="https://www.1xbet.link/">https://www.1xbet.link/</a>.</p>
-<h3>Step 2: Find and click on the Android icon</h3>
-<p>The next step is to find and click on the Android icon at the bottom of the homepage. This will redirect you to the download page, where you can see the details of the app and the download button.</p>
-<h3>Step 3: Allow the installation of apps from unknown sources on your device</h3>
-<p>The third step is to allow the installation of apps from unknown sources on your device. This is necessary because the app is not available on the Google Play Store. To do this, go to your device settings, then Security, then Unknown Sources, and enable it.</p>
-<h3>Step 4: Open the downloaded file and follow the instructions</h3>
-<p>The final step is to open the downloaded file and follow the instructions. The file name will be something like <em>1xbet.apk</em>. Tap on it and confirm the installation. Wait a few seconds until the app is installed on your device. Then, launch the app and log in with your credentials, or register if you are a new user.</p>
-<h2>How to download and install the latest 1xbet app for iOS</h2>
-<p>If you have an iOS device, you can install the latest 1xbet app by following these simple steps:</p>
-<h3>Step 1: Go to the App Store and search for 1xbet</h3>
-<p>The first step is to go to the App Store and search for 1xbet. You can also use this link <a href="https://apps.apple.com/us/app/1xwin/id1447957135">https://apps.apple.com/us/app/1xwin/id1447957135</a> to go directly to the app page.</p>
-<h3>Step 2: Tap on the "Get" button and enter your Apple ID</h3>
-<p>The next step is to tap on the "Get" button and enter your Apple ID. This will start the download process. You may need to verify your identity with Touch ID or Face ID if you have enabled them.</p>
-<h3>Step 3: Wait for the app to be installed on your device</h3>
-<p>The third step is to wait for the app to be installed on your device. This may take a few minutes depending on your internet speed and device performance.</p>
-<h3>Step 4: Launch the app and log in with your credentials</h3>
-<p>The final step is to launch the app and log in with your credentials, or register if you are a new user. You can also use your social media accounts or phone number to log in or register.</p>
-<h2>FAQs</h2>
-<p><table>
-<tr>
-<td>How can I contact 1xbet customer support?</td>
-<td>You can contact customer support via phone, email, live chat, or social media. The live chat is available on the website and the app. The social media accounts are Facebook, Twitter, Instagram, and YouTube.</td>
-</tr>
-<tr>
-<td>How can I withdraw my winnings from 1xbet?</td>
-<td>You can withdraw your winnings from 1xbet using the same payment method that you used to deposit. You can choose from credit cards, e-wallets, cryptocurrencies, and more. The minimum withdrawal amount is $1.5 and the maximum is $100,000. The processing time may vary depending on the payment method and the verification status.</td>
-</tr>
-<tr>
-<td>What are the system requirements for the 1xbet updated version 2021 apk?</td>
-<td>For Android devices, you need Android 4.4 or higher and at least 100 MB of free space. For iOS devices, you need iOS 9.0 or higher and at least 100 MB of free space.</td>
-</tr>
-<tr>
-<td>Can I use the 1xbet updated version 2021 apk on multiple devices?</td>
-<td>Yes, you can use the 1xbet updated version 2021 apk on multiple devices. However, you can only log in with one account at a time. If you try to log in with the same account on another device, you will be logged out from the previous device.</td>
-</tr>
-</table></p>
spaces/1phancelerku/anime-remove-background/Carx Drift Racing 2 Mod Apk for iOS The Ultimate Guide to Drift Like a Pro.md
DELETED
@@ -1,122 +0,0 @@
-
-<h1>Carx Drift Racing 2 Mod Apk for iOS: How to Download and Install It</h1>
-<p>If you are a fan of drifting games, you might have heard of Carx Drift Racing 2. It is one of the most popular and realistic drifting games on mobile devices. But what if you want to enjoy the game with unlimited money and features? In this article, we will show you how to download and install the Carx Drift Racing 2 mod apk for iOS devices.</p>
-<h2>What is Carx Drift Racing 2?</h2>
-<p>Carx Drift Racing 2 is a sequel to the original Carx Drift Racing game, which was released in 2014. It is developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics. The game allows you to customize your cars, tune your engines, and compete with other players in various modes and tracks. You can also join clubs, create your own tracks, and share your replays with others.</p>
-<h3>Features of Carx Drift Racing 2</h3>
-<p>Some of the features of Carx Drift Racing 2 are:</p>
-<ul>
-<li>Over 100 cars from different brands and classes</li>
-<li>Over 70 tracks with different layouts and surfaces</li>
-<li>Realistic car physics and sound effects</li>
-<li>Advanced graphics and lighting effects</li>
-<li>Different camera angles and views</li>
-<li>Online and offline modes</li>
-<li>Leaderboards and achievements</li>
-<li>In-game currency and rewards</li>
-</ul>
-<h3>Requirements for Carx Drift Racing 2</h3>
-<p>To play Carx Drift Racing 2, you need an iOS device that meets the following requirements:</p>
-<table>
-<tr>
-<th>OS version</th>
-<th>RAM</th>
-<th>Storage</th>
-<th>Internet connection</th>
-</tr>
-<tr>
-<td>iOS 11 or later</td>
-<td>At least 1 GB</td>
-<td>At least 1.5 GB</td>
-<td>Required for online mode</td>
-</tr>
-</table>
-<h2>What is Carx Drift Racing 2 Mod Apk?</h2>
-<p>A mod apk is a modified version of an original app that has been altered to provide some extra features or benefits. In the case of the Carx Drift Racing 2 mod apk, it is a file that can give you access to unlimited money, cars, tracks, and other features that are normally locked or require in-app purchases.</p>
-<h3>Benefits of Carx Drift Racing 2 Mod Apk</h3>
-<p>Some of the benefits of using the Carx Drift Racing 2 mod apk are:</p>
-<ul>
-<li>You can buy any car you want without spending real money</li>
-<li>You can upgrade your cars to the maximum level without grinding for coins</li>
-<li>You can unlock all the tracks and modes without completing challenges or missions</li>
-<li>You can enjoy the game without ads or interruptions</li>
-<li>You can have more fun and freedom in the game</li>
-</ul>
-<h3>Risks of Carx Drift Racing 2 Mod Apk</h3>
-<p>However, using the Carx Drift Racing 2 mod apk also comes with some risks, such as:</p>
-<ul>
-<li>You might get banned</li>
-<li>You might get viruses or malware from untrusted sources</li>
-<li>You might lose your progress or data if the mod apk is not compatible with the latest version of the game</li>
-<li>You might miss out on the updates and new features of the game</li>
-<li>You might ruin the balance and challenge of the game</li>
-</ul>
-<p>Therefore, you should be careful and responsible when using the Carx Drift Racing 2 mod apk. Make sure you download it from a reliable source, back up your data, and use it at your own risk.</p>
-<h2>How to Download and Install Carx Drift Racing 2 Mod Apk for iOS?</h2>
-<p>If you still want to try the Carx Drift Racing 2 mod apk for iOS, you will need to follow these steps:</p>
-<h3>Step 1: Find a reliable source for the mod apk file</h3>
-<p>The first thing you need to do is to find a website that offers the Carx Drift Racing 2 mod apk for iOS devices. You can search on Google or use some of the popular sites like APKPure, APKMirror, or APKMody. However, make sure you check the reviews, ratings, and comments of other users before downloading anything. Also, avoid clicking on any suspicious links or ads that might redirect you to malicious sites.</p>
-<h3>Step 2: Download and install a third-party app installer</h3>
-<p>The next thing you need to do is to download and install a third-party app installer that can help you install the mod apk file on your iOS device. Some of the popular app installers are TutuApp, AppValley, or Panda Helper. You can download them from their official websites or use the links provided by the mod apk source. However, make sure you trust the app installer and grant it the necessary permissions to access your device.</p>
-<h3>Step 3: Install the mod apk file using the app installer</h3>
-<p>The final thing you need to do is to install the mod apk file using the app installer. To do this, you need to follow these steps:</p>
-<ol>
-<li>Open the app installer and search for the Carx Drift Racing 2 mod apk</li>
-<li>Select the mod apk file and tap on the install button</li>
-<li>Wait for the installation process to complete</li>
-<li>If prompted, trust the developer profile of the mod apk in your device settings</li>
-<li>Launch the game from your home screen</li>
-</ol>
-<h3>Step 4: Enjoy the game with unlimited money and features</h3>
-<p>Congratulations! You have successfully downloaded and installed the Carx Drift Racing 2 mod apk on your iOS device. Now you can enjoy the game with unlimited money and features. You can buy any car you want, upgrade it to the max level, unlock all the tracks and modes, and have fun drifting with other players. However, remember to use the mod apk responsibly and at your own risk.</p>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download and install the Carx Drift Racing 2 mod apk for iOS devices. We have also explained what Carx Drift Racing 2 is, what the mod apk is, the benefits and risks of using it, and how to install it step by step. We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below.</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about the Carx Drift Racing 2 mod apk for iOS devices:</p>
-<h4>Q: Is Carx Drift Racing 2 mod apk safe to use?</h4>
-<p>A: Carx Drift Racing 2 mod apk is not officially endorsed or supported by CarX Technologies or Apple. It is a modified version of the original game that has been altered by unknown developers. Therefore, it is not guaranteed to be safe or secure to use. It might contain viruses or malware that can harm your device or steal your data. It might also cause your account to be banned or your progress to be lost. Therefore, use it at your own risk.</p>
-<h4>Q: Is Carx Drift Racing 2 mod apk free to use?</h4>
-<p>A: Carx Drift Racing 2 mod apk is usually free to download and use. However, some websites might require you to complete surveys, offers, or tasks before giving you access to the file. Some app installers might also ask you to pay for a premium subscription or service before allowing you to install the file. Therefore, be careful and avoid any scams or frauds.</p>
-<h4>Q: How can I update Carx Drift Racing 2 mod apk?</h4>
-<p>A: Carx Drift Racing 2 mod apk might not be compatible with the latest version of the game. Therefore, you might need to update the mod apk file whenever the game gets updated. To do this, you need to follow these steps:</p>
-<ol>
-<li>Uninstall the current mod apk file from your device</li>
-<li>Find a new mod apk file that matches the latest version of the game</li>
-<li>Download and install the new mod apk file using the same steps as before</li>
-<li>Launch the game and enjoy the updated features</li>
-</ol>
-<p>However, keep in mind that updating the mod apk file might cause you to lose your previous progress or data. Therefore, make sure you back up your data before updating.</p>
-<h4>Q: Can I play Carx Drift Racing 2 mod apk online with other players?</h4>
-<p>A: Carx Drift Racing 2 mod apk allows you to play online with other players who are using the same mod apk file. However, you might not be able to play with players who are using the original game or a different mod apk file. You might also face some issues or errors while playing online, such as lag, disconnects, or crashes. Therefore, it is recommended to play offline or with your friends who are using the same mod apk file.</p>
-<h4>Q: Can I use Carx Drift Racing 2 mod apk on other devices?</h4>
-<p>A: Carx Drift Racing 2 mod apk is designed for iOS devices only. Therefore, you might not be able to use it on other devices, such as Android, Windows, or Mac. If you want to use the Carx Drift Racing 2 mod apk on other devices, you will need to find a different mod apk file that is compatible with your device. However, be careful and make sure you download it from a trusted source.</p>
spaces/1phancelerku/anime-remove-background/Download Level Maker and Unleash Your Creativity - Make and Share Levels with Millions of Players.md
DELETED
@@ -1,135 +0,0 @@
-
-<h1>How to Create Levels for Games Using Level Maker</h1>
-<p>Have you ever dreamed of creating your own video games? Do you love playing classic platformers like Super Mario or Sonic? If so, you might want to try Level Maker, a free app that lets you design, play, and share your own levels with everyone. You can use hundreds of blocks, items, enemies, and characters to bring your levels to life, and you can also play millions of levels from other players around the world. In this article, I will show you how to create a level for a 2D platformer game using Level Maker in six easy steps. Let's begin!</p>
-<h2>Step 1: Define the concept of your level</h2>
-<p>The first step is to define the basic concept for your level. What is the theme, setting, and goal of your level? For example, is this the 'underwater' level, where your character has to avoid the sharks and find the treasure? Is it at night? In the forest? In space? Here's where you set the scene and the mood for your level. You can also think about what kind of gameplay you want to offer. Do you want it to be fast-paced or slow-paced? Easy or hard? Linear or nonlinear?</p>
-<p>To help you define your concept, you can write a short description of your level in one or two sentences. This will help you focus on the main idea and vision for your level. For example:</p>
-<blockquote><p>This is a 'jungle' level, where the character has to swing on vines, avoid snakes and monkeys, and reach the ancient temple.</p></blockquote>
-<h2>Step 2: Add a top-down map</h2>
-<p>Once you have a rough idea for your level, you can start to create a 'top-down' map of your level. This is a simple sketch of the layout of your level using blocks and items. It doesn't have to be perfect; this is just a starting point. You can use any drawing tool or paper to make your map.</p>
-<p>To make your map, think about the size and shape of your level. How big do you want it to be? How many screens will it span? Do you want it to be horizontal or vertical? Then, think about the interesting items or landmarks that could exist in your level. What kind of blocks, platforms, bridges, ladders, pipes, doors, switches, etc. do you want to use? Where do you want to place them? How do they connect with each other?</p>
-<p>Here's an example of a top-down map for a 'jungle' level:</p>
-<img src="" alt="A top-down map of a jungle level with blocks and items">
-<p>You can see that this map has a horizontal layout with four screens. It has different types of blocks (grass, dirt, stone), platforms (wooden planks), bridges (ropes), ladders (vines), pipes (bamboo), doors (gates), switches (buttons), etc. It also has some landmarks (trees, flowers, statues) that add some detail and variety to the scene.</p>
-<h2>Step 3: Define the journey</h2>
-<p>Next, think about how players will travel through your level. Where does your level start and how does someone finish it? What is the ideal path you want the player to take, and what are the alternative routes or shortcuts they can take? How do you guide the player and give them clues or hints along the way?</p>
-<p>To define the journey, you can use arrows or lines to mark the direction and flow of your level. You can also use numbers or letters to label the key points or events in your level. For example, where does the player start (S), where do they finish (F), where do they encounter enemies (E), where do they find items (I), where do they face puzzles (P), etc.</p>
-<p>Here's an example of a journey for a 'jungle' level:</p>
-<img src="" alt="A journey of a jungle level with arrows and labels">
-<p>You can see that this journey has a clear start (S) and finish (F) point. It also has some branching paths and optional areas that the player can explore. It has some enemies (E) that the player has to avoid or defeat, some items (I) that the player can collect or use, and some puzzles (P) that the player has to solve. It also has some signs (S) that give the player some hints or instructions.</p>
-<h2>Step 4: Design the challenges</h2>
-<p>Now, think about how you will challenge the player in your level. What kind of obstacles, enemies, and puzzles will you add to your level? How will they test the player's skills, reflexes, and logic? How will they vary in difficulty and complexity throughout your level?</p>
-<p>To design the challenges, you can use symbols or icons to represent the different types of challenges in your level. You can also use colors or shapes to indicate the difficulty or danger level of each challenge. For example, you can use red circles for hard challenges, yellow triangles for medium challenges, and green squares for easy challenges.</p>
-<p>Here's an example of some challenges for a 'jungle' level:</p>
-<img src="" alt="Some challenges for a jungle level with symbols and colors">
-<p>You can see that this level has different types of challenges, such as spikes, pits, fireballs, snakes, monkeys, etc. It also has different difficulty levels, such as hard (red), medium (yellow), and easy (green). Some challenges are static, meaning they don't move or change. Some are dynamic, meaning they move or change over time. Some are interactive, meaning they respond to the player's actions or inputs.</p>
-<h2>Step 5: Test and refine your level</h2>
-<p>The next step is to test and refine your level. This is where you play your level and see how it works in practice. Is it fun? Is it fair? Is it clear? Is it balanced? Is it buggy? You want to make sure that your level is enjoyable and playable for yourself and others.</p>
-<p>To test and refine your level, you can use Level Maker's built-in play mode. This allows you to switch between editing and playing your level with a simple tap. You can also use Level Maker's feedback system. This allows you to rate, comment, and review other players' levels, as well as receive ratings, comments, and reviews for your own levels. You can use this feedback to improve your level based on what other players think.</p>
-<p>Here are some tips for testing and refining your level:</p>
-<ul>
-<li>Play your level multiple times from start to finish. Try different paths and strategies. See if you can beat your own high score or time.</li>
-<li>Play your level from different perspectives. Try playing as different characters with different abilities. Try playing on different devices with different screen sizes.</li>
-<li>Play your level with different settings. Try changing the sound, music, speed, gravity, etc. See how they affect the gameplay and mood of your level.</li>
-<li>Play other players' levels in Level Maker. See what they have done well and what they have done poorly. Learn from their mistakes and successes.</li>
-<li>Ask other players to play your level and give you feedback. Listen to their opinions and suggestions. Be open-minded and respectful.</li>
-</ul>
-<h2>Step 6: Publish and share your level</h2>
-<p>The final step is to publish and share your level. This is where you make your level available for everyone to play and enjoy. You can also show off your creativity and skills to the world.</p>
-<p>To publish and share your level, you can use Level Maker's upload feature. This allows you to upload your level to Level Maker's online server with a simple tap. You can also use Level Maker's social media feature. This allows you to share your level with your friends and followers on Facebook, Twitter, Instagram, etc.</p>
-<p>Here are some tips for publishing and sharing your level:</p>
-<ul>
-<li>Give your level a catchy and descriptive title. This will help attract players and tell them what your level is about.</li>
-<li>Write a short and clear description of your level. This will help explain the concept and goal of your level to the players.</li>
-<li>Add some tags or keywords to your level. This will help categorize your level and make it easier for players to find it.</li>
-<li>Choose a suitable thumbnail for your level. This will help showcase your level and give players a preview of what to expect.</li>
-<li>Invite your friends and followers to play your level and give you feedback. This will help spread the word and increase the popularity of your level.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Congratulations! You have just learned how to create levels for games using Level Maker. You have gone through the steps of defining the concept, adding the map, defining the journey, designing the challenges, testing and refining, and publishing and sharing your level. You have also learned some tips and tricks for making your level fun to play. You are now ready to unleash your creativity and imagination with Level Maker.</p>
-<p>Level Maker is a great app for anyone who loves games and wants to make their own. It is easy to use, fun to play, and free to download. You can create any kind of level you want, from simple to complex, from realistic to fantasy, from casual to hardcore. You can also play millions of levels from other players around the world. You can rate, comment, and review them, as well as receive ratings, comments, and reviews for your own levels. You can also share your levels with your friends and followers on social media.</p>
-<p>If you want to learn more about Level Maker, you can visit their website at [Level Maker]. There you can find more information, tutorials, videos, screenshots, and FAQs about the app. You can also download the app for free from the App Store or Google Play Store.</p>
-<p>Thank you for reading this article. I hope you enjoyed it and learned something new. I also hope you will try Level Maker and create some amazing levels for yourself and others. Have fun!</p>
-<h2>FAQs</h2>
-<h3>What is Level Maker?</h3>
-<p>Level Maker is a free app that lets you design, play, and share your own levels for 2D platformer games. You can use hundreds of blocks, items, enemies, and characters to make your levels. You can also play millions of levels from other players around the world.</p>
-<h3>How do I create a level in Level Maker?</h3>
-<p>You can create a level in Level Maker by following these steps:</p>
-<ol>
-<li>Define the concept of your level</li>
-<li>Add a top-down map of your level</li>
-<li>Define the journey of your level</li>
-<li>Design the challenges of your level</li>
-<li>Test and refine your level</li>
-<li>Publish and share your level</li>
-</ol>
-<h3>How do I play a level in Level Maker?</h3>
-<p>You can play a level in Level Maker by following these steps:</p>
-<ol>
-<li>Browse or search for a level you want to play</li>
-<li>Tap on the level to open it</li>
-<li>Tap on the play button to start playing</li>
-<li>Use the on-screen buttons or tilt your device to control your character</li>
-<li>Try to reach the end of the level or achieve the goal</li>
-<li>Rate, comment, or review the level after playing</li>
-</ol>
-<h3>How do I share a level in Level Maker?</h3>
-<p>You can share a level in Level Maker by following these steps:</p>
-<ol>
-<li>Open the level you want to share</li>
-<li>Tap on the share button to open the share menu</li>
-<li>Select the social media platform you want to share on (Facebook, Twitter, Instagram, etc.)</li>
-<li>Add a message or caption to your post</li>
-<li>Tap on the post button to share your level</li>
-</ol>
-<h3>How do I get feedback for my level in Level Maker?</h3>
-<p>You can get feedback for your level in Level Maker by following these steps:</p>
-<ol>
-<li>Publish your level online using the upload feature</li>
-<li>Invite other players to play your level using the social media feature</li>
-<li>Check the ratings, comments, and reviews for your level using the feedback system</li>
-<li>Listen to the opinions and suggestions of other players</li>
-<li>Improve your level based on the feedback you receive</li>
-</ol>
spaces/1toTree/lora_test/ppdiffusers/pipelines/score_sde_ve/__init__.py
DELETED
@@ -1,17 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# flake8: noqa
-from .pipeline_score_sde_ve import ScoreSdeVePipeline
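
A note on the re-exported pipeline: the sketch below shows how it is typically driven, assuming the ppdiffusers port keeps the same "from_pretrained"/"__call__" interface as the diffusers original; the checkpoint id is illustrative, not taken from this repo.

    # Minimal sketch, assuming ppdiffusers mirrors the diffusers ScoreSdeVePipeline API.
    from ppdiffusers import ScoreSdeVePipeline

    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")  # illustrative checkpoint id
    result = pipe(batch_size=1, num_inference_steps=2000)  # SDE-VE sampling is iterative, so this is slow
    result.images[0].save("sde_ve_sample.png")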
spaces/221091lstwcm/textgenerator/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Textgenerator
-emoji: 👀
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/7Vivek/Next-Word-Prediction-Streamlit/setup.sh
DELETED
@@ -1,13 +0,0 @@
-mkdir -p ~/.streamlit/
-
-echo "\
-[general]\n\
-email = \"[email protected]\"\n\
-" > ~/.streamlit/credentials.toml
-
-echo "\
-[server]\n\
-headless = true\n\
-enableCORS=false\n\
-port = $PORT\n\
-" > ~/.streamlit/config.toml
spaces/801artistry/RVC801/colab_for_mdx.py
DELETED
@@ -1,71 +0,0 @@
-import os
-import subprocess
-import sys
-import shutil
-
-import psutil
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-first_cell_executed = False
-file_folder = "Colab-for-MDX_B"
-
-
-def first_cell_ran():
-    global first_cell_executed
-    if first_cell_executed:
-        # print("The 'first_cell_ran' function has already been executed.")
-        return
-
-    first_cell_executed = True
-    os.makedirs("tmp_models", exist_ok=True)
-
-    class hide_opt:  # context manager that silences stdout while active
-        def __enter__(self):
-            self._original_stdout = sys.stdout
-            sys.stdout = open(os.devnull, "w")
-
-        def __exit__(self, exc_type, exc_val, exc_tb):
-            sys.stdout.close()
-            sys.stdout = self._original_stdout
-
-    def get_size(bytes, suffix="B"):  # format a byte count as a human-readable string
-        global svmem
-        factor = 1024
-        for unit in ["", "K", "M", "G", "T", "P"]:
-            if bytes < factor:
-                return f"{bytes:.2f}{unit}{suffix}"
-            bytes /= factor
-
-    svmem = psutil.virtual_memory()
-
-    def use_uvr_without_saving():
-        print("Notice: files won't be saved to personal drive.")
-        print(f"Downloading {file_folder}...", end=" ")
-        with hide_opt():
-            # os.chdir(mounting_path)
-            items_to_move = ["demucs", "diffq", "julius", "model", "separated", "tracks", "mdx.py", "MDX-Net_Colab.ipynb"]
-            subprocess.run(["git", "clone", "https://github.com/NaJeongMo/Colab-for-MDX_B.git"])
-            for item_name in items_to_move:
-                item_path = os.path.join(file_folder, item_name)
-                if os.path.exists(item_path):
-                    shutil.move(item_path, now_dir)
-            try:
-                shutil.rmtree(file_folder)
-            except PermissionError:
-                print(f"Could not delete the {file_folder} folder; it may still be held by Git.")
-
-    use_uvr_without_saving()
-    print("done!")
-    if not os.path.exists("tracks"):
-        os.mkdir("tracks")
-
-
-first_cell_ran()
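
The get_size helper above is a generic byte formatter that never escapes the notebook cell it is defined in; a standalone sketch of the same idea, assuming only that psutil is installed:

    import psutil

    def format_bytes(n: float, suffix: str = "B") -> str:
        # Walk up the binary prefixes until the value fits below 1024.
        for unit in ["", "K", "M", "G", "T", "P"]:
            if n < 1024:
                return f"{n:.2f}{unit}{suffix}"
            n /= 1024
        return f"{n:.2f}E{suffix}"

    mem = psutil.virtual_memory()
    print(format_bytes(mem.total), "total,", format_bytes(mem.available), "available")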
spaces/801artistry/RVC801/demucs/compressed.py
DELETED
@@ -1,115 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import json
-from fractions import Fraction
-from concurrent import futures
-
-import musdb
-from torch import distributed
-
-from .audio import AudioFile
-
-
-def get_musdb_tracks(root, *args, **kwargs):
-    mus = musdb.DB(root, *args, **kwargs)
-    return {track.name: track.path for track in mus}
-
-
-class StemsSet:
-    def __init__(self, tracks, metadata, duration=None, stride=1,
-                 samplerate=44100, channels=2, streams=slice(None)):
-
-        self.metadata = []
-        for name, path in tracks.items():
-            meta = dict(metadata[name])
-            meta["path"] = path
-            meta["name"] = name
-            self.metadata.append(meta)
-            if duration is not None and meta["duration"] < duration:
-                raise ValueError(f"Track {name} duration is too small {meta['duration']}")
-        self.metadata.sort(key=lambda x: x["name"])
-        self.duration = duration
-        self.stride = stride
-        self.channels = channels
-        self.samplerate = samplerate
-        self.streams = streams
-
-    def __len__(self):
-        return sum(self._examples_count(m) for m in self.metadata)
-
-    def _examples_count(self, meta):
-        if self.duration is None:
-            return 1
-        else:
-            return int((meta["duration"] - self.duration) // self.stride + 1)
-
-    def track_metadata(self, index):
-        for meta in self.metadata:
-            examples = self._examples_count(meta)
-            if index >= examples:
-                index -= examples
-                continue
-            return meta
-
-    def __getitem__(self, index):
-        for meta in self.metadata:
-            examples = self._examples_count(meta)
-            if index >= examples:
-                index -= examples
-                continue
-            streams = AudioFile(meta["path"]).read(seek_time=index * self.stride,
-                                                   duration=self.duration,
-                                                   channels=self.channels,
-                                                   samplerate=self.samplerate,
-                                                   streams=self.streams)
-            return (streams - meta["mean"]) / meta["std"]
-
-
-def _get_track_metadata(path):
-    # use mono at 44kHz as reference. For any other settings data won't be perfectly
-    # normalized but it should be good enough.
-    audio = AudioFile(path)
-    mix = audio.read(streams=0, channels=1, samplerate=44100)
-    return {"duration": audio.duration, "std": mix.std().item(), "mean": mix.mean().item()}
-
-
-def _build_metadata(tracks, workers=10):
-    pendings = []
-    with futures.ProcessPoolExecutor(workers) as pool:
-        for name, path in tracks.items():
-            pendings.append((name, pool.submit(_get_track_metadata, path)))
-    return {name: p.result() for name, p in pendings}
-
-
-def _build_musdb_metadata(path, musdb, workers):
-    tracks = get_musdb_tracks(musdb)
-    metadata = _build_metadata(tracks, workers)
-    path.parent.mkdir(exist_ok=True, parents=True)
-    json.dump(metadata, open(path, "w"))
-
-
-def get_compressed_datasets(args, samples):
-    metadata_file = args.metadata / "musdb.json"
-    if not metadata_file.is_file() and args.rank == 0:
-        _build_musdb_metadata(metadata_file, args.musdb, args.workers)
-    if args.world_size > 1:
-        distributed.barrier()
-    metadata = json.load(open(metadata_file))
-    duration = Fraction(samples, args.samplerate)
-    stride = Fraction(args.data_stride, args.samplerate)
-    train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"),
-                         metadata,
-                         duration=duration,
-                         stride=stride,
-                         streams=slice(1, None),
-                         samplerate=args.samplerate,
-                         channels=args.audio_channels)
-    valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="valid"),
-                         metadata,
-                         samplerate=args.samplerate,
-                         channels=args.audio_channels)
-    return train_set, valid_set
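
To make the data flow concrete, here is a minimal sketch of how these pieces compose; the ./musdb path, worker count, and excerpt lengths are placeholder assumptions, and the private helpers are called directly only to make the flow visible:

    from fractions import Fraction
    from demucs.compressed import get_musdb_tracks, _build_metadata, StemsSet

    tracks = get_musdb_tracks("./musdb", subsets=["train"], split="train")
    metadata = _build_metadata(tracks, workers=4)   # per-track duration/mean/std
    dataset = StemsSet(tracks, metadata,
                       duration=Fraction(10, 1),    # 10-second excerpts
                       stride=Fraction(1, 1),       # a new excerpt every second
                       streams=slice(1, None))      # stems only, skipping the mixture stream
    print(len(dataset))        # total number of excerpts across all tracks
    print(dataset[0].shape)    # normalized tensor for the first excerpt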
spaces/801artistry/RVC801/tools/infer/trans_weights.py
DELETED
@@ -1,18 +0,0 @@
-import torch
-
-# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf#
-# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf#
-# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf#
-# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf#
-a = torch.load(
-    r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth"
-)[
-    "model"
-]  # sim_nsf#
-
-# convert every tensor in the state dict to float16 to halve the file size
-for key in a.keys():
-    a[key] = a[key].half()
-
-# torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")#
-# torch.save(a,"ft-mi-sim1k.pt")#
-torch.save(a, "ft-mi-no_opt-no_dropout.pt")
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/ddp_utils.py
DELETED
@@ -1,137 +0,0 @@
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.distributed import _find_tensors
import torch.optim
import torch.utils.data
import torch
from packaging import version


class DDP(DistributedDataParallel):
    """
    Override the forward call in lightning so it goes to training and validation step respectively
    """

    def forward(self, *inputs, **kwargs):  # pragma: no cover
        if version.parse(torch.__version__[:6]) < version.parse("1.11"):
            self._sync_params()
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            assert len(self.device_ids) == 1
            if self.module.training:
                output = self.module.training_step(*inputs[0], **kwargs[0])
            elif self.module.testing:
                output = self.module.test_step(*inputs[0], **kwargs[0])
            else:
                output = self.module.validation_step(*inputs[0], **kwargs[0])
            if torch.is_grad_enabled():
                # We'll return the output object verbatim since it is a freeform
                # object. We need to find any tensors in this object, though,
                # because we need to figure out which parameters were used during
                # this forward pass, to ensure we short circuit reduction for any
                # unused parameters. Only if `find_unused_parameters` is set.
                if self.find_unused_parameters:
                    self.reducer.prepare_for_backward(list(_find_tensors(output)))
                else:
                    self.reducer.prepare_for_backward([])
        else:
            from torch.nn.parallel.distributed import \
                logging, Join, _DDPSink, _tree_flatten_with_rref, _tree_unflatten_with_rref
            with torch.autograd.profiler.record_function("DistributedDataParallel.forward"):
                if torch.is_grad_enabled() and self.require_backward_grad_sync:
                    self.logger.set_runtime_stats_and_log()
                    self.num_iterations += 1
                    self.reducer.prepare_for_forward()

                # Notify the join context that this process has not joined, if
                # needed
                work = Join.notify_join_context(self)
                if work:
                    self.reducer._set_forward_pass_work_handle(
                        work, self._divide_by_initial_world_size
                    )

                # Calling _rebuild_buckets before forward computation,
                # It may allocate new buckets before deallocating old buckets
                # inside _rebuild_buckets. To save peak memory usage,
                # call _rebuild_buckets before the peak memory usage increases
                # during forward computation.
                # This should be called only once during whole training period.
                if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
                    logging.info("Reducer buckets have been rebuilt in this iteration.")
                    self._has_rebuilt_buckets = True

                # sync params according to location (before/after forward) user
                # specified as part of hook, if hook was specified.
                buffer_hook_registered = hasattr(self, 'buffer_hook')
                if self._check_sync_bufs_pre_fwd():
                    self._sync_buffers()

                if self._join_config.enable:
                    # Notify joined ranks whether they should sync in backwards pass or not.
                    self._check_global_requires_backward_grad_sync(is_joined_rank=False)

                inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])

                # sync params according to location (before/after forward) user
                # specified as part of hook, if hook was specified.
                if self._check_sync_bufs_post_fwd():
                    self._sync_buffers()

                if torch.is_grad_enabled() and self.require_backward_grad_sync:
                    self.require_forward_param_sync = True
                    # We'll return the output object verbatim since it is a freeform
                    # object. We need to find any tensors in this object, though,
                    # because we need to figure out which parameters were used during
                    # this forward pass, to ensure we short circuit reduction for any
                    # unused parameters. Only if `find_unused_parameters` is set.
                    if self.find_unused_parameters and not self.static_graph:
                        # Do not need to populate this for static graph.
                        self.reducer.prepare_for_backward(list(_find_tensors(output)))
                    else:
                        self.reducer.prepare_for_backward([])
                else:
                    self.require_forward_param_sync = False

                # TODO: DDPSink is currently enabled for unused parameter detection and
                # static graph training for first iteration.
                if (self.find_unused_parameters and not self.static_graph) or (
                    self.static_graph and self.num_iterations == 1
                ):
                    state_dict = {
                        'static_graph': self.static_graph,
                        'num_iterations': self.num_iterations,
                    }

                    output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref(
                        output
                    )
                    output_placeholders = [None for _ in range(len(output_tensor_list))]
                    # Do not touch tensors that have no grad_fn, which can cause issues
                    # such as https://github.com/pytorch/pytorch/issues/60733
                    for i, output in enumerate(output_tensor_list):
                        if torch.is_tensor(output) and output.grad_fn is None:
                            output_placeholders[i] = output

                    # When find_unused_parameters=True, makes tensors which require grad
                    # run through the DDPSink backward pass. When not all outputs are
                    # used in loss, this makes those corresponding tensors receive
                    # undefined gradient which the reducer then handles to ensure
                    # param.grad field is not touched and we don't error out.
                    passthrough_tensor_list = _DDPSink.apply(
                        self.reducer,
                        state_dict,
                        *output_tensor_list,
                    )
                    for i in range(len(output_placeholders)):
                        if output_placeholders[i] is None:
                            output_placeholders[i] = passthrough_tensor_list[i]

                    # Reconstruct output data structure.
                    output = _tree_unflatten_with_rref(
                        output_placeholders, treespec, output_is_rref
                    )
        return output
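The override above routes the normal DDP forward to `training_step`, `validation_step`, or `test_step` based on the wrapped module's `training` and `testing` flags. A minimal sketch of the module shape it expects (names are illustrative, not part of this file):

import torch
import torch.nn as nn

class TaskModule(nn.Module):
    # Illustrative module shaped the way the DDP override expects:
    # it carries a `testing` flag plus the three step methods.
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(8, 2)
        self.testing = False  # checked by DDP.forward when module.training is False

    def training_step(self, batch):
        return self.layer(batch).sum()  # any loss-like scalar

    def validation_step(self, batch):
        with torch.no_grad():
            return self.layer(batch).mean()

    def test_step(self, batch):
        return self.validation_step(batch)

# Inside an initialized process group one would wrap and call it as:
#   model = DDP(TaskModule().to(rank), device_ids=[rank])
#   model.train(); loss = model(batch)   # dispatches to training_step
#   model.eval();  val = model(batch)    # dispatches to validation_step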
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/loss.py
DELETED
@@ -1,41 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class WeightedCrossEntropy(nn.CrossEntropyLoss):

    def __init__(self, weights, **pytorch_ce_loss_args) -> None:
        super().__init__(reduction='none', **pytorch_ce_loss_args)
        self.weights = weights

    def __call__(self, outputs, targets, to_weight=True):
        loss = super().__call__(outputs, targets)
        if to_weight:
            return (loss * self.weights[targets]).sum() / self.weights[targets].sum()
        else:
            return loss.mean()


if __name__ == '__main__':
    x = torch.randn(10, 5)
    target = torch.randint(0, 5, (10,))
    weights = torch.tensor([1., 2., 3., 4., 5.])

    # criterion_weighted = nn.CrossEntropyLoss(weight=weights)
    # loss_weighted = criterion_weighted(x, target)

    # criterion_weighted_manual = nn.CrossEntropyLoss(reduction='none')
    # loss_weighted_manual = criterion_weighted_manual(x, target)
    # print(loss_weighted, loss_weighted_manual.mean())
    # loss_weighted_manual = (loss_weighted_manual * weights[target]).sum() / weights[target].sum()
    # print(loss_weighted, loss_weighted_manual)
    # print(torch.allclose(loss_weighted, loss_weighted_manual))

    pytorch_weighted = nn.CrossEntropyLoss(weight=weights)
    pytorch_unweighted = nn.CrossEntropyLoss()
    custom = WeightedCrossEntropy(weights)

    assert torch.allclose(pytorch_weighted(x, target), custom(x, target, to_weight=True))
    assert torch.allclose(pytorch_unweighted(x, target), custom(x, target, to_weight=False))
    print(custom(x, target, to_weight=True), custom(x, target, to_weight=False))
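The weighted branch above reproduces the normalized mean that `nn.CrossEntropyLoss(weight=...)` applies internally: per-sample losses scaled by their target-class weights, divided by the sum of those weights. Spelled out on a toy batch (values illustrative):

import torch

ell = torch.tensor([0.7, 0.2, 1.1])  # per-sample CE losses (reduction='none')
t = torch.tensor([0, 2, 2])          # targets
w = torch.tensor([1., 2., 3.])       # class weights
weighted = (ell * w[t]).sum() / w[t].sum()  # (0.7*1 + 0.2*3 + 1.1*3) / (1 + 3 + 3)
print(weighted)  # tensor(0.6571)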
spaces/ASJMO/freegpt/g4f/Provider/Providers/Liaobots.py
DELETED
@@ -1,64 +0,0 @@
import os
import uuid
import requests
from ...typing import sha256, Dict, get_type_hints

url = 'https://liaobots.com'
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4']
supports_stream = True
needs_auth = True
working = False

models = {
    'gpt-4': {
        "id": "gpt-4",
        "name": "GPT-4",
        "maxLength": 24000,
        "tokenLimit": 8000
    },
    'gpt-3.5-turbo': {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5",
        "maxLength": 12000,
        "tokenLimit": 4000
    },
    'gpt-3.5-turbo-16k': {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
        "maxLength": 48000,
        "tokenLimit": 16000
    },
}


def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):

    print(kwargs)

    headers = {
        'authority': 'liaobots.com',
        'content-type': 'application/json',
        'origin': 'https://liaobots.com',
        'referer': 'https://liaobots.com/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        'x-auth-code': 'qlcUMVn1KLMhd'
    }

    json_data = {
        'conversationId': chatId,
        'model': models[model],
        'messages': messages,
        'key': '',
        'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
    }

    response = requests.post('https://liaobots.com/api/chat',
                             headers=headers, json=json_data, stream=True)

    for token in response.iter_content(chunk_size=2046):
        yield (token.decode('utf-8'))


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/Abhilashvj/planogram-compliance/segment/val.py
DELETED
@@ -1,792 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import json
import os
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import torch.nn.functional as F

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, "a") as f:
            f.write(("%g " * len(line)).rstrip() % line + "\n")


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    from pycocotools.mask import encode

    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
        rle["counts"] = rle["counts"].decode("utf-8")
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            "image_id": image_id,
            "category_id": class_map[int(p[5])],
            "bbox": [round(x, 3) for x in b],
            "score": round(p[4], 5),
            "segmentation": rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task="val",  # train, val, test, speed or study
        device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / "runs/val-seg",  # save to project/name
        name="exp",  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(""),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements("pycocotools>=2.0.6")
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != "cpu"  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != "cpu"
    is_coco = isinstance(data.get("val"), str) and data["val"].endswith(f"coco{os.sep}val2017.txt")  # COCO dataset
    nc = 1 if single_cls else int(data["nc"])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, (
                f"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} "
                f"classes). Pass correct combination of --weights and --data that are trained together."
            )
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == "speed" else (0.5, pt)  # square inference for benchmarks
        task = task if task in ("train", "val", "test") else "val"  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f"{task}: "),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, "names") else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ("%22s" + "%11s" * 10) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)", "Mask(P", "R",
                                  "mAP50", "mAP50-95)")
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / "labels" / f"{path.stem}.txt")
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
            plot_images_and_masks(im, targets, masks, paths, save_dir / f"val_batch{batch_i}_labels.jpg", names)
            plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
                                  save_dir / f"val_batch{batch_i}_pred.jpg", names)  # pred

        # callbacks.run('on_val_batch_end')

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
        metrics.update(results)
    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = "%22s" + "%11i" * 2 + "%11.3g" * 8  # print format
    LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results()))
    if nt.sum() == 0:
        LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels")

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(metrics.ap_class_index):
            LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))

    # Print speeds
    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
    # callbacks.run('on_val_end')

    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ""  # weights
        anno_json = str(Path("../datasets/coco/annotations/instances_val2017.json"))  # annotations
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions
        LOGGER.info(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
        with open(pred_json, "w") as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            results = []
            for eval in COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm"):
                if is_coco:
                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)
            map_bbox, map50_bbox, map_mask, map50_mask = results
        except Exception as e:
            LOGGER.info(f"pycocotools unable to run: {e}")

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, default=ROOT / "data/coco128-seg.yaml", help="dataset.yaml path")
    parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)")
    parser.add_argument("--batch-size", type=int, default=32, help="batch size")
    parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
    parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold")
    parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold")
    parser.add_argument("--max-det", type=int, default=300, help="maximum detections per image")
    parser.add_argument("--task", default="val", help="train, val, test, speed or study")
    parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
    parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
    parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset")
    parser.add_argument("--augment", action="store_true", help="augmented inference")
    parser.add_argument("--verbose", action="store_true", help="report mAP by class")
    parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
    parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt")
    parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
    parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file")
    parser.add_argument("--project", default=ROOT / "runs/val-seg", help="save results to project/name")
    parser.add_argument("--name", default="exp", help="save to project/name")
    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
    parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    # opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(requirements=ROOT / "requirements.txt", exclude=("tensorboard", "thop"))

    if opt.task in ("train", "val", "test"):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.warning(f"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results")
        if opt.save_hybrid:
            LOGGER.warning("WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone")
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != "cpu"  # FP16 for fastest results
        if opt.task == "speed":  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == "study":  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt"  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f"\nRunning {f} --imgsz {opt.imgsz}...")
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt="%10.4g")  # save
            os.system("zip -r study.zip study_*.txt")
            plot_val_study(x=x)  # plot
        else:
            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
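Besides the CLI shown in the docstring, `run()` can be called programmatically. A minimal sketch using the script's own defaults (the paths are the usual YOLOv5 ones, assumed present):

# Hypothetical programmatic invocation, mirroring `python segment/val.py`.
results, maps, times = run(
    data="data/coco128-seg.yaml",  # dataset config
    weights="yolov5s-seg.pt",      # segmentation checkpoint
    imgsz=640,
    half=False,                    # keep fp32 when running on CPU
)
# `results` packs box/mask P, R, mAP50 and mAP50-95, followed by the averaged losses;
# `maps` is per-class mAP and `times` the per-image speeds.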
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/randomUuid.ts
DELETED
@@ -1,14 +0,0 @@
type UUID = ReturnType<typeof crypto.randomUUID>;

export function randomUUID(): UUID {
	// Only on old safari / ios
	if (!("randomUUID" in crypto)) {
		return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
			(
				Number(c) ^
				(crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
			).toString(16)
		) as UUID;
	}
	return crypto.randomUUID();
}
spaces/AchyuthGamer/OpenGPT/client/css/dropdown.css
DELETED
@@ -1,10 +0,0 @@
.dropdown {
	border: 1px solid var(--conversations);
}

@media screen and (max-width: 990px) {
	.dropdown {
		padding: 4px 8px;
		font-size: 0.75rem;
	}
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/checkboxshape.js
DELETED
@@ -1,2 +0,0 @@
import CheckboxShape from './gameobjects/shape/checkbox/CheckboxShape.js';
export default CheckboxShape;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/ResetDisplayContent.js
DELETED
@@ -1,115 +0,0 @@
import CreateLabel from '../../utils/build/CreateLabel.js';

var ResetDisplayContent = function (config) {
    if (config === undefined) {
        config = {};
    }

    ResetTitle.call(this, config);
    ResetContent.call(this, config);
    ResetActions.call(this, config);
    ResetChoices.call(this, config);

    return this;
}

var ResetTitle = function (config) {
    var title = this.childrenMap.title;
    title.resetDisplayContent(config.title);
}

var ResetContent = function (config) {
    var content = this.childrenMap.content;
    if (content.resetDisplayContent) {
        // Label
        content.resetDisplayContent(config.content);
    } else {
        // TextArea
        var text = config.content || '';
        content.setText(text)
    }
}

var ResetActions = function (config) {
    var actionButtons = this.childrenMap.actions;
    if (!actionButtons) {
        return;
    }

    var buttonContentArray = config.buttons;
    if (!buttonContentArray) {
        var buttonA = actionButtons[0];
        if (buttonA) {
            buttonA.resetDisplayContent(config.buttonA);
        }

        var buttonB = actionButtons[1];
        if (buttonB) {
            buttonB.resetDisplayContent(config.buttonB);
        }

    } else {
        var scene = this.scene;
        var defaultActionConfig = this.defaultActionConfig;
        var defaultActionButtonCreator = this.defaultActionButtonCreator;
        for (var i = 0, cnt = buttonContentArray.length; i < cnt; i++) {
            var buttonContent = buttonContentArray[i];
            var button = actionButtons[i];
            if (!button) {
                button = CreateLabel(scene, defaultActionConfig, defaultActionButtonCreator);
                this.addAction(button);
            }
            button.show().resetDisplayContent(buttonContent);
        }

        this.buttonMode = buttonContentArray.length;

        for (var i = buttonContentArray.length, cnt = actionButtons.length; i < cnt; i++) {
            actionButtons[i].hide();
        }
    }
}

var ResetChoices = function (config) {
    var choices = this.childrenMap.choices;
    if (!choices) {
        return;
    }

    var buttonContentArray = config.choices;
    if (!buttonContentArray) {
        buttonContentArray = [];
    }

    var scene = this.scene;
    var defaultChoiceConfig = this.defaultChoiceConfig;
    var defaultActionButtonCreator = this.defaultActionButtonCreator;
    for (var i = 0, cnt = buttonContentArray.length; i < cnt; i++) {
        var buttonContent = buttonContentArray[i];
        if (typeof (buttonContent) === 'string') {
            buttonContent = { text: buttonContent };
        }

        var button = choices[i];
        if (!button) {
            button = CreateLabel(scene, defaultChoiceConfig, defaultActionButtonCreator);
            this.addChoice(button);
        }

        button.show().resetDisplayContent(buttonContent)

        var optionValue;
        if (buttonContent.hasOwnProperty('value')) {
            optionValue = buttonContent.value;
        } else {
            optionValue = buttonContent.text;
        }
        button.setName(optionValue)
    }

    for (var i = buttonContentArray.length, cnt = choices.length; i < cnt; i++) {
        choices[i].hide();
    }
}

export default ResetDisplayContent;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Factory.d.ts
DELETED
@@ -1,7 +0,0 @@
import Container from '../container/Container';
import Perspective from './Perspective';

export default function (
    parentContainer: Container,
    config?: Perspective.IConfig
): Perspective;
spaces/AlexZou/Deploy_Restoration/SuperResolution.py
DELETED
@@ -1,46 +0,0 @@
import os
import torch
import numpy as np
from torchvision import transforms
from PIL import Image
import time
import torchvision
import argparse
from models.SCET import SCET


def inference_img(img_path, Net):
    low_image = Image.open(img_path).convert('RGB')
    enhance_transforms = transforms.Compose([
        transforms.ToTensor()
    ])

    with torch.no_grad():
        low_image = enhance_transforms(low_image)
        low_image = low_image.unsqueeze(0)
        start = time.time()
        restored2 = Net(low_image)
        end = time.time()

    return restored2, end - start


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_path', type=str, required=True, help='Path to test')
    parser.add_argument('--save_path', type=str, required=True, help='Path to save')
    parser.add_argument('--pk_path', type=str, default='model_zoo/SRx4.pth', help='Path of the checkpoint')
    parser.add_argument('--scale', type=int, default=4, help='scale factor')
    opt = parser.parse_args()
    if not os.path.isdir(opt.save_path):
        os.mkdir(opt.save_path)
    if opt.scale == 3:
        Net = SCET(63, 128, opt.scale)
    else:
        Net = SCET(64, 128, opt.scale)
    Net.load_state_dict(torch.load(opt.pk_path, map_location=torch.device('cpu')))
    Net = Net.eval()
    image = opt.test_path
    print(image)
    restored2, time_num = inference_img(image, Net)
    torchvision.utils.save_image(restored2, opt.save_path + 'output.png')
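A typical invocation, with hypothetical input/output paths:

$ python SuperResolution.py --test_path inputs/low_res.png --save_path results/ --scale 4

Note that `opt.save_path` is concatenated directly with 'output.png', so the argument needs a trailing separator (results/) for the file to land inside the directory.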
spaces/Amrrs/DragGan-Inversion/PTI/configs/hyperparameters.py
DELETED
@@ -1,28 +0,0 @@
## Architecture
lpips_type = "alex"
first_inv_type = "w"
optim_type = "adam"

## Locality regularization
latent_ball_num_of_samples = 1
locality_regularization_interval = 1
use_locality_regularization = False
regulizer_l2_lambda = 0.1
regulizer_lpips_lambda = 0.1
regulizer_alpha = 30

## Loss
pt_l2_lambda = 1
pt_lpips_lambda = 1

## Steps
LPIPS_value_threshold = 0.06
max_pti_steps = 350
first_inv_steps = 450
max_images_to_invert = 30

## Optimization
pti_learning_rate = 3e-4
first_inv_lr = 5e-3
train_batch_size = 1
use_last_w_pivots = False
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py
DELETED
@@ -1,1024 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers.utils.import_utils import is_invisible_watermark_available

from ...image_processor import VaeImageProcessor
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from ...models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from ..stable_diffusion_xl import StableDiffusionXLPipelineOutput


if is_invisible_watermark_available():
    from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

from .multicontrolnet import MultiControlNetModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # To be updated when there's a useful ControlNet checkpoint
        >>> # compatible with SDXL.
        ```
"""


class StableDiffusionXLControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    In addition the pipeline inherits the following loading methods:
        - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
        - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([` CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 (`CLIPTokenizer`):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
            Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
            as a list, the outputs from each ControlNet are added together to create one combined additional
            conditioning.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        controlnet: ControlNetModel,
        scheduler: KarrasDiffusionSchedulers,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        if isinstance(controlnet, (list, tuple)):
            raise ValueError("MultiControlNet is not yet supported.")

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )
        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        self.vae.enable_tiling()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        model_sequence = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        model_sequence.extend([self.unet, self.vae])

        hook = None
        for cpu_offloaded_model in model_sequence:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        cpu_offload_with_hook(self.controlnet, device)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
|
245 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
246 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
247 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
248 |
-
less than `1`).
|
249 |
-
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
250 |
-
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
251 |
-
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
252 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
253 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
254 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
255 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
256 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
257 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
258 |
-
argument.
|
259 |
-
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
260 |
-
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
261 |
-
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
262 |
-
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
263 |
-
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
264 |
-
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
265 |
-
input argument.
|
266 |
-
lora_scale (`float`, *optional*):
|
267 |
-
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
268 |
-
"""
|
269 |
-
device = device or self._execution_device
|
270 |
-
|
271 |
-
# set lora scale so that monkey patched LoRA
|
272 |
-
# function of text encoder can correctly access it
|
273 |
-
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
274 |
-
self._lora_scale = lora_scale
|
275 |
-
|
276 |
-
if prompt is not None and isinstance(prompt, str):
|
277 |
-
batch_size = 1
|
278 |
-
elif prompt is not None and isinstance(prompt, list):
|
279 |
-
batch_size = len(prompt)
|
280 |
-
else:
|
281 |
-
batch_size = prompt_embeds.shape[0]
|
282 |
-
|
283 |
-
# Define tokenizers and text encoders
|
284 |
-
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
285 |
-
text_encoders = (
|
286 |
-
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
287 |
-
)
|
288 |
-
|
289 |
-
if prompt_embeds is None:
|
290 |
-
prompt_2 = prompt_2 or prompt
|
291 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
292 |
-
prompt_embeds_list = []
|
293 |
-
prompts = [prompt, prompt_2]
|
294 |
-
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
295 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
296 |
-
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
297 |
-
|
298 |
-
text_inputs = tokenizer(
|
299 |
-
prompt,
|
300 |
-
padding="max_length",
|
301 |
-
max_length=tokenizer.model_max_length,
|
302 |
-
truncation=True,
|
303 |
-
return_tensors="pt",
|
304 |
-
)
|
305 |
-
|
306 |
-
text_input_ids = text_inputs.input_ids
|
307 |
-
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
308 |
-
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
309 |
-
|
310 |
-
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
311 |
-
text_input_ids, untruncated_ids
|
312 |
-
):
|
313 |
-
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
314 |
-
logger.warning(
|
315 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
316 |
-
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
317 |
-
)
|
318 |
-
|
319 |
-
prompt_embeds = text_encoder(
|
320 |
-
text_input_ids.to(device),
|
321 |
-
output_hidden_states=True,
|
322 |
-
)
|
323 |
-
|
324 |
-
# We are only ALWAYS interested in the pooled output of the final text encoder
|
325 |
-
pooled_prompt_embeds = prompt_embeds[0]
|
326 |
-
prompt_embeds = prompt_embeds.hidden_states[-2]
|
327 |
-
|
328 |
-
prompt_embeds_list.append(prompt_embeds)
|
329 |
-
|
330 |
-
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
331 |
-
|
332 |
-
# get unconditional embeddings for classifier free guidance
|
333 |
-
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
334 |
-
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
335 |
-
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
336 |
-
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
337 |
-
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
338 |
-
negative_prompt = negative_prompt or ""
|
339 |
-
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
340 |
-
|
341 |
-
uncond_tokens: List[str]
|
342 |
-
if prompt is not None and type(prompt) is not type(negative_prompt):
|
343 |
-
raise TypeError(
|
344 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
345 |
-
f" {type(prompt)}."
|
346 |
-
)
|
347 |
-
elif isinstance(negative_prompt, str):
|
348 |
-
uncond_tokens = [negative_prompt, negative_prompt_2]
|
349 |
-
elif batch_size != len(negative_prompt):
|
350 |
-
raise ValueError(
|
351 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
352 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
353 |
-
" the batch size of `prompt`."
|
354 |
-
)
|
355 |
-
else:
|
356 |
-
uncond_tokens = [negative_prompt, negative_prompt_2]
|
357 |
-
|
358 |
-
negative_prompt_embeds_list = []
|
359 |
-
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
360 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
361 |
-
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
362 |
-
|
363 |
-
max_length = prompt_embeds.shape[1]
|
364 |
-
uncond_input = tokenizer(
|
365 |
-
negative_prompt,
|
366 |
-
padding="max_length",
|
367 |
-
max_length=max_length,
|
368 |
-
truncation=True,
|
369 |
-
return_tensors="pt",
|
370 |
-
)
|
371 |
-
|
372 |
-
negative_prompt_embeds = text_encoder(
|
373 |
-
uncond_input.input_ids.to(device),
|
374 |
-
output_hidden_states=True,
|
375 |
-
)
|
376 |
-
# We are only ALWAYS interested in the pooled output of the final text encoder
|
377 |
-
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
378 |
-
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
379 |
-
|
380 |
-
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
381 |
-
|
382 |
-
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
383 |
-
|
384 |
-
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
385 |
-
bs_embed, seq_len, _ = prompt_embeds.shape
|
386 |
-
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
387 |
-
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
388 |
-
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
389 |
-
|
390 |
-
if do_classifier_free_guidance:
|
391 |
-
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
392 |
-
seq_len = negative_prompt_embeds.shape[1]
|
393 |
-
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
394 |
-
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
395 |
-
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
396 |
-
|
397 |
-
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
398 |
-
bs_embed * num_images_per_prompt, -1
|
399 |
-
)
|
400 |
-
if do_classifier_free_guidance:
|
401 |
-
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
402 |
-
bs_embed * num_images_per_prompt, -1
|
403 |
-
)
|
404 |
-
|
405 |
-
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
406 |
-
|
407 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
408 |
-
def prepare_extra_step_kwargs(self, generator, eta):
|
409 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
410 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
411 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
412 |
-
# and should be between [0, 1]
|
413 |
-
|
414 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
415 |
-
extra_step_kwargs = {}
|
416 |
-
if accepts_eta:
|
417 |
-
extra_step_kwargs["eta"] = eta
|
418 |
-
|
419 |
-
# check if the scheduler accepts generator
|
420 |
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
421 |
-
if accepts_generator:
|
422 |
-
extra_step_kwargs["generator"] = generator
|
423 |
-
return extra_step_kwargs
|
424 |
-
|
425 |
-
def check_inputs(
|
426 |
-
self,
|
427 |
-
prompt,
|
428 |
-
prompt_2,
|
429 |
-
image,
|
430 |
-
callback_steps,
|
431 |
-
negative_prompt=None,
|
432 |
-
negative_prompt_2=None,
|
433 |
-
prompt_embeds=None,
|
434 |
-
negative_prompt_embeds=None,
|
435 |
-
controlnet_conditioning_scale=1.0,
|
436 |
-
control_guidance_start=0.0,
|
437 |
-
control_guidance_end=1.0,
|
438 |
-
):
|
439 |
-
if (callback_steps is None) or (
|
440 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
441 |
-
):
|
442 |
-
raise ValueError(
|
443 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
444 |
-
f" {type(callback_steps)}."
|
445 |
-
)
|
446 |
-
|
447 |
-
if prompt is not None and prompt_embeds is not None:
|
448 |
-
raise ValueError(
|
449 |
-
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
450 |
-
" only forward one of the two."
|
451 |
-
)
|
452 |
-
elif prompt_2 is not None and prompt_embeds is not None:
|
453 |
-
raise ValueError(
|
454 |
-
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
455 |
-
" only forward one of the two."
|
456 |
-
)
|
457 |
-
elif prompt is None and prompt_embeds is None:
|
458 |
-
raise ValueError(
|
459 |
-
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
460 |
-
)
|
461 |
-
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
462 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
463 |
-
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
464 |
-
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
465 |
-
|
466 |
-
if negative_prompt is not None and negative_prompt_embeds is not None:
|
467 |
-
raise ValueError(
|
468 |
-
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
469 |
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
470 |
-
)
|
471 |
-
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
|
472 |
-
raise ValueError(
|
473 |
-
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
|
474 |
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
475 |
-
)
|
476 |
-
|
477 |
-
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
478 |
-
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
479 |
-
raise ValueError(
|
480 |
-
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
481 |
-
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
482 |
-
f" {negative_prompt_embeds.shape}."
|
483 |
-
)
|
484 |
-
|
485 |
-
# Check `image`
|
486 |
-
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
487 |
-
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
488 |
-
)
|
489 |
-
if (
|
490 |
-
isinstance(self.controlnet, ControlNetModel)
|
491 |
-
or is_compiled
|
492 |
-
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
493 |
-
):
|
494 |
-
self.check_image(image, prompt, prompt_embeds)
|
495 |
-
else:
|
496 |
-
assert False
|
497 |
-
|
498 |
-
# Check `controlnet_conditioning_scale`
|
499 |
-
if (
|
500 |
-
isinstance(self.controlnet, ControlNetModel)
|
501 |
-
or is_compiled
|
502 |
-
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
503 |
-
):
|
504 |
-
if not isinstance(controlnet_conditioning_scale, float):
|
505 |
-
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
506 |
-
else:
|
507 |
-
assert False
|
508 |
-
|
509 |
-
if len(control_guidance_start) != len(control_guidance_end):
|
510 |
-
raise ValueError(
|
511 |
-
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
512 |
-
)
|
513 |
-
|
514 |
-
for start, end in zip(control_guidance_start, control_guidance_end):
|
515 |
-
if start >= end:
|
516 |
-
raise ValueError(
|
517 |
-
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
518 |
-
)
|
519 |
-
if start < 0.0:
|
520 |
-
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
521 |
-
if end > 1.0:
|
522 |
-
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
523 |
-
|
524 |
-
def check_image(self, image, prompt, prompt_embeds):
|
525 |
-
image_is_pil = isinstance(image, PIL.Image.Image)
|
526 |
-
image_is_tensor = isinstance(image, torch.Tensor)
|
527 |
-
image_is_np = isinstance(image, np.ndarray)
|
528 |
-
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
529 |
-
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
530 |
-
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
531 |
-
|
532 |
-
if (
|
533 |
-
not image_is_pil
|
534 |
-
and not image_is_tensor
|
535 |
-
and not image_is_np
|
536 |
-
and not image_is_pil_list
|
537 |
-
and not image_is_tensor_list
|
538 |
-
and not image_is_np_list
|
539 |
-
):
|
540 |
-
raise TypeError(
|
541 |
-
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
542 |
-
)
|
543 |
-
|
544 |
-
if image_is_pil:
|
545 |
-
image_batch_size = 1
|
546 |
-
else:
|
547 |
-
image_batch_size = len(image)
|
548 |
-
|
549 |
-
if prompt is not None and isinstance(prompt, str):
|
550 |
-
prompt_batch_size = 1
|
551 |
-
elif prompt is not None and isinstance(prompt, list):
|
552 |
-
prompt_batch_size = len(prompt)
|
553 |
-
elif prompt_embeds is not None:
|
554 |
-
prompt_batch_size = prompt_embeds.shape[0]
|
555 |
-
|
556 |
-
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
557 |
-
raise ValueError(
|
558 |
-
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
559 |
-
)
|
560 |
-
|
561 |
-
def prepare_image(
|
562 |
-
self,
|
563 |
-
image,
|
564 |
-
width,
|
565 |
-
height,
|
566 |
-
batch_size,
|
567 |
-
num_images_per_prompt,
|
568 |
-
device,
|
569 |
-
dtype,
|
570 |
-
do_classifier_free_guidance=False,
|
571 |
-
guess_mode=False,
|
572 |
-
):
|
573 |
-
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
574 |
-
image_batch_size = image.shape[0]
|
575 |
-
|
576 |
-
if image_batch_size == 1:
|
577 |
-
repeat_by = batch_size
|
578 |
-
else:
|
579 |
-
# image batch size is the same as prompt batch size
|
580 |
-
repeat_by = num_images_per_prompt
|
581 |
-
|
582 |
-
image = image.repeat_interleave(repeat_by, dim=0)
|
583 |
-
|
584 |
-
image = image.to(device=device, dtype=dtype)
|
585 |
-
|
586 |
-
if do_classifier_free_guidance and not guess_mode:
|
587 |
-
image = torch.cat([image] * 2)
|
588 |
-
|
589 |
-
return image
|
590 |
-
|
591 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
592 |
-
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
593 |
-
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
594 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
595 |
-
raise ValueError(
|
596 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
597 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
598 |
-
)
|
599 |
-
|
600 |
-
if latents is None:
|
601 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
602 |
-
else:
|
603 |
-
latents = latents.to(device)
|
604 |
-
|
605 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
606 |
-
latents = latents * self.scheduler.init_noise_sigma
|
607 |
-
return latents
|
608 |
-
|
609 |
-
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
|
610 |
-
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
|
611 |
-
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
612 |
-
|
613 |
-
passed_add_embed_dim = (
|
614 |
-
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
|
615 |
-
)
|
616 |
-
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
617 |
-
|
618 |
-
if expected_add_embed_dim != passed_add_embed_dim:
|
619 |
-
raise ValueError(
|
620 |
-
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
621 |
-
)
|
622 |
-
|
623 |
-
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
624 |
-
return add_time_ids
|
625 |
-
|
626 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
627 |
-
def upcast_vae(self):
|
628 |
-
dtype = self.vae.dtype
|
629 |
-
self.vae.to(dtype=torch.float32)
|
630 |
-
use_torch_2_0_or_xformers = isinstance(
|
631 |
-
self.vae.decoder.mid_block.attentions[0].processor,
|
632 |
-
(
|
633 |
-
AttnProcessor2_0,
|
634 |
-
XFormersAttnProcessor,
|
635 |
-
LoRAXFormersAttnProcessor,
|
636 |
-
LoRAAttnProcessor2_0,
|
637 |
-
),
|
638 |
-
)
|
639 |
-
# if xformers or torch_2_0 is used attention block does not need
|
640 |
-
# to be in float32 which can save lots of memory
|
641 |
-
if use_torch_2_0_or_xformers:
|
642 |
-
self.vae.post_quant_conv.to(dtype)
|
643 |
-
self.vae.decoder.conv_in.to(dtype)
|
644 |
-
self.vae.decoder.mid_block.to(dtype)
|
645 |
-
|
646 |
-
@torch.no_grad()
|
647 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
648 |
-
def __call__(
|
649 |
-
self,
|
650 |
-
prompt: Union[str, List[str]] = None,
|
651 |
-
prompt_2: Optional[Union[str, List[str]]] = None,
|
652 |
-
image: Union[
|
653 |
-
torch.FloatTensor,
|
654 |
-
PIL.Image.Image,
|
655 |
-
np.ndarray,
|
656 |
-
List[torch.FloatTensor],
|
657 |
-
List[PIL.Image.Image],
|
658 |
-
List[np.ndarray],
|
659 |
-
] = None,
|
660 |
-
height: Optional[int] = None,
|
661 |
-
width: Optional[int] = None,
|
662 |
-
num_inference_steps: int = 50,
|
663 |
-
guidance_scale: float = 5.0,
|
664 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
665 |
-
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
666 |
-
num_images_per_prompt: Optional[int] = 1,
|
667 |
-
eta: float = 0.0,
|
668 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
669 |
-
latents: Optional[torch.FloatTensor] = None,
|
670 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
671 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
672 |
-
output_type: Optional[str] = "pil",
|
673 |
-
return_dict: bool = True,
|
674 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
675 |
-
callback_steps: int = 1,
|
676 |
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
677 |
-
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
678 |
-
guess_mode: bool = False,
|
679 |
-
control_guidance_start: Union[float, List[float]] = 0.0,
|
680 |
-
control_guidance_end: Union[float, List[float]] = 1.0,
|
681 |
-
original_size: Tuple[int, int] = None,
|
682 |
-
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
683 |
-
target_size: Tuple[int, int] = None,
|
684 |
-
):
|
685 |
-
r"""
|
686 |
-
Function invoked when calling the pipeline for generation.
|
687 |
-
|
688 |
-
Args:
|
689 |
-
prompt (`str` or `List[str]`, *optional*):
|
690 |
-
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
691 |
-
instead.
|
692 |
-
prompt_2 (`str` or `List[str]`, *optional*):
|
693 |
-
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
694 |
-
used in both text-encoders
|
695 |
-
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
696 |
-
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
697 |
-
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
698 |
-
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
|
699 |
-
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
|
700 |
-
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
|
701 |
-
specified in init, images must be passed as a list such that each element of the list can be correctly
|
702 |
-
batched for input to a single controlnet.
|
703 |
-
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
704 |
-
The height in pixels of the generated image.
|
705 |
-
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
706 |
-
The width in pixels of the generated image.
|
707 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
708 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
709 |
-
expense of slower inference.
|
710 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
711 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
712 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
713 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
714 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
715 |
-
usually at the expense of lower image quality.
|
716 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
717 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
718 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
719 |
-
less than `1`).
|
720 |
-
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
721 |
-
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
722 |
-
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
723 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
724 |
-
The number of images to generate per prompt.
|
725 |
-
eta (`float`, *optional*, defaults to 0.0):
|
726 |
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
727 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
728 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
729 |
-
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
730 |
-
to make generation deterministic.
|
731 |
-
latents (`torch.FloatTensor`, *optional*):
|
732 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
733 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
734 |
-
tensor will ge generated by sampling using the supplied random `generator`.
|
735 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
736 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
737 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
738 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
739 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
740 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
741 |
-
argument.
|
742 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
743 |
-
The output format of the generate image. Choose between
|
744 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
745 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
746 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
747 |
-
plain tuple.
|
748 |
-
callback (`Callable`, *optional*):
|
749 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
750 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
751 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
752 |
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
753 |
-
called at every step.
|
754 |
-
cross_attention_kwargs (`dict`, *optional*):
|
755 |
-
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
756 |
-
`self.processor` in
|
757 |
-
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
758 |
-
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
759 |
-
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
760 |
-
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
761 |
-
corresponding scale as a list.
|
762 |
-
guess_mode (`bool`, *optional*, defaults to `False`):
|
763 |
-
In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
|
764 |
-
you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
|
765 |
-
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
766 |
-
The percentage of total steps at which the controlnet starts applying.
|
767 |
-
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
768 |
-
The percentage of total steps at which the controlnet stops applying.
|
769 |
-
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
770 |
-
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
771 |
-
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
|
772 |
-
explained in section 2.2 of
|
773 |
-
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
774 |
-
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
775 |
-
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
776 |
-
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
777 |
-
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
778 |
-
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
779 |
-
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
780 |
-
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
781 |
-
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
782 |
-
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
783 |
-
Examples:
|
784 |
-
|
785 |
-
Returns:
|
786 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
787 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
|
788 |
-
containing the output images.
|
789 |
-
"""
|
790 |
-
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
791 |
-
|
792 |
-
# align format for control guidance
|
793 |
-
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
794 |
-
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
795 |
-
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
796 |
-
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
797 |
-
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
798 |
-
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
799 |
-
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
|
800 |
-
control_guidance_end
|
801 |
-
]
|
802 |
-
|
803 |
-
# 1. Check inputs. Raise error if not correct
|
804 |
-
self.check_inputs(
|
805 |
-
prompt,
|
806 |
-
prompt_2,
|
807 |
-
image,
|
808 |
-
callback_steps,
|
809 |
-
negative_prompt,
|
810 |
-
negative_prompt_2,
|
811 |
-
prompt_embeds,
|
812 |
-
negative_prompt_embeds,
|
813 |
-
controlnet_conditioning_scale,
|
814 |
-
control_guidance_start,
|
815 |
-
control_guidance_end,
|
816 |
-
)
|
817 |
-
|
818 |
-
# 2. Define call parameters
|
819 |
-
if prompt is not None and isinstance(prompt, str):
|
820 |
-
batch_size = 1
|
821 |
-
elif prompt is not None and isinstance(prompt, list):
|
822 |
-
batch_size = len(prompt)
|
823 |
-
else:
|
824 |
-
batch_size = prompt_embeds.shape[0]
|
825 |
-
|
826 |
-
device = self._execution_device
|
827 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
828 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
829 |
-
# corresponds to doing no classifier free guidance.
|
830 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
831 |
-
|
832 |
-
global_pool_conditions = (
|
833 |
-
controlnet.config.global_pool_conditions
|
834 |
-
if isinstance(controlnet, ControlNetModel)
|
835 |
-
else controlnet.nets[0].config.global_pool_conditions
|
836 |
-
)
|
837 |
-
guess_mode = guess_mode or global_pool_conditions
|
838 |
-
|
839 |
-
# 3. Encode input prompt
|
840 |
-
text_encoder_lora_scale = (
|
841 |
-
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
842 |
-
)
|
843 |
-
(
|
844 |
-
prompt_embeds,
|
845 |
-
negative_prompt_embeds,
|
846 |
-
pooled_prompt_embeds,
|
847 |
-
negative_pooled_prompt_embeds,
|
848 |
-
) = self.encode_prompt(
|
849 |
-
prompt,
|
850 |
-
prompt_2,
|
851 |
-
device,
|
852 |
-
num_images_per_prompt,
|
853 |
-
do_classifier_free_guidance,
|
854 |
-
negative_prompt,
|
855 |
-
negative_prompt_2,
|
856 |
-
prompt_embeds=prompt_embeds,
|
857 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
858 |
-
lora_scale=text_encoder_lora_scale,
|
859 |
-
)
|
860 |
-
|
861 |
-
# 4. Prepare image
|
862 |
-
if isinstance(controlnet, ControlNetModel):
|
863 |
-
image = self.prepare_image(
|
864 |
-
image=image,
|
865 |
-
width=width,
|
866 |
-
height=height,
|
867 |
-
batch_size=batch_size * num_images_per_prompt,
|
868 |
-
num_images_per_prompt=num_images_per_prompt,
|
869 |
-
device=device,
|
870 |
-
dtype=controlnet.dtype,
|
871 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
872 |
-
guess_mode=guess_mode,
|
873 |
-
)
|
874 |
-
height, width = image.shape[-2:]
|
875 |
-
else:
|
876 |
-
assert False
|
877 |
-
|
878 |
-
# 5. Prepare timesteps
|
879 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
880 |
-
timesteps = self.scheduler.timesteps
|
881 |
-
|
882 |
-
# 6. Prepare latent variables
|
883 |
-
num_channels_latents = self.unet.config.in_channels
|
884 |
-
latents = self.prepare_latents(
|
885 |
-
batch_size * num_images_per_prompt,
|
886 |
-
num_channels_latents,
|
887 |
-
height,
|
888 |
-
width,
|
889 |
-
prompt_embeds.dtype,
|
890 |
-
device,
|
891 |
-
generator,
|
892 |
-
latents,
|
893 |
-
)
|
894 |
-
|
895 |
-
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
896 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
897 |
-
|
898 |
-
# 7.1 Create tensor stating which controlnets to keep
|
899 |
-
controlnet_keep = []
|
900 |
-
for i in range(len(timesteps)):
|
901 |
-
keeps = [
|
902 |
-
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
903 |
-
for s, e in zip(control_guidance_start, control_guidance_end)
|
904 |
-
]
|
905 |
-
controlnet_keep.append(keeps[0] if len(keeps) == 1 else keeps)
|
906 |
-
|
907 |
-
original_size = original_size or image.shape[-2:]
|
908 |
-
target_size = target_size or (height, width)
|
909 |
-
|
910 |
-
# 7.2 Prepare added time ids & embeddings
|
911 |
-
add_text_embeds = pooled_prompt_embeds
|
912 |
-
add_time_ids = self._get_add_time_ids(
|
913 |
-
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
914 |
-
)
|
915 |
-
|
916 |
-
if do_classifier_free_guidance:
|
917 |
-
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
918 |
-
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
919 |
-
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
|
920 |
-
|
921 |
-
prompt_embeds = prompt_embeds.to(device)
|
922 |
-
add_text_embeds = add_text_embeds.to(device)
|
923 |
-
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
924 |
-
|
925 |
-
# 8. Denoising loop
|
926 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
927 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
928 |
-
for i, t in enumerate(timesteps):
|
929 |
-
# expand the latents if we are doing classifier free guidance
|
930 |
-
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
931 |
-
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
932 |
-
|
933 |
-
# controlnet(s) inference
|
934 |
-
if guess_mode and do_classifier_free_guidance:
|
935 |
-
# Infer ControlNet only for the conditional batch.
|
936 |
-
control_model_input = latents
|
937 |
-
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
938 |
-
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
939 |
-
else:
|
940 |
-
control_model_input = latent_model_input
|
941 |
-
controlnet_prompt_embeds = prompt_embeds
|
942 |
-
|
943 |
-
if isinstance(controlnet_keep[i], list):
|
944 |
-
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
945 |
-
else:
|
946 |
-
cond_scale = controlnet_conditioning_scale * controlnet_keep[i]
|
947 |
-
|
948 |
-
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
949 |
-
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
950 |
-
control_model_input,
|
951 |
-
t,
|
952 |
-
encoder_hidden_states=controlnet_prompt_embeds,
|
953 |
-
controlnet_cond=image,
|
954 |
-
conditioning_scale=cond_scale,
|
955 |
-
guess_mode=guess_mode,
|
956 |
-
added_cond_kwargs=added_cond_kwargs,
|
957 |
-
return_dict=False,
|
958 |
-
)
|
959 |
-
|
960 |
-
if guess_mode and do_classifier_free_guidance:
|
961 |
-
# Infered ControlNet only for the conditional batch.
|
962 |
-
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
963 |
-
# add 0 to the unconditional batch to keep it unchanged.
|
964 |
-
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
965 |
-
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
966 |
-
|
967 |
-
# predict the noise residual
|
968 |
-
noise_pred = self.unet(
|
969 |
-
latent_model_input,
|
970 |
-
t,
|
971 |
-
encoder_hidden_states=prompt_embeds,
|
972 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
973 |
-
down_block_additional_residuals=down_block_res_samples,
|
974 |
-
mid_block_additional_residual=mid_block_res_sample,
|
975 |
-
added_cond_kwargs=added_cond_kwargs,
|
976 |
-
return_dict=False,
|
977 |
-
)[0]
|
978 |
-
|
979 |
-
# perform guidance
|
980 |
-
if do_classifier_free_guidance:
|
981 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
982 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
983 |
-
|
984 |
-
# compute the previous noisy sample x_t -> x_t-1
|
985 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
986 |
-
|
987 |
-
# call the callback, if provided
|
988 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
989 |
-
progress_bar.update()
|
990 |
-
if callback is not None and i % callback_steps == 0:
|
991 |
-
callback(i, t, latents)
|
992 |
-
|
993 |
-
# If we do sequential model offloading, let's offload unet and controlnet
|
994 |
-
# manually for max memory savings
|
995 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
996 |
-
self.unet.to("cpu")
|
997 |
-
self.controlnet.to("cpu")
|
998 |
-
torch.cuda.empty_cache()
|
999 |
-
|
1000 |
-
# make sure the VAE is in float32 mode, as it overflows in float16
|
1001 |
-
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
|
1002 |
-
self.upcast_vae()
|
1003 |
-
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
1004 |
-
|
1005 |
-
if not output_type == "latent":
|
1006 |
-
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
1007 |
-
else:
|
1008 |
-
image = latents
|
1009 |
-
return StableDiffusionXLPipelineOutput(images=image)
|
1010 |
-
|
1011 |
-
# apply watermark if available
|
1012 |
-
if self.watermark is not None:
|
1013 |
-
image = self.watermark.apply_watermark(image)
|
1014 |
-
|
1015 |
-
image = self.image_processor.postprocess(image, output_type=output_type)
|
1016 |
-
|
1017 |
-
# Offload last model to CPU
|
1018 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
1019 |
-
self.final_offload_hook.offload()
|
1020 |
-
|
1021 |
-
if not return_dict:
|
1022 |
-
return (image,)
|
1023 |
-
|
1024 |
-
return StableDiffusionXLPipelineOutput(images=image)
|
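For orientation, here is a minimal, hedged sketch of how a pipeline like the one above is typically driven. The checkpoint IDs and the conditioning-image URL are illustrative assumptions, not taken from this repository:

# Illustrative usage sketch; model IDs and the image URL are assumptions.
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # exercises the offload-hook path defined above

canny = load_image("https://example.com/canny_edges.png")  # placeholder conditioning image
result = pipe(
    prompt="a futuristic city at dusk, highly detailed",
    image=canny,
    num_inference_steps=30,
    controlnet_conditioning_scale=0.5,
)
result.images[0].save("out.png")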
spaces/Andy1621/uniformer_image_detection/configs/paa/README.md
DELETED
@@ -1,35 +0,0 @@
# Probabilistic Anchor Assignment with IoU Prediction for Object Detection

[ALGORITHM]

```latex
@inproceedings{paa-eccv2020,
  title={Probabilistic Anchor Assignment with IoU Prediction for Object Detection},
  author={Kim, Kang and Lee, Hee Seok},
  booktitle = {ECCV},
  year={2020}
}
```

## Results and Models

We provide config files to reproduce the object detection results in the
ECCV 2020 paper for Probabilistic Anchor Assignment with IoU
Prediction for Object Detection.

| Backbone  | Lr schd | Mem (GB) | Score voting | box AP | Config | Download |
|:---------:|:-------:|:--------:|:------------:|:------:|:------:|:--------:|
| R-50-FPN  | 12e | 3.7 | True  | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) |
| R-50-FPN  | 12e | 3.7 | False | 40.2 | - | - |
| R-50-FPN  | 18e | 3.7 | True  | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1.5x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.log.json) |
| R-50-FPN  | 18e | 3.7 | False | 41.2 | - | - |
| R-50-FPN  | 24e | 3.7 | True  | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.log.json) |
| R-50-FPN  | 36e | 3.7 | True  | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_mstrain_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722.log.json) |
| R-101-FPN | 12e | 6.2 | True  | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) |
| R-101-FPN | 12e | 6.2 | False | 42.4 | - | - |
| R-101-FPN | 24e | 6.2 | True  | 43.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.log.json) |
| R-101-FPN | 36e | 6.2 | True  | 45.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_mstrain_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202.log.json) |

**Note**:

1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.2 mAP. We report the best results.
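Since the table above ties each result to a config file, a quick way to sanity-check which model a config builds is to load it through MMCV's config loader. A minimal sketch, assuming a standard MMDetection checkout (the config path comes from the table; the expected head name is an assumption based on the PAA implementation):

# Illustrative only; assumes an MMDetection working directory.
from mmcv import Config

cfg = Config.fromfile("configs/paa/paa_r50_fpn_1x_coco.py")
print(cfg.model.bbox_head.type)  # expected to print 'PAAHead'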
spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
DELETED
@@ -1,3 +0,0 @@
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'

model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
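The three lines above rely on MMCV's `_base_` inheritance: the child config pulls in the full R-50 Sparse R-CNN setup and overrides only the backbone depth and the pretrained weights. A minimal sketch of how the merge can be verified (assuming an MMDetection checkout; the field names follow the config above):

from mmcv import Config

# Config.fromfile resolves `_base_` first, then applies the child's overrides.
cfg = Config.fromfile("configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py")
assert cfg.model.backbone.depth == 101                    # overridden by the child
assert cfg.model.pretrained == "torchvision://resnet101"  # overridden by the child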
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
_base_ = [
    '../_base_/models/dnl_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
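The `test_cfg` above selects sliding-window inference: 769x769 crops are taken every 513 pixels, so adjacent windows overlap by 256 pixels and predictions are averaged where they overlap. A small sketch of the window arithmetic (mirroring the grid formula MMSegmentation-style slide inference uses; the helper name is ours):

import math

def num_windows(size: int, crop: int = 769, stride: int = 513) -> int:
    """Sliding-window positions along one axis for slide-mode inference."""
    return max(math.ceil((size - crop) / stride) + 1, 1)

# A 1024x2048 Cityscapes image is covered by 2 x 4 = 8 crops.
print(num_windows(1024), num_windows(2048))  # -> 2 4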
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/gallery/script.py
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
|
3 |
-
import gradio as gr
|
4 |
-
|
5 |
-
from modules.html_generator import get_image_cache
|
6 |
-
from modules.shared import gradio
|
7 |
-
|
8 |
-
|
9 |
-
def generate_css():
|
10 |
-
css = """
|
11 |
-
.character-gallery > .gallery {
|
12 |
-
margin: 1rem 0;
|
13 |
-
display: grid !important;
|
14 |
-
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
|
15 |
-
grid-column-gap: 0.4rem;
|
16 |
-
grid-row-gap: 1.2rem;
|
17 |
-
}
|
18 |
-
|
19 |
-
.character-gallery > .label {
|
20 |
-
display: none !important;
|
21 |
-
}
|
22 |
-
|
23 |
-
.character-gallery button.gallery-item {
|
24 |
-
display: contents;
|
25 |
-
}
|
26 |
-
|
27 |
-
.character-container {
|
28 |
-
cursor: pointer;
|
29 |
-
text-align: center;
|
30 |
-
position: relative;
|
31 |
-
opacity: 0.85;
|
32 |
-
}
|
33 |
-
|
34 |
-
.character-container:hover {
|
35 |
-
opacity: 1;
|
36 |
-
}
|
37 |
-
|
38 |
-
.character-container .placeholder, .character-container img {
|
39 |
-
width: 150px;
|
40 |
-
height: 200px;
|
41 |
-
background-color: gray;
|
42 |
-
object-fit: cover;
|
43 |
-
margin: 0 auto;
|
44 |
-
border-radius: 1rem;
|
45 |
-
border: 3px solid white;
|
46 |
-
box-shadow: 3px 3px 6px 0px rgb(0 0 0 / 50%);
|
47 |
-
}
|
48 |
-
|
49 |
-
.character-name {
|
50 |
-
margin-top: 0.3rem;
|
51 |
-
display: block;
|
52 |
-
font-size: 1.2rem;
|
53 |
-
font-weight: 600;
|
54 |
-
overflow-wrap: anywhere;
|
55 |
-
}
|
56 |
-
"""
|
57 |
-
return css
|
58 |
-
|
59 |
-
|
60 |
-
def generate_html():
|
61 |
-
cards = []
|
62 |
-
# Iterate through files in image folder
|
63 |
-
for file in sorted(Path("characters").glob("*")):
|
64 |
-
if file.suffix in [".json", ".yml", ".yaml"]:
|
65 |
-
character = file.stem
|
66 |
-
container_html = '<div class="character-container">'
|
67 |
-
image_html = "<div class='placeholder'></div>"
|
68 |
-
|
69 |
-
for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
|
70 |
-
if path.exists():
|
71 |
-
image_html = f'<img src="file/{get_image_cache(path)}">'
|
72 |
-
break
|
73 |
-
|
74 |
-
container_html += f'{image_html} <span class="character-name">{character}</span>'
|
75 |
-
container_html += "</div>"
|
76 |
-
cards.append([container_html, character])
|
77 |
-
|
78 |
-
return cards
|
79 |
-
|
80 |
-
|
81 |
-
def select_character(evt: gr.SelectData):
|
82 |
-
return (evt.value[1])
|
83 |
-
|
84 |
-
|
85 |
-
def custom_js():
|
86 |
-
path_to_js = Path(__file__).parent.resolve() / 'script.js'
|
87 |
-
return open(path_to_js, 'r').read()
|
88 |
-
|
89 |
-
|
90 |
-
def ui():
|
91 |
-
with gr.Accordion("Character gallery", open=False, elem_id='gallery-extension'):
|
92 |
-
update = gr.Button("Refresh")
|
93 |
-
gr.HTML(value="<style>" + generate_css() + "</style>")
|
94 |
-
gallery = gr.Dataset(components=[gr.HTML(visible=False)],
|
95 |
-
label="",
|
96 |
-
samples=generate_html(),
|
97 |
-
elem_classes=["character-gallery"],
|
98 |
-
samples_per_page=50
|
99 |
-
)
|
100 |
-
update.click(generate_html, [], gallery)
|
101 |
-
gallery.select(select_character, None, gradio['character_menu'])
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_model_menu.py
DELETED
@@ -1,267 +0,0 @@
-import importlib
-import math
-import re
-import traceback
-from functools import partial
-from pathlib import Path
-
-import gradio as gr
-import psutil
-import torch
-
-from modules import loaders, shared, ui, utils
-from modules.logging_colors import logger
-from modules.LoRA import add_lora_to_model
-from modules.models import load_model, unload_model
-from modules.models_settings import (
-    apply_model_settings_to_state,
-    get_model_metadata,
-    save_model_settings,
-    update_model_parameters
-)
-from modules.utils import gradio
-
-
-def create_ui():
-    mu = shared.args.multi_user
-
-    # Finding the default values for the GPU and CPU memories
-    total_mem = []
-    for i in range(torch.cuda.device_count()):
-        total_mem.append(math.floor(torch.cuda.get_device_properties(i).total_memory / (1024 * 1024)))
-
-    default_gpu_mem = []
-    if shared.args.gpu_memory is not None and len(shared.args.gpu_memory) > 0:
-        for i in shared.args.gpu_memory:
-            if 'mib' in i.lower():
-                default_gpu_mem.append(int(re.sub('[a-zA-Z ]', '', i)))
-            else:
-                default_gpu_mem.append(int(re.sub('[a-zA-Z ]', '', i)) * 1000)
-
-    while len(default_gpu_mem) < len(total_mem):
-        default_gpu_mem.append(0)
-
-    total_cpu_mem = math.floor(psutil.virtual_memory().total / (1024 * 1024))
-    if shared.args.cpu_memory is not None:
-        default_cpu_mem = re.sub('[a-zA-Z ]', '', shared.args.cpu_memory)
-    else:
-        default_cpu_mem = 0
-
-    with gr.Tab("Model", elem_id="model-tab"):
-        with gr.Row():
-            with gr.Column():
-                with gr.Row():
-                    with gr.Column():
-                        with gr.Row():
-                            shared.gradio['model_menu'] = gr.Dropdown(choices=utils.get_available_models(), value=shared.model_name, label='Model', elem_classes='slim-dropdown', interactive=not mu)
-                            ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': utils.get_available_models()}, 'refresh-button', interactive=not mu)
-                            shared.gradio['load_model'] = gr.Button("Load", visible=not shared.settings['autoload_model'], elem_classes='refresh-button', interactive=not mu)
-                            shared.gradio['unload_model'] = gr.Button("Unload", elem_classes='refresh-button', interactive=not mu)
-                            shared.gradio['reload_model'] = gr.Button("Reload", elem_classes='refresh-button', interactive=not mu)
-                            shared.gradio['save_model_settings'] = gr.Button("Save settings", elem_classes='refresh-button', interactive=not mu)
-
-                    with gr.Column():
-                        with gr.Row():
-                            shared.gradio['lora_menu'] = gr.Dropdown(multiselect=True, choices=utils.get_available_loras(), value=shared.lora_names, label='LoRA(s)', elem_classes='slim-dropdown', interactive=not mu)
-                            ui.create_refresh_button(shared.gradio['lora_menu'], lambda: None, lambda: {'choices': utils.get_available_loras(), 'value': shared.lora_names}, 'refresh-button', interactive=not mu)
-                            shared.gradio['lora_menu_apply'] = gr.Button(value='Apply LoRAs', elem_classes='refresh-button', interactive=not mu)
-
-        with gr.Row():
-            with gr.Column():
-                shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=loaders.loaders_and_params.keys(), value=None)
-                with gr.Box():
-                    with gr.Row():
-                        with gr.Column():
-                            for i in range(len(total_mem)):
-                                shared.gradio[f'gpu_memory_{i}'] = gr.Slider(label=f"gpu-memory in MiB for device :{i}", maximum=total_mem[i], value=default_gpu_mem[i])
-
-                            shared.gradio['cpu_memory'] = gr.Slider(label="cpu-memory in MiB", maximum=total_cpu_mem, value=default_cpu_mem)
-                            shared.gradio['transformers_info'] = gr.Markdown('load-in-4bit params:')
-                            shared.gradio['compute_dtype'] = gr.Dropdown(label="compute_dtype", choices=["bfloat16", "float16", "float32"], value=shared.args.compute_dtype)
-                            shared.gradio['quant_type'] = gr.Dropdown(label="quant_type", choices=["nf4", "fp4"], value=shared.args.quant_type)
-
-                            shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
-                            shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=32768, step=256, label="n_ctx", value=shared.args.n_ctx)
-                            shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
-                            shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=32, value=shared.args.threads_batch)
-                            shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
-
-                            shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=str(shared.args.wbits) if shared.args.wbits > 0 else "None")
-                            shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=str(shared.args.groupsize) if shared.args.groupsize > 0 else "None")
-                            shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None"], value=shared.args.model_type or "None")
-                            shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
-                            shared.gradio['autogptq_info'] = gr.Markdown('* ExLlama_HF is recommended over AutoGPTQ for models derived from LLaMA.')
-                            shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
-                            shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=32768, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
-                            shared.gradio['alpha_value'] = gr.Slider(label='alpha_value', minimum=1, maximum=8, step=0.05, info='Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.', value=shared.args.alpha_value)
-                            shared.gradio['rope_freq_base'] = gr.Slider(label='rope_freq_base', minimum=0, maximum=1000000, step=1000, info='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63)', value=shared.args.rope_freq_base)
-                            shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=1, info='Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.', value=shared.args.compress_pos_emb)
-
-                        with gr.Column():
-                            shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
-                            shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Fuses layers for AutoAWQ. Disable if running low on VRAM.')
-                            shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')
-                            shared.gradio['no_use_cuda_fp16'] = gr.Checkbox(label="no_use_cuda_fp16", value=shared.args.no_use_cuda_fp16, info='This can make models faster on some systems.')
-                            shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='\'desc_act\', \'wbits\', and \'groupsize\' are used for old models without a quantize_config.json.')
-                            shared.gradio['mul_mat_q'] = gr.Checkbox(label="mul_mat_q", value=shared.args.mul_mat_q, info='Recommended in most cases. Improves generation speed by 10-20%.')
-                            shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Create an additional cache for CFG negative prompts.')
-                            shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
-                            shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
-                            shared.gradio['numa'] = gr.Checkbox(label="numa", value=shared.args.numa, info='NUMA support can help on some systems with non-uniform memory access.')
-                            shared.gradio['cpu'] = gr.Checkbox(label="cpu", value=shared.args.cpu)
-                            shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
-                            shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
-                            shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
-                            shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
-                            shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
-                            shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
-                            shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17')
-                            shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
-                            shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.')
-                            shared.gradio['use_fast'] = gr.Checkbox(label="use_fast", value=shared.args.use_fast, info='Set use_fast=True while loading the tokenizer. May trigger a conversion that takes several minutes.')
-                            shared.gradio['disable_exllama'] = gr.Checkbox(label="disable_exllama", value=shared.args.disable_exllama, info='Disable ExLlama kernel.')
-                            shared.gradio['gptq_for_llama_info'] = gr.Markdown('GPTQ-for-LLaMa support is currently only kept for compatibility with older GPUs. AutoGPTQ or ExLlama is preferred when compatible. GPTQ-for-LLaMa is installed by default with the webui on supported systems. Otherwise, it has to be installed manually following the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#installation-1).')
-                            shared.gradio['exllama_info'] = gr.Markdown('For more information, consult the [docs](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
-                            shared.gradio['exllama_HF_info'] = gr.Markdown('ExLlama_HF is a wrapper that lets you use ExLlama like a Transformers model, which means it can use the Transformers samplers. It\'s a bit slower than the regular ExLlama.')
-                            shared.gradio['llamacpp_HF_info'] = gr.Markdown('llamacpp_HF loads llama.cpp as a Transformers model. To use it, you need to download a tokenizer.\n\nOption 1: download `oobabooga/llama-tokenizer` under "Download model or LoRA". That\'s a default Llama tokenizer.\n\nOption 2: place your .gguf in a subfolder of models/ along with these 3 files: tokenizer.model, tokenizer_config.json, and special_tokens_map.json. This takes precedence over Option 1.')
-
-            with gr.Column():
-                with gr.Row():
-                    shared.gradio['autoload_model'] = gr.Checkbox(value=shared.settings['autoload_model'], label='Autoload the model', info='Whether to load the model as soon as it is selected in the Model dropdown.', interactive=not mu)
-
-                shared.gradio['custom_model_menu'] = gr.Textbox(label="Download model or LoRA", info="Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main. To download a single file, enter its name in the second box.", interactive=not mu)
-                shared.gradio['download_specific_file'] = gr.Textbox(placeholder="File name (for GGUF models)", show_label=False, max_lines=1, interactive=not mu)
-                with gr.Row():
-                    shared.gradio['download_model_button'] = gr.Button("Download", variant='primary', interactive=not mu)
-                    shared.gradio['get_file_list'] = gr.Button("Get file list", interactive=not mu)
-
-                with gr.Row():
-                    shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
-
-
-def create_event_handlers():
-    shared.gradio['loader'].change(
-        loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params())).then(
-        lambda value: gr.update(choices=loaders.get_model_types(value)), gradio('loader'), gradio('model_type'))
-
-    # In this event handler, the interface state is read and updated
-    # with the model defaults (if any), and then the model is loaded
-    # unless "autoload_model" is unchecked
-    shared.gradio['model_menu'].change(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        apply_model_settings_to_state, gradio('model_menu', 'interface_state'), gradio('interface_state')).then(
-        ui.apply_interface_values, gradio('interface_state'), gradio(ui.list_interface_input_elements()), show_progress=False).then(
-        update_model_parameters, gradio('interface_state'), None).then(
-        load_model_wrapper, gradio('model_menu', 'loader', 'autoload_model'), gradio('model_status'), show_progress=False).success(
-        update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-        lambda x: x, gradio('loader'), gradio('filter_by_loader'))
-
-    shared.gradio['load_model'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        update_model_parameters, gradio('interface_state'), None).then(
-        partial(load_model_wrapper, autoload=True), gradio('model_menu', 'loader'), gradio('model_status'), show_progress=False).success(
-        update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-        lambda x: x, gradio('loader'), gradio('filter_by_loader'))
-
-    shared.gradio['reload_model'].click(
-        unload_model, None, None).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        update_model_parameters, gradio('interface_state'), None).then(
-        partial(load_model_wrapper, autoload=True), gradio('model_menu', 'loader'), gradio('model_status'), show_progress=False).success(
-        update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-        lambda x: x, gradio('loader'), gradio('filter_by_loader'))
-
-    shared.gradio['unload_model'].click(
-        unload_model, None, None).then(
-        lambda: "Model unloaded", None, gradio('model_status'))
-
-    shared.gradio['save_model_settings'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        save_model_settings, gradio('model_menu', 'interface_state'), gradio('model_status'), show_progress=False)
-
-    shared.gradio['lora_menu_apply'].click(load_lora_wrapper, gradio('lora_menu'), gradio('model_status'), show_progress=False)
-    shared.gradio['download_model_button'].click(download_model_wrapper, gradio('custom_model_menu', 'download_specific_file'), gradio('model_status'), show_progress=True)
-    shared.gradio['get_file_list'].click(partial(download_model_wrapper, return_links=True), gradio('custom_model_menu', 'download_specific_file'), gradio('model_status'), show_progress=True)
-    shared.gradio['autoload_model'].change(lambda x: gr.update(visible=not x), gradio('autoload_model'), gradio('load_model'))
-
-
-def load_model_wrapper(selected_model, loader, autoload=False):
-    if not autoload:
-        yield f"The settings for `{selected_model}` have been updated.\n\nClick on \"Load\" to load it."
-        return
-
-    if selected_model == 'None':
-        yield "No model selected"
-    else:
-        try:
-            yield f"Loading `{selected_model}`..."
-            shared.model_name = selected_model
-            unload_model()
-            if selected_model != '':
-                shared.model, shared.tokenizer = load_model(shared.model_name, loader)
-
-            if shared.model is not None:
-                output = f"Successfully loaded `{selected_model}`."
-
-                settings = get_model_metadata(selected_model)
-                if 'instruction_template' in settings:
-                    output += '\n\nIt seems to be an instruction-following model with template "{}". In the chat tab, instruct or chat-instruct modes should be used.'.format(settings['instruction_template'])
-
-                yield output
-            else:
-                yield f"Failed to load `{selected_model}`."
-        except:
-            exc = traceback.format_exc()
-            logger.error('Failed to load the model.')
-            print(exc)
-            yield exc.replace('\n', '\n\n')
-
-
-def load_lora_wrapper(selected_loras):
-    yield ("Applying the following LoRAs to {}:\n\n{}".format(shared.model_name, '\n'.join(selected_loras)))
-    add_lora_to_model(selected_loras)
-    yield ("Successfully applied the LoRAs")
-
-
-def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), return_links=False, check=False):
-    try:
-        downloader_module = importlib.import_module("download-model")
-        downloader = downloader_module.ModelDownloader()
-
-        progress(0.0)
-        yield ("Cleaning up the model/branch names")
-        model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)
-
-        yield ("Getting the download links from Hugging Face")
-        links, sha256, is_lora, is_llamacpp = downloader.get_download_links_from_huggingface(model, branch, text_only=False, specific_file=specific_file)
-
-        if return_links:
-            yield '\n\n'.join([f"`{Path(link).name}`" for link in links])
-            return
-
-        yield ("Getting the output folder")
-        base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir
-        output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp, base_folder=base_folder)
-
-        if check:
-            progress(0.5)
-            yield ("Checking previously downloaded files")
-            downloader.check_model_files(model, branch, links, sha256, output_folder)
-            progress(1.0)
-        else:
-            yield (f"Downloading file{'s' if len(links) > 1 else ''} to `{output_folder}/`")
-            downloader.download_model_files(model, branch, links, sha256, output_folder, progress_bar=progress, threads=1, is_llamacpp=is_llamacpp)
-            yield ("Done!")
-    except:
-        progress(1.0)
-        yield traceback.format_exc().replace('\n', '\n\n')
-
-
-def update_truncation_length(current_length, state):
-    if 'loader' in state:
-        if state['loader'].lower().startswith('exllama'):
-            return state['max_seq_len']
-        elif state['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
-            return state['n_ctx']
-
-    return current_length
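Note: most of the wiring in `create_event_handlers()` relies on Gradio's event chaining: `.then()` queues a follow-up step unconditionally, while `.success()` only runs if the previous step did not raise. A minimal, self-contained sketch of that pattern (toy functions, not the webui's own):

```python
import gradio as gr

def load(name):
    if not name:
        raise ValueError("no model selected")  # .success() steps are skipped on error
    return f"Loading `{name}`..."

with gr.Blocks() as demo:
    name = gr.Textbox(label="Model")
    status = gr.Markdown()
    gr.Button("Load").click(
        load, name, status).success(
        lambda: "Ready.", None, status)

demo.launch()
```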
spaces/Anonymous-sub/Rerender/ControlNet/tutorial_train.py
DELETED
@@ -1,35 +0,0 @@
-from share import *
-
-import pytorch_lightning as pl
-from torch.utils.data import DataLoader
-from tutorial_dataset import MyDataset
-from cldm.logger import ImageLogger
-from cldm.model import create_model, load_state_dict
-
-
-# Configs
-resume_path = './models/control_sd15_ini.ckpt'
-batch_size = 4
-logger_freq = 300
-learning_rate = 1e-5
-sd_locked = True
-only_mid_control = False
-
-
-# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
-model = create_model('./models/cldm_v15.yaml').cpu()
-model.load_state_dict(load_state_dict(resume_path, location='cpu'))
-model.learning_rate = learning_rate
-model.sd_locked = sd_locked
-model.only_mid_control = only_mid_control
-
-
-# Misc
-dataset = MyDataset()
-dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)
-logger = ImageLogger(batch_frequency=logger_freq)
-trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger])
-
-
-# Train!
-trainer.fit(model, dataloader)
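Note: the script's key move is instantiating the model on CPU and letting PyTorch Lightning handle device placement during `fit`. A toy sketch of the same pattern (hypothetical module and data; `gpus=1` was the Lightning 1.x spelling, newer releases use `accelerator`/`devices`):

```python
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, TensorDataset

class ToyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.net(x), y)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-5)

model = ToyModule().cpu()  # weights start on CPU, as in the script above
data = DataLoader(TensorDataset(torch.randn(64, 4), torch.randn(64, 1)), batch_size=4, shuffle=True)
trainer = pl.Trainer(accelerator="auto", devices=1, precision=32, max_epochs=1)
trainer.fit(model, data)  # Lightning moves the module to the chosen device
```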
spaces/Apex-X/nono/CONTRIBUTING.md
DELETED
@@ -1,21 +0,0 @@
-## Pull Requests
-
-### Do
-
-- ...consider fixing bugs over adding features
-- ...open one pull request per feature or improvement
-- ...consult us about implementation details
-- ...do proper testing before you submit your code
-- ...resolve failed CI pipelines
-
-### Don't
-
-- ...introduce fundamental changes in terms of software architecture
-- ...introduce OOP - we accept functional programming only
-- ...ignore given requirements or try to work around them
-- ...submit code to a development branch without consulting us
-- ...submit massive amounts of code changes
-- ...submit a proof of concept
-- ...submit code that uses undocumented and private APIs
-- ...solve third party issues in our project
-- ...comment what your code does - use proper naming instead
spaces/Ariharasudhan/YoloV5/utils/dataloaders.py
DELETED
@@ -1,1221 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Dataloaders and dataset utils
|
4 |
-
"""
|
5 |
-
|
6 |
-
import contextlib
|
7 |
-
import glob
|
8 |
-
import hashlib
|
9 |
-
import json
|
10 |
-
import math
|
11 |
-
import os
|
12 |
-
import random
|
13 |
-
import shutil
|
14 |
-
import time
|
15 |
-
from itertools import repeat
|
16 |
-
from multiprocessing.pool import Pool, ThreadPool
|
17 |
-
from pathlib import Path
|
18 |
-
from threading import Thread
|
19 |
-
from urllib.parse import urlparse
|
20 |
-
|
21 |
-
import numpy as np
|
22 |
-
import psutil
|
23 |
-
import torch
|
24 |
-
import torch.nn.functional as F
|
25 |
-
import torchvision
|
26 |
-
import yaml
|
27 |
-
from PIL import ExifTags, Image, ImageOps
|
28 |
-
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
|
29 |
-
from tqdm import tqdm
|
30 |
-
|
31 |
-
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
|
32 |
-
cutout, letterbox, mixup, random_perspective)
|
33 |
-
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
|
34 |
-
colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy,
|
35 |
-
xywhn2xyxy, xyxy2xywhn)
|
36 |
-
from utils.torch_utils import torch_distributed_zero_first
|
37 |
-
|
38 |
-
# Parameters
|
39 |
-
HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
|
40 |
-
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
|
41 |
-
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
|
42 |
-
BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format
|
43 |
-
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
|
44 |
-
RANK = int(os.getenv('RANK', -1))
|
45 |
-
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
|
46 |
-
|
47 |
-
# Get orientation exif tag
|
48 |
-
for orientation in ExifTags.TAGS.keys():
|
49 |
-
if ExifTags.TAGS[orientation] == 'Orientation':
|
50 |
-
break
|
51 |
-
|
52 |
-
|
53 |
-
def get_hash(paths):
|
54 |
-
# Returns a single hash value of a list of paths (files or dirs)
|
55 |
-
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
|
56 |
-
h = hashlib.md5(str(size).encode()) # hash sizes
|
57 |
-
h.update(''.join(paths).encode()) # hash paths
|
58 |
-
return h.hexdigest() # return hash
|
59 |
-
|
60 |
-
|
61 |
-
def exif_size(img):
|
62 |
-
# Returns exif-corrected PIL size
|
63 |
-
s = img.size # (width, height)
|
64 |
-
with contextlib.suppress(Exception):
|
65 |
-
rotation = dict(img._getexif().items())[orientation]
|
66 |
-
if rotation in [6, 8]: # rotation 270 or 90
|
67 |
-
s = (s[1], s[0])
|
68 |
-
return s
|
69 |
-
|
70 |
-
|
71 |
-
def exif_transpose(image):
|
72 |
-
"""
|
73 |
-
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
|
74 |
-
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
|
75 |
-
|
76 |
-
:param image: The image to transpose.
|
77 |
-
:return: An image.
|
78 |
-
"""
|
79 |
-
exif = image.getexif()
|
80 |
-
orientation = exif.get(0x0112, 1) # default 1
|
81 |
-
if orientation > 1:
|
82 |
-
method = {
|
83 |
-
2: Image.FLIP_LEFT_RIGHT,
|
84 |
-
3: Image.ROTATE_180,
|
85 |
-
4: Image.FLIP_TOP_BOTTOM,
|
86 |
-
5: Image.TRANSPOSE,
|
87 |
-
6: Image.ROTATE_270,
|
88 |
-
7: Image.TRANSVERSE,
|
89 |
-
8: Image.ROTATE_90}.get(orientation)
|
90 |
-
if method is not None:
|
91 |
-
image = image.transpose(method)
|
92 |
-
del exif[0x0112]
|
93 |
-
image.info["exif"] = exif.tobytes()
|
94 |
-
return image
|
95 |
-
|
96 |
-
|
97 |
-
def seed_worker(worker_id):
|
98 |
-
# Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
|
99 |
-
worker_seed = torch.initial_seed() % 2 ** 32
|
100 |
-
np.random.seed(worker_seed)
|
101 |
-
random.seed(worker_seed)
|
102 |
-
|
103 |
-
|
104 |
-
def create_dataloader(path,
|
105 |
-
imgsz,
|
106 |
-
batch_size,
|
107 |
-
stride,
|
108 |
-
single_cls=False,
|
109 |
-
hyp=None,
|
110 |
-
augment=False,
|
111 |
-
cache=False,
|
112 |
-
pad=0.0,
|
113 |
-
rect=False,
|
114 |
-
rank=-1,
|
115 |
-
workers=8,
|
116 |
-
image_weights=False,
|
117 |
-
quad=False,
|
118 |
-
prefix='',
|
119 |
-
shuffle=False):
|
120 |
-
if rect and shuffle:
|
121 |
-
LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
|
122 |
-
shuffle = False
|
123 |
-
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
|
124 |
-
dataset = LoadImagesAndLabels(
|
125 |
-
path,
|
126 |
-
imgsz,
|
127 |
-
batch_size,
|
128 |
-
augment=augment, # augmentation
|
129 |
-
hyp=hyp, # hyperparameters
|
130 |
-
rect=rect, # rectangular batches
|
131 |
-
cache_images=cache,
|
132 |
-
single_cls=single_cls,
|
133 |
-
stride=int(stride),
|
134 |
-
pad=pad,
|
135 |
-
image_weights=image_weights,
|
136 |
-
prefix=prefix)
|
137 |
-
|
138 |
-
batch_size = min(batch_size, len(dataset))
|
139 |
-
nd = torch.cuda.device_count() # number of CUDA devices
|
140 |
-
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
|
141 |
-
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
|
142 |
-
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
|
143 |
-
generator = torch.Generator()
|
144 |
-
generator.manual_seed(6148914691236517205 + RANK)
|
145 |
-
return loader(dataset,
|
146 |
-
batch_size=batch_size,
|
147 |
-
shuffle=shuffle and sampler is None,
|
148 |
-
num_workers=nw,
|
149 |
-
sampler=sampler,
|
150 |
-
pin_memory=PIN_MEMORY,
|
151 |
-
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
|
152 |
-
worker_init_fn=seed_worker,
|
153 |
-
generator=generator), dataset
|
154 |
-
|
155 |
-
|
156 |
-
class InfiniteDataLoader(dataloader.DataLoader):
|
157 |
-
""" Dataloader that reuses workers
|
158 |
-
|
159 |
-
Uses same syntax as vanilla DataLoader
|
160 |
-
"""
|
161 |
-
|
162 |
-
def __init__(self, *args, **kwargs):
|
163 |
-
super().__init__(*args, **kwargs)
|
164 |
-
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
|
165 |
-
self.iterator = super().__iter__()
|
166 |
-
|
167 |
-
def __len__(self):
|
168 |
-
return len(self.batch_sampler.sampler)
|
169 |
-
|
170 |
-
def __iter__(self):
|
171 |
-
for _ in range(len(self)):
|
172 |
-
yield next(self.iterator)
|
173 |
-
|
174 |
-
|
175 |
-
class _RepeatSampler:
|
176 |
-
""" Sampler that repeats forever
|
177 |
-
|
178 |
-
Args:
|
179 |
-
sampler (Sampler)
|
180 |
-
"""
|
181 |
-
|
182 |
-
def __init__(self, sampler):
|
183 |
-
self.sampler = sampler
|
184 |
-
|
185 |
-
def __iter__(self):
|
186 |
-
while True:
|
187 |
-
yield from iter(self.sampler)
|
188 |
-
|
189 |
-
|
190 |
-
class LoadScreenshots:
|
191 |
-
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
|
192 |
-
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
|
193 |
-
# source = [screen_number left top width height] (pixels)
|
194 |
-
check_requirements('mss')
|
195 |
-
import mss
|
196 |
-
|
197 |
-
source, *params = source.split()
|
198 |
-
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
|
199 |
-
if len(params) == 1:
|
200 |
-
self.screen = int(params[0])
|
201 |
-
elif len(params) == 4:
|
202 |
-
left, top, width, height = (int(x) for x in params)
|
203 |
-
elif len(params) == 5:
|
204 |
-
self.screen, left, top, width, height = (int(x) for x in params)
|
205 |
-
self.img_size = img_size
|
206 |
-
self.stride = stride
|
207 |
-
self.transforms = transforms
|
208 |
-
self.auto = auto
|
209 |
-
self.mode = 'stream'
|
210 |
-
self.frame = 0
|
211 |
-
self.sct = mss.mss()
|
212 |
-
|
213 |
-
# Parse monitor shape
|
214 |
-
monitor = self.sct.monitors[self.screen]
|
215 |
-
self.top = monitor["top"] if top is None else (monitor["top"] + top)
|
216 |
-
self.left = monitor["left"] if left is None else (monitor["left"] + left)
|
217 |
-
self.width = width or monitor["width"]
|
218 |
-
self.height = height or monitor["height"]
|
219 |
-
self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height}
|
220 |
-
|
221 |
-
def __iter__(self):
|
222 |
-
return self
|
223 |
-
|
224 |
-
def __next__(self):
|
225 |
-
# mss screen capture: get raw pixels from the screen as np array
|
226 |
-
im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
|
227 |
-
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
|
228 |
-
|
229 |
-
if self.transforms:
|
230 |
-
im = self.transforms(im0) # transforms
|
231 |
-
else:
|
232 |
-
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
|
233 |
-
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
234 |
-
im = np.ascontiguousarray(im) # contiguous
|
235 |
-
self.frame += 1
|
236 |
-
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
|
237 |
-
|
238 |
-
|
239 |
-
class LoadImages:
|
240 |
-
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
|
241 |
-
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
|
242 |
-
files = []
|
243 |
-
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
|
244 |
-
p = str(Path(p).resolve())
|
245 |
-
if '*' in p:
|
246 |
-
files.extend(sorted(glob.glob(p, recursive=True))) # glob
|
247 |
-
elif os.path.isdir(p):
|
248 |
-
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
|
249 |
-
elif os.path.isfile(p):
|
250 |
-
files.append(p) # files
|
251 |
-
else:
|
252 |
-
raise FileNotFoundError(f'{p} does not exist')
|
253 |
-
|
254 |
-
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
|
255 |
-
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
|
256 |
-
ni, nv = len(images), len(videos)
|
257 |
-
|
258 |
-
self.img_size = img_size
|
259 |
-
self.stride = stride
|
260 |
-
self.files = images + videos
|
261 |
-
self.nf = ni + nv # number of files
|
262 |
-
self.video_flag = [False] * ni + [True] * nv
|
263 |
-
self.mode = 'image'
|
264 |
-
self.auto = auto
|
265 |
-
self.transforms = transforms # optional
|
266 |
-
self.vid_stride = vid_stride # video frame-rate stride
|
267 |
-
if any(videos):
|
268 |
-
self._new_video(videos[0]) # new video
|
269 |
-
else:
|
270 |
-
self.cap = None
|
271 |
-
assert self.nf > 0, f'No images or videos found in {p}. ' \
|
272 |
-
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
|
273 |
-
|
274 |
-
def __iter__(self):
|
275 |
-
self.count = 0
|
276 |
-
return self
|
277 |
-
|
278 |
-
def __next__(self):
|
279 |
-
if self.count == self.nf:
|
280 |
-
raise StopIteration
|
281 |
-
path = self.files[self.count]
|
282 |
-
|
283 |
-
if self.video_flag[self.count]:
|
284 |
-
# Read video
|
285 |
-
self.mode = 'video'
|
286 |
-
for _ in range(self.vid_stride):
|
287 |
-
self.cap.grab()
|
288 |
-
ret_val, im0 = self.cap.retrieve()
|
289 |
-
while not ret_val:
|
290 |
-
self.count += 1
|
291 |
-
self.cap.release()
|
292 |
-
if self.count == self.nf: # last video
|
293 |
-
raise StopIteration
|
294 |
-
path = self.files[self.count]
|
295 |
-
self._new_video(path)
|
296 |
-
ret_val, im0 = self.cap.read()
|
297 |
-
|
298 |
-
self.frame += 1
|
299 |
-
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
|
300 |
-
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
|
301 |
-
|
302 |
-
else:
|
303 |
-
# Read image
|
304 |
-
self.count += 1
|
305 |
-
im0 = cv2.imread(path) # BGR
|
306 |
-
assert im0 is not None, f'Image Not Found {path}'
|
307 |
-
s = f'image {self.count}/{self.nf} {path}: '
|
308 |
-
|
309 |
-
if self.transforms:
|
310 |
-
im = self.transforms(im0) # transforms
|
311 |
-
else:
|
312 |
-
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
|
313 |
-
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
314 |
-
im = np.ascontiguousarray(im) # contiguous
|
315 |
-
|
316 |
-
return path, im, im0, self.cap, s
|
317 |
-
|
318 |
-
def _new_video(self, path):
|
319 |
-
# Create a new video capture object
|
320 |
-
self.frame = 0
|
321 |
-
self.cap = cv2.VideoCapture(path)
|
322 |
-
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
|
323 |
-
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
|
324 |
-
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
|
325 |
-
|
326 |
-
def _cv2_rotate(self, im):
|
327 |
-
# Rotate a cv2 video manually
|
328 |
-
if self.orientation == 0:
|
329 |
-
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
|
330 |
-
elif self.orientation == 180:
|
331 |
-
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
|
332 |
-
elif self.orientation == 90:
|
333 |
-
return cv2.rotate(im, cv2.ROTATE_180)
|
334 |
-
return im
|
335 |
-
|
336 |
-
def __len__(self):
|
337 |
-
return self.nf # number of files
|
338 |
-
|
339 |
-
|
340 |
-
class LoadStreams:
|
341 |
-
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
|
342 |
-
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
|
343 |
-
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
|
344 |
-
self.mode = 'stream'
|
345 |
-
self.img_size = img_size
|
346 |
-
self.stride = stride
|
347 |
-
self.vid_stride = vid_stride # video frame-rate stride
|
348 |
-
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
|
349 |
-
n = len(sources)
|
350 |
-
self.sources = [clean_str(x) for x in sources] # clean source names for later
|
351 |
-
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
|
352 |
-
for i, s in enumerate(sources): # index, source
|
353 |
-
# Start thread to read frames from video stream
|
354 |
-
st = f'{i + 1}/{n}: {s}... '
|
355 |
-
if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
|
356 |
-
# YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
|
357 |
-
check_requirements(('pafy', 'youtube_dl==2020.12.2'))
|
358 |
-
import pafy
|
359 |
-
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
|
360 |
-
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
|
361 |
-
if s == 0:
|
362 |
-
assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
|
363 |
-
assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
|
364 |
-
cap = cv2.VideoCapture(s)
|
365 |
-
assert cap.isOpened(), f'{st}Failed to open {s}'
|
366 |
-
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
367 |
-
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
368 |
-
fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
|
369 |
-
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
|
370 |
-
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
|
371 |
-
|
372 |
-
_, self.imgs[i] = cap.read() # guarantee first frame
|
373 |
-
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
|
374 |
-
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
|
375 |
-
self.threads[i].start()
|
376 |
-
LOGGER.info('') # newline
|
377 |
-
|
378 |
-
# check for common shapes
|
379 |
-
s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
|
380 |
-
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
|
381 |
-
self.auto = auto and self.rect
|
382 |
-
self.transforms = transforms # optional
|
383 |
-
if not self.rect:
|
384 |
-
LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
|
385 |
-
|
386 |
-
def update(self, i, cap, stream):
|
387 |
-
# Read stream `i` frames in daemon thread
|
388 |
-
n, f = 0, self.frames[i] # frame number, frame array
|
389 |
-
while cap.isOpened() and n < f:
|
390 |
-
n += 1
|
391 |
-
cap.grab() # .read() = .grab() followed by .retrieve()
|
392 |
-
if n % self.vid_stride == 0:
|
393 |
-
success, im = cap.retrieve()
|
394 |
-
if success:
|
395 |
-
self.imgs[i] = im
|
396 |
-
else:
|
397 |
-
LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
|
398 |
-
self.imgs[i] = np.zeros_like(self.imgs[i])
|
399 |
-
cap.open(stream) # re-open stream if signal was lost
|
400 |
-
time.sleep(0.0) # wait time
|
401 |
-
|
402 |
-
def __iter__(self):
|
403 |
-
self.count = -1
|
404 |
-
return self
|
405 |
-
|
406 |
-
def __next__(self):
|
407 |
-
self.count += 1
|
408 |
-
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
|
409 |
-
cv2.destroyAllWindows()
|
410 |
-
raise StopIteration
|
411 |
-
|
412 |
-
im0 = self.imgs.copy()
|
413 |
-
if self.transforms:
|
414 |
-
im = np.stack([self.transforms(x) for x in im0]) # transforms
|
415 |
-
else:
|
416 |
-
im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
|
417 |
-
im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
|
418 |
-
im = np.ascontiguousarray(im) # contiguous
|
419 |
-
|
420 |
-
return self.sources, im, im0, None, ''
|
421 |
-
|
422 |
-
def __len__(self):
|
423 |
-
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
|
424 |
-
|
425 |
-
|
426 |
-
def img2label_paths(img_paths):
|
427 |
-
# Define label paths as a function of image paths
|
428 |
-
sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
|
429 |
-
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
|
430 |
-
|
431 |
-
|
432 |
-
class LoadImagesAndLabels(Dataset):
|
433 |
-
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
|
434 |
-
cache_version = 0.6 # dataset labels *.cache version
|
435 |
-
rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
|
436 |
-
|
437 |
-
def __init__(self,
|
438 |
-
path,
|
439 |
-
img_size=640,
|
440 |
-
batch_size=16,
|
441 |
-
augment=False,
|
442 |
-
hyp=None,
|
443 |
-
rect=False,
|
444 |
-
image_weights=False,
|
445 |
-
cache_images=False,
|
446 |
-
single_cls=False,
|
447 |
-
stride=32,
|
448 |
-
pad=0.0,
|
449 |
-
min_items=0,
|
450 |
-
prefix=''):
|
451 |
-
self.img_size = img_size
|
452 |
-
self.augment = augment
|
453 |
-
self.hyp = hyp
|
454 |
-
self.image_weights = image_weights
|
455 |
-
self.rect = False if image_weights else rect
|
456 |
-
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
|
457 |
-
self.mosaic_border = [-img_size // 2, -img_size // 2]
|
458 |
-
self.stride = stride
|
459 |
-
self.path = path
|
460 |
-
self.albumentations = Albumentations(size=img_size) if augment else None
|
461 |
-
|
462 |
-
try:
|
463 |
-
f = [] # image files
|
464 |
-
for p in path if isinstance(path, list) else [path]:
|
465 |
-
p = Path(p) # os-agnostic
|
466 |
-
if p.is_dir(): # dir
|
467 |
-
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
|
468 |
-
# f = list(p.rglob('*.*')) # pathlib
|
469 |
-
elif p.is_file(): # file
|
470 |
-
with open(p) as t:
|
471 |
-
t = t.read().strip().splitlines()
|
472 |
-
parent = str(p.parent) + os.sep
|
473 |
-
f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path
|
474 |
-
# f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)
|
475 |
-
else:
|
476 |
-
raise FileNotFoundError(f'{prefix}{p} does not exist')
|
477 |
-
self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
|
478 |
-
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
|
479 |
-
assert self.im_files, f'{prefix}No images found'
|
480 |
-
except Exception as e:
|
481 |
-
raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e
|
482 |
-
|
483 |
-
# Check cache
|
484 |
-
self.label_files = img2label_paths(self.im_files) # labels
|
485 |
-
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
|
486 |
-
try:
|
487 |
-
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
|
488 |
-
assert cache['version'] == self.cache_version # matches current version
|
489 |
-
assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash
|
490 |
-
except Exception:
|
491 |
-
cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops
|
492 |
-
|
493 |
-
# Display cache
|
494 |
-
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
|
495 |
-
if exists and LOCAL_RANK in {-1, 0}:
|
496 |
-
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt"
|
497 |
-
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results
|
498 |
-
if cache['msgs']:
|
499 |
-
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
|
500 |
-
assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
|
501 |
-
|
502 |
-
# Read cache
|
503 |
-
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
|
504 |
-
labels, shapes, self.segments = zip(*cache.values())
|
505 |
-
nl = len(np.concatenate(labels, 0)) # number of labels
|
506 |
-
assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
|
507 |
-
self.labels = list(labels)
|
508 |
-
self.shapes = np.array(shapes)
|
509 |
-
self.im_files = list(cache.keys()) # update
|
510 |
-
self.label_files = img2label_paths(cache.keys()) # update
|
511 |
-
|
512 |
-
# Filter images
|
513 |
-
if min_items:
|
514 |
-
include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
|
515 |
-
LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
|
516 |
-
self.im_files = [self.im_files[i] for i in include]
|
517 |
-
self.label_files = [self.label_files[i] for i in include]
|
518 |
-
self.labels = [self.labels[i] for i in include]
|
519 |
-
self.segments = [self.segments[i] for i in include]
|
520 |
-
self.shapes = self.shapes[include] # wh
|
521 |
-
|
522 |
-
# Create indices
|
523 |
-
n = len(self.shapes) # number of images
|
524 |
-
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
|
525 |
-
nb = bi[-1] + 1 # number of batches
|
526 |
-
self.batch = bi # batch index of image
|
527 |
-
self.n = n
|
528 |
-
self.indices = range(n)
|
529 |
-
|
530 |
-
# Update labels
|
531 |
-
include_class = [] # filter labels to include only these classes (optional)
|
532 |
-
include_class_array = np.array(include_class).reshape(1, -1)
|
533 |
-
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
|
534 |
-
if include_class:
|
535 |
-
j = (label[:, 0:1] == include_class_array).any(1)
|
536 |
-
self.labels[i] = label[j]
|
537 |
-
if segment:
|
538 |
-
self.segments[i] = segment[j]
|
539 |
-
if single_cls: # single-class training, merge all classes into 0
|
540 |
-
self.labels[i][:, 0] = 0
|
541 |
-
if segment:
|
542 |
-
self.segments[i][:, 0] = 0
|
543 |
-
|
544 |
-
# Rectangular Training
|
545 |
-
if self.rect:
|
546 |
-
# Sort by aspect ratio
|
547 |
-
s = self.shapes # wh
|
548 |
-
ar = s[:, 1] / s[:, 0] # aspect ratio
|
549 |
-
irect = ar.argsort()
|
550 |
-
self.im_files = [self.im_files[i] for i in irect]
|
551 |
-
self.label_files = [self.label_files[i] for i in irect]
|
552 |
-
self.labels = [self.labels[i] for i in irect]
|
553 |
-
self.segments = [self.segments[i] for i in irect]
|
554 |
-
self.shapes = s[irect] # wh
|
555 |
-
ar = ar[irect]
|
556 |
-
|
557 |
-
# Set training image shapes
|
558 |
-
shapes = [[1, 1]] * nb
|
559 |
-
for i in range(nb):
|
560 |
-
ari = ar[bi == i]
|
561 |
-
mini, maxi = ari.min(), ari.max()
|
562 |
-
if maxi < 1:
|
563 |
-
shapes[i] = [maxi, 1]
|
564 |
-
elif mini > 1:
|
565 |
-
shapes[i] = [1, 1 / mini]
|
566 |
-
|
567 |
-
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
|
568 |
-
|
569 |
-
# Cache images into RAM/disk for faster training
|
570 |
-
if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
|
571 |
-
cache_images = False
|
572 |
-
self.ims = [None] * n
|
573 |
-
self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
|
574 |
-
if cache_images:
|
575 |
-
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
|
576 |
-
self.im_hw0, self.im_hw = [None] * n, [None] * n
|
577 |
-
fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
|
578 |
-
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
|
579 |
-
pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0)
|
580 |
-
for i, x in pbar:
|
581 |
-
if cache_images == 'disk':
|
582 |
-
b += self.npy_files[i].stat().st_size
|
583 |
-
else: # 'ram'
|
584 |
-
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
|
585 |
-
b += self.ims[i].nbytes
|
586 |
-
pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
|
587 |
-
pbar.close()
|
588 |
-
|
589 |
-
def check_cache_ram(self, safety_margin=0.1, prefix=''):
|
590 |
-
# Check image caching requirements vs available memory
|
591 |
-
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
|
592 |
-
n = min(self.n, 30) # extrapolate from 30 random images
|
593 |
-
for _ in range(n):
|
594 |
-
im = cv2.imread(random.choice(self.im_files)) # sample image
|
595 |
-
ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio
|
596 |
-
b += im.nbytes * ratio ** 2
|
597 |
-
mem_required = b * self.n / n # GB required to cache dataset into RAM
|
598 |
-
mem = psutil.virtual_memory()
|
599 |
-
cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
|
600 |
-
if not cache:
|
601 |
-
LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, "
|
602 |
-
f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, "
|
603 |
-
f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
|
604 |
-
return cache
|
605 |
-
|
606 |
-
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
|
607 |
-
# Cache dataset labels, check images and read shapes
|
608 |
-
x = {} # dict
|
609 |
-
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
|
610 |
-
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
|
611 |
-
with Pool(NUM_THREADS) as pool:
|
612 |
-
pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
|
613 |
-
desc=desc,
|
614 |
-
total=len(self.im_files),
|
615 |
-
bar_format=BAR_FORMAT)
|
616 |
-
for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
|
617 |
-
nm += nm_f
|
618 |
-
nf += nf_f
|
619 |
-
ne += ne_f
|
620 |
-
nc += nc_f
|
621 |
-
if im_file:
|
622 |
-
x[im_file] = [lb, shape, segments]
|
623 |
-
if msg:
|
624 |
-
msgs.append(msg)
|
625 |
-
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt"
|
626 |
-
|
627 |
-
pbar.close()
|
628 |
-
if msgs:
|
629 |
-
LOGGER.info('\n'.join(msgs))
|
630 |
-
if nf == 0:
|
631 |
-
LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
|
632 |
-
x['hash'] = get_hash(self.label_files + self.im_files)
|
633 |
-
x['results'] = nf, nm, ne, nc, len(self.im_files)
|
634 |
-
x['msgs'] = msgs # warnings
|
635 |
-
x['version'] = self.cache_version # cache version
|
636 |
-
try:
|
637 |
-
np.save(path, x) # save cache for next time
|
638 |
-
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
|
639 |
-
LOGGER.info(f'{prefix}New cache created: {path}')
|
640 |
-
except Exception as e:
|
641 |
-
LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable
|
642 |
-
return x
|
643 |
-
|
644 |
-
def __len__(self):
|
645 |
-
return len(self.im_files)
|
646 |
-
|
647 |
-
# def __iter__(self):
|
648 |
-
# self.count = -1
|
649 |
-
# print('ran dataset iter')
|
650 |
-
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
|
651 |
-
# return self
|
652 |
-
|
653 |
-
def __getitem__(self, index):
|
654 |
-
index = self.indices[index] # linear, shuffled, or image_weights
|
655 |
-
|
656 |
-
hyp = self.hyp
|
657 |
-
mosaic = self.mosaic and random.random() < hyp['mosaic']
|
658 |
-
if mosaic:
|
659 |
-
# Load mosaic
|
660 |
-
img, labels = self.load_mosaic(index)
|
661 |
-
shapes = None
|
662 |
-
|
663 |
-
# MixUp augmentation
|
664 |
-
if random.random() < hyp['mixup']:
|
665 |
-
img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
|
666 |
-
|
667 |
-
else:
|
668 |
-
# Load image
|
669 |
-
img, (h0, w0), (h, w) = self.load_image(index)
|
670 |
-
|
671 |
-
# Letterbox
|
672 |
-
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
|
673 |
-
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
|
674 |
-
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
|
675 |
-
|
676 |
-
labels = self.labels[index].copy()
|
677 |
-
if labels.size: # normalized xywh to pixel xyxy format
|
678 |
-
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
|
679 |
-
|
680 |
-
if self.augment:
|
681 |
-
img, labels = random_perspective(img,
|
682 |
-
labels,
|
683 |
-
degrees=hyp['degrees'],
|
684 |
-
translate=hyp['translate'],
|
685 |
-
scale=hyp['scale'],
|
686 |
-
shear=hyp['shear'],
|
687 |
-
perspective=hyp['perspective'])
|
688 |
-
|
689 |
-
nl = len(labels) # number of labels
|
690 |
-
if nl:
|
691 |
-
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
|
692 |
-
|
693 |
-
if self.augment:
|
694 |
-
# Albumentations
|
695 |
-
img, labels = self.albumentations(img, labels)
|
696 |
-
nl = len(labels) # update after albumentations
|
697 |
-
|
698 |
-
# HSV color-space
|
699 |
-
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
|
700 |
-
|
701 |
-
# Flip up-down
|
702 |
-
if random.random() < hyp['flipud']:
|
703 |
-
img = np.flipud(img)
|
704 |
-
if nl:
|
705 |
-
labels[:, 2] = 1 - labels[:, 2]
|
706 |
-
|
707 |
-
# Flip left-right
|
708 |
-
if random.random() < hyp['fliplr']:
|
709 |
-
img = np.fliplr(img)
|
710 |
-
if nl:
|
711 |
-
labels[:, 1] = 1 - labels[:, 1]
|
712 |
-
|
713 |
-
# Cutouts
|
714 |
-
# labels = cutout(img, labels, p=0.5)
|
715 |
-
# nl = len(labels) # update after cutout
|
716 |
-
|
717 |
-
labels_out = torch.zeros((nl, 6))
|
718 |
-
if nl:
|
719 |
-
labels_out[:, 1:] = torch.from_numpy(labels)
|
720 |
-
|
721 |
-
# Convert
|
722 |
-
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
723 |
-
img = np.ascontiguousarray(img)
|
724 |
-
|
725 |
-
return torch.from_numpy(img), labels_out, self.im_files[index], shapes
|
726 |
-
|
727 |
-
    def load_image(self, i):
        # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                assert im is not None, f'Image Not Found {f}'
            h0, w0 = im.shape[:2]  # orig hw
            r = self.img_size / max(h0, w0)  # ratio
            if r != 1:  # if sizes are not equal
                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
                im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
        return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized

    def cache_images_to_disk(self, i):
        # Saves an image as an *.npy file for faster loading
        f = self.npy_files[i]
        if not f.exists():
            np.save(f.as_posix(), cv2.imread(self.im_files[i]))
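Illustrative sketch (not part of the deleted file): load_image scales by the long side only, so aspect ratio is preserved; the 1280x720 values here are hypothetical.

    img_size, h0, w0 = 640, 720, 1280
    r = img_size / max(h0, w0)
    print(int(w0 * r), int(h0 * r))  # 640 360 -- long side hits img_size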
    def load_mosaic(self, index):
        # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
        labels4, segments4 = [], []
        s = self.img_size
        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y
        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
        random.shuffle(indices)
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = self.load_image(index)

            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            padw = x1a - x1b
            padh = y1a - y1b

            # Labels
            labels, segments = self.labels[index].copy(), self.segments[index].copy()
            if labels.size:
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
            labels4.append(labels)
            segments4.extend(segments)

        # Concat/clip labels
        labels4 = np.concatenate(labels4, 0)
        for x in (labels4[:, 1:], *segments4):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img4, labels4 = replicate(img4, labels4)  # replicate

        # Augment
        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
        img4, labels4 = random_perspective(img4,
                                           labels4,
                                           segments4,
                                           degrees=self.hyp['degrees'],
                                           translate=self.hyp['translate'],
                                           scale=self.hyp['scale'],
                                           shear=self.hyp['shear'],
                                           perspective=self.hyp['perspective'],
                                           border=self.mosaic_border)  # border to remove

        return img4, labels4
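Illustrative sketch (not part of the deleted file): with the mosaic_border value YOLOv5 conventionally uses (-s//2, an assumption here), the sampled mosaic center always lands in the middle half of the 2s x 2s canvas.

    import random
    s = 640
    mosaic_border = [-s // 2, -s // 2]  # assumed value set in __init__
    yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in mosaic_border)
    print(s // 2 <= yc <= 2 * s - s // 2)  # True: center stays in the middle half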
    def load_mosaic9(self, index):
        # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
        labels9, segments9 = [], []
        s = self.img_size
        indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
        random.shuffle(indices)
        hp, wp = -1, -1  # height, width previous
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = self.load_image(index)

            # place img in img9
            if i == 0:  # center
                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                h0, w0 = h, w
                c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
            elif i == 1:  # top
                c = s, s - h, s + w, s
            elif i == 2:  # top right
                c = s + wp, s - h, s + wp + w, s
            elif i == 3:  # right
                c = s + w0, s, s + w0 + w, s + h
            elif i == 4:  # bottom right
                c = s + w0, s + hp, s + w0 + w, s + hp + h
            elif i == 5:  # bottom
                c = s + w0 - w, s + h0, s + w0, s + h0 + h
            elif i == 6:  # bottom left
                c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
            elif i == 7:  # left
                c = s - w, s + h0 - h, s, s + h0
            elif i == 8:  # top left
                c = s - w, s + h0 - hp - h, s, s + h0 - hp

            padx, pady = c[:2]
            x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords

            # Labels
            labels, segments = self.labels[index].copy(), self.segments[index].copy()
            if labels.size:
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
                segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
            labels9.append(labels)
            segments9.extend(segments)

            # Image
            img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
            hp, wp = h, w  # height, width previous

        # Offset
        yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border)  # mosaic center x, y
        img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

        # Concat/clip labels
        labels9 = np.concatenate(labels9, 0)
        labels9[:, [1, 3]] -= xc
        labels9[:, [2, 4]] -= yc
        c = np.array([xc, yc])  # centers
        segments9 = [x - c for x in segments9]

        for x in (labels9[:, 1:], *segments9):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img9, labels9 = replicate(img9, labels9)  # replicate

        # Augment
        img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
        img9, labels9 = random_perspective(img9,
                                           labels9,
                                           segments9,
                                           degrees=self.hyp['degrees'],
                                           translate=self.hyp['translate'],
                                           scale=self.hyp['scale'],
                                           shear=self.hyp['shear'],
                                           perspective=self.hyp['perspective'],
                                           border=self.mosaic_border)  # border to remove

        return img9, labels9
    @staticmethod
    def collate_fn(batch):
        im, label, path, shapes = zip(*batch)  # transposed
        for i, lb in enumerate(label):
            lb[:, 0] = i  # add target image index for build_targets()
        return torch.stack(im, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        im, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
                                    align_corners=False)[0].type(im[i].type())
                lb = label[i]
            else:
                im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
                lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            im4.append(im1)
            label4.append(lb)

        for i, lb in enumerate(label4):
            lb[:, 0] = i  # add target image index for build_targets()

        return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
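Illustrative sketch (not part of the deleted file): what collate_fn produces for a hypothetical 2-image batch with 1 and 2 labels; the leading zero column of labels_out is filled with the per-batch image index here.

    import torch

    def collate_fn(batch):  # same logic as LoadImagesAndLabels.collate_fn above
        im, label, path, shapes = zip(*batch)
        for i, lb in enumerate(label):
            lb[:, 0] = i  # image index within the batch
        return torch.stack(im, 0), torch.cat(label, 0), path, shapes

    batch = [(torch.zeros(3, 640, 640), torch.zeros(1, 6), 'a.jpg', None),
             (torch.zeros(3, 640, 640), torch.zeros(2, 6), 'b.jpg', None)]
    imgs, targets, paths, _ = collate_fn(batch)
    print(imgs.shape)     # torch.Size([2, 3, 640, 640])
    print(targets[:, 0])  # tensor([0., 1., 1.]) -> per-label image indices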
# Ancillary functions --------------------------------------------------------------------------------------------------
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(f'{str(path)}_flat')
    if os.path.exists(new_path):
        shutil.rmtree(new_path)  # delete output folder
    os.makedirs(new_path)  # make new output folder
    for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)


def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()
    # Convert detection dataset into classification dataset, with one directory per class
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file) as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.dataloaders import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
    n = len(files)  # number of files
    random.seed(0)  # for reproducibility
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    for x in txt:
        if (path.parent / x).exists():
            (path.parent / x).unlink()  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path.parent / txt[i], 'a') as f:
                f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n')  # add image to txt file
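Illustrative sketch (not part of the deleted file): how random.choices with the default (0.9, 0.1, 0.0) weights distributes 1000 hypothetical images across the three splits.

    import random
    random.seed(0)
    splits = random.choices([0, 1, 2], weights=(0.9, 0.1, 0.0), k=1000)
    print(splits.count(0), splits.count(1), splits.count(2))  # roughly 900 / 100 / exactly 0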
def verify_image_label(args):
    # Verify one image-label pair
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)
                if f.read() != b'\xff\xd9':  # corrupt JPEG
                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
                    msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'

        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file) as f:
                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any(len(x) > 6 for x in lb):  # is segment
                    classes = np.array([x[0] for x in lb], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)
                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                lb = np.array(lb, dtype=np.float32)
            nl = len(lb)
            if nl:
                assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
                assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
                assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
                _, i = np.unique(lb, axis=0, return_index=True)
                if len(i) < nl:  # duplicate row check
                    lb = lb[i]  # remove duplicates
                    if segments:
                        segments = [segments[x] for x in i]
                    msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
            else:
                ne = 1  # label empty
                lb = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            lb = np.zeros((0, 5), dtype=np.float32)
        return im_file, lb, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
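Illustrative sketch (not part of the deleted file): the corrupt-JPEG check above reads the last two bytes and compares them with the JPEG end-of-image marker; the byte string here is a hypothetical stand-in for a real file.

    import io
    buf = io.BytesIO(b'\xff\xd8...image data...\xff\xd9')
    buf.seek(-2, 2)                   # seek to 2 bytes before EOF
    print(buf.read() == b'\xff\xd9')  # True for an intact JPEG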
class HUBDatasetStats():
    """ Class for generating HUB dataset JSON and `-hub` dataset directory

    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally

    Usage
        from utils.dataloaders import HUBDatasetStats
        stats = HUBDatasetStats('coco128.yaml', autodownload=True)  # usage 1
        stats = HUBDatasetStats('path/to/coco128.zip')  # usage 2
        stats.get_json(save=False)
        stats.process_images()
    """

    def __init__(self, path='coco128.yaml', autodownload=False):
        # Initialize class
        zipped, data_dir, yaml_path = self._unzip(Path(path))
        try:
            with open(check_yaml(yaml_path), errors='ignore') as f:
                data = yaml.safe_load(f)  # data dict
                if zipped:
                    data['path'] = data_dir
        except Exception as e:
            raise Exception("error/HUB/dataset_stats/yaml_load") from e

        check_dataset(data, autodownload)  # download dataset if missing
        self.hub_dir = Path(data['path'] + '-hub')
        self.im_dir = self.hub_dir / 'images'
        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes /images
        self.stats = {'nc': data['nc'], 'names': list(data['names'].values())}  # statistics dictionary
        self.data = data

    @staticmethod
    def _find_yaml(dir):
        # Return data.yaml file
        files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml'))  # try root level first and then recursive
        assert files, f'No *.yaml file found in {dir}'
        if len(files) > 1:
            files = [f for f in files if f.stem == dir.stem]  # prefer *.yaml files that match dir name
            assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed'
        assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}'
        return files[0]

    def _unzip(self, path):
        # Unzip data.zip
        if not str(path).endswith('.zip'):  # path is data.yaml
            return False, None, path
        assert Path(path).is_file(), f'Error unzipping {path}, file not found'
        unzip_file(path, path=path.parent)
        dir = path.with_suffix('')  # dataset directory == zip name
        assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/'
        return True, str(dir), self._find_yaml(dir)  # zipped, data_dir, yaml_path

    def _hub_ops(self, f, max_dim=1920):
        # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
        f_new = self.im_dir / Path(f).name  # dataset-hub image filename
        try:  # use PIL
            im = Image.open(f)
            r = max_dim / max(im.height, im.width)  # ratio
            if r < 1.0:  # image too large
                im = im.resize((int(im.width * r), int(im.height * r)))
            im.save(f_new, 'JPEG', quality=50, optimize=True)  # save
        except Exception as e:  # use OpenCV
            LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}')
            im = cv2.imread(f)
            im_height, im_width = im.shape[:2]
            r = max_dim / max(im_height, im_width)  # ratio
            if r < 1.0:  # image too large
                im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
            cv2.imwrite(str(f_new), im)

    def get_json(self, save=False, verbose=False):
        # Return dataset JSON for Ultralytics HUB
        def _round(labels):
            # Update labels to integer class and 6 decimal place floats
            return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]

        for split in 'train', 'val', 'test':
            if self.data.get(split) is None:
                self.stats[split] = None  # i.e. no test set
                continue
            dataset = LoadImagesAndLabels(self.data[split])  # load dataset
            x = np.array([
                np.bincount(label[:, 0].astype(int), minlength=self.data['nc'])
                for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')])  # shape(128x80)
            self.stats[split] = {
                'instance_stats': {
                    'total': int(x.sum()),
                    'per_class': x.sum(0).tolist()},
                'image_stats': {
                    'total': dataset.n,
                    'unlabelled': int(np.all(x == 0, 1).sum()),
                    'per_class': (x > 0).sum(0).tolist()},
                'labels': [{
                    str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]}

        # Save, print and return
        if save:
            stats_path = self.hub_dir / 'stats.json'
            print(f'Saving {stats_path.resolve()}...')
            with open(stats_path, 'w') as f:
                json.dump(self.stats, f)  # save stats.json
        if verbose:
            print(json.dumps(self.stats, indent=2, sort_keys=False))
        return self.stats

    def process_images(self):
        # Compress images for Ultralytics HUB
        for split in 'train', 'val', 'test':
            if self.data.get(split) is None:
                continue
            dataset = LoadImagesAndLabels(self.data[split])  # load dataset
            desc = f'{split} images'
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc):
                pass
        print(f'Done. All images saved to {self.im_dir}')
        return self.im_dir
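Illustrative sketch (not part of the deleted file): the per-class instance counts in get_json come from np.bincount over each image's class column; the 3-label array here is hypothetical.

    import numpy as np
    label = np.array([[0, .5, .5, .1, .1],
                      [2, .4, .4, .2, .2],
                      [0, .3, .3, .1, .1]])
    print(np.bincount(label[:, 0].astype(int), minlength=4))  # [2 0 1 0]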
# Classification dataloaders -------------------------------------------------------------------------------------------
class ClassificationDataset(torchvision.datasets.ImageFolder):
    """
    YOLOv5 Classification Dataset.
    Arguments
        root:  Dataset path
        transform:  torchvision transforms, used by default
        album_transform: Albumentations transforms, used if installed
    """

    def __init__(self, root, augment, imgsz, cache=False):
        super().__init__(root=root)
        self.torch_transforms = classify_transforms(imgsz)
        self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
        self.cache_ram = cache is True or cache == 'ram'
        self.cache_disk = cache == 'disk'
        self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples]  # file, index, npy, im

    def __getitem__(self, i):
        f, j, fn, im = self.samples[i]  # filename, index, filename.with_suffix('.npy'), image
        if self.cache_ram and im is None:
            im = self.samples[i][3] = cv2.imread(f)
        elif self.cache_disk:
            if not fn.exists():  # load npy
                np.save(fn.as_posix(), cv2.imread(f))
            im = np.load(fn)
        else:  # read image
            im = cv2.imread(f)  # BGR
        if self.album_transforms:
            sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"]
        else:
            sample = self.torch_transforms(im)
        return sample, j


def create_classification_dataloader(path,
                                     imgsz=224,
                                     batch_size=16,
                                     augment=True,
                                     cache=False,
                                     rank=-1,
                                     workers=8,
                                     shuffle=True):
    # Returns Dataloader object to be used with YOLOv5 Classifier
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
    batch_size = min(batch_size, len(dataset))
    nd = torch.cuda.device_count()
    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + RANK)
    return InfiniteDataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=shuffle and sampler is None,
                              num_workers=nw,
                              sampler=sampler,
                              pin_memory=PIN_MEMORY,
                              worker_init_fn=seed_worker,
                              generator=generator)  # or DataLoader(persistent_workers=True)
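Illustrative sketch (not part of the deleted file): the num_workers heuristic above, evaluated standalone for a hypothetical machine with 1 GPU, batch size 16 and workers=8.

    import os
    nd, batch_size, workers = 1, 16, 8
    nw = min([(os.cpu_count() or 1) // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
    print(nw)  # bounded by CPU count per device, batch size and the workers argument
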
spaces/Artrajz/vits-simple-api/vits/text/korean.py
DELETED
@@ -1,210 +0,0 @@
import re
from jamo import h2j, j2hcj
import ko_pron


# This is a list of Korean classifiers preceded by pure Korean numerals.
_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'

# List of (hangul, hangul divided) pairs:
_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('ㄳ', 'ㄱㅅ'),
    ('ㄵ', 'ㄴㅈ'),
    ('ㄶ', 'ㄴㅎ'),
    ('ㄺ', 'ㄹㄱ'),
    ('ㄻ', 'ㄹㅁ'),
    ('ㄼ', 'ㄹㅂ'),
    ('ㄽ', 'ㄹㅅ'),
    ('ㄾ', 'ㄹㅌ'),
    ('ㄿ', 'ㄹㅍ'),
    ('ㅀ', 'ㄹㅎ'),
    ('ㅄ', 'ㅂㅅ'),
    ('ㅘ', 'ㅗㅏ'),
    ('ㅙ', 'ㅗㅐ'),
    ('ㅚ', 'ㅗㅣ'),
    ('ㅝ', 'ㅜㅓ'),
    ('ㅞ', 'ㅜㅔ'),
    ('ㅟ', 'ㅜㅣ'),
    ('ㅢ', 'ㅡㅣ'),
    ('ㅑ', 'ㅣㅏ'),
    ('ㅒ', 'ㅣㅐ'),
    ('ㅕ', 'ㅣㅓ'),
    ('ㅖ', 'ㅣㅔ'),
    ('ㅛ', 'ㅣㅗ'),
    ('ㅠ', 'ㅣㅜ')
]]

# List of (Latin alphabet, hangul) pairs:
_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('a', '에이'),
    ('b', '비'),
    ('c', '시'),
    ('d', '디'),
    ('e', '이'),
    ('f', '에프'),
    ('g', '지'),
    ('h', '에이치'),
    ('i', '아이'),
    ('j', '제이'),
    ('k', '케이'),
    ('l', '엘'),
    ('m', '엠'),
    ('n', '엔'),
    ('o', '오'),
    ('p', '피'),
    ('q', '큐'),
    ('r', '아르'),
    ('s', '에스'),
    ('t', '티'),
    ('u', '유'),
    ('v', '브이'),
    ('w', '더블유'),
    ('x', '엑스'),
    ('y', '와이'),
    ('z', '제트')
]]

# List of (ipa, lazy ipa) pairs:
_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('t͡ɕ', 'ʧ'),
    ('d͡ʑ', 'ʥ'),
    ('ɲ', 'n^'),
    ('ɕ', 'ʃ'),
    ('ʷ', 'w'),
    ('ɭ', 'l`'),
    ('ʎ', 'ɾ'),
    ('ɣ', 'ŋ'),
    ('ɰ', 'ɯ'),
    ('ʝ', 'j'),
    ('ʌ', 'ə'),
    ('ɡ', 'g'),
    ('\u031a', '#'),
    ('\u0348', '='),
    ('\u031e', ''),
    ('\u0320', ''),
    ('\u0339', '')
]]
def latin_to_hangul(text):
    for regex, replacement in _latin_to_hangul:
        text = re.sub(regex, replacement, text)
    return text


def divide_hangul(text):
    text = j2hcj(h2j(text))
    for regex, replacement in _hangul_divided:
        text = re.sub(regex, replacement, text)
    return text
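Illustrative sketch (not part of the deleted file, assuming the jamo package is installed): the round-trip divide_hangul performs before the divided-jamo substitutions are applied.

    from jamo import h2j, j2hcj
    print(j2hcj(h2j('한글')))  # 'ㅎㅏㄴㄱㅡㄹ' -- syllables decomposed into compatibility jamo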
def hangul_number(num, sino=True):
    '''Reference https://github.com/Kyubyong/g2pK'''
    num = re.sub(',', '', num)

    if num == '0':
        return '영'
    if not sino and num == '20':
        return '스무'

    digits = '123456789'
    names = '일이삼사오육칠팔구'
    digit2name = {d: n for d, n in zip(digits, names)}

    modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
    decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
    digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
    digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}

    spelledout = []
    for i, digit in enumerate(num):
        i = len(num) - i - 1
        if sino:
            if i == 0:
                name = digit2name.get(digit, '')
            elif i == 1:
                name = digit2name.get(digit, '') + '십'
                name = name.replace('일십', '십')
        else:
            if i == 0:
                name = digit2mod.get(digit, '')
            elif i == 1:
                name = digit2dec.get(digit, '')
        if digit == '0':
            if i % 4 == 0:
                last_three = spelledout[-min(3, len(spelledout)):]
                if ''.join(last_three) == '':
                    spelledout.append('')
                    continue
            else:
                spelledout.append('')
                continue
        if i == 2:
            name = digit2name.get(digit, '') + '백'
            name = name.replace('일백', '백')
        elif i == 3:
            name = digit2name.get(digit, '') + '천'
            name = name.replace('일천', '천')
        elif i == 4:
            name = digit2name.get(digit, '') + '만'
            name = name.replace('일만', '만')
        elif i == 5:
            name = digit2name.get(digit, '') + '십'
            name = name.replace('일십', '십')
        elif i == 6:
            name = digit2name.get(digit, '') + '백'
            name = name.replace('일백', '백')
        elif i == 7:
            name = digit2name.get(digit, '') + '천'
            name = name.replace('일천', '천')
        elif i == 8:
            name = digit2name.get(digit, '') + '억'
        elif i == 9:
            name = digit2name.get(digit, '') + '십'
        elif i == 10:
            name = digit2name.get(digit, '') + '백'
        elif i == 11:
            name = digit2name.get(digit, '') + '천'
        elif i == 12:
            name = digit2name.get(digit, '') + '조'
        elif i == 13:
            name = digit2name.get(digit, '') + '십'
        elif i == 14:
            name = digit2name.get(digit, '') + '백'
        elif i == 15:
            name = digit2name.get(digit, '') + '천'
        spelledout.append(name)
    return ''.join(elem for elem in spelledout)
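Illustrative sketch (not part of the deleted file): the two numeral readings hangul_number distinguishes for the same input.

    print(hangul_number('20', sino=True))   # '이십'  (Sino-Korean reading)
    print(hangul_number('20', sino=False))  # '스무'  (native Korean modifier form)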
def number_to_hangul(text):
    '''Reference https://github.com/Kyubyong/g2pK'''
    tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
    for token in tokens:
        num, classifier = token
        if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
            spelledout = hangul_number(num, sino=False)
        else:
            spelledout = hangul_number(num, sino=True)
        text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
    # digit by digit for remaining digits
    digits = '0123456789'
    names = '영일이삼사오육칠팔구'
    for d, n in zip(digits, names):
        text = text.replace(d, n)
    return text


def korean_to_lazy_ipa(text):
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = re.sub('[\uac00-\ud7af]+', lambda x: ko_pron.romanise(x.group(0), 'ipa').split('] ~ [')[0], text)
    for regex, replacement in _ipa_to_lazy_ipa:
        text = re.sub(regex, replacement, text)
    return text


def korean_to_ipa(text):
    text = korean_to_lazy_ipa(text)
    return text.replace('ʧ', 'tʃ').replace('ʥ', 'dʑ')
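Illustrative sketch (not part of the deleted file): the (number, classifier) tokens number_to_hangul extracts before spelling the numbers out; the sample sentence is hypothetical.

    import re
    print(sorted(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', '사과 3개와 2마리')))
    # [('2', '마리'), ('3', '개와')]
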
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/encoding.py
DELETED
@@ -1,36 +0,0 @@
import codecs
import locale
import re
import sys
from typing import List, Tuple

BOMS: List[Tuple[bytes, str]] = [
    (codecs.BOM_UTF8, "utf-8"),
    (codecs.BOM_UTF16, "utf-16"),
    (codecs.BOM_UTF16_BE, "utf-16-be"),
    (codecs.BOM_UTF16_LE, "utf-16-le"),
    (codecs.BOM_UTF32, "utf-32"),
    (codecs.BOM_UTF32_BE, "utf-32-be"),
    (codecs.BOM_UTF32_LE, "utf-32-le"),
]

ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)")


def auto_decode(data: bytes) -> str:
    """Check a bytes string for a BOM to correctly detect the encoding

    Fallback to locale.getpreferredencoding(False) like open() on Python3"""
    for bom, encoding in BOMS:
        if data.startswith(bom):
            return data[len(bom) :].decode(encoding)
    # Lets check the first two lines as in PEP263
    for line in data.split(b"\n")[:2]:
        if line[0:1] == b"#" and ENCODING_RE.search(line):
            result = ENCODING_RE.search(line)
            assert result is not None
            encoding = result.groups()[0].decode("ascii")
            return data.decode(encoding)
    return data.decode(
        locale.getpreferredencoding(False) or sys.getdefaultencoding(),
    )
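Illustrative sketch (not part of the deleted file): the BOM check auto_decode performs, shown standalone on a hypothetical UTF-8 payload.

    import codecs
    data = codecs.BOM_UTF8 + 'print("hi")'.encode('utf-8')
    bom, encoding = codecs.BOM_UTF8, 'utf-8'
    if data.startswith(bom):
        print(data[len(bom):].decode(encoding))  # 'print("hi")' -- BOM stripped, then decoded
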
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/chardistribution.py
DELETED
@@ -1,261 +0,0 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from typing import Tuple, Union

from .big5freq import (
    BIG5_CHAR_TO_FREQ_ORDER,
    BIG5_TABLE_SIZE,
    BIG5_TYPICAL_DISTRIBUTION_RATIO,
)
from .euckrfreq import (
    EUCKR_CHAR_TO_FREQ_ORDER,
    EUCKR_TABLE_SIZE,
    EUCKR_TYPICAL_DISTRIBUTION_RATIO,
)
from .euctwfreq import (
    EUCTW_CHAR_TO_FREQ_ORDER,
    EUCTW_TABLE_SIZE,
    EUCTW_TYPICAL_DISTRIBUTION_RATIO,
)
from .gb2312freq import (
    GB2312_CHAR_TO_FREQ_ORDER,
    GB2312_TABLE_SIZE,
    GB2312_TYPICAL_DISTRIBUTION_RATIO,
)
from .jisfreq import (
    JIS_CHAR_TO_FREQ_ORDER,
    JIS_TABLE_SIZE,
    JIS_TYPICAL_DISTRIBUTION_RATIO,
)
from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE


class CharDistributionAnalysis:
    ENOUGH_DATA_THRESHOLD = 1024
    SURE_YES = 0.99
    SURE_NO = 0.01
    MINIMUM_DATA_THRESHOLD = 3

    def __init__(self) -> None:
        # Mapping table to get frequency order from char order (get from
        # GetOrder())
        self._char_to_freq_order: Tuple[int, ...] = tuple()
        self._table_size = 0  # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence.  See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self.typical_distribution_ratio = 0.0
        self._done = False
        self._total_chars = 0
        self._freq_chars = 0
        self.reset()

    def reset(self) -> None:
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._done = False
        self._total_chars = 0  # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._freq_chars = 0

    def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
        """feed a character with known length"""
        if char_len == 2:
            # we only care about 2-bytes character in our distribution analysis
            order = self.get_order(char)
        else:
            order = -1
        if order >= 0:
            self._total_chars += 1
            # order is valid
            if order < self._table_size:
                if 512 > self._char_to_freq_order[order]:
                    self._freq_chars += 1

    def get_confidence(self) -> float:
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
            return self.SURE_NO

        if self._total_chars != self._freq_chars:
            r = self._freq_chars / (
                (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
            )
            if r < self.SURE_YES:
                return r

        # normalize confidence (we don't want to be 100% sure)
        return self.SURE_YES

    def got_enough_data(self) -> bool:
        # It is not necessary to receive all data to draw conclusion.
        # For charset detection, certain amount of data is enough
        return self._total_chars > self.ENOUGH_DATA_THRESHOLD

    def get_order(self, _: Union[bytes, bytearray]) -> int:
        # We do not handle characters based on the original encoding string,
        # but convert this encoding string to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.
        return -1
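Illustrative sketch (not part of the deleted file): the ratio computed in get_confidence, evaluated standalone with hypothetical counts and a hypothetical typical distribution ratio of 6.0.

    freq_chars, total_chars, typical_ratio = 800, 1000, 6.0
    r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
    print(min(r, 0.99))  # ~0.667 -> confidence that the byte stream matches this charset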
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
        self._table_size = EUCTW_TABLE_SIZE
        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-TW encoding, we are interested
        #   first  byte range: 0xc4 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xC4:
            return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
        return -1


class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-KR encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char = byte_str[0]
        if first_char >= 0xB0:
            return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
        return -1


class JOHABDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        first_char = byte_str[0]
        if 0x88 <= first_char < 0xD4:
            code = first_char * 256 + byte_str[1]
            return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
        return -1


class GB2312DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
        self._table_size = GB2312_TABLE_SIZE
        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for GB2312 encoding, we are interested
        #   first  byte range: 0xb0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if (first_char >= 0xB0) and (second_char >= 0xA1):
            return 94 * (first_char - 0xB0) + second_char - 0xA1
        return -1


class Big5DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
        self._table_size = BIG5_TABLE_SIZE
        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for big5 encoding, we are interested
        #   first  byte range: 0xa4 -- 0xfe
        #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if first_char >= 0xA4:
            if second_char >= 0xA1:
                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
            return 157 * (first_char - 0xA4) + second_char - 0x40
        return -1


class SJISDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for sjis encoding, we are interested
        #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        #   second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
        # no validation needed here. State machine has done that
        first_char, second_char = byte_str[0], byte_str[1]
        if 0x81 <= first_char <= 0x9F:
            order = 188 * (first_char - 0x81)
        elif 0xE0 <= first_char <= 0xEF:
            order = 188 * (first_char - 0xE0 + 31)
        else:
            return -1
        order = order + second_char - 0x40
        if second_char > 0x7F:
            order = -1
        return order


class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
        # for euc-JP encoding, we are interested
        #   first  byte range: 0xa0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        char = byte_str[0]
        if char >= 0xA0:
            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
        return -1
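Illustrative sketch (not part of the deleted file): the EUC-KR order computation from get_order above, applied to the two bytes of one 2-byte character.

    byte_str = b"\xb0\xa1"  # first EUC-KR hangul code point ('가')
    first_char, second_char = byte_str[0], byte_str[1]
    order = 94 * (first_char - 0xB0) + second_char - 0xA1
    print(order)  # 0 -> index into EUCKR_CHAR_TO_FREQ_ORDER
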
spaces/Awesimo/jojogan/e4e/models/stylegan2/__init__.py
DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/deform_conv.py
DELETED
@@ -1,501 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from functools import lru_cache
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from torchvision.ops import deform_conv2d

from detectron2 import _C

from .wrappers import _NewEmptyTensorOp


class _DeformConv(Function):
    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        weight,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        im2col_step=64,
    ):
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
            )
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight)

        output = input.new_empty(
            _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
        )

        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones

        if not input.is_cuda:
            if deformable_groups != 1:
                raise NotImplementedError(
                    "Deformable Conv with deformable_groups != 1 is not supported on CPUs!"
                )
            return deform_conv2d(
                input, offset, weight, stride=stride, padding=padding, dilation=dilation
            )
        else:
            cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
            assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"

            _C.deform_conv_forward(
                input,
                weight,
                offset,
                output,
                ctx.bufs_[0],
                ctx.bufs_[1],
                weight.size(3),
                weight.size(2),
                ctx.stride[1],
                ctx.stride[0],
                ctx.padding[1],
                ctx.padding[0],
                ctx.dilation[1],
                ctx.dilation[0],
                ctx.groups,
                ctx.deformable_groups,
                cur_im2col_step,
            )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        if not grad_output.is_cuda:
            raise NotImplementedError("Deformable Conv is not supported on CPUs!")
        else:
            cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
            assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(
                    input,
                    offset,
                    grad_output,
                    grad_input,
                    grad_offset,
                    weight,
                    ctx.bufs_[0],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    cur_im2col_step,
                )

            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_filter(
                    input,
                    offset,
                    grad_output,
                    grad_weight,
                    ctx.bufs_[0],
                    ctx.bufs_[1],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    1,
                    cur_im2col_step,
                )

        return grad_input, grad_offset, grad_weight, None, None, None, None, None, None

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    "x".join(map(str, output_size))
                )
            )
        return output_size

    @staticmethod
    @lru_cache(maxsize=128)
    def _cal_im2col_step(input_size, default_size):
        """
        Calculate proper im2col step size, which should be divisible by input_size and not larger
        than prefer_size. Meanwhile the step size should be as large as possible to be more
        efficient. So we choose the largest one among all divisors of input_size which are smaller
        than prefer_size.
        :param input_size: input batch size .
        :param default_size: default preferred im2col step size.
        :return: the largest proper step size.
        """
        if input_size <= default_size:
            return input_size
        best_step = 1
        for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
            if input_size % step == 0:
                if input_size // step <= default_size:
                    return input_size // step
                best_step = step

        return best_step
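Illustrative sketch (not part of the deleted file): the torchvision op that _DeformConv falls back to on CPU, called standalone with hypothetical shapes.

    import torch
    from torchvision.ops import deform_conv2d

    x = torch.randn(1, 3, 8, 8)
    weight = torch.randn(6, 3, 3, 3)
    offset = torch.zeros(1, 2 * 3 * 3, 6, 6)  # (dy, dx) per kernel position
    out = deform_conv2d(x, offset, weight)
    print(out.shape)  # torch.Size([1, 6, 6, 6]) -- zero offsets reduce to plain conv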
class _ModulatedDeformConv(Function):
    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        mask,
        weight,
        bias=None,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
    ):
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(1)  # fake tensor
        if not input.is_cuda:
            raise NotImplementedError("Deformable Conv is not supported on CPUs!")
        if (
            weight.requires_grad
            or mask.requires_grad
            or offset.requires_grad
            or input.requires_grad
        ):
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        _C.modulated_deform_conv_forward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            output,
            ctx._bufs[1],
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias,
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError("Deformable Conv is not supported on CPUs!")
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        _C.modulated_deform_conv_backward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            ctx._bufs[1],
            grad_input,
            grad_weight,
            grad_bias,
            grad_offset,
            grad_mask,
            grad_output,
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias,
        )
        if not ctx.with_bias:
            grad_bias = None

        return (
            grad_input,
            grad_offset,
            grad_mask,
            grad_weight,
            grad_bias,
            None,
            None,
            None,
            None,
            None,
        )

    @staticmethod
    def _infer_shape(ctx, input, weight):
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (
            height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
        ) // ctx.stride + 1
        width_out = (
            width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
        ) // ctx.stride + 1
        return n, channels_out, height_out, width_out


deform_conv = _DeformConv.apply
modulated_deform_conv = _ModulatedDeformConv.apply


class DeformConv(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=False,
        norm=None,
        activation=None,
    ):
        """
        Deformable convolution from :paper:`deformconv`.

        Arguments are similar to :class:`Conv2D`. Extra arguments:

        Args:
            deformable_groups (int): number of groups used in deformable convolution.
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
        """
        super(DeformConv, self).__init__()

        assert not bias
        assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format(
            in_channels, groups
        )
        assert (
            out_channels % groups == 0
        ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups)

        self.in_channels = in_channels
        self.out_channels = out_channels
-
self.kernel_size = _pair(kernel_size)
|
353 |
-
self.stride = _pair(stride)
|
354 |
-
self.padding = _pair(padding)
|
355 |
-
self.dilation = _pair(dilation)
|
356 |
-
self.groups = groups
|
357 |
-
self.deformable_groups = deformable_groups
|
358 |
-
self.norm = norm
|
359 |
-
self.activation = activation
|
360 |
-
|
361 |
-
self.weight = nn.Parameter(
|
362 |
-
torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
|
363 |
-
)
|
364 |
-
self.bias = None
|
365 |
-
|
366 |
-
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
|
367 |
-
|
368 |
-
def forward(self, x, offset):
|
369 |
-
if x.numel() == 0:
|
370 |
-
# When input is empty, we want to return a empty tensor with "correct" shape,
|
371 |
-
# So that the following operations will not panic
|
372 |
-
# if they check for the shape of the tensor.
|
373 |
-
# This computes the height and width of the output tensor
|
374 |
-
output_shape = [
|
375 |
-
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
|
376 |
-
for i, p, di, k, s in zip(
|
377 |
-
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
|
378 |
-
)
|
379 |
-
]
|
380 |
-
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
|
381 |
-
return _NewEmptyTensorOp.apply(x, output_shape)
|
382 |
-
|
383 |
-
x = deform_conv(
|
384 |
-
x,
|
385 |
-
offset,
|
386 |
-
self.weight,
|
387 |
-
self.stride,
|
388 |
-
self.padding,
|
389 |
-
self.dilation,
|
390 |
-
self.groups,
|
391 |
-
self.deformable_groups,
|
392 |
-
)
|
393 |
-
if self.norm is not None:
|
394 |
-
x = self.norm(x)
|
395 |
-
if self.activation is not None:
|
396 |
-
x = self.activation(x)
|
397 |
-
return x
|
398 |
-
|
399 |
-
def extra_repr(self):
|
400 |
-
tmpstr = "in_channels=" + str(self.in_channels)
|
401 |
-
tmpstr += ", out_channels=" + str(self.out_channels)
|
402 |
-
tmpstr += ", kernel_size=" + str(self.kernel_size)
|
403 |
-
tmpstr += ", stride=" + str(self.stride)
|
404 |
-
tmpstr += ", padding=" + str(self.padding)
|
405 |
-
tmpstr += ", dilation=" + str(self.dilation)
|
406 |
-
tmpstr += ", groups=" + str(self.groups)
|
407 |
-
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
|
408 |
-
tmpstr += ", bias=False"
|
409 |
-
return tmpstr
|
410 |
-
|
411 |
-
|
412 |
-
class ModulatedDeformConv(nn.Module):
|
413 |
-
def __init__(
|
414 |
-
self,
|
415 |
-
in_channels,
|
416 |
-
out_channels,
|
417 |
-
kernel_size,
|
418 |
-
stride=1,
|
419 |
-
padding=0,
|
420 |
-
dilation=1,
|
421 |
-
groups=1,
|
422 |
-
deformable_groups=1,
|
423 |
-
bias=True,
|
424 |
-
norm=None,
|
425 |
-
activation=None,
|
426 |
-
):
|
427 |
-
"""
|
428 |
-
Modulated deformable convolution from :paper:`deformconv2`.
|
429 |
-
|
430 |
-
Arguments are similar to :class:`Conv2D`. Extra arguments:
|
431 |
-
|
432 |
-
Args:
|
433 |
-
deformable_groups (int): number of groups used in deformable convolution.
|
434 |
-
norm (nn.Module, optional): a normalization layer
|
435 |
-
activation (callable(Tensor) -> Tensor): a callable activation function
|
436 |
-
"""
|
437 |
-
super(ModulatedDeformConv, self).__init__()
|
438 |
-
self.in_channels = in_channels
|
439 |
-
self.out_channels = out_channels
|
440 |
-
self.kernel_size = _pair(kernel_size)
|
441 |
-
self.stride = stride
|
442 |
-
self.padding = padding
|
443 |
-
self.dilation = dilation
|
444 |
-
self.groups = groups
|
445 |
-
self.deformable_groups = deformable_groups
|
446 |
-
self.with_bias = bias
|
447 |
-
self.norm = norm
|
448 |
-
self.activation = activation
|
449 |
-
|
450 |
-
self.weight = nn.Parameter(
|
451 |
-
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
|
452 |
-
)
|
453 |
-
if bias:
|
454 |
-
self.bias = nn.Parameter(torch.Tensor(out_channels))
|
455 |
-
else:
|
456 |
-
self.bias = None
|
457 |
-
|
458 |
-
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
|
459 |
-
if self.bias is not None:
|
460 |
-
nn.init.constant_(self.bias, 0)
|
461 |
-
|
462 |
-
def forward(self, x, offset, mask):
|
463 |
-
if x.numel() == 0:
|
464 |
-
output_shape = [
|
465 |
-
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
|
466 |
-
for i, p, di, k, s in zip(
|
467 |
-
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
|
468 |
-
)
|
469 |
-
]
|
470 |
-
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
|
471 |
-
return _NewEmptyTensorOp.apply(x, output_shape)
|
472 |
-
|
473 |
-
x = modulated_deform_conv(
|
474 |
-
x,
|
475 |
-
offset,
|
476 |
-
mask,
|
477 |
-
self.weight,
|
478 |
-
self.bias,
|
479 |
-
self.stride,
|
480 |
-
self.padding,
|
481 |
-
self.dilation,
|
482 |
-
self.groups,
|
483 |
-
self.deformable_groups,
|
484 |
-
)
|
485 |
-
if self.norm is not None:
|
486 |
-
x = self.norm(x)
|
487 |
-
if self.activation is not None:
|
488 |
-
x = self.activation(x)
|
489 |
-
return x
|
490 |
-
|
491 |
-
def extra_repr(self):
|
492 |
-
tmpstr = "in_channels=" + str(self.in_channels)
|
493 |
-
tmpstr += ", out_channels=" + str(self.out_channels)
|
494 |
-
tmpstr += ", kernel_size=" + str(self.kernel_size)
|
495 |
-
tmpstr += ", stride=" + str(self.stride)
|
496 |
-
tmpstr += ", padding=" + str(self.padding)
|
497 |
-
tmpstr += ", dilation=" + str(self.dilation)
|
498 |
-
tmpstr += ", groups=" + str(self.groups)
|
499 |
-
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
|
500 |
-
tmpstr += ", bias=" + str(self.with_bias)
|
501 |
-
return tmpstr
|
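A note on the shape arithmetic in the file above: the empty-input fallback in DeformConv.forward and the _output_size/_infer_shape helpers all apply the usual convolution shape rule out = (in + 2*pad - (dilation*(kernel-1) + 1)) // stride + 1. A minimal standalone sketch (illustrative only, not part of the deleted file; the variable names are made up here) that checks this arithmetic against torch.nn.Conv2d:

import torch
import torch.nn as nn

# Illustrative sizes only; any positive values work as long as the output stays > 0.
n, c_in, c_out, h, w = 2, 8, 16, 33, 47
k, stride, pad, dil = 3, 2, 1, 2
eff_k = dil * (k - 1) + 1                       # dilated ("effective") kernel extent
expected_h = (h + 2 * pad - eff_k) // stride + 1
expected_w = (w + 2 * pad - eff_k) // stride + 1
conv = nn.Conv2d(c_in, c_out, k, stride=stride, padding=pad, dilation=dil)
out = conv(torch.zeros(n, c_in, h, w))
assert out.shape == (n, c_out, expected_h, expected_w)  # same formula as _infer_shape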
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/convert-torchvision-to-d2.py
DELETED
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import pickle as pkl
-import sys
-import torch
-
-"""
-Usage:
-   # download one of the ResNet{18,34,50,101,152} models from torchvision:
-   wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
-   # run the conversion
-   ./convert-torchvision-to-d2.py r50.pth r50.pkl
-
-   # Then, use r50.pkl with the following changes in config:
-
-MODEL:
-   WEIGHTS: "/path/to/r50.pkl"
-   PIXEL_MEAN: [123.675, 116.280, 103.530]
-   PIXEL_STD: [58.395, 57.120, 57.375]
-   RESNETS:
-      DEPTH: 50
-      STRIDE_IN_1X1: False
-INPUT:
-   FORMAT: "RGB"
-
-These models typically produce slightly worse results than the
-pre-trained ResNets we use in official configs, which are the
-original ResNet models released by MSRA.
-"""
-
-if __name__ == "__main__":
-    input = sys.argv[1]
-
-    obj = torch.load(input, map_location="cpu")
-
-    newmodel = {}
-    for k in list(obj.keys()):
-        old_k = k
-        if "layer" not in k:
-            k = "stem." + k
-        for t in [1, 2, 3, 4]:
-            k = k.replace("layer{}".format(t), "res{}".format(t + 1))
-        for t in [1, 2, 3]:
-            k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
-        k = k.replace("downsample.0", "shortcut")
-        k = k.replace("downsample.1", "shortcut.norm")
-        print(old_k, "->", k)
-        newmodel[k] = obj.pop(old_k).detach().numpy()
-
-    res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
-
-    with open(sys.argv[2], "wb") as f:
-        pkl.dump(res, f)
-    if obj:
-        print("Unconverted keys:", obj.keys())
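The renaming loop in the script above rewrites torchvision ResNet parameter names into detectron2's stem/res2..res5 layout. A standalone sketch of that mapping on a few sample keys (the sample strings are illustrative, not taken from a real checkpoint):

# Same string substitutions as the script above, applied to example torchvision keys.
samples = [
    "conv1.weight",                # stem conv
    "bn1.running_mean",            # stem batchnorm
    "layer1.0.conv2.weight",       # residual block conv
    "layer1.0.downsample.1.bias",  # shortcut batchnorm
]
for k in samples:
    old_k = k
    if "layer" not in k:
        k = "stem." + k
    for t in [1, 2, 3, 4]:
        k = k.replace("layer{}".format(t), "res{}".format(t + 1))
    for t in [1, 2, 3]:
        k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
    k = k.replace("downsample.0", "shortcut").replace("downsample.1", "shortcut.norm")
    print(old_k, "->", k)
# conv1.weight -> stem.conv1.weight
# bn1.running_mean -> stem.conv1.norm.running_mean
# layer1.0.conv2.weight -> res2.0.conv2.weight
# layer1.0.downsample.1.bias -> res2.0.shortcut.norm.bias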
spaces/Azurro/APT-1B-Base/app.py
DELETED
@@ -1,39 +0,0 @@
-import gradio as gr
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-
-model_name = "Azurro/APT-1B-Base"
-
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-generator = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-)
-
-def generate_text(prompt, max_length, temperature, top_k, top_p, beams):
-    output = generator(prompt,
-                       max_length=max_length,
-                       temperature=temperature,
-                       top_k=top_k,
-                       do_sample=True,
-                       top_p=top_p,
-                       num_beams=beams)
-    return output[0]['generated_text']
-
-input_text = gr.inputs.Textbox(label="Input Text")
-max_length = gr.inputs.Slider(1, 100, step=1, default=30, label="Max Length")
-temperature = gr.inputs.Slider(0.1, 1.0, step=0.1, default=0.8, label="Temperature")
-top_k = gr.inputs.Slider(1, 200, step=1, default=10, label="Top K")
-top_p = gr.inputs.Slider(0.1, 2.0, step=0.1, default=0.95, label="Top P")
-beams = gr.inputs.Slider(1, 20, step=1, default=1, label="Beams")
-
-outputs = gr.outputs.Textbox(label="Generated Text")
-
-iface = gr.Interface(generate_text, inputs=[input_text, max_length, temperature, top_k, top_p, beams], outputs=outputs)
-iface.queue(concurrency_count=1)
-iface.launch(max_threads=100)
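The deleted app above drives a standard transformers text-generation pipeline through the older gr.inputs/gr.outputs Gradio API. A minimal sketch of the equivalent direct call, assuming the generator pipeline constructed above; the prompt string is a placeholder, not from the original app:

# Bypass the Gradio UI and call the pipeline directly with the same sampling knobs.
result = generator(
    "Example prompt",  # placeholder prompt
    max_length=30,
    temperature=0.8,
    top_k=10,
    top_p=0.95,
    do_sample=True,
    num_beams=1,
)
print(result[0]["generated_text"])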
spaces/Bart92/RVC_HF/easy_infer.py
DELETED
@@ -1,1383 +0,0 @@
|
|
1 |
-
import subprocess
|
2 |
-
import os
|
3 |
-
import sys
|
4 |
-
import errno
|
5 |
-
import shutil
|
6 |
-
import yt_dlp
|
7 |
-
from mega import Mega
|
8 |
-
import datetime
|
9 |
-
import unicodedata
|
10 |
-
import torch
|
11 |
-
import glob
|
12 |
-
import gradio as gr
|
13 |
-
import gdown
|
14 |
-
import zipfile
|
15 |
-
import traceback
|
16 |
-
import json
|
17 |
-
import mdx
|
18 |
-
from mdx_processing_script import get_model_list,id_to_ptm,prepare_mdx,run_mdx
|
19 |
-
import requests
|
20 |
-
import wget
|
21 |
-
import ffmpeg
|
22 |
-
import hashlib
|
23 |
-
now_dir = os.getcwd()
|
24 |
-
sys.path.append(now_dir)
|
25 |
-
from unidecode import unidecode
|
26 |
-
import re
|
27 |
-
import time
|
28 |
-
from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
|
29 |
-
from infer.modules.vc.pipeline import Pipeline
|
30 |
-
VC = Pipeline
|
31 |
-
from lib.infer_pack.models import (
|
32 |
-
SynthesizerTrnMs256NSFsid,
|
33 |
-
SynthesizerTrnMs256NSFsid_nono,
|
34 |
-
SynthesizerTrnMs768NSFsid,
|
35 |
-
SynthesizerTrnMs768NSFsid_nono,
|
36 |
-
)
|
37 |
-
from MDXNet import MDXNetDereverb
|
38 |
-
from configs.config import Config
|
39 |
-
from infer_uvr5 import _audio_pre_, _audio_pre_new
|
40 |
-
from huggingface_hub import HfApi, list_models
|
41 |
-
from huggingface_hub import login
|
42 |
-
from i18n import I18nAuto
|
43 |
-
i18n = I18nAuto()
|
44 |
-
from bs4 import BeautifulSoup
|
45 |
-
from sklearn.cluster import MiniBatchKMeans
|
46 |
-
from dotenv import load_dotenv
|
47 |
-
load_dotenv()
|
48 |
-
config = Config()
|
49 |
-
tmp = os.path.join(now_dir, "TEMP")
|
50 |
-
shutil.rmtree(tmp, ignore_errors=True)
|
51 |
-
os.environ["TEMP"] = tmp
|
52 |
-
weight_root = os.getenv("weight_root")
|
53 |
-
weight_uvr5_root = os.getenv("weight_uvr5_root")
|
54 |
-
index_root = os.getenv("index_root")
|
55 |
-
audio_root = "audios"
|
56 |
-
names = []
|
57 |
-
for name in os.listdir(weight_root):
|
58 |
-
if name.endswith(".pth"):
|
59 |
-
names.append(name)
|
60 |
-
index_paths = []
|
61 |
-
|
62 |
-
global indexes_list
|
63 |
-
indexes_list = []
|
64 |
-
|
65 |
-
audio_paths = []
|
66 |
-
for root, dirs, files in os.walk(index_root, topdown=False):
|
67 |
-
for name in files:
|
68 |
-
if name.endswith(".index") and "trained" not in name:
|
69 |
-
index_paths.append("%s\\%s" % (root, name))
|
70 |
-
|
71 |
-
for root, dirs, files in os.walk(audio_root, topdown=False):
|
72 |
-
for name in files:
|
73 |
-
audio_paths.append("%s/%s" % (root, name))
|
74 |
-
|
75 |
-
uvr5_names = []
|
76 |
-
for name in os.listdir(weight_uvr5_root):
|
77 |
-
if name.endswith(".pth") or "onnx" in name:
|
78 |
-
uvr5_names.append(name.replace(".pth", ""))
|
79 |
-
|
80 |
-
def calculate_md5(file_path):
|
81 |
-
hash_md5 = hashlib.md5()
|
82 |
-
with open(file_path, "rb") as f:
|
83 |
-
for chunk in iter(lambda: f.read(4096), b""):
|
84 |
-
hash_md5.update(chunk)
|
85 |
-
return hash_md5.hexdigest()
|
86 |
-
|
87 |
-
def format_title(title):
|
88 |
-
formatted_title = re.sub(r'[^\w\s-]', '', title)
|
89 |
-
formatted_title = formatted_title.replace(" ", "_")
|
90 |
-
return formatted_title
|
91 |
-
|
92 |
-
def silentremove(filename):
|
93 |
-
try:
|
94 |
-
os.remove(filename)
|
95 |
-
except OSError as e:
|
96 |
-
if e.errno != errno.ENOENT:
|
97 |
-
raise
|
98 |
-
def get_md5(temp_folder):
|
99 |
-
for root, subfolders, files in os.walk(temp_folder):
|
100 |
-
for file in files:
|
101 |
-
if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file:
|
102 |
-
md5_hash = calculate_md5(os.path.join(root, file))
|
103 |
-
return md5_hash
|
104 |
-
|
105 |
-
return None
|
106 |
-
|
107 |
-
def find_parent(search_dir, file_name):
|
108 |
-
for dirpath, dirnames, filenames in os.walk(search_dir):
|
109 |
-
if file_name in filenames:
|
110 |
-
return os.path.abspath(dirpath)
|
111 |
-
return None
|
112 |
-
|
113 |
-
def find_folder_parent(search_dir, folder_name):
|
114 |
-
for dirpath, dirnames, filenames in os.walk(search_dir):
|
115 |
-
if folder_name in dirnames:
|
116 |
-
return os.path.abspath(dirpath)
|
117 |
-
return None
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
def download_from_url(url):
|
122 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
123 |
-
zips_path = os.path.join(parent_path, 'zips')
|
124 |
-
|
125 |
-
if url != '':
|
126 |
-
print(i18n("Downloading the file: ") + f"{url}")
|
127 |
-
if "drive.google.com" in url:
|
128 |
-
if "file/d/" in url:
|
129 |
-
file_id = url.split("file/d/")[1].split("/")[0]
|
130 |
-
elif "id=" in url:
|
131 |
-
file_id = url.split("id=")[1].split("&")[0]
|
132 |
-
else:
|
133 |
-
return None
|
134 |
-
|
135 |
-
if file_id:
|
136 |
-
os.chdir('./zips')
|
137 |
-
result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8')
|
138 |
-
if "Too many users have viewed or downloaded this file recently" in str(result.stderr):
|
139 |
-
return "too much use"
|
140 |
-
if "Cannot retrieve the public link of the file." in str(result.stderr):
|
141 |
-
return "private link"
|
142 |
-
print(result.stderr)
|
143 |
-
|
144 |
-
elif "/blob/" in url:
|
145 |
-
os.chdir('./zips')
|
146 |
-
url = url.replace("blob", "resolve")
|
147 |
-
response = requests.get(url)
|
148 |
-
if response.status_code == 200:
|
149 |
-
file_name = url.split('/')[-1]
|
150 |
-
with open(os.path.join(zips_path, file_name), "wb") as newfile:
|
151 |
-
newfile.write(response.content)
|
152 |
-
else:
|
153 |
-
os.chdir(parent_path)
|
154 |
-
elif "mega.nz" in url:
|
155 |
-
if "#!" in url:
|
156 |
-
file_id = url.split("#!")[1].split("!")[0]
|
157 |
-
elif "file/" in url:
|
158 |
-
file_id = url.split("file/")[1].split("/")[0]
|
159 |
-
else:
|
160 |
-
return None
|
161 |
-
if file_id:
|
162 |
-
m = Mega()
|
163 |
-
m.download_url(url, zips_path)
|
164 |
-
elif "/tree/main" in url:
|
165 |
-
response = requests.get(url)
|
166 |
-
soup = BeautifulSoup(response.content, 'html.parser')
|
167 |
-
temp_url = ''
|
168 |
-
for link in soup.find_all('a', href=True):
|
169 |
-
if link['href'].endswith('.zip'):
|
170 |
-
temp_url = link['href']
|
171 |
-
break
|
172 |
-
if temp_url:
|
173 |
-
url = temp_url
|
174 |
-
url = url.replace("blob", "resolve")
|
175 |
-
if "huggingface.co" not in url:
|
176 |
-
url = "https://huggingface.co" + url
|
177 |
-
|
178 |
-
wget.download(url)
|
179 |
-
else:
|
180 |
-
print("No .zip file found on the page.")
|
181 |
-
elif "cdn.discordapp.com" in url:
|
182 |
-
file = requests.get(url)
|
183 |
-
if file.status_code == 200:
|
184 |
-
name = url.split('/')
|
185 |
-
with open(os.path.join(zips_path, name[len(name)-1]), "wb") as newfile:
|
186 |
-
newfile.write(file.content)
|
187 |
-
else:
|
188 |
-
return None
|
189 |
-
elif "pixeldrain.com" in url:
|
190 |
-
try:
|
191 |
-
file_id = url.split("pixeldrain.com/u/")[1]
|
192 |
-
os.chdir('./zips')
|
193 |
-
print(file_id)
|
194 |
-
response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
|
195 |
-
if response.status_code == 200:
|
196 |
-
file_name = response.headers.get("Content-Disposition").split('filename=')[-1].strip('";')
|
197 |
-
if not os.path.exists(zips_path):
|
198 |
-
os.makedirs(zips_path)
|
199 |
-
with open(os.path.join(zips_path, file_name), "wb") as newfile:
|
200 |
-
newfile.write(response.content)
|
201 |
-
os.chdir(parent_path)
|
202 |
-
return "downloaded"
|
203 |
-
else:
|
204 |
-
os.chdir(parent_path)
|
205 |
-
return None
|
206 |
-
except Exception as e:
|
207 |
-
print(e)
|
208 |
-
os.chdir(parent_path)
|
209 |
-
return None
|
210 |
-
else:
|
211 |
-
os.chdir('./zips')
|
212 |
-
wget.download(url)
|
213 |
-
|
214 |
-
os.chdir(parent_path)
|
215 |
-
print(i18n("Full download"))
|
216 |
-
return "downloaded"
|
217 |
-
else:
|
218 |
-
return None
|
219 |
-
|
220 |
-
class error_message(Exception):
|
221 |
-
def __init__(self, mensaje):
|
222 |
-
self.mensaje = mensaje
|
223 |
-
super().__init__(mensaje)
|
224 |
-
|
225 |
-
def get_vc(sid, to_return_protect0, to_return_protect1):
|
226 |
-
global n_spk, tgt_sr, net_g, vc, cpt, version
|
227 |
-
if sid == "" or sid == []:
|
228 |
-
global hubert_model
|
229 |
-
if hubert_model is not None:
|
230 |
-
print("clean_empty_cache")
|
231 |
-
del net_g, n_spk, vc, hubert_model, tgt_sr
|
232 |
-
hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
|
233 |
-
if torch.cuda.is_available():
|
234 |
-
torch.cuda.empty_cache()
|
235 |
-
if_f0 = cpt.get("f0", 1)
|
236 |
-
version = cpt.get("version", "v1")
|
237 |
-
if version == "v1":
|
238 |
-
if if_f0 == 1:
|
239 |
-
net_g = SynthesizerTrnMs256NSFsid(
|
240 |
-
*cpt["config"], is_half=config.is_half
|
241 |
-
)
|
242 |
-
else:
|
243 |
-
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
|
244 |
-
elif version == "v2":
|
245 |
-
if if_f0 == 1:
|
246 |
-
net_g = SynthesizerTrnMs768NSFsid(
|
247 |
-
*cpt["config"], is_half=config.is_half
|
248 |
-
)
|
249 |
-
else:
|
250 |
-
net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
|
251 |
-
del net_g, cpt
|
252 |
-
if torch.cuda.is_available():
|
253 |
-
torch.cuda.empty_cache()
|
254 |
-
cpt = None
|
255 |
-
return (
|
256 |
-
{"visible": False, "__type__": "update"},
|
257 |
-
{"visible": False, "__type__": "update"},
|
258 |
-
{"visible": False, "__type__": "update"},
|
259 |
-
)
|
260 |
-
person = "%s/%s" % (weight_root, sid)
|
261 |
-
print("loading %s" % person)
|
262 |
-
cpt = torch.load(person, map_location="cpu")
|
263 |
-
tgt_sr = cpt["config"][-1]
|
264 |
-
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
|
265 |
-
if_f0 = cpt.get("f0", 1)
|
266 |
-
if if_f0 == 0:
|
267 |
-
to_return_protect0 = to_return_protect1 = {
|
268 |
-
"visible": False,
|
269 |
-
"value": 0.5,
|
270 |
-
"__type__": "update",
|
271 |
-
}
|
272 |
-
else:
|
273 |
-
to_return_protect0 = {
|
274 |
-
"visible": True,
|
275 |
-
"value": to_return_protect0,
|
276 |
-
"__type__": "update",
|
277 |
-
}
|
278 |
-
to_return_protect1 = {
|
279 |
-
"visible": True,
|
280 |
-
"value": to_return_protect1,
|
281 |
-
"__type__": "update",
|
282 |
-
}
|
283 |
-
version = cpt.get("version", "v1")
|
284 |
-
if version == "v1":
|
285 |
-
if if_f0 == 1:
|
286 |
-
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
|
287 |
-
else:
|
288 |
-
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
|
289 |
-
elif version == "v2":
|
290 |
-
if if_f0 == 1:
|
291 |
-
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
|
292 |
-
else:
|
293 |
-
net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
|
294 |
-
del net_g.enc_q
|
295 |
-
print(net_g.load_state_dict(cpt["weight"], strict=False))
|
296 |
-
net_g.eval().to(config.device)
|
297 |
-
if config.is_half:
|
298 |
-
net_g = net_g.half()
|
299 |
-
else:
|
300 |
-
net_g = net_g.float()
|
301 |
-
vc = VC(tgt_sr, config)
|
302 |
-
n_spk = cpt["config"][-3]
|
303 |
-
return (
|
304 |
-
{"visible": True, "maximum": n_spk, "__type__": "update"},
|
305 |
-
to_return_protect0,
|
306 |
-
to_return_protect1,
|
307 |
-
)
|
308 |
-
|
309 |
-
def load_downloaded_model(url):
|
310 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
311 |
-
try:
|
312 |
-
infos = []
|
313 |
-
logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
|
314 |
-
zips_path = os.path.join(parent_path, 'zips')
|
315 |
-
unzips_path = os.path.join(parent_path, 'unzips')
|
316 |
-
weights_path = os.path.join(parent_path, 'weights')
|
317 |
-
logs_dir = ""
|
318 |
-
|
319 |
-
if os.path.exists(zips_path):
|
320 |
-
shutil.rmtree(zips_path)
|
321 |
-
if os.path.exists(unzips_path):
|
322 |
-
shutil.rmtree(unzips_path)
|
323 |
-
|
324 |
-
os.mkdir(zips_path)
|
325 |
-
os.mkdir(unzips_path)
|
326 |
-
|
327 |
-
download_file = download_from_url(url)
|
328 |
-
if not download_file:
|
329 |
-
print(i18n("The file could not be downloaded."))
|
330 |
-
infos.append(i18n("The file could not be downloaded."))
|
331 |
-
yield "\n".join(infos)
|
332 |
-
elif download_file == "downloaded":
|
333 |
-
print(i18n("It has been downloaded successfully."))
|
334 |
-
infos.append(i18n("It has been downloaded successfully."))
|
335 |
-
yield "\n".join(infos)
|
336 |
-
elif download_file == "too much use":
|
337 |
-
raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
|
338 |
-
elif download_file == "private link":
|
339 |
-
raise Exception(i18n("Cannot get file from this private link"))
|
340 |
-
|
341 |
-
for filename in os.listdir(zips_path):
|
342 |
-
if filename.endswith(".zip"):
|
343 |
-
zipfile_path = os.path.join(zips_path,filename)
|
344 |
-
print(i18n("Proceeding with the extraction..."))
|
345 |
-
infos.append(i18n("Proceeding with the extraction..."))
|
346 |
-
shutil.unpack_archive(zipfile_path, unzips_path, 'zip')
|
347 |
-
model_name = os.path.basename(zipfile_path)
|
348 |
-
logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip","")))
|
349 |
-
yield "\n".join(infos)
|
350 |
-
else:
|
351 |
-
print(i18n("Unzip error."))
|
352 |
-
infos.append(i18n("Unzip error."))
|
353 |
-
yield "\n".join(infos)
|
354 |
-
|
355 |
-
index_file = False
|
356 |
-
model_file = False
|
357 |
-
D_file = False
|
358 |
-
G_file = False
|
359 |
-
|
360 |
-
for path, subdirs, files in os.walk(unzips_path):
|
361 |
-
for item in files:
|
362 |
-
item_path = os.path.join(path, item)
|
363 |
-
if not 'G_' in item and not 'D_' in item and item.endswith('.pth'):
|
364 |
-
model_file = True
|
365 |
-
model_name = item.replace(".pth","")
|
366 |
-
logs_dir = os.path.join(parent_path,'logs', model_name)
|
367 |
-
if os.path.exists(logs_dir):
|
368 |
-
shutil.rmtree(logs_dir)
|
369 |
-
os.mkdir(logs_dir)
|
370 |
-
if not os.path.exists(weights_path):
|
371 |
-
os.mkdir(weights_path)
|
372 |
-
if os.path.exists(os.path.join(weights_path, item)):
|
373 |
-
os.remove(os.path.join(weights_path, item))
|
374 |
-
if os.path.exists(item_path):
|
375 |
-
shutil.move(item_path, weights_path)
|
376 |
-
|
377 |
-
if not model_file and not os.path.exists(logs_dir):
|
378 |
-
os.mkdir(logs_dir)
|
379 |
-
for path, subdirs, files in os.walk(unzips_path):
|
380 |
-
for item in files:
|
381 |
-
item_path = os.path.join(path, item)
|
382 |
-
if item.startswith('added_') and item.endswith('.index'):
|
383 |
-
index_file = True
|
384 |
-
if os.path.exists(item_path):
|
385 |
-
if os.path.exists(os.path.join(logs_dir, item)):
|
386 |
-
os.remove(os.path.join(logs_dir, item))
|
387 |
-
shutil.move(item_path, logs_dir)
|
388 |
-
if item.startswith('total_fea.npy') or item.startswith('events.'):
|
389 |
-
if os.path.exists(item_path):
|
390 |
-
if os.path.exists(os.path.join(logs_dir, item)):
|
391 |
-
os.remove(os.path.join(logs_dir, item))
|
392 |
-
shutil.move(item_path, logs_dir)
|
393 |
-
|
394 |
-
|
395 |
-
result = ""
|
396 |
-
if model_file:
|
397 |
-
if index_file:
|
398 |
-
print(i18n("The model works for inference, and has the .index file."))
|
399 |
-
infos.append("\n" + i18n("The model works for inference, and has the .index file."))
|
400 |
-
yield "\n".join(infos)
|
401 |
-
else:
|
402 |
-
print(i18n("The model works for inference, but it doesn't have the .index file."))
|
403 |
-
infos.append("\n" + i18n("The model works for inference, but it doesn't have the .index file."))
|
404 |
-
yield "\n".join(infos)
|
405 |
-
|
406 |
-
if not index_file and not model_file:
|
407 |
-
print(i18n("No relevant file was found to upload."))
|
408 |
-
infos.append(i18n("No relevant file was found to upload."))
|
409 |
-
yield "\n".join(infos)
|
410 |
-
|
411 |
-
if os.path.exists(zips_path):
|
412 |
-
shutil.rmtree(zips_path)
|
413 |
-
if os.path.exists(unzips_path):
|
414 |
-
shutil.rmtree(unzips_path)
|
415 |
-
os.chdir(parent_path)
|
416 |
-
return result
|
417 |
-
except Exception as e:
|
418 |
-
os.chdir(parent_path)
|
419 |
-
if "too much use" in str(e):
|
420 |
-
print(i18n("Too many users have recently viewed or downloaded this file"))
|
421 |
-
yield i18n("Too many users have recently viewed or downloaded this file")
|
422 |
-
elif "private link" in str(e):
|
423 |
-
print(i18n("Cannot get file from this private link"))
|
424 |
-
yield i18n("Cannot get file from this private link")
|
425 |
-
else:
|
426 |
-
print(e)
|
427 |
-
yield i18n("An error occurred downloading")
|
428 |
-
finally:
|
429 |
-
os.chdir(parent_path)
|
430 |
-
|
431 |
-
def load_dowloaded_dataset(url):
|
432 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
433 |
-
infos = []
|
434 |
-
try:
|
435 |
-
zips_path = os.path.join(parent_path, 'zips')
|
436 |
-
unzips_path = os.path.join(parent_path, 'unzips')
|
437 |
-
datasets_path = os.path.join(parent_path, 'datasets')
|
438 |
-
audio_extenions =['wav', 'mp3', 'flac', 'ogg', 'opus',
|
439 |
-
'm4a', 'mp4', 'aac', 'alac', 'wma',
|
440 |
-
'aiff', 'webm', 'ac3']
|
441 |
-
|
442 |
-
if os.path.exists(zips_path):
|
443 |
-
shutil.rmtree(zips_path)
|
444 |
-
if os.path.exists(unzips_path):
|
445 |
-
shutil.rmtree(unzips_path)
|
446 |
-
|
447 |
-
if not os.path.exists(datasets_path):
|
448 |
-
os.mkdir(datasets_path)
|
449 |
-
|
450 |
-
os.mkdir(zips_path)
|
451 |
-
os.mkdir(unzips_path)
|
452 |
-
|
453 |
-
download_file = download_from_url(url)
|
454 |
-
|
455 |
-
if not download_file:
|
456 |
-
print(i18n("An error occurred downloading"))
|
457 |
-
infos.append(i18n("An error occurred downloading"))
|
458 |
-
yield "\n".join(infos)
|
459 |
-
raise Exception(i18n("An error occurred downloading"))
|
460 |
-
elif download_file == "downloaded":
|
461 |
-
print(i18n("It has been downloaded successfully."))
|
462 |
-
infos.append(i18n("It has been downloaded successfully."))
|
463 |
-
yield "\n".join(infos)
|
464 |
-
elif download_file == "too much use":
|
465 |
-
raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
|
466 |
-
elif download_file == "private link":
|
467 |
-
raise Exception(i18n("Cannot get file from this private link"))
|
468 |
-
|
469 |
-
zip_path = os.listdir(zips_path)
|
470 |
-
foldername = ""
|
471 |
-
for file in zip_path:
|
472 |
-
if file.endswith('.zip'):
|
473 |
-
file_path = os.path.join(zips_path, file)
|
474 |
-
print("....")
|
475 |
-
foldername = file.replace(".zip","").replace(" ","").replace("-","_")
|
476 |
-
dataset_path = os.path.join(datasets_path, foldername)
|
477 |
-
print(i18n("Proceeding with the extraction..."))
|
478 |
-
infos.append(i18n("Proceeding with the extraction..."))
|
479 |
-
yield "\n".join(infos)
|
480 |
-
shutil.unpack_archive(file_path, unzips_path, 'zip')
|
481 |
-
if os.path.exists(dataset_path):
|
482 |
-
shutil.rmtree(dataset_path)
|
483 |
-
|
484 |
-
os.mkdir(dataset_path)
|
485 |
-
|
486 |
-
for root, subfolders, songs in os.walk(unzips_path):
|
487 |
-
for song in songs:
|
488 |
-
song_path = os.path.join(root, song)
|
489 |
-
if song.endswith(tuple(audio_extenions)):
|
490 |
-
formatted_song_name = format_title(os.path.splitext(song)[0])
|
491 |
-
extension = os.path.splitext(song)[1]
|
492 |
-
new_song_path = os.path.join(dataset_path, f"{formatted_song_name}{extension}")
|
493 |
-
shutil.move(song_path, new_song_path)
|
494 |
-
else:
|
495 |
-
print(i18n("Unzip error."))
|
496 |
-
infos.append(i18n("Unzip error."))
|
497 |
-
yield "\n".join(infos)
|
498 |
-
|
499 |
-
|
500 |
-
|
501 |
-
if os.path.exists(zips_path):
|
502 |
-
shutil.rmtree(zips_path)
|
503 |
-
if os.path.exists(unzips_path):
|
504 |
-
shutil.rmtree(unzips_path)
|
505 |
-
|
506 |
-
print(i18n("The Dataset has been loaded successfully."))
|
507 |
-
infos.append(i18n("The Dataset has been loaded successfully."))
|
508 |
-
yield "\n".join(infos)
|
509 |
-
except Exception as e:
|
510 |
-
os.chdir(parent_path)
|
511 |
-
if "too much use" in str(e):
|
512 |
-
print(i18n("Too many users have recently viewed or downloaded this file"))
|
513 |
-
yield i18n("Too many users have recently viewed or downloaded this file")
|
514 |
-
elif "private link" in str(e):
|
515 |
-
print(i18n("Cannot get file from this private link"))
|
516 |
-
yield i18n("Cannot get file from this private link")
|
517 |
-
else:
|
518 |
-
print(e)
|
519 |
-
yield i18n("An error occurred downloading")
|
520 |
-
finally:
|
521 |
-
os.chdir(parent_path)
|
522 |
-
|
523 |
-
def save_model(modelname, save_action):
|
524 |
-
|
525 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
526 |
-
zips_path = os.path.join(parent_path, 'zips')
|
527 |
-
dst = os.path.join(zips_path,modelname)
|
528 |
-
logs_path = os.path.join(parent_path, 'logs', modelname)
|
529 |
-
weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth")
|
530 |
-
save_folder = parent_path
|
531 |
-
infos = []
|
532 |
-
|
533 |
-
try:
|
534 |
-
if not os.path.exists(logs_path):
|
535 |
-
raise Exception("No model found.")
|
536 |
-
|
537 |
-
if not 'content' in parent_path:
|
538 |
-
save_folder = os.path.join(parent_path, 'RVC_Backup')
|
539 |
-
else:
|
540 |
-
save_folder = '/content/drive/MyDrive/RVC_Backup'
|
541 |
-
|
542 |
-
infos.append(i18n("Save model"))
|
543 |
-
yield "\n".join(infos)
|
544 |
-
|
545 |
-
if not os.path.exists(save_folder):
|
546 |
-
os.mkdir(save_folder)
|
547 |
-
if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')):
|
548 |
-
os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup'))
|
549 |
-
if not os.path.exists(os.path.join(save_folder, 'Finished')):
|
550 |
-
os.mkdir(os.path.join(save_folder, 'Finished'))
|
551 |
-
|
552 |
-
if os.path.exists(zips_path):
|
553 |
-
shutil.rmtree(zips_path)
|
554 |
-
|
555 |
-
os.mkdir(zips_path)
|
556 |
-
added_file = glob.glob(os.path.join(logs_path, "added_*.index"))
|
557 |
-
d_file = glob.glob(os.path.join(logs_path, "D_*.pth"))
|
558 |
-
g_file = glob.glob(os.path.join(logs_path, "G_*.pth"))
|
559 |
-
|
560 |
-
if save_action == i18n("Choose the method"):
|
561 |
-
raise Exception("No method choosen.")
|
562 |
-
|
563 |
-
if save_action == i18n("Save all"):
|
564 |
-
print(i18n("Save all"))
|
565 |
-
save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
|
566 |
-
shutil.copytree(logs_path, dst)
|
567 |
-
else:
|
568 |
-
if not os.path.exists(dst):
|
569 |
-
os.mkdir(dst)
|
570 |
-
|
571 |
-
if save_action == i18n("Save D and G"):
|
572 |
-
print(i18n("Save D and G"))
|
573 |
-
save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
|
574 |
-
if len(d_file) > 0:
|
575 |
-
shutil.copy(d_file[0], dst)
|
576 |
-
if len(g_file) > 0:
|
577 |
-
shutil.copy(g_file[0], dst)
|
578 |
-
|
579 |
-
if len(added_file) > 0:
|
580 |
-
shutil.copy(added_file[0], dst)
|
581 |
-
else:
|
582 |
-
infos.append(i18n("Saved without index..."))
|
583 |
-
|
584 |
-
if save_action == i18n("Save voice"):
|
585 |
-
print(i18n("Save voice"))
|
586 |
-
save_folder = os.path.join(save_folder, 'Finished')
|
587 |
-
if len(added_file) > 0:
|
588 |
-
shutil.copy(added_file[0], dst)
|
589 |
-
else:
|
590 |
-
infos.append(i18n("Saved without index..."))
|
591 |
-
|
592 |
-
yield "\n".join(infos)
|
593 |
-
if not os.path.exists(weights_path):
|
594 |
-
infos.append(i18n("Saved without inference model..."))
|
595 |
-
else:
|
596 |
-
shutil.copy(weights_path, dst)
|
597 |
-
|
598 |
-
yield "\n".join(infos)
|
599 |
-
infos.append("\n" + i18n("This may take a few minutes, please wait..."))
|
600 |
-
yield "\n".join(infos)
|
601 |
-
|
602 |
-
shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path)
|
603 |
-
shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip'))
|
604 |
-
|
605 |
-
shutil.rmtree(zips_path)
|
606 |
-
infos.append("\n" + i18n("Model saved successfully"))
|
607 |
-
yield "\n".join(infos)
|
608 |
-
|
609 |
-
except Exception as e:
|
610 |
-
print(e)
|
611 |
-
if "No model found." in str(e):
|
612 |
-
infos.append(i18n("The model you want to save does not exist, be sure to enter the correct name."))
|
613 |
-
else:
|
614 |
-
infos.append(i18n("An error occurred saving the model"))
|
615 |
-
|
616 |
-
yield "\n".join(infos)
|
617 |
-
|
618 |
-
def load_downloaded_backup(url):
|
619 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
620 |
-
try:
|
621 |
-
infos = []
|
622 |
-
logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
|
623 |
-
zips_path = os.path.join(parent_path, 'zips')
|
624 |
-
unzips_path = os.path.join(parent_path, 'unzips')
|
625 |
-
weights_path = os.path.join(parent_path, 'weights')
|
626 |
-
logs_dir = os.path.join(parent_path, 'logs')
|
627 |
-
|
628 |
-
if os.path.exists(zips_path):
|
629 |
-
shutil.rmtree(zips_path)
|
630 |
-
if os.path.exists(unzips_path):
|
631 |
-
shutil.rmtree(unzips_path)
|
632 |
-
|
633 |
-
os.mkdir(zips_path)
|
634 |
-
os.mkdir(unzips_path)
|
635 |
-
|
636 |
-
download_file = download_from_url(url)
|
637 |
-
if not download_file:
|
638 |
-
print(i18n("The file could not be downloaded."))
|
639 |
-
infos.append(i18n("The file could not be downloaded."))
|
640 |
-
yield "\n".join(infos)
|
641 |
-
elif download_file == "downloaded":
|
642 |
-
print(i18n("It has been downloaded successfully."))
|
643 |
-
infos.append(i18n("It has been downloaded successfully."))
|
644 |
-
yield "\n".join(infos)
|
645 |
-
elif download_file == "too much use":
|
646 |
-
raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
|
647 |
-
elif download_file == "private link":
|
648 |
-
raise Exception(i18n("Cannot get file from this private link"))
|
649 |
-
|
650 |
-
for filename in os.listdir(zips_path):
|
651 |
-
if filename.endswith(".zip"):
|
652 |
-
zipfile_path = os.path.join(zips_path,filename)
|
653 |
-
zip_dir_name = os.path.splitext(filename)[0]
|
654 |
-
unzip_dir = unzips_path
|
655 |
-
print(i18n("Proceeding with the extraction..."))
|
656 |
-
infos.append(i18n("Proceeding with the extraction..."))
|
657 |
-
shutil.unpack_archive(zipfile_path, unzip_dir, 'zip')
|
658 |
-
|
659 |
-
if os.path.exists(os.path.join(unzip_dir, zip_dir_name)):
|
660 |
-
shutil.move(os.path.join(unzip_dir, zip_dir_name), logs_dir)
|
661 |
-
else:
|
662 |
-
new_folder_path = os.path.join(logs_dir, zip_dir_name)
|
663 |
-
os.mkdir(new_folder_path)
|
664 |
-
for item_name in os.listdir(unzip_dir):
|
665 |
-
item_path = os.path.join(unzip_dir, item_name)
|
666 |
-
if os.path.isfile(item_path):
|
667 |
-
shutil.move(item_path, new_folder_path)
|
668 |
-
elif os.path.isdir(item_path):
|
669 |
-
shutil.move(item_path, new_folder_path)
|
670 |
-
|
671 |
-
yield "\n".join(infos)
|
672 |
-
else:
|
673 |
-
print(i18n("Unzip error."))
|
674 |
-
infos.append(i18n("Unzip error."))
|
675 |
-
yield "\n".join(infos)
|
676 |
-
|
677 |
-
result = ""
|
678 |
-
|
679 |
-
for filename in os.listdir(unzips_path):
|
680 |
-
if filename.endswith(".zip"):
|
681 |
-
silentremove(filename)
|
682 |
-
|
683 |
-
if os.path.exists(zips_path):
|
684 |
-
shutil.rmtree(zips_path)
|
685 |
-
if os.path.exists(os.path.join(parent_path, 'unzips')):
|
686 |
-
shutil.rmtree(os.path.join(parent_path, 'unzips'))
|
687 |
-
print(i18n("The Backup has been uploaded successfully."))
|
688 |
-
infos.append("\n" + i18n("The Backup has been uploaded successfully."))
|
689 |
-
yield "\n".join(infos)
|
690 |
-
os.chdir(parent_path)
|
691 |
-
return result
|
692 |
-
except Exception as e:
|
693 |
-
os.chdir(parent_path)
|
694 |
-
if "too much use" in str(e):
|
695 |
-
print(i18n("Too many users have recently viewed or downloaded this file"))
|
696 |
-
yield i18n("Too many users have recently viewed or downloaded this file")
|
697 |
-
elif "private link" in str(e):
|
698 |
-
print(i18n("Cannot get file from this private link"))
|
699 |
-
yield i18n("Cannot get file from this private link")
|
700 |
-
else:
|
701 |
-
print(e)
|
702 |
-
yield i18n("An error occurred downloading")
|
703 |
-
finally:
|
704 |
-
os.chdir(parent_path)
|
705 |
-
|
706 |
-
def save_to_wav(record_button):
|
707 |
-
if record_button is None:
|
708 |
-
pass
|
709 |
-
else:
|
710 |
-
path_to_file=record_button
|
711 |
-
new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav'
|
712 |
-
new_path='./audios/'+new_name
|
713 |
-
shutil.move(path_to_file,new_path)
|
714 |
-
return new_name
|
715 |
-
|
716 |
-
|
717 |
-
def change_choices2():
|
718 |
-
audio_paths=[]
|
719 |
-
for filename in os.listdir("./audios"):
|
720 |
-
if filename.endswith(('wav', 'mp3', 'flac', 'ogg', 'opus',
|
721 |
-
'm4a', 'mp4', 'aac', 'alac', 'wma',
|
722 |
-
'aiff', 'webm', 'ac3')):
|
723 |
-
audio_paths.append(os.path.join('./audios',filename).replace('\\', '/'))
|
724 |
-
return {"choices": sorted(audio_paths), "__type__": "update"}, {"__type__": "update"}
|
725 |
-
|
726 |
-
|
727 |
-
|
728 |
-
|
729 |
-
|
730 |
-
def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0, architecture):
|
731 |
-
carpeta_a_eliminar = "yt_downloads"
|
732 |
-
if os.path.exists(carpeta_a_eliminar) and os.path.isdir(carpeta_a_eliminar):
|
733 |
-
for archivo in os.listdir(carpeta_a_eliminar):
|
734 |
-
ruta_archivo = os.path.join(carpeta_a_eliminar, archivo)
|
735 |
-
if os.path.isfile(ruta_archivo):
|
736 |
-
os.remove(ruta_archivo)
|
737 |
-
elif os.path.isdir(ruta_archivo):
|
738 |
-
shutil.rmtree(ruta_archivo)
|
739 |
-
|
740 |
-
|
741 |
-
|
742 |
-
ydl_opts = {
|
743 |
-
'no-windows-filenames': True,
|
744 |
-
'restrict-filenames': True,
|
745 |
-
'extract_audio': True,
|
746 |
-
'format': 'bestaudio',
|
747 |
-
'quiet': True,
|
748 |
-
'no-warnings': True,
|
749 |
-
}
|
750 |
-
|
751 |
-
try:
|
752 |
-
print(i18n("Downloading audio from the video..."))
|
753 |
-
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
754 |
-
info_dict = ydl.extract_info(input_url, download=False)
|
755 |
-
formatted_title = format_title(info_dict.get('title', 'default_title'))
|
756 |
-
formatted_outtmpl = output_path + '/' + formatted_title + '.wav'
|
757 |
-
ydl_opts['outtmpl'] = formatted_outtmpl
|
758 |
-
ydl = yt_dlp.YoutubeDL(ydl_opts)
|
759 |
-
ydl.download([input_url])
|
760 |
-
print(i18n("Audio downloaded!"))
|
761 |
-
except Exception as error:
|
762 |
-
print(i18n("An error occurred:"), error)
|
763 |
-
|
764 |
-
actual_directory = os.path.dirname(__file__)
|
765 |
-
|
766 |
-
vocal_directory = os.path.join(actual_directory, save_root_vocal)
|
767 |
-
instrumental_directory = os.path.join(actual_directory, save_root_ins)
|
768 |
-
|
769 |
-
vocal_formatted = f"vocal_{formatted_title}.wav.reformatted.wav_10.wav"
|
770 |
-
instrumental_formatted = f"instrument_{formatted_title}.wav.reformatted.wav_10.wav"
|
771 |
-
|
772 |
-
vocal_audio_path = os.path.join(vocal_directory, vocal_formatted)
|
773 |
-
instrumental_audio_path = os.path.join(instrumental_directory, instrumental_formatted)
|
774 |
-
|
775 |
-
vocal_formatted_mdx = f"{formatted_title}_vocal_.wav"
|
776 |
-
instrumental_formatted_mdx = f"{formatted_title}_instrument_.wav"
|
777 |
-
|
778 |
-
vocal_audio_path_mdx = os.path.join(vocal_directory, vocal_formatted_mdx)
|
779 |
-
instrumental_audio_path_mdx = os.path.join(instrumental_directory, instrumental_formatted_mdx)
|
780 |
-
|
781 |
-
if architecture == "VR":
|
782 |
-
try:
|
783 |
-
print(i18n("Starting audio conversion... (This might take a moment)"))
|
784 |
-
inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
|
785 |
-
usable_files = [os.path.join(inp_root, file)
|
786 |
-
for file in os.listdir(inp_root)
|
787 |
-
if file.endswith(tuple(sup_audioext))]
|
788 |
-
|
789 |
-
|
790 |
-
pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)(
|
791 |
-
agg=int(agg),
|
792 |
-
model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
|
793 |
-
device=config.device,
|
794 |
-
is_half=config.is_half,
|
795 |
-
)
|
796 |
-
|
797 |
-
try:
|
798 |
-
if paths != None:
|
799 |
-
paths = [path.name for path in paths]
|
800 |
-
else:
|
801 |
-
paths = usable_files
|
802 |
-
|
803 |
-
except:
|
804 |
-
traceback.print_exc()
|
805 |
-
paths = usable_files
|
806 |
-
print(paths)
|
807 |
-
for path in paths:
|
808 |
-
inp_path = os.path.join(inp_root, path)
|
809 |
-
need_reformat, done = 1, 0
|
810 |
-
|
811 |
-
try:
|
812 |
-
info = ffmpeg.probe(inp_path, cmd="ffprobe")
|
813 |
-
if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100":
|
814 |
-
need_reformat = 0
|
815 |
-
pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
|
816 |
-
done = 1
|
817 |
-
except:
|
818 |
-
traceback.print_exc()
|
819 |
-
|
820 |
-
if need_reformat:
|
821 |
-
tmp_path = f"{tmp}/{os.path.basename(inp_path)}.reformatted.wav"
|
822 |
-
os.system(f"ffmpeg -i {inp_path} -vn -acodec pcm_s16le -ac 2 -ar 44100 {tmp_path} -y")
|
823 |
-
inp_path = tmp_path
|
824 |
-
|
825 |
-
try:
|
826 |
-
if not done:
|
827 |
-
pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
|
828 |
-
print(f"{os.path.basename(inp_path)}->Success")
|
829 |
-
except:
|
830 |
-
print(f"{os.path.basename(inp_path)}->{traceback.format_exc()}")
|
831 |
-
except:
|
832 |
-
traceback.print_exc()
|
833 |
-
finally:
|
834 |
-
try:
|
835 |
-
if model_name == "onnx_dereverb_By_FoxJoy":
|
836 |
-
del pre_fun.pred.model
|
837 |
-
del pre_fun.pred.model_
|
838 |
-
else:
|
839 |
-
del pre_fun.model
|
840 |
-
|
841 |
-
del pre_fun
|
842 |
-
return i18n("Finished"), vocal_audio_path, instrumental_audio_path
|
843 |
-
except: traceback.print_exc()
|
844 |
-
|
845 |
-
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
846 |
-
|
847 |
-
elif architecture == "MDX":
|
848 |
-
try:
|
849 |
-
print(i18n("Starting audio conversion... (This might take a moment)"))
|
850 |
-
inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
|
851 |
-
|
852 |
-
usable_files = [os.path.join(inp_root, file)
|
853 |
-
for file in os.listdir(inp_root)
|
854 |
-
if file.endswith(tuple(sup_audioext))]
|
855 |
-
try:
|
856 |
-
if paths != None:
|
857 |
-
paths = [path.name for path in paths]
|
858 |
-
else:
|
859 |
-
paths = usable_files
|
860 |
-
|
861 |
-
except:
|
862 |
-
traceback.print_exc()
|
863 |
-
paths = usable_files
|
864 |
-
print(paths)
|
865 |
-
invert=True
|
866 |
-
denoise=True
|
867 |
-
use_custom_parameter=True
|
868 |
-
dim_f=2048
|
869 |
-
dim_t=256
|
870 |
-
n_fft=7680
|
871 |
-
use_custom_compensation=True
|
872 |
-
compensation=1.025
|
873 |
-
suffix = "vocal_" #@param ["Vocals", "Drums", "Bass", "Other"]{allow-input: true}
|
874 |
-
suffix_invert = "instrument_" #@param ["Instrumental", "Drumless", "Bassless", "Instruments"]{allow-input: true}
|
875 |
-
print_settings = True # @param{type:"boolean"}
|
876 |
-
onnx = id_to_ptm(model_name)
|
877 |
-
compensation = compensation if use_custom_compensation or use_custom_parameter else None
|
878 |
-
mdx_model = prepare_mdx(onnx,use_custom_parameter, dim_f, dim_t, n_fft, compensation=compensation)
|
879 |
-
|
880 |
-
|
881 |
-
for path in paths:
|
882 |
-
#inp_path = os.path.join(inp_root, path)
|
883 |
-
suffix_naming = suffix if use_custom_parameter else None
|
884 |
-
diff_suffix_naming = suffix_invert if use_custom_parameter else None
|
885 |
-
run_mdx(onnx, mdx_model, path, format0, diff=invert,suffix=suffix_naming,diff_suffix=diff_suffix_naming,denoise=denoise)
|
886 |
-
|
887 |
-
if print_settings:
|
888 |
-
print()
|
889 |
-
print('[MDX-Net_Colab settings used]')
|
890 |
-
print(f'Model used: {onnx}')
|
891 |
-
print(f'Model MD5: {mdx.MDX.get_hash(onnx)}')
|
892 |
-
print(f'Model parameters:')
|
893 |
-
print(f' -dim_f: {mdx_model.dim_f}')
|
894 |
-
print(f' -dim_t: {mdx_model.dim_t}')
|
895 |
-
print(f' -n_fft: {mdx_model.n_fft}')
|
896 |
-
print(f' -compensation: {mdx_model.compensation}')
|
897 |
-
print()
|
898 |
-
print('[Input file]')
|
899 |
-
print('filename(s): ')
|
900 |
-
for filename in paths:
|
901 |
-
print(f' -{filename}')
|
902 |
-
print(f"{os.path.basename(filename)}->Success")
|
903 |
-
except:
|
904 |
-
traceback.print_exc()
|
905 |
-
finally:
|
906 |
-
try:
|
907 |
-
del mdx_model
|
908 |
-
return i18n("Finished"), vocal_audio_path_mdx, instrumental_audio_path_mdx
|
909 |
-
except: traceback.print_exc()
|
910 |
-
|
911 |
-
print("clean_empty_cache")
|
912 |
-
|
913 |
-
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
914 |
-
sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus',
|
915 |
-
'm4a', 'mp4', 'aac', 'alac', 'wma',
|
916 |
-
'aiff', 'webm', 'ac3'}
|
917 |
-
|
918 |
-
def load_downloaded_audio(url):
|
919 |
-
parent_path = find_folder_parent(".", "pretrained_v2")
|
920 |
-
try:
|
921 |
-
infos = []
|
922 |
-
audios_path = os.path.join(parent_path, 'audios')
|
923 |
-
zips_path = os.path.join(parent_path, 'zips')
|
924 |
-
|
925 |
-
if not os.path.exists(audios_path):
|
926 |
-
os.mkdir(audios_path)
|
927 |
-
|
928 |
-
download_file = download_from_url(url)
|
929 |
-
if not download_file:
|
930 |
-
print(i18n("The file could not be downloaded."))
|
931 |
-
infos.append(i18n("The file could not be downloaded."))
|
932 |
-
yield "\n".join(infos)
|
933 |
-
elif download_file == "downloaded":
|
934 |
-
print(i18n("It has been downloaded successfully."))
|
935 |
-
infos.append(i18n("It has been downloaded successfully."))
|
936 |
-
yield "\n".join(infos)
|
937 |
-
elif download_file == "too much use":
|
938 |
-
raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
|
939 |
-
elif download_file == "private link":
|
940 |
-
raise Exception(i18n("Cannot get file from this private link"))
|
941 |
-
|
942 |
-
for filename in os.listdir(zips_path):
|
943 |
-
item_path = os.path.join(zips_path, filename)
|
944 |
-
if item_path.split('.')[-1] in sup_audioext:
|
945 |
-
if os.path.exists(item_path):
|
946 |
-
shutil.move(item_path, audios_path)
|
947 |
-
|
948 |
-
result = ""
|
949 |
-
print(i18n("Audio files have been moved to the 'audios' folder."))
|
950 |
-
infos.append(i18n("Audio files have been moved to the 'audios' folder."))
|
951 |
-
yield "\n".join(infos)
|
952 |
-
|
953 |
-
os.chdir(parent_path)
|
954 |
-
return result
|
955 |
-
    except Exception as e:
        os.chdir(parent_path)
        if "too much use" in str(e):
            print(i18n("Too many users have recently viewed or downloaded this file"))
            yield i18n("Too many users have recently viewed or downloaded this file")
        elif "private link" in str(e):
            print(i18n("Cannot get file from this private link"))
            yield i18n("Cannot get file from this private link")
        else:
            print(e)
            yield i18n("An error occurred downloading")
    finally:
        os.chdir(parent_path)


class error_message(Exception):
    def __init__(self, mensaje):
        self.mensaje = mensaje
        super().__init__(mensaje)


def get_vc(sid, to_return_protect0, to_return_protect1):
    global n_spk, tgt_sr, net_g, vc, cpt, version
    if sid == "" or sid == []:
        # No speaker selected: release the loaded models and free GPU memory.
        global hubert_model
        if hubert_model is not None:
            print("clean_empty_cache")
            del net_g, n_spk, vc, hubert_model, tgt_sr
            hubert_model = net_g = n_spk = vc = tgt_sr = None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Re-instantiate and delete the synthesizer once more so that
            # cached CUDA allocations are actually released.
            if_f0 = cpt.get("f0", 1)
            version = cpt.get("version", "v1")
            if version == "v1":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs256NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            elif version == "v2":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs768NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
            del net_g, cpt
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            cpt = None
        return (
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
        )
    person = "%s/%s" % (weight_root, sid)
    print("loading %s" % person)
    cpt = torch.load(person, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    if if_f0 == 0:
        # Models without an f0 track do not use the protect sliders.
        to_return_protect0 = to_return_protect1 = {
            "visible": False,
            "value": 0.5,
            "__type__": "update",
        }
    else:
        to_return_protect0 = {
            "visible": True,
            "value": to_return_protect0,
            "__type__": "update",
        }
        to_return_protect1 = {
            "visible": True,
            "value": to_return_protect1,
            "__type__": "update",
        }
    version = cpt.get("version", "v1")
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    del net_g.enc_q  # the posterior encoder is only needed for training
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(config.device)
    if config.is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    return (
        {"visible": True, "maximum": n_spk, "__type__": "update"},
        to_return_protect0,
        to_return_protect1,
    )
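The hand-built dictionaries returned by get_vc follow Gradio's component-update protocol. As a sketch (assuming the Gradio 3.x API this UI appears to target), the same kind of payload can also come from the gr.update helper; exact internal field values can vary across 3.x releases, so this is illustrative rather than exact:

import gradio as gr

# Hand-built update payload, as returned by get_vc() above.
manual = {"visible": False, "__type__": "update"}

# Gradio's helper builds an equivalent payload for the same purpose.
helper = gr.update(visible=False)
print(manual)
print(helper)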
def update_model_choices(select_value):
    model_ids = get_model_list()
    model_ids_list = list(model_ids)
    if select_value == "VR":
        return {"choices": uvr5_names, "__type__": "update"}
    elif select_value == "MDX":
        return {"choices": model_ids_list, "__type__": "update"}

def download_model():
    gr.Markdown(value="# " + i18n("Download Model"))
    gr.Markdown(value=i18n("It is used to download your inference models."))
    with gr.Row():
        model_url = gr.Textbox(label=i18n("Url:"))
    with gr.Row():
        download_model_status_bar = gr.Textbox(label=i18n("Status:"))
    with gr.Row():
        download_button = gr.Button(i18n("Download"))
        download_button.click(fn=load_downloaded_model, inputs=[model_url], outputs=[download_model_status_bar])

def download_backup():
    gr.Markdown(value="# " + i18n("Download Backup"))
    gr.Markdown(value=i18n("It is used to download your training backups."))
    with gr.Row():
        model_url = gr.Textbox(label=i18n("Url:"))
    with gr.Row():
        download_model_status_bar = gr.Textbox(label=i18n("Status:"))
    with gr.Row():
        download_button = gr.Button(i18n("Download"))
        download_button.click(fn=load_downloaded_backup, inputs=[model_url], outputs=[download_model_status_bar])

def update_dataset_list(name):
    new_datasets = []
    for foldername in os.listdir("./datasets"):
        if "." not in foldername:
            new_datasets.append(os.path.join(find_folder_parent(".", "pretrained"), "datasets", foldername))
    return gr.Dropdown.update(choices=new_datasets)

def download_dataset(trainset_dir4):
    gr.Markdown(value="# " + i18n("Download Dataset"))
    gr.Markdown(value=i18n("Download the dataset with the audios in a compatible format (.wav/.flac) to train your model."))
    with gr.Row():
        dataset_url = gr.Textbox(label=i18n("Url:"))
    with gr.Row():
        load_dataset_status_bar = gr.Textbox(label=i18n("Status:"))
    with gr.Row():
        load_dataset_button = gr.Button(i18n("Download"))
        load_dataset_button.click(fn=load_dowloaded_dataset, inputs=[dataset_url], outputs=[load_dataset_status_bar])
    load_dataset_status_bar.change(update_dataset_list, dataset_url, trainset_dir4)

def download_audio():
    gr.Markdown(value="# " + i18n("Download Audio"))
    gr.Markdown(value=i18n("Download audios of any format for use in inference (recommended for mobile users)."))
    with gr.Row():
        audio_url = gr.Textbox(label=i18n("Url:"))
    with gr.Row():
        download_audio_status_bar = gr.Textbox(label=i18n("Status:"))
    with gr.Row():
        download_button2 = gr.Button(i18n("Download"))
        download_button2.click(fn=load_downloaded_audio, inputs=[audio_url], outputs=[download_audio_status_bar])

def youtube_separator():
    gr.Markdown(value="# " + i18n("Separate YouTube tracks"))
    gr.Markdown(value=i18n("Download audio from a YouTube video and automatically separate the vocal and instrumental tracks"))
    with gr.Row():
        input_url = gr.inputs.Textbox(label=i18n("Enter the YouTube link:"))
        output_path = gr.Textbox(
            label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"),
            value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads",
            visible=False,
        )
        advanced_settings_checkbox = gr.Checkbox(
            value=False,
            label=i18n("Advanced Settings"),
            interactive=True,
        )
    with gr.Row(label=i18n("Advanced Settings"), visible=False, variant='compact') as advanced_settings:
        with gr.Column():
            model_select = gr.Radio(
                label=i18n("Model Architecture:"),
                choices=["VR", "MDX"],
                value="VR",
                interactive=True,
            )
            model_choose = gr.Dropdown(
                label=i18n("Model: (Be aware that in some models the named vocal will be the instrumental)"),
                choices=uvr5_names,
                value="HP5_only_main_vocal"
            )
            with gr.Row():
                agg = gr.Slider(
                    minimum=0,
                    maximum=20,
                    step=1,
                    label=i18n("Vocal Extraction Aggressive"),
                    value=10,
                    interactive=True,
                )
            with gr.Row():
                opt_vocal_root = gr.Textbox(
                    label=i18n("Specify the output folder for vocals:"), value="audios",
                )
                opt_ins_root = gr.Textbox(
                    label=i18n("Specify the output folder for accompaniment:"), value="audio-others",
                )
                dir_wav_input = gr.Textbox(
                    label=i18n("Enter the path of the audio folder to be processed:"),
                    value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"),
                    visible=False,
                )
                format0 = gr.Radio(
                    label=i18n("Export file format"),
                    choices=["wav", "flac", "mp3", "m4a"],
                    value="wav",
                    visible=False,
                    interactive=True,
                )
                wav_inputs = gr.File(
                    file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder."),
                    visible=False,
                )
            model_select.change(
                fn=update_model_choices,
                inputs=model_select,
                outputs=model_choose,
            )
    with gr.Row():
        vc_output4 = gr.Textbox(label=i18n("Status:"))
        vc_output5 = gr.Audio(label=i18n("Vocal"), type='filepath')
        vc_output6 = gr.Audio(label=i18n("Instrumental"), type='filepath')
    with gr.Row():
        but2 = gr.Button(i18n("Download and Separate"))
        but2.click(
            uvr,
            [
                input_url,
                output_path,
                model_choose,
                dir_wav_input,
                opt_vocal_root,
                wav_inputs,
                opt_ins_root,
                agg,
                format0,
                model_select
            ],
            [vc_output4, vc_output5, vc_output6],
        )
    def toggle_advanced_settings(checkbox):
        return {"visible": checkbox, "__type__": "update"}

    advanced_settings_checkbox.change(
        fn=toggle_advanced_settings,
        inputs=[advanced_settings_checkbox],
        outputs=[advanced_settings]
    )
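The checkbox-driven show/hide wiring above is a common Gradio pattern. A minimal, self-contained sketch of the same idea (assuming Gradio 3.x; the component names here are made up for illustration):

import gradio as gr

with gr.Blocks() as demo:
    show = gr.Checkbox(value=False, label="Advanced Settings")
    # The row starts hidden, exactly like `advanced_settings` above.
    with gr.Row(visible=False) as advanced:
        gr.Slider(0, 20, value=10, label="Aggressiveness")
    # Returning an update payload from the handler toggles the row's visibility.
    show.change(
        fn=lambda checked: {"visible": checked, "__type__": "update"},
        inputs=[show],
        outputs=[advanced],
    )

# demo.launch()  # uncomment to run locally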
def get_bark_voice():
    # NOTE: the columns of this table were tab-separated in the original file,
    # which the split("\t") below relies on; they are shown here as rendered.
    mensaje = """
v2/en_speaker_0 English Male
v2/en_speaker_1 English Male
v2/en_speaker_2 English Male
v2/en_speaker_3 English Male
v2/en_speaker_4 English Male
v2/en_speaker_5 English Male
v2/en_speaker_6 English Male
v2/en_speaker_7 English Male
v2/en_speaker_8 English Male
v2/en_speaker_9 English Female
v2/zh_speaker_0 Chinese (Simplified) Male
v2/zh_speaker_1 Chinese (Simplified) Male
v2/zh_speaker_2 Chinese (Simplified) Male
v2/zh_speaker_3 Chinese (Simplified) Male
v2/zh_speaker_4 Chinese (Simplified) Female
v2/zh_speaker_5 Chinese (Simplified) Male
v2/zh_speaker_6 Chinese (Simplified) Female
v2/zh_speaker_7 Chinese (Simplified) Female
v2/zh_speaker_8 Chinese (Simplified) Male
v2/zh_speaker_9 Chinese (Simplified) Female
v2/fr_speaker_0 French Male
v2/fr_speaker_1 French Female
v2/fr_speaker_2 French Female
v2/fr_speaker_3 French Male
v2/fr_speaker_4 French Male
v2/fr_speaker_5 French Female
v2/fr_speaker_6 French Male
v2/fr_speaker_7 French Male
v2/fr_speaker_8 French Male
v2/fr_speaker_9 French Male
v2/de_speaker_0 German Male
v2/de_speaker_1 German Male
v2/de_speaker_2 German Male
v2/de_speaker_3 German Female
v2/de_speaker_4 German Male
v2/de_speaker_5 German Male
v2/de_speaker_6 German Male
v2/de_speaker_7 German Male
v2/de_speaker_8 German Female
v2/de_speaker_9 German Male
v2/hi_speaker_0 Hindi Female
v2/hi_speaker_1 Hindi Female
v2/hi_speaker_2 Hindi Male
v2/hi_speaker_3 Hindi Female
v2/hi_speaker_4 Hindi Female
v2/hi_speaker_5 Hindi Male
v2/hi_speaker_6 Hindi Male
v2/hi_speaker_7 Hindi Male
v2/hi_speaker_8 Hindi Male
v2/hi_speaker_9 Hindi Female
v2/it_speaker_0 Italian Male
v2/it_speaker_1 Italian Male
v2/it_speaker_2 Italian Female
v2/it_speaker_3 Italian Male
v2/it_speaker_4 Italian Male
v2/it_speaker_5 Italian Male
v2/it_speaker_6 Italian Male
v2/it_speaker_7 Italian Female
v2/it_speaker_8 Italian Male
v2/it_speaker_9 Italian Female
v2/ja_speaker_0 Japanese Female
v2/ja_speaker_1 Japanese Female
v2/ja_speaker_2 Japanese Male
v2/ja_speaker_3 Japanese Female
v2/ja_speaker_4 Japanese Female
v2/ja_speaker_5 Japanese Female
v2/ja_speaker_6 Japanese Male
v2/ja_speaker_7 Japanese Female
v2/ja_speaker_8 Japanese Female
v2/ja_speaker_9 Japanese Female
v2/ko_speaker_0 Korean Female
v2/ko_speaker_1 Korean Male
v2/ko_speaker_2 Korean Male
v2/ko_speaker_3 Korean Male
v2/ko_speaker_4 Korean Male
v2/ko_speaker_5 Korean Male
v2/ko_speaker_6 Korean Male
v2/ko_speaker_7 Korean Male
v2/ko_speaker_8 Korean Male
v2/ko_speaker_9 Korean Male
v2/pl_speaker_0 Polish Male
v2/pl_speaker_1 Polish Male
v2/pl_speaker_2 Polish Male
v2/pl_speaker_3 Polish Male
v2/pl_speaker_4 Polish Female
v2/pl_speaker_5 Polish Male
v2/pl_speaker_6 Polish Female
v2/pl_speaker_7 Polish Male
v2/pl_speaker_8 Polish Male
v2/pl_speaker_9 Polish Female
v2/pt_speaker_0 Portuguese Male
v2/pt_speaker_1 Portuguese Male
v2/pt_speaker_2 Portuguese Male
v2/pt_speaker_3 Portuguese Male
v2/pt_speaker_4 Portuguese Male
v2/pt_speaker_5 Portuguese Male
v2/pt_speaker_6 Portuguese Male
v2/pt_speaker_7 Portuguese Male
v2/pt_speaker_8 Portuguese Male
v2/pt_speaker_9 Portuguese Male
v2/ru_speaker_0 Russian Male
v2/ru_speaker_1 Russian Male
v2/ru_speaker_2 Russian Male
v2/ru_speaker_3 Russian Male
v2/ru_speaker_4 Russian Male
v2/ru_speaker_5 Russian Female
v2/ru_speaker_6 Russian Female
v2/ru_speaker_7 Russian Male
v2/ru_speaker_8 Russian Male
v2/ru_speaker_9 Russian Female
v2/es_speaker_0 Spanish Male
v2/es_speaker_1 Spanish Male
v2/es_speaker_2 Spanish Male
v2/es_speaker_3 Spanish Male
v2/es_speaker_4 Spanish Male
v2/es_speaker_5 Spanish Male
v2/es_speaker_6 Spanish Male
v2/es_speaker_7 Spanish Male
v2/es_speaker_8 Spanish Female
v2/es_speaker_9 Spanish Female
v2/tr_speaker_0 Turkish Male
v2/tr_speaker_1 Turkish Male
v2/tr_speaker_2 Turkish Male
v2/tr_speaker_3 Turkish Male
v2/tr_speaker_4 Turkish Female
v2/tr_speaker_5 Turkish Female
v2/tr_speaker_6 Turkish Male
v2/tr_speaker_7 Turkish Male
v2/tr_speaker_8 Turkish Male
v2/tr_speaker_9 Turkish Male
"""
    # Split the message into lines and keep "key-gender" entries.
    lineas = mensaje.split("\n")
    datos_deseados = []
    for linea in lineas:
        partes = linea.split("\t")
        if len(partes) == 3:
            clave, _, genero = partes
            datos_deseados.append(f"{clave}-{genero}")

    return datos_deseados
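For reference, each row of the table reduces to a "speaker-gender" string. A quick sketch of the same parsing on one row, written here with an explicit tab since split("\t") expects tab-separated columns:

# One row of the voice table, with an explicit tab separator.
sample = "v2/en_speaker_0\tEnglish\tMale"
clave, _, genero = sample.split("\t")
print(f"{clave}-{genero}")  # -> v2/en_speaker_0-Male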
def get_edge_voice():
    completed_process = subprocess.run(['edge-tts', "-l"], capture_output=True, text=True)
    lines = completed_process.stdout.strip().split("\n")
    data = []
    current_entry = {}
    for line in lines:
        if line.startswith("Name: "):
            # A new "Name:" line starts the next voice entry.
            if current_entry:
                data.append(current_entry)
            current_entry = {"Name": line.split(": ")[1]}
        elif line.startswith("Gender: "):
            current_entry["Gender"] = line.split(": ")[1]
    if current_entry:
        data.append(current_entry)
    tts_voice = []
    for entry in data:
        name = entry["Name"]
        gender = entry["Gender"]
        formatted_entry = f'{name}-{gender}'
        tts_voice.append(formatted_entry)
    return tts_voice


#print(set_tts_voice)
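get_edge_voice assumes `edge-tts -l` prints one `Name:`/`Gender:` field per line for each voice. A sketch of that parsing on canned input (the sample text is made up; the real CLI output can differ between edge-tts versions):

# Hypothetical excerpt of `edge-tts -l` output.
sample = "Name: en-US-AriaNeural\nGender: Female\nName: en-GB-RyanNeural\nGender: Male"
voices = []
entry = {}
for line in sample.split("\n"):
    if line.startswith("Name: "):
        if entry:
            voices.append(entry)
        entry = {"Name": line.split(": ")[1]}
    elif line.startswith("Gender: "):
        entry["Gender"] = line.split(": ")[1]
if entry:
    voices.append(entry)
print([f"{v['Name']}-{v['Gender']}" for v in voices])
# -> ['en-US-AriaNeural-Female', 'en-GB-RyanNeural-Male']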
spaces/Benson/text-generation/Examples/Bloque Explosin Aventura Maestro Apk Descargar.md
DELETED
@@ -1,93 +0,0 @@
# Block Blast Adventure Master APK Download: A Fun and Addictive Block Puzzle Game

If you are looking for a fun and addictive block puzzle game that will relax your mind and challenge your brain, you should try **Block Blast Adventure Master**. This game is a perfect mix of block and brain puzzles, combining creativity with the classics. You can download the game's APK file from various sources and enjoy it on your Android device. In this article we cover everything you need to know about this game: what it is, how to download and install it, how to play it, what its features are, some tips and tricks, some reviews, and a few frequently asked questions.

## What is Block Blast Adventure Master?

Block Blast Adventure Master is a puzzle game developed by Hungry Studio. The game has been available since September 2022 and has been downloaded more than 10 million times. It holds a high rating of 4.8 out of 5 stars on the Google Play Store, based on more than 65,000 reviews. The game was last updated on June 15, 2023.

## block blast adventure master apk download

**DOWNLOAD ❤❤❤ <https://bltlly.com/2v6L1m>**

### A classic block puzzle game for all ages

Block Blast Adventure Master is a classic block puzzle game suitable for all ages. The game is simple but addictive: you drag and drop cube blocks onto an 8x8 grid and fill rows or columns with blocks to clear them. If no moves remain, the game is over. Blocks cannot be rotated, which makes the game harder and less predictable.

### A free block game with a story adventure mode

Block Blast Adventure Master is a free block game that also has a story adventure mode. In this mode you follow the journey of a cute character named Woody as he explores different worlds and faces various obstacles. You unlock new levels and themes as you progress through the story, and you can collect coins and gems to buy items and power-ups.

### A challenging and relaxing brain teaser

## How do you download and install the Block Blast Adventure Master APK?

If you want to download and install the Block Blast Adventure Master APK on your Android device, you have two options: you can get it from the official app stores or from third-party websites. Here are the steps for both:

### Download from the Google Play Store or the App Store

The easiest and safest way to download and install the Block Blast Adventure Master APK is to get it from the Google Play Store or the App Store. Simply follow these steps:

1. Open the Google Play Store or the App Store on your device.
2. Search for "Block Blast Adventure Master" in the search bar.
3. Select the game from the list of results and tap "Install".
4. Wait for the download and installation to complete.
5. Launch the game and enjoy.

### Download from APKCombo or AppBrain

If you cannot access the Google Play Store or the App Store, or you want the latest version of the Block Blast Adventure Master APK, you can also download it from third-party websites such as APKCombo or AppBrain. However, be careful and only download from trusted sources, since some websites may contain malware or viruses. You also need to allow installation of apps from unknown sources in your device settings. Follow these steps:

1. Go to APKCombo or AppBrain in your browser.
2. Search for "Block Blast Adventure Master" in the search bar.
3. Select the game from the list of results and tap "Download APK".
4. Wait for the download to finish and locate the file on your device.
5. Tap the file and follow the instructions to install it.
6. Launch the game and enjoy.

## How do you play Block Blast Adventure Master?

### Drag and drop cube blocks onto an 8x8 grid

The game gives you three cube blocks at a time at the bottom of the screen. You can drag and drop them onto any empty space on the 8x8 grid, and you can preview the next three blocks at the top of the screen. You cannot rotate the blocks, so you have to think carefully about where to place them.

### Fill rows or columns with blocks to clear them

When you fill a row or a column with blocks, they disappear and you score points. The more rows or columns you clear at once, the more points you get. You can also perform combos by clearing several rows or columns in succession. This triggers cool clearing animations and bonus points.

### Use the empty space wisely and plan ahead

The game ends when there is no more room for new blocks. Therefore, you have to use the empty space wisely and plan ahead. Try to avoid leaving gaps or holes in the grid, since they will limit your options later. Also, try to balance the distribution of blocks across different areas of the grid so you can clear more rows or columns at once.

### Perform combos for bonus points and cool animations

One of the most satisfying aspects of Block Blast Adventure Master is performing combos. A combo is when you clear more than one row or column at once, or when you clear rows or columns in succession without placing new blocks in between. When you perform a combo, you get bonus points and cool animations that make you feel amazing.

## What are the features of Block Blast Adventure Master?

Block Blast Adventure Master is not just a simple block puzzle game. It has many features that make it stand out from other games in its genre. Here are some of them:

### Colorful graphics and wonderful sound effects

### No wifi required, ideal for killing time

The game does not require wifi or an internet connection to play. You can play anytime and anywhere you want, whether at home, at work, on the bus, or on a plane. The game is ideal for killing time and relaxing your mind.

### Unlimited attempts, no time limit, no pressure

The game has no limit on how many times you can try or how long you can play. You can play at your own pace and enjoy the game without any pressure, and you can pause and resume the game whenever you want.

### Different difficulty levels and modes to choose from

The game has four difficulty levels in classic mode: easy, medium, hard, and expert. You can choose the level that suits your skill and preference. The game also has an adventure mode where you can follow Woody's story and unlock new worlds and themes, and you can switch between modes at any time.

## What are some tips and tricks for Block Blast Adventure Master?

If you want to improve your skills and score in Block Blast Adventure Master, here are some tips and tricks that may help you:

### Watch YouTube videos for hints and solutions

If you are stuck on a level or want to see how other players play the game, you can watch YouTube videos that show hints and solutions for Block Blast Adventure Master. You can learn from their strategies and techniques and apply them to your own game.

### Choose the best position for each block based on its shape

One of the key factors in playing Block Blast Adventure Master is choosing the best position for each block based on its shape. You have to consider how the block will fit into the grid and how it will affect future blocks. You also have to avoid leaving gaps or holes that will limit your options later. Try to place the blocks in a way that creates more opportunities to clear rows or columns.

### Try to clear several lines at once for a higher score

Another important factor in playing Block Blast Adventure Master is trying to clear several lines at once for a higher score. The more lines you clear at once, the more points you get, and you also get extra points and cool animations for performing combos. Therefore, you should try to plan ahead and create opportunities to clear several lines at once.

### Follow the Facebook page for updates and news

If you want to stay up to date on the latest news and updates about Block Blast Adventure Master, you should follow the game's official Facebook page. You can also interact with other players and share your feedback and suggestions, and you can get special offers and rewards from time to time.

## What are some reviews of Block Blast Adventure Master?

Block Blast Adventure Master has received many positive reviews from players and critics alike. Here are some examples of what they have said about the game:

### Positive reviews from players and critics

"This is one of the best block puzzle games I have ever played. It is so addictive and fun. I love the adventure mode and the different themes. The graphics are amazing and the sound effects are relaxing. I recommend this game to anyone who loves puzzle games."

"I am a big fan of block puzzle games and this is by far my favorite. It is challenging but not frustrating. It has lots of features and modes that make it interesting and varied. It is also very relaxing and calming. I play it every day before going to bed."

"Block Blast Adventure Master is a great game that combines the classic block puzzle with a story adventure mode. It is easy to play but hard to master. It has colorful graphics and wonderful sound effects that create a pleasant atmosphere. It is also free to play and requires no wifi or internet connection."

### High ratings and rankings in app stores

### Some minor issues and suggestions for improvement

Despite the positive reviews, the game also has some minor issues and improvement suggestions from some players and critics. Here are some of them:

"The game is great but has some bugs and glitches. Sometimes the blocks do not fit properly or disappear at random. Sometimes the game freezes or crashes. I hope the developers can fix these issues soon."

"The game is fun, but it can get repetitive and boring after a while. I wish there were more modes and features to make it more exciting and challenging. Maybe they could add some power-ups, special blocks, or mini-games."

"The game is relaxing, but it can also be frustrating and stressful. Sometimes the blocks are too big or too small for the grid. Sometimes the blocks are too hard or too easy to clear. I think they should balance the difficulty and the randomness of the blocks."

## Conclusion

Block Blast Adventure Master is a fun and addictive block puzzle game that will relax your mind and challenge your brain. You can download the game's APK file from various sources and enjoy it on your Android device. The game has many features that make it stand out from other games in its genre, such as colorful graphics, wonderful sound effects, no wifi requirement, unlimited attempts, different difficulty levels, modes and themes, and a story adventure mode. The game also has some tips and tricks that can help you improve your skills and score, such as watching YouTube videos, choosing the best position for each block, clearing several lines at once, and following the Facebook page. The game has received many positive reviews from players and critics alike, as well as high ratings and rankings in app stores. The game also has some minor issues and improvement suggestions that could be fixed or added in future updates.

## Frequently asked questions

**Q: Is Block Blast Adventure Master free to play?**

A: Yes, Block Blast Adventure Master is free to play. However, it contains ads and in-app purchases that can enhance your gaming experience.

**Q: What are the minimum requirements to play Block Blast Adventure Master?**

A: To play Block Blast Adventure Master, you need an Android device running Android 4.4 or higher, or an iOS device running iOS 9.0 or higher.

**Q: How can I contact the developers of Block Blast Adventure Master?**

A: You can contact the developers of Block Blast Adventure Master by sending an email to [email protected] or visiting their website at https://www.hungry-studio.com/.

**Q: How can I support the developers of Block Blast Adventure Master?**

A: You can support the developers of Block Blast Adventure Master by rating and reviewing the game in the app stores, sharing it with your friends and family, following their social media accounts, and making in-app purchases if you wish.

**Q: How can I get more coins and gems in Block Blast Adventure Master?**

A: You can get more coins and gems in Block Blast Adventure Master by playing the game regularly, clearing levels and modes, watching ads, completing tasks and achievements, spinning the wheel, opening chests, and buying them with real money.
spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Solitario Para Telfono Android.md
DELETED
@@ -1,139 +0,0 @@
# Free Solitaire Game Download for Android Phone

If you are looking for a fun and relaxing way to pass the time, you might want to try playing Solitaire on your Android phone. Solitaire is one of the most popular card games in the world, and it is easy to learn and play. In this article, we will tell you everything you need to know about Solitaire, how to play it on your Android phone, and how to download and install the best Solitaire apps for free.

## free solitaire game download for android phone

**DOWNLOAD >>> <https://bltlly.com/2v6JrT>**

## What is Solitaire?

Solitaire is a card game that can be played by one person or more. The goal of the game is to arrange all the cards in a specific order, usually by suit and rank, from ace to king. There are many different versions of Solitaire, such as Klondike, Spider, FreeCell, Pyramid, and TriPeaks. Each version has its own rules and challenges, but they all share the same basic principle of sorting cards.

### The history and popularity of Solitaire

Solitaire is believed to have originated in Europe in the late 18th century, as a way for people to entertain themselves during long periods of isolation or boredom. The game became popular in France, where it was called "patience", and then spread to other countries. In the 19th century, Solitaire was introduced to America by British settlers, who called it "solitaire". The game gained further popularity in the 20th century, especially after it was included as a default program in Microsoft Windows in 1990. Since then, millions of people have played Solitaire on their computers, phones, tablets, and other devices.

### The benefits of playing Solitaire

Playing Solitaire is not only fun but also good for your mental health and well-being. Some of the benefits of playing Solitaire are:

- It improves your concentration, memory, and problem-solving skills.
- It reduces stress, anxiety, and boredom.
- It boosts your mood, confidence, and self-esteem.
- It improves your social and communication skills.

Playing Solitaire can also help you learn new things, such as math, logic, strategy, and patience. It can also challenge you to beat your own records and achievements.

## How to play Solitaire on your Android phone

Playing Solitaire on your Android phone is very easy and convenient. You do not need a deck of cards or a table; you only need your phone and an internet connection. You can play Solitaire anytime and anywhere you want, whether at home, at work, or on the go. You can also choose from a variety of Solitaire apps that offer different features and options.

### The rules and variations of Solitaire

The rules of Solitaire depend on the version you are playing, but the general rules are as follows (a small code sketch of the placement rule appears right after the list):

1. You start with a deck of 52 cards.
2. You deal some cards face up on the table in a specific layout, called the tableau. The remaining cards are placed face down in a pile, called the stock.
3. You move cards from the tableau or the stock to another pile, called the foundation. The foundation consists of four piles, one for each suit.
4. You can only move one card at a time, unless you have a sequence of cards in descending order and alternating colors (red-black or black-red).
5. You can only place a card on top of another card that is one rank higher and of the opposite color. For example, you can place a black 9 on a red 10, or a red 5 on a black 6.
6. You can move cards from the stock to the tableau or the foundation, turning over one card at a time or three cards at a time, depending on the settings.
7. You win the game when you move all the cards to the foundation, in ascending order and by suit, from ace to king.
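As that list suggests, the placement rule is completely mechanical. A minimal Python sketch of the tableau check from steps 4-5 (illustrative only; this is not code from any of the apps discussed below):

RED_SUITS = {"hearts", "diamonds"}

def can_place_on_tableau(card, target):
    # True when `card` may be placed on `target`: exactly one rank lower
    # and the opposite color. Cards are (rank, suit) tuples with
    # rank 1 (ace) .. 13 (king).
    rank, suit = card
    target_rank, target_suit = target
    return target_rank == rank + 1 and (suit in RED_SUITS) != (target_suit in RED_SUITS)

# A black 9 on a red 10 is legal; a red 5 on a red 6 is not.
print(can_place_on_tableau((9, "spades"), (10, "hearts")))   # True
print(can_place_on_tableau((5, "hearts"), (6, "diamonds")))  # False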
Some of the most popular variations of Solitaire are:

| Version | Description |
| --- | --- |
| Klondike | |
| Spider | A more challenging version of Solitaire. You deal 10 columns of cards on the tableau, with the first four columns holding six cards each and the rest holding five cards each. All cards are face up. You can move any card or sequence of cards within the tableau, regardless of suit or color. However, you can only move cards to the foundation when they are in descending order and of the same suit. You can deal a new row of cards from the stock when there are no more moves on the tableau. |
| FreeCell | A strategic version of Solitaire. You deal eight columns of cards on the tableau, with all cards face up. You can move any card or sequence of cards within the tableau, as long as an empty space (cell) is available. You have four cells in the top-left corner of the screen, each of which can temporarily hold one card. You can move cards to the foundation when they are in ascending order and by suit. |
| Pyramid | A fun and easy version of Solitaire. You deal 28 cards on the tableau in the shape of a pyramid, with seven rows and seven columns. The top row has one card, the second row has two cards, and so on; the bottom row has seven cards. All cards are face up. You can remove two cards from the tableau if they add up to 13 (Ace = 1, Jack = 11, Queen = 12, King = 13). You can also remove a single King. You can turn over one card at a time from the stock and use it to remove another card from the tableau. You win the game when you remove all the cards from the tableau. |
| TriPeaks | |

### The features and options of Solitaire apps

Solitaire apps are applications that let you play Solitaire on your Android phone. There are many Solitaire apps available on the Google Play Store, and they offer different features and options to suit your preferences and needs. Some of the common features and options of Solitaire apps are:

- You can choose from different versions and modes of Solitaire, such as Klondike, Spider, FreeCell, Pyramid, TriPeaks, and more.
- You can customize the look and layout of the game, such as the card design, background, sound, animation, and orientation.
- You can adjust the difficulty and challenge of the game, such as the number of cards you must deal from the stock, the number of suits you must use, and the scoring system.
- You can track your progress and performance, such as the number of games played, won, and lost, the time spent, and your best scores.
- You can access hints, tips, and undo moves to help you play better and faster.
- You can compete with other players online, or play without an internet connection.
- You can enjoy other features and benefits, such as daily challenges, achievements, rewards, leaderboards, and more.

### The best Solitaire apps for Android phone

With so many Solitaire apps to choose from, you may be wondering which ones are best for your Android phone. Here are some of the best Solitaire apps we recommend:

- **Solitaire by Brainium Studios**: This is another great Solitaire app for Android phones. It has more than 10 million downloads and 4.6 stars on the Google Play Store. It offers Klondike Solitaire with various options and features, such as draw 1 or draw 3, portrait or landscape mode, Vegas scoring or standard scoring, daily challenges, achievements, leaderboards, statistics, hints, undo moves, an autocomplete feature, and more. It also has a beautiful, elegant design that is customizable and easy to use.
- **Solitaire Collection by Ruben Reboredo**: This is a Solitaire app that offers a collection of different versions and modes of Solitaire, such as Klondike, Spider, FreeCell, Pyramid, TriPeaks, Golf, Yukon, Scorpion, and more. It has more than 5 million downloads and 4.7 stars on the Google Play Store. It offers various options and features, such as draw 1 or draw 3, portrait or landscape mode, standard scoring or no scoring, daily challenges, achievements, leaderboards, statistics, hints, undo moves, an autocomplete feature, and more. It also has a simple, colorful design that is easy to navigate and play.

## How to download and install Solitaire apps on your Android phone

Downloading and installing Solitaire apps on your Android phone is very quick and easy. Just follow these steps:

1. Go to the Google Play Store on your Android phone and search for the Solitaire app you want to download. You can also use the links we provided above to go directly to the app's page.
2. Tap the Install button and wait for the app to download and install on your phone. You may need to grant some permissions and settings for the app to work properly.
3. Once the app is installed, you can open it and start playing Solitaire on your Android phone. You can also create an account or sign in with your Google account to save your progress and preferences.

### Permissions and settings for Solitaire apps

Some of the permissions and settings that Solitaire apps may ask for are:

- Access to your device's storage, photos, media, and files. This is to save the game data and preferences on your phone.
- Access to your device's network connection. This is to enable online features and functions, such as daily challenges, achievements, leaderboards, ads, and more.
- Access to your device's vibration. This is to provide feedback and effects while you play the game.
- Access to your device's location. This is to provide personalized content and ads based on your region.

You can manage these permissions and settings by going to the app's settings menu or your phone's settings menu. You can also turn some of them off if you do not want or need them.

### Troubleshooting and support for Solitaire apps

If you run into any problem or issue with Solitaire apps on your Android phone, you can try some of these troubleshooting and support tips:

- Check your internet connection and make sure it is stable and fast.
- Restart your phone and the app and see if the problem persists.
- Clear the app's cache and data and see if the problem is resolved.
- Update the app to the latest version and see if the problem is fixed.
- Contact the app's developer or customer service, report the problem, and ask for help.

## Conclusion

If you are looking for a fun and relaxing way to pass the time, you should try playing Solitaire on your Android phone. It is easy to learn and play, and it will challenge you to improve your skills and performance. It will also entertain you and keep you occupied for hours. What are you waiting for? Download one of the best Solitaire apps for free today and enjoy playing Solitaire on your Android phone!

### Frequently asked questions

Here are some of the most frequently asked questions about free Solitaire game downloads for Android phones:

1. **Q: How much space does a Solitaire app take up on my Android phone?**
   A: The space a Solitaire app takes up depends on the app itself, but it is usually not very large. Most Solitaire apps take up less than 100 MB of space on your phone, which is not much compared with other apps.
2. **Q: How do I delete a Solitaire app from my Android phone?**
   A: To delete a Solitaire app from your Android phone, you can follow these steps:
   - Go to the Settings menu on your phone and tap Apps or Applications.
   - Find the Solitaire app you want to remove and tap it.
   - Tap Uninstall or Remove and confirm your choice.
3. **Q: How can I play Solitaire with other people online?**
   A: To play Solitaire with other people online, you need to download a Solitaire app that supports multiplayer or online mode. Some of the Solitaire apps that offer this feature are:
   - Solitaire Arena by RockYou Inc.
   - Solitario en vivo by Gazeus Games.
   - Solitaire Grand Harvest by Supertreat.
4. **Q: How can I turn off ads in a Solitaire app?**
   A: To turn off ads in a Solitaire app, you have two options:
   - You can turn off your internet connection while playing the app. This will prevent ads from loading and appearing on your screen. However, it will also disable some of the app's online features and functions.
5. **Q: How can I get more coins or rewards in a Solitaire app?**
   A: To get more coins or rewards in a Solitaire app, you have several options:
   - You can complete the daily challenges, achievements, or quests the app offers. These will reward you with coins or other prizes.
   - You can watch the ads or videos the app offers. These will reward you with coins or other bonuses.
   - You can invite your friends or family to play the app with you. Some apps will reward you with coins or other incentives for referring new players.
spaces/Billyosoro/ESRGAN/setup.py
DELETED
@@ -1,107 +0,0 @@
#!/usr/bin/env python

from setuptools import find_packages, setup

import os
import subprocess
import time

version_file = 'realesrgan/version.py'


def readme():
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content


def get_git_hash():

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'

    return sha


def get_hash():
    if os.path.exists('.git'):
        sha = get_git_hash()[:7]
    else:
        sha = 'unknown'

    return sha


def write_version_py():
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])

    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


def get_requirements(filename='requirements.txt'):
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.replace('\n', '') for line in f.readlines()]
    return requires


if __name__ == '__main__':
    write_version_py()
    setup(
        name='realesrgan',
        version=get_version(),
        description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='[email protected]',
        keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
        url='https://github.com/xinntao/Real-ESRGAN',
        include_package_data=True,
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='BSD-3-Clause License',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
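For context, running this setup script first renders realesrgan/version.py from the template in write_version_py(); after an install, the generated fields can be read back like this (a sketch, assuming the template above was written unchanged):

# Fields rendered by write_version_py() into realesrgan/version.py.
from realesrgan.version import __version__, __gitsha__, version_info

print(__version__)   # contents of the VERSION file, e.g. "0.3.0"
print(__gitsha__)    # 7-character git hash, or "unknown" outside a checkout
print(version_info)  # e.g. (0, 3, 0)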
spaces/BlueRey/MendoBERT_QA/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: MendoBERT QA
emoji: 🏢
colorFrom: gray
colorTo: yellow
sdk: streamlit
sdk_version: 1.19.0
app_file: app.py
pinned: false
license: afl-3.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/thrust/testing/unittest/cuda/testframework.h
DELETED
@@ -1,25 +0,0 @@
#pragma once

#include <unittest/testframework.h>
#include <thrust/system/cuda/memory.h>
#include <thrust/system_error.h>
#include <vector>

class CUDATestDriver
  : public UnitTestDriver
{
  public:
    int current_device_architecture() const;

  private:
    std::vector<int> target_devices(const ArgumentMap &kwargs);

    bool check_cuda_error(bool concise);

    virtual bool post_test_sanity_check(const UnitTest &test, bool concise);

    virtual bool run_tests(const ArgumentSet &args, const ArgumentMap &kwargs);
};

UnitTestDriver &driver_instance(thrust::system::cuda::tag);
spaces/CVPR/WALT/mmdet/core/bbox/assigners/hungarian_assigner.py
DELETED
@@ -1,145 +0,0 @@
import torch

from ..builder import BBOX_ASSIGNERS
from ..match_costs import build_match_cost
from ..transforms import bbox_cxcywh_to_xyxy
from .assign_result import AssignResult
from .base_assigner import BaseAssigner

try:
    from scipy.optimize import linear_sum_assignment
except ImportError:
    linear_sum_assignment = None


@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The cost is a weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict, optional): Config for the classification cost.
            Default: dict(type='ClassificationCost', weight=1.).
        reg_cost (dict, optional): Config for the regression L1 cost.
            Default: dict(type='BBoxL1Cost', weight=1.0).
        iou_cost (dict, optional): Config for the regression iou cost.
            Default: dict(type='IoUCost', iou_mode='giou', weight=1.0).
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.),
                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.

        This method assigns each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps; the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign -1 by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              -1,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
        img_h, img_w, _ = img_meta['img_shape']
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
                                       img_h]).unsqueeze(0)

        # 2. compute the weighted costs
        # classification and bbox cost
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost
        normalize_gt_bboxes = gt_bboxes / factor
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # regression iou cost; giou is used by default, as in official DETR
        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        # weighted sum of above three costs
        cost = cls_cost + reg_cost + iou_cost

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" '
                              'to install scipy first.')
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(
            bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(
            bbox_pred.device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
spaces/CVPR/drawings-to-human/static/_app/immutable/assets/pages/index.svelte-7bf249dc.css
DELETED
@@ -1 +0,0 @@
form.svelte-1gwcbp.svelte-1gwcbp{width:100%;overflow:hidden}.samples.svelte-1gwcbp.svelte-1gwcbp{display:flex;scroll-snap-type:x var(--tw-scroll-snap-strictness);--tw-scroll-snap-strictness:mandatory;flex-wrap:nowrap;gap:.25rem;overflow-x:scroll;-ms-overflow-style:none;scrollbar-width:none}.samples.svelte-1gwcbp.svelte-1gwcbp::-webkit-scrollbar{display:none}input[type=radio].svelte-1gwcbp.svelte-1gwcbp{position:absolute;display:none;height:0px;width:0px;opacity:0}input[type=radio].svelte-1gwcbp.svelte-1gwcbp:disabled{opacity:.5}input[type=radio].svelte-1gwcbp:checked~label.svelte-1gwcbp{outline-style:solid;outline-width:2px;outline-color:#eab308}input[type=radio].svelte-1gwcbp:disabled+label.svelte-1gwcbp{opacity:.5}label.svelte-1gwcbp.svelte-1gwcbp{display:flex;cursor:pointer;outline-width:2px;outline-offset:-2px;outline-color:#eab308;transition-property:all;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1)}label.svelte-1gwcbp.svelte-1gwcbp:hover{outline-style:solid}img.svelte-1gwcbp.svelte-1gwcbp{max-height:6rem;max-width:none}.colors.svelte-1oy4poo.svelte-1oy4poo{display:grid;max-height:9rem;scroll-snap-type:y var(--tw-scroll-snap-strictness);--tw-scroll-snap-strictness:mandatory;grid-template-columns:repeat(2,minmax(0,1fr));gap:.5rem;overflow:scroll}@media (min-width: 530px){.colors.svelte-1oy4poo.svelte-1oy4poo{max-height:none;grid-template-columns:repeat(3,minmax(0,1fr))}}.colors.svelte-1oy4poo span.svelte-1oy4poo{margin-left:.5rem}.colors.svelte-1oy4poo svg.svelte-1oy4poo{display:block}input[type=radio].svelte-1oy4poo.svelte-1oy4poo{position:absolute;display:none;height:0px;width:0px;opacity:0}input[type=radio].svelte-1oy4poo:checked~label.svelte-1oy4poo{outline-style:solid;outline-width:2px;outline-color:#eab308}label.svelte-1oy4poo.svelte-1oy4poo{display:flex;cursor:pointer;white-space:nowrap;outline-width:2px;outline-offset:-2px;outline-color:#eab308;transition-property:all;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1)}label.svelte-1oy4poo.svelte-1oy4poo:hover{outline-style:solid}.brush.svelte-1oy4poo.svelte-1oy4poo{display:flex}.sections.svelte-uoay71.svelte-uoay71{display:flex;flex-direction:column;gap:.25rem}@media (min-width: 530px){.sections.svelte-uoay71.svelte-uoay71{flex-direction:row;gap:.75rem}}select.svelte-uoay71.svelte-uoay71,button.svelte-uoay71.svelte-uoay71,input.svelte-uoay71.svelte-uoay71{border-radius:.5rem;border-width:1px;--tw-border-opacity:1;border-color:rgb(209 213 219 / var(--tw-border-opacity));--tw-bg-opacity:1;background-color:rgb(249 250 251 / var(--tw-bg-opacity));padding:.25rem;font-size:.875rem;line-height:1.25rem;--tw-text-opacity:1;color:rgb(17 24 39 / var(--tw-text-opacity))}select.svelte-uoay71.svelte-uoay71:focus,button.svelte-uoay71.svelte-uoay71:focus,input.svelte-uoay71.svelte-uoay71:focus{--tw-border-opacity:1;border-color:rgb(59 130 246 / var(--tw-border-opacity));--tw-ring-opacity:1;--tw-ring-color:rgb(59 130 246 / var(--tw-ring-opacity)) }select.svelte-uoay71.svelte-uoay71:disabled,button.svelte-uoay71.svelte-uoay71:disabled,input.svelte-uoay71.svelte-uoay71:disabled{opacity:.5}@media (prefers-color-scheme: dark){select.svelte-uoay71.svelte-uoay71,button.svelte-uoay71.svelte-uoay71,input.svelte-uoay71.svelte-uoay71{--tw-border-opacity:1;border-color:rgb(75 85 99 / var(--tw-border-opacity));--tw-bg-opacity:1;background-color:rgb(55 65 81 / var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255 / var(--tw-text-opacity))}select.svelte-uoay71.svelte-uoay71::-moz-placeholder,button.svelte-uoay71.svelte-uoay71::-moz-placeholder,input.svelte-uoay71.svelte-uoay71::-moz-placeholder{--tw-placeholder-opacity:1;color:rgb(156 163 175 / var(--tw-placeholder-opacity))}select.svelte-uoay71.svelte-uoay71::placeholder,button.svelte-uoay71.svelte-uoay71::placeholder,input.svelte-uoay71.svelte-uoay71::placeholder{--tw-placeholder-opacity:1;color:rgb(156 163 175 / var(--tw-placeholder-opacity))}select.svelte-uoay71.svelte-uoay71:focus,button.svelte-uoay71.svelte-uoay71:focus,input.svelte-uoay71.svelte-uoay71:focus{--tw-border-opacity:1;border-color:rgb(59 130 246 / var(--tw-border-opacity));--tw-ring-opacity:1;--tw-ring-color:rgb(59 130 246 / var(--tw-ring-opacity)) }}input.svelte-uoay71:disabled+label.svelte-uoay71{opacity:.5}input.svelte-uoay71.svelte-uoay71{padding-left:.75rem}.canvas.svelte-1k5plc8{z-index:0;aspect-ratio:256/512;width:100%;max-width:100%;border-width:1px;--tw-border-opacity:1;border-color:rgb(107 114 128 / var(--tw-border-opacity))}@media (prefers-color-scheme: dark){.canvas.svelte-1k5plc8{--tw-border-opacity:1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}}.brush.svelte-1k5plc8{pointer-events:none;position:absolute;z-index:10;--tw-translate-x:-50%;--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.label.svelte-1k5plc8{pointer-events:none;position:absolute;top:0px;left:0px;z-index:20;-webkit-user-select:none;-moz-user-select:none;user-select:none;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem;--tw-text-opacity:1;color:rgb(255 255 255 / var(--tw-text-opacity));color:#fff;font-weight:bolder;-webkit-text-stroke:1px black;-webkit-text-fill-color:white}.image.svelte-1iibjwx{z-index:0;box-sizing:border-box;aspect-ratio:256/512;border-width:1px;--tw-border-opacity:1;border-color:rgb(107 114 128 / var(--tw-border-opacity))}@media (prefers-color-scheme: dark){.image.svelte-1iibjwx{--tw-border-opacity:1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}}.loading.svelte-1iibjwx{position:absolute;top:0px;left:0px;right:0px;bottom:0px;display:flex;flex-direction:column;align-items:center;justify-content:center}.drawings.svelte-237ry5{display:grid;grid-template-columns:2fr 1.5fr;place-items:center}@media (min-width: 530px){.drawings.svelte-237ry5{grid-template-columns:repeat(2,minmax(0,1fr))}}button.svelte-237ry5{border-radius:.5rem;border-width:1px;--tw-border-opacity:1;border-color:rgb(209 213 219 / var(--tw-border-opacity));--tw-bg-opacity:1;background-color:rgb(249 250 251 / var(--tw-bg-opacity));padding:.25rem;font-size:.875rem;line-height:1.25rem;--tw-text-opacity:1;color:rgb(17 24 39 / var(--tw-text-opacity))}button.svelte-237ry5:focus{--tw-border-opacity:1;border-color:rgb(59 130 246 / var(--tw-border-opacity));--tw-ring-opacity:1;--tw-ring-color:rgb(59 130 246 / var(--tw-ring-opacity)) }button.svelte-237ry5:disabled{opacity:.5}@media (prefers-color-scheme: dark){button.svelte-237ry5{--tw-border-opacity:1;border-color:rgb(75 85 99 / var(--tw-border-opacity));--tw-bg-opacity:1;background-color:rgb(55 65 81 / var(--tw-bg-opacity));--tw-text-opacity:1;color:rgb(255 255 255 / var(--tw-text-opacity))}button.svelte-237ry5::-moz-placeholder{--tw-placeholder-opacity:1;color:rgb(156 163 175 / var(--tw-placeholder-opacity))}button.svelte-237ry5::placeholder{--tw-placeholder-opacity:1;color:rgb(156 163 175 / var(--tw-placeholder-opacity))}button.svelte-237ry5:focus{--tw-border-opacity:1;border-color:rgb(59 130 246 / var(--tw-border-opacity));--tw-ring-opacity:1;--tw-ring-color:rgb(59 130 246 / var(--tw-ring-opacity)) }}

spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
DELETED
@@ -1,370 +0,0 @@
// Copyright (c) Facebook, Inc. and its affiliates.
#pragma once

#include <cassert>
#include <cmath>

#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
// Designates functions callable from the host (CPU) and the device (GPU)
#define HOST_DEVICE __host__ __device__
#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
#else
#include <algorithm>
#define HOST_DEVICE
#define HOST_DEVICE_INLINE HOST_DEVICE inline
#endif

namespace detectron2 {

namespace {

template <typename T>
struct RotatedBox {
  T x_ctr, y_ctr, w, h, a;
};

template <typename T>
struct Point {
  T x, y;
  HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
  HOST_DEVICE_INLINE Point operator+(const Point& p) const {
    return Point(x + p.x, y + p.y);
  }
  HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
    x += p.x;
    y += p.y;
    return *this;
  }
  HOST_DEVICE_INLINE Point operator-(const Point& p) const {
    return Point(x - p.x, y - p.y);
  }
  HOST_DEVICE_INLINE Point operator*(const T coeff) const {
    return Point(x * coeff, y * coeff);
  }
};

template <typename T>
HOST_DEVICE_INLINE T dot_2d(const Point<T>& A, const Point<T>& B) {
  return A.x * B.x + A.y * B.y;
}

// R: result type. can be different from input type
template <typename T, typename R = T>
HOST_DEVICE_INLINE R cross_2d(const Point<T>& A, const Point<T>& B) {
  return static_cast<R>(A.x) * static_cast<R>(B.y) -
      static_cast<R>(B.x) * static_cast<R>(A.y);
}

template <typename T>
HOST_DEVICE_INLINE void get_rotated_vertices(
    const RotatedBox<T>& box,
    Point<T> (&pts)[4]) {
  // M_PI / 180. == 0.01745329251
  double theta = box.a * 0.01745329251;
  T cosTheta2 = (T)cos(theta) * 0.5f;
  T sinTheta2 = (T)sin(theta) * 0.5f;

  // y: top --> down; x: left --> right
  pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w;
  pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
  pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w;
  pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
  pts[2].x = 2 * box.x_ctr - pts[0].x;
  pts[2].y = 2 * box.y_ctr - pts[0].y;
  pts[3].x = 2 * box.x_ctr - pts[1].x;
  pts[3].y = 2 * box.y_ctr - pts[1].y;
}

template <typename T>
HOST_DEVICE_INLINE int get_intersection_points(
    const Point<T> (&pts1)[4],
    const Point<T> (&pts2)[4],
    Point<T> (&intersections)[24]) {
  // Line vector
  // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
  Point<T> vec1[4], vec2[4];
  for (int i = 0; i < 4; i++) {
    vec1[i] = pts1[(i + 1) % 4] - pts1[i];
    vec2[i] = pts2[(i + 1) % 4] - pts2[i];
  }

  // When computing the intersection area, it doesn't hurt if we have
  // more (duplicated/approximate) intersections/vertices than needed,
  // while it can cause drastic difference if we miss an intersection/vertex.
  // Therefore, we add an epsilon to relax the comparisons between
  // the floating point numbers that decide the intersection points.
  double EPS = 1e-5;

  // Line test - test all line combos for intersection
  int num = 0; // number of intersections
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      // Solve for 2x2 Ax=b
      T det = cross_2d<T>(vec2[j], vec1[i]);

      // This takes care of parallel lines
      if (fabs(det) <= 1e-14) {
        continue;
      }

      auto vec12 = pts2[j] - pts1[i];

      T t1 = cross_2d<T>(vec2[j], vec12) / det;
      T t2 = cross_2d<T>(vec1[i], vec12) / det;

      if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) {
        intersections[num++] = pts1[i] + vec1[i] * t1;
      }
    }
  }

  // Check for vertices of rect1 inside rect2
  {
    const auto& AB = vec2[0];
    const auto& DA = vec2[3];
    auto ABdotAB = dot_2d<T>(AB, AB);
    auto ADdotAD = dot_2d<T>(DA, DA);
    for (int i = 0; i < 4; i++) {
      // assume ABCD is the rectangle, and P is the point to be judged
      // P is inside ABCD iff. P's projection on AB lies within AB
      // and P's projection on AD lies within AD

      auto AP = pts1[i] - pts2[0];

      auto APdotAB = dot_2d<T>(AP, AB);
      auto APdotAD = -dot_2d<T>(AP, DA);

      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
          (APdotAD < ADdotAD + EPS)) {
        intersections[num++] = pts1[i];
      }
    }
  }

  // Reverse the check - check for vertices of rect2 inside rect1
  {
    const auto& AB = vec1[0];
    const auto& DA = vec1[3];
    auto ABdotAB = dot_2d<T>(AB, AB);
    auto ADdotAD = dot_2d<T>(DA, DA);
    for (int i = 0; i < 4; i++) {
      auto AP = pts2[i] - pts1[0];

      auto APdotAB = dot_2d<T>(AP, AB);
      auto APdotAD = -dot_2d<T>(AP, DA);

      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
          (APdotAD < ADdotAD + EPS)) {
        intersections[num++] = pts2[i];
      }
    }
  }

  return num;
}

template <typename T>
HOST_DEVICE_INLINE int convex_hull_graham(
    const Point<T> (&p)[24],
    const int& num_in,
    Point<T> (&q)[24],
    bool shift_to_zero = false) {
  assert(num_in >= 2);

  // Step 1:
  // Find point with minimum y
  // if more than one point has the same minimum y,
  // pick the one with the minimum x.
  int t = 0;
  for (int i = 1; i < num_in; i++) {
    if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
      t = i;
    }
  }
  auto& start = p[t]; // starting point

  // Step 2:
  // Subtract starting point from every point (for sorting in the next step)
  for (int i = 0; i < num_in; i++) {
    q[i] = p[i] - start;
  }

  // Swap the starting point to position 0
  auto tmp = q[0];
  q[0] = q[t];
  q[t] = tmp;

  // Step 3:
  // Sort point 1 ~ num_in according to their relative cross-product values
  // (essentially sorting according to angles)
  // If the angles are the same, sort according to their distance to origin
  T dist[24];
#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
  // compute distance to origin before sort, and sort them together with the
  // points
  for (int i = 0; i < num_in; i++) {
    dist[i] = dot_2d<T>(q[i], q[i]);
  }

  // CUDA version
  // In the future, we can potentially use thrust
  // for sorting here to improve speed (though not guaranteed)
  for (int i = 1; i < num_in - 1; i++) {
    for (int j = i + 1; j < num_in; j++) {
      T crossProduct = cross_2d<T>(q[i], q[j]);
      if ((crossProduct < -1e-6) ||
          (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
        auto q_tmp = q[i];
        q[i] = q[j];
        q[j] = q_tmp;
        auto dist_tmp = dist[i];
        dist[i] = dist[j];
        dist[j] = dist_tmp;
      }
    }
  }
#else
  // CPU version
  std::sort(
      q + 1, q + num_in, [](const Point<T>& A, const Point<T>& B) -> bool {
        T temp = cross_2d<T>(A, B);
        if (fabs(temp) < 1e-6) {
          return dot_2d<T>(A, A) < dot_2d<T>(B, B);
        } else {
          return temp > 0;
        }
      });
  // compute distance to origin after sort, since the points are now different.
  for (int i = 0; i < num_in; i++) {
    dist[i] = dot_2d<T>(q[i], q[i]);
  }
#endif

  // Step 4:
  // Make sure there are at least 2 points (that don't overlap with each other)
  // in the stack
  int k; // index of the non-overlapped second point
  for (k = 1; k < num_in; k++) {
    if (dist[k] > 1e-8) {
      break;
    }
  }
  if (k == num_in) {
    // We reach the end, which means the convex hull is just one point
    q[0] = p[t];
    return 1;
  }
  q[1] = q[k];
  int m = 2; // 2 points in the stack
  // Step 5:
  // Finally we can start the scanning process.
  // When a non-convex relationship between the 3 points is found
  // (either concave shape or duplicated points),
  // we pop the previous point from the stack
  // until the 3-point relationship is convex again, or
  // until the stack only contains two points
  for (int i = k + 1; i < num_in; i++) {
    while (m > 1) {
      auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2];
      // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) -
      // q2.x*q1.y). So it may not return 0 even when q1==q2. Therefore we
      // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means
      // round to nearest floating point).
      if (q1.x * q2.y >= q2.x * q1.y)
        m--;
      else
        break;
    }
    // Using double also helps, but float can solve the issue for now.
    // while (m > 1 && cross_2d<T, double>(q[i] - q[m - 2], q[m - 1] - q[m - 2])
    // >= 0) {
    //     m--;
    // }
    q[m++] = q[i];
  }

  // Step 6 (Optional):
  // In general sense we need the original coordinates, so we
  // need to shift the points back (reverting Step 2)
  // But if we're only interested in getting the area/perimeter of the shape
  // We can simply return.
  if (!shift_to_zero) {
    for (int i = 0; i < m; i++) {
      q[i] += start;
    }
  }

  return m;
}

template <typename T>
HOST_DEVICE_INLINE T polygon_area(const Point<T> (&q)[24], const int& m) {
  if (m <= 2) {
    return 0;
  }

  T area = 0;
  for (int i = 1; i < m - 1; i++) {
    area += fabs(cross_2d<T>(q[i] - q[0], q[i + 1] - q[0]));
  }

  return area / 2.0;
}

template <typename T>
HOST_DEVICE_INLINE T rotated_boxes_intersection(
    const RotatedBox<T>& box1,
    const RotatedBox<T>& box2) {
  // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
  // from rotated_rect_intersection_pts
  Point<T> intersectPts[24], orderedPts[24];

  Point<T> pts1[4];
  Point<T> pts2[4];
  get_rotated_vertices<T>(box1, pts1);
  get_rotated_vertices<T>(box2, pts2);

  int num = get_intersection_points<T>(pts1, pts2, intersectPts);

  if (num <= 2) {
    return 0.0;
  }

  // Convex Hull to order the intersection points in clockwise order and find
  // the contour area.
  int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true);
  return polygon_area<T>(orderedPts, num_convex);
}

} // namespace

template <typename T>
HOST_DEVICE_INLINE T
single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) {
  // shift center to the middle point to achieve higher precision in result
  RotatedBox<T> box1, box2;
  auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
  auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
  box1.x_ctr = box1_raw[0] - center_shift_x;
  box1.y_ctr = box1_raw[1] - center_shift_y;
  box1.w = box1_raw[2];
  box1.h = box1_raw[3];
  box1.a = box1_raw[4];
  box2.x_ctr = box2_raw[0] - center_shift_x;
  box2.y_ctr = box2_raw[1] - center_shift_y;
  box2.w = box2_raw[2];
  box2.h = box2_raw[3];
  box2.a = box2_raw[4];

  T area1 = box1.w * box1.h;
  T area2 = box2.w * box2.h;
  if (area1 < 1e-14 || area2 < 1e-14) {
    return 0.f;
  }

  T intersection = rotated_boxes_intersection<T>(box1, box2);
  T iou = intersection / (area1 + area2 - intersection);
  return iou;
}

} // namespace detectron2
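
The chain above (get_rotated_vertices → get_intersection_points → convex_hull_graham → polygon_area) computes the exact intersection area of two rotated rectangles, from which single_box_iou_rotated derives the IoU. A small Python cross-check of the same geometry, reusing the vertex formula from get_rotated_vertices() and leaning on shapely (an assumed extra dependency, used here only as an oracle) for the polygon intersection:

    import math
    from shapely.geometry import Polygon  # assumed available; not part of this repo

    def rotated_vertices(x_ctr, y_ctr, w, h, a_deg):
        # Same vertex construction as get_rotated_vertices() above
        theta = math.radians(a_deg)
        cos2, sin2 = math.cos(theta) * 0.5, math.sin(theta) * 0.5
        p0 = (x_ctr + sin2 * h + cos2 * w, y_ctr + cos2 * h - sin2 * w)
        p1 = (x_ctr - sin2 * h + cos2 * w, y_ctr - cos2 * h - sin2 * w)
        p2 = (2 * x_ctr - p0[0], 2 * y_ctr - p0[1])
        p3 = (2 * x_ctr - p1[0], 2 * y_ctr - p1[1])
        return [p0, p1, p2, p3]

    def iou_rotated(b1, b2):  # b = (x_ctr, y_ctr, w, h, angle_deg)
        q1 = Polygon(rotated_vertices(*b1))
        q2 = Polygon(rotated_vertices(*b2))
        inter = q1.intersection(q2).area
        return inter / (q1.area + q2.area - inter)

    # Two unit squares offset by half a box: IoU = 0.5 / (1 + 1 - 0.5) = 1/3
    print(iou_rotated((0, 0, 1, 1, 0), (0.5, 0, 1, 1, 0)))  # ~0.3333

The C++ header avoids any such geometry library on purpose: everything is header-only, branch-light, and HOST_DEVICE-annotated so the same code compiles for both CPU and CUDA kernels.
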
spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/no_memory.py
DELETED
@@ -1,73 +0,0 @@
"""A class that does not store any data. This is the default memory provider."""
from __future__ import annotations

from typing import Any

from autogpt.memory.base import MemoryProviderSingleton


class NoMemory(MemoryProviderSingleton):
    """
    A class that does not store any data. This is the default memory provider.
    """

    def __init__(self, cfg):
        """
        Initializes the NoMemory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        pass

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory. No action is taken in NoMemory.

        Args:
            data: The data to add.

        Returns: An empty string.
        """
        return ""

    def get(self, data: str) -> list[Any] | None:
        """
        Gets the data from the memory that is most relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.

        Returns: None
        """
        return None

    def clear(self) -> str:
        """
        Clears the memory. No action is taken in NoMemory.

        Returns: An empty string.
        """
        return ""

    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
        """
        Returns all the data in the memory that is relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: None
        """
        return None

    def get_stats(self):
        """
        Returns: An empty dictionary as there are no stats in NoMemory.
        """
        return {}
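
NoMemory is a null-object implementation: every method satisfies the memory-provider interface while storing nothing, so the rest of the codebase can call it unconditionally. A quick sketch of what a caller sees (the cfg=None placeholder is illustrative, since __init__ ignores it):

    from autogpt.memory.no_memory import NoMemory

    memory = NoMemory(cfg=None)    # cfg is unused by __init__
    memory.add("observation")      # -> "" (nothing is stored)
    memory.get("observation")      # -> None
    memory.get_relevant("query")   # -> None
    memory.get_stats()             # -> {}
    memory.clear()                 # -> ""
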