Commit 3da387e
Parent(s): 7a8d488
Update parquet files (step 60 of 397)
This view is limited to 50 files because it contains too many changes; see the raw diff for the complete set of changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Catia V5 R20 64 Bit Crack The Ultimate Solution for Your Engineering Projects.md +0 -118
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codigo De Activacion De Video Repair 16.md +0 -139
- spaces/1gistliPinn/ChatGPT4/Examples/3dmgame Dll Mediafire 88.md +0 -45
- spaces/1gistliPinn/ChatGPT4/Examples/A Pdf Content Splitter 4.8.4 [HOT] Keygen For 14.md +0 -39
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/60 Lakh - The New Punjabi Hit by Bukka Jatt and R Nait.md +0 -109
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo descargar Video Poker Jackpot APK y ganar grandes premios.md +0 -181
- spaces/1toTree/lora_test/ppdiffusers/experimental/rl/__init__.py +0 -17
- spaces/A00001/bingothoo/src/state/index.ts +0 -118
- spaces/AB-TW/team-ai/app.py +0 -190
- spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/transforms.py +0 -209
- spaces/AI-Zero-to-Hero/03-GR-AI-Text2ArtGenerator/README.md +0 -13
- spaces/AIConsultant/MusicGen/audiocraft/data/audio_dataset.py +0 -587
- spaces/AICopilot/Dropbox/README.md +0 -13
- spaces/AIFILMS/StyleGANEX/configs/transforms_config.py +0 -242
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/base_model.py +0 -16
- spaces/AISuperheroes/07GR-NLP-Seq2Seq-AutoQA/README.md +0 -13
- spaces/Aaron299/bingo/Dockerfile +0 -7
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/base_provider.py +0 -138
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasinput.js +0 -2
- spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/networks.py +0 -966
- spaces/Amrrs/openai-whisper-live-transcribe/app.py +0 -36
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/paint_by_example.md +0 -39
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py +0 -296
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/deprecation_utils.py +0 -49
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_lora_layers.py +0 -841
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/README.md +0 -59
- spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py +0 -15
- spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_1x_coco.py +0 -140
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/RWKV.py +0 -153
- spaces/AnkitGaur2811/Image_Conversion_app_using_Opencv/README.md +0 -12
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/main.py +0 -9
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/gather_points.py +0 -57
- spaces/AntX-ai/Fintech/index.html +0 -15
- spaces/AnthonyTruchetPoC/persistent-docker/scripts/build-clean-docs.sh +0 -11
- spaces/Apex-X/nono/roop/typing.py +0 -7
- spaces/Atsushi/kinoko-mini-AI/app.py +0 -30
- spaces/Bavesh/Oral_Cancer_Detection/README.md +0 -13
- spaces/Benson/text-generation/Examples/Apk Download Traffic Rider Hack.md +0 -77
- spaces/Benson/text-generation/Examples/Descargar Apk Mvil Zingspeed.md +0 -103
- spaces/Benson/text-generation/Examples/Descargar El Formulario 29 30.md +0 -51
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/__init__.py +0 -4
- spaces/CVH-vn1210/make_hair/minigpt4/models/eva_vit.py +0 -442
- spaces/CVPR/LIVE/pybind11/include/pybind11/attr.h +0 -528
- spaces/CVPR/LIVE/thrust/thrust/detail/allocator/no_throw_allocator.h +0 -71
- spaces/CVPR/WALT/mmdet/core/mask/__init__.py +0 -8
- spaces/CVPR/WALT/mmdet/core/mask/mask_target.py +0 -122
- spaces/CVPR/regionclip-demo/detectron2/data/datasets/cityscapes_panoptic.py +0 -187
- spaces/Caoyunkang/Segment-Any-Anomaly/utils/visualization.py +0 -131
- spaces/CourserLi/classify/app.py +0 -32
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Catia V5 R20 64 Bit Crack The Ultimate Solution for Your Engineering Projects.md
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Catia V5 R20 64 Bit Crack: What You Need to Know</h1>
|
3 |
-
<p>If you are looking for a way to use one of the most popular and powerful software for Computer Aided Design, Engineering, Analysis, Manufacturing and Production (CAD/CAM/CAE), you might have heard of Catia V5 R20. This is a comprehensive program that allows you to design, analyze, and produce products in various industries, such as aerospace, automotive, shipbuilding, consumer goods, and more. However, using Catia V5 R20 is not cheap or easy. You need to pay a license fee to access its full features, and you need to activate it online with a valid serial number. This can be a hassle for some users who want to use Catia V5 R20 without spending too much money or time.</p>
|
4 |
-
<p>That's why some people look for a crack for Catia V5 R20. A crack is a file that modifies or bypasses the original software's security features, allowing you to use it without paying or activating it. A crack can be a patch, a keygen, or a loader that changes the software's code or behavior. In this article, we will show you how to download and install Catia V5 R20 64 bit crack on your Windows computer. We will also discuss the benefits and risks of using a crack for Catia V5 R20, as well as some alternatives that you can consider.</p>
|
5 |
-
<h2>Catia V5 R20 64 Bit Crack</h2><br /><p><b><b>DOWNLOAD</b> ►►►►► <a href="https://byltly.com/2uKv3v">https://byltly.com/2uKv3v</a></b></p><br /><br />
|
6 |
-
<h2>How to Download Catia V5 R20 64 Bit Crack</h2>
|
7 |
-
<p>The first step to use Catia V5 R20 with a crack is to find and download the crack file from a reliable source. There are many websites that claim to offer cracks for various software, but not all of them are trustworthy. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Some of them may also provide fake or outdated cracks that do not work or cause errors. Therefore, you need to be careful when choosing where to download the crack file from.</p>
|
8 |
-
<p>One possible source that we found is <a href="https://vspaceu.blogspot.com/2022/07/catia-p2-v5r20-free-download.html">this blog post</a> that provides a link to download Catia P2 V5R20 with a crack included. According to the post, this is a full offline installer setup of Catia P2 V5R20 that works perfectly fine without any problem. The post also provides instructions on how to install the program and apply the crack. However, we cannot guarantee the safety or validity of this source, so you should use it at your own risk.</p>
|
9 |
-
<p>To download the crack file from this source, you need to follow these steps:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Click on the green "Direct Download" button on the blog post.</li>
|
12 |
-
<li>Wait for the download to start automatically.</li>
|
13 |
-
<li>Save the file on your computer.</li>
|
14 |
-
<li>Extract the file using WinRAR or WinZip.</li>
|
15 |
-
</ul>
|
16 |
-
<h2>How to Install Catia V5 R20 64 Bit Crack</h2>
|
17 |
-
<p>The next step is to install Catia P2 V5R20 on your computer using the crack file. Before you do that, you need to make sure that your system meets the minimum requirements for running Catia P2 V5R20. According to <a href="https://pesktop.com/en/windows/ds_catia">this website</a> , these are the system requirements:</p>
|
18 |
-
<ul>
|
19 |
-
<li>Operating System: Windows 7/8/8.1/10</li>
|
20 |
-
<li>Memory (RAM): 4 GB of RAM required.</li>
|
21 |
-
<li>Hard Disk Space: 5 GB of free space required for full installation.</li>
|
22 |
-
<li>Processor: Intel Pentium i3, Multi-core GHz or higher.</li>
|
23 |
-
</ul>
|
24 |
-
<p>You also need to disable your antivirus and firewall before installing Catia P2 V5R20 with a crack. This is because some antivirus programs may detect the crack file as a threat and delete it or block its execution. To disable your antivirus and firewall, you can follow <a href="https://support.microsoft.com/en-us/windows/turn-microsoft-defender-firewall-on-or-off-ec0844f7-aebd-0583-67fe-601ecf79f6ee">these steps</a> for Windows Defender Firewall or <a href="https://support.microsoft.com/en-us/windows/how-to-turn-off-microsoft-defender-antivirus-in-windows-10-99e6004f-c54c-8509-773c-a4d776b77960">these steps</a> for Microsoft Defender Antivirus.</p>
|
25 |
-
<p>After disabling your antivirus and firewall, you can install Catia P2 V5R20 with a crack by following these steps:</p>
|
26 |
-
<p>How to install Catia V5 R20 64 Bit Crack on Windows 10<br />
|
27 |
-
Catia V5 R20 64 Bit Crack download link<br />
|
28 |
-
Catia V5 R20 64 Bit Crack free trial<br />
|
29 |
-
Catia V5 R20 64 Bit Crack license key generator<br />
|
30 |
-
Catia V5 R20 64 Bit Crack tutorial pdf<br />
|
31 |
-
Catia V5 R20 64 Bit Crack system requirements<br />
|
32 |
-
Catia V5 R20 64 Bit Crack vs Catia V6<br />
|
33 |
-
Catia V5 R20 64 Bit Crack features and benefits<br />
|
34 |
-
Catia V5 R20 64 Bit Crack online course<br />
|
35 |
-
Catia V5 R20 64 Bit Crack review and rating<br />
|
36 |
-
Catia V5 R20 64 Bit Crack alternatives and competitors<br />
|
37 |
-
Catia V5 R20 64 Bit Crack price and discount<br />
|
38 |
-
Catia V5 R20 64 Bit Crack support and customer service<br />
|
39 |
-
Catia V5 R20 64 Bit Crack activation code and serial number<br />
|
40 |
-
Catia V5 R20 64 Bit Crack error and troubleshooting<br />
|
41 |
-
Catia V5 R20 64 Bit Crack update and patch<br />
|
42 |
-
Catia V5 R20 64 Bit Crack tips and tricks<br />
|
43 |
-
Catia V5 R20 64 Bit Crack best practices and standards<br />
|
44 |
-
Catia V5 R20 64 Bit Crack comparison and benchmark<br />
|
45 |
-
Catia V5 R20 64 Bit Crack pros and cons<br />
|
46 |
-
Catia V5 R20 64 Bit Crack forum and community<br />
|
47 |
-
Catia V5 R20 64 Bit Crack case study and success story<br />
|
48 |
-
Catia V5 R20 64 Bit Crack FAQ and Q&A<br />
|
49 |
-
Catia V5 R20 64 Bit Crack video and audio<br />
|
50 |
-
Catia V5 R20 64 Bit Crack blog and article<br />
|
51 |
-
Catia V5 R20 64 Bit Crack ebook and guide<br />
|
52 |
-
Catia V5 R20 64 Bit Crack webinar and workshop<br />
|
53 |
-
Catia V5 R20 64 Bit Crack software and hardware<br />
|
54 |
-
Catia V5 R20 64 Bit Crack tools and resources<br />
|
55 |
-
Catia V5 R20 64 Bit Crack simulation and animation<br />
|
56 |
-
Catia V5 R20 64 Bit Crack design and modeling<br />
|
57 |
-
Catia V5 R20 64 Bit Crack engineering and analysis<br />
|
58 |
-
Catia V5 R20 64 Bit Crack manufacturing and production<br />
|
59 |
-
Catia V5 R20 64 Bit Crack testing and validation<br />
|
60 |
-
Catia V5 R20 64 Bit Crack optimization and improvement<br />
|
61 |
-
Catia V5 R20 64 Bit Crack integration and interoperability<br />
|
62 |
-
Catia V5 R20 64 Bit Crack collaboration and communication<br />
|
63 |
-
Catia V5 R20 64 Bit Crack documentation and reporting<br />
|
64 |
-
Catia V5 R20 64 Bit Crack customization and configuration<br />
|
65 |
-
Catia V5 R20 64 Bit Crack security and privacy<br />
|
66 |
-
Catia V5 R20 64 Bit Crack backup and recovery<br />
|
67 |
-
Catia V5 R20 64 Bit Crack migration and upgrade<br />
|
68 |
-
Catia V5 R20 64 Bit Crack compatibility and performance<br />
|
69 |
-
Catia V5 R20 64 Bit Crack quality and reliability<br />
|
70 |
-
Catia V5 R20 64 Bit Crack innovation and creativity<br />
|
71 |
-
Catia V5 R20 64 Bit Crack fun and entertainment<br />
|
72 |
-
Catia V5 R20 64 Bit Crack challenge and opportunity<br />
|
73 |
-
Catia V5 R20 64 Bit Crack learning and development<br />
|
74 |
-
Catia V5 R20 64 Bit Crack career and growth</p>
|
75 |
-
<ul>
|
76 |
-
<li>Open the installation folder after extracting the file and click on the installation file.</li>
|
77 |
-
<li>Accept the terms and conditions and follow the instructions on the screen.</li>
|
78 |
-
<li>Select the destination folder where you want to install Catia P2 V5R20.</li>
|
79 |
-
<li>Wait for the installation process to complete.</li>
|
80 |
-
<li>Open the Mechdesign_Fix folder inside the extracted file and copy the JS0GROUP.dll file.</li>
|
81 |
-
<li>Paste the JS0GROUP.dll file into the installation folder where you installed Catia P2 V5R20.</li>
|
82 |
-
<li>Replace the original JS0GROUP.dll file with the copied one.</li>
|
83 |
-
<li>Run Catia P2 V5R20 from your desktop shortcut or start menu.</li>
|
84 |
-
</ul>
|
85 |
-
<h2>Benefits of Using Catia V5 R20 64 Bit Crack</h2>
|
86 |
-
<p>By using Catia P2 V5R20 with a crack, you can enjoy some benefits that may not be available if you use the original software with a license. Here are some of them:</p>
|
87 |
-
<ul>
|
88 |
-
<li>You can access all the features and functions of Catia P2 V5R20 without paying any license fee or activation code. This can save you a lot of money in the long run.</li>
|
89 |
-
<li>You can use Catia P2 V5R20 without any online connection or verification process. This can save you time and hassle when you want to use it anytime and anywhere.</li>
|
90 |
-
<li>You can explore different design, analysis, and production environments in Catia P2 V5R20 that are suitable for various industries and applications. You can also integrate them with other tools and processes for better results.</li>
|
91 |
-
</ul>
|
92 |
-
<h2>Risks of Using Catia V5 R20 64 Bit Crack</h2>
|
93 |
-
<p>However, using Catia P2 V5R20 with a crack also comes with some risks that you should be aware of. Here are some of them:</p>
|
94 |
-
<ul>
|
95 |
-
<li>You may face legal consequences for violating the intellectual property rights of Dassault Systèmes, the developer of Catia P2 V5R20. They may sue you for damages or take other actions against you if they find out that you are using their software illegally.</li>
|
96 |
-
<li>You may expose your computer to security threats from malware or viruses that may be hidden in the crack file or the source website. These malicious programs may damage your system, steal your data, or compromise your privacy.</li>
|
97 |
-
<li>You may experience performance issues or errors from using an unstable or outdated crack file that does not match the latest version of Catia P2 V5R20. You may also miss out on new features, updates, or bug fixes that are available for the original software.</li>
|
98 |
-
</ul>
|
99 |
-
<h2>Alternatives to Catia V5 R20 64 Bit Crack</h2>
|
100 |
-
<p>If you are not comfortable with using Catia P2 V5R20 with a crack, or if you want to avoid the risks associated with it, you can consider some alternatives that may suit your needs better. Here are some of them:</p>
|
101 |
-
<ul>
|
102 |
-
<li>You can use a free trial version of Catia P2 V5R20 that allows you to use the software for a limited time without paying or activating it. You can download the free trial version from <a href="https://www.3ds.com/products-services/catia/trial/">this website</a>. However, you may not be able to access all the features or save your work after the trial period expires.</li>
|
103 |
-
<li>You can use a student or academic version of Catia P2 V5R20 that is available for students and teachers who want to learn or teach the software. You can get the student or academic version from <a href="https://academy.3ds.com/en/software/catia-v5-student-edition">this website</a>. However, you may need to provide proof of your eligibility and agree to some terms and conditions before using it.</li>
|
104 |
-
<li>You can use a similar but cheaper or free CAD/CAM/CAE tool that offers comparable functions and features to Catia P2 V5R20. Some examples are <a href="https://www.autodesk.com/products/fusion-360/overview">Fusion 360</a>, <a href="https://www.freecadweb.org/">FreeCAD</a>, <a href="https://www.solidworks.com/">SolidWorks</a>, and <a href="https://www.plm.automation.siemens.com/global/en/products/nx/">NX</a>. However, you may need to learn a new interface or workflow and deal with compatibility issues when switching to a different tool.</li>
|
105 |
-
</ul>
|
106 |
-
<h1>Conclusion</h1>
|
107 |
-
<p>In conclusion, Catia V5 R20 64 bit crack is a file that allows you to use Catia P2 V5R20 without paying or activating it. It can provide some benefits such as saving money and time and accessing a powerful and comprehensive CAD/CAM/CAE tool. However, it also has some risks such as legal consequences, security threats, and performance issues. Therefore, you should weigh the pros and cons carefully before deciding whether to use it or not. Alternatively, you can consider some other options such as using a free trial version, a student or academic version, or a similar but cheaper or free tool.</p>
|
108 |
-
<h2>FAQs</h2>
|
109 |
-
<ul>
|
110 |
-
<li><b>What is Catia V5 R20?</b><br>Catia V5 R20 is a comprehensive program for Computer Aided Design, Engineering, Analysis, Manufacturing and Production (CAD/CAM/CAE) that allows you to design, analyze, and produce products in various industries.</li>
|
111 |
-
<li><b>What is a crack?</b><br>A crack is a file that modifies or bypasses the original software's security features, allowing you to use it without paying or activating it.</li>
|
112 |
-
<li><b>How to download and install Catia V5 R20 64 bit crack?</b><br>To download and install Catia V5 R20 64 bit crack, you need to find and download the crack file from a reliable source, disable your antivirus and firewall, install Catia P2 V5R20 on your computer, and copy and paste the crack file into the installation folder.</li>
|
113 |
-
<li><b>What are the benefits of using Catia V5 R20 64 bit crack?</b><br>The benefits of using Catia V5 R20 64 bit crack are that you can access all the features and functions of Catia P2 V5R20 without paying any license fee or activation code, use Catia P2 V5R20 without any online connection or verification process, and explore different design, analysis, and production environments in Catia P2 V5R20.</li>
|
114 |
-
<li><b>What are the risks of using Catia V5 R20 64 bit crack?</b><br>The risks of using Catia V5 R20 64 bit crack are that you may face legal consequences for violating the intellectual property rights of Dassault Systèmes, expose your computer to security threats from malware or viruses that may be hidden in the crack file or the source website, and experience performance issues or errors from using an unstable or outdated crack file that does not match the latest version of Catia P2 V5R20.</li>
|
115 |
-
</ul>
|
116 |
-
</p> 0a6ba089eb<br />
|
117 |
-
<br />
|
118 |
-
<br />
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codigo De Activacion De Video Repair 16.md
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Codigo De Activacion De Video Repair 16: How to Fix Your Corrupted Videos</h1>
|
3 |
-
<p>Have you ever encountered a situation where your videos are corrupted and you can't play them on your computer or mobile device? Maybe you have recorded some precious moments with your family or friends, but the videos are damaged due to virus infection, power failure, improper operation, or other reasons. Or maybe you have downloaded some videos from the internet, but they are incomplete or broken. How frustrating is that?</p>
|
4 |
-
<h2>Codigo De Activacion De Video Repair 16</h2><br /><p><b><b>Download</b> ⚹ <a href="https://byltly.com/2uKymQ">https://byltly.com/2uKymQ</a></b></p><br /><br />
|
5 |
-
<p>Don't worry, there is a solution for you. In this article, we will introduce you to a powerful tool called Video Repair 16, which can help you fix your corrupted videos in a few simple steps. We will also show you how to get a codigo de activacion for Video Repair 16, which is required to activate the full version of the program. And we will share some tips and tricks for using Video Repair 16 effectively. So, let's get started!</p>
|
6 |
-
<h2>Introduction</h2>
|
7 |
-
<h3>What is Video Repair 16?</h3>
|
8 |
-
<p>Video Repair 16 is a professional video repair software that can repair various types of video corruption issues, such as video not playing, video freezing, video stuttering, video out of sync, video pixelated, video distorted, video black screen, and more. It supports repairing videos in various formats, such as MP4, MOV, AVI, MKV, FLV, WMV, etc. It also supports repairing videos from different sources, such as cameras, camcorders, drones, smartphones, memory cards, hard drives, etc.</p>
|
9 |
-
<p>Video Repair 16 has two repair modes: quick repair and advanced repair. The quick repair mode can fix most common video corruption issues by analyzing and repairing the video data. The advanced repair mode can fix more complex video corruption issues by using a sample video file as a reference. The sample video file should be from the same device and in the same format as the corrupted video file.</p>
|
10 |
-
<h3>Why do you need a codigo de activacion for Video Repair 16?</h3>
|
11 |
-
<p>Video Repair 16 is a paid software that offers a free trial version for users to test its features and performance. However, the free trial version has some limitations. For example, it can only repair up to three videos at a time, and it can only save up to one-third of each repaired video. To unlock the full functionality of Video Repair 16 and repair unlimited videos without any restrictions, you need to purchase a codigo de activacion for Video Repair 16.</p>
|
12 |
-
<p>Como obtener el codigo de activacion de video repair 16<br />
|
13 |
-
Video repair 16 codigo de activacion gratis<br />
|
14 |
-
Donde encontrar el codigo de activacion de video repair 16<br />
|
15 |
-
Video repair 16 codigo de activacion full<br />
|
16 |
-
Solucionar error de codigo de activacion de video repair 16<br />
|
17 |
-
Video repair 16 codigo de activacion crack<br />
|
18 |
-
Descargar codigo de activacion de video repair 16<br />
|
19 |
-
Video repair 16 codigo de activacion online<br />
|
20 |
-
Generar codigo de activacion de video repair 16<br />
|
21 |
-
Video repair 16 codigo de activacion serial<br />
|
22 |
-
Requisitos para el codigo de activacion de video repair 16<br />
|
23 |
-
Video repair 16 codigo de activacion licencia<br />
|
24 |
-
Funcionamiento del codigo de activacion de video repair 16<br />
|
25 |
-
Video repair 16 codigo de activacion keygen<br />
|
26 |
-
Tutorial para el codigo de activacion de video repair 16<br />
|
27 |
-
Video repair 16 codigo de activacion mega<br />
|
28 |
-
Ventajas del codigo de activacion de video repair 16<br />
|
29 |
-
Video repair 16 codigo de activacion original<br />
|
30 |
-
Alternativas al codigo de activacion de video repair 16<br />
|
31 |
-
Video repair 16 codigo de activacion premium<br />
|
32 |
-
Pasos para el codigo de activacion de video repair 16<br />
|
33 |
-
Video repair 16 codigo de activacion valido<br />
|
34 |
-
Beneficios del codigo de activacion de video repair 16<br />
|
35 |
-
Video repair 16 codigo de activacion windows<br />
|
36 |
-
Caracteristicas del codigo de activacion de video repair 16<br />
|
37 |
-
Video repair 16 codigo de activacion youtube<br />
|
38 |
-
Consejos para el codigo de activacion de video repair 16<br />
|
39 |
-
Video repair 16 codigo de activacion zip<br />
|
40 |
-
Dudas sobre el codigo de activacion de video repair 16<br />
|
41 |
-
Video repair 16 codigo de activacion zippyshare<br />
|
42 |
-
Opiniones sobre el codigo de activacion de video repair 16<br />
|
43 |
-
Video repair 16 codigo de activacion mediafire<br />
|
44 |
-
Preguntas frecuentes sobre el codigo de activacion de video repair 16<br />
|
45 |
-
Video repair 16 codigo de activacion uptobox<br />
|
46 |
-
Testimonios sobre el codigo de activacion de video repair 16<br />
|
47 |
-
Video repair 16 codigo de activacion rapidgator<br />
|
48 |
-
Problemas con el codigo de activacion de video repair 16<br />
|
49 |
-
Video repair 16 codigo de activacion turbobit<br />
|
50 |
-
Garantia del codigo de activacion de video repair 16<br />
|
51 |
-
Video repair 16 codigo de activacion uploaded<br />
|
52 |
-
Soporte para el codigo de activacion de video repair 16<br />
|
53 |
-
Video repair 16 codigo de activacion nitroflare<br />
|
54 |
-
Oferta del codigo de activacion de video repair 16<br />
|
55 |
-
Video repair 16 codigo de activacion filefactory<br />
|
56 |
-
Comparativa del codigo de activacion de video repair 16<br />
|
57 |
-
Video repair 16 codigo de activacion depositfiles<br />
|
58 |
-
Valoracion del codigo de activacion de video repair 16<br />
|
59 |
-
Video repair 16 codigo de activacion hitfile<br />
|
60 |
-
Experiencia con el codigo</p>
|
61 |
-
<p>A codigo de activacion for Video Repair 16 is a unique code that is generated after you buy a license for the software. It is used to verify your identity and activate your copy of Video Repair 16. Once you enter your codigo de activacion in the program, you can enjoy all the benefits of Video Repair 16.</p>
|
62 |
-
<h3>How to get a codigo de activacion for Video Repair 16?</h3>
|
63 |
-
<p>To get a codigo de activacion for Video Repair 16, you need to follow these steps:</p>
|
64 |
-
<ol>
|
65 |
-
<li>Visit the official website of Video Repair 16 and click on the "Buy Now" button.</li>
|
66 |
-
<li>Select the license type that suits your needs. You can choose between a one-year license and a lifetime license.</li>
|
67 |
-
<li>Enter your personal information and payment details and complete the order.</li>
|
68 |
-
<li>Check your email inbox for a confirmation email from Video Repair 16. The email will contain your codigo de activacion and a download link for the software.</li>
|
69 |
-
<li>Download and install Video Repair 16 on your computer.</li>
|
70 |
-
</ol>
|
71 |
-
<h2>How to use Video Repair 16 to fix your corrupted videos</h2>
|
72 |
-
<h3>Step 1: Download and install Video Repair 16</h3>
|
73 |
-
<p>If you have already downloaded and installed Video Repair 16 on your computer, you can skip this step. If not, you can follow these steps:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Click on the download link in the confirmation email from Video Repair 16 or visit the official website of Video Repair 16 and click on the "Download" button.</li>
|
76 |
-
<li>Save the setup file on your computer and run it.</li>
|
77 |
-
<li>Follow the instructions on the screen to complete the installation process.</li>
|
78 |
-
</ol>
|
79 |
-
<h3>Step 2: Launch Video Repair 16 and enter your codigo de activacion</h3>
|
80 |
-
<p>If you have already entered your codigo de activacion in Video Repair 16, you can skip this step. If not, you can follow these steps:</p>
|
81 |
-
<ol>
|
82 |
-
<li>Launch Video Repair 16 on your computer.</li>
|
83 |
-
<li>Click on the "Register" button at the top right corner of the main interface.</li>
|
84 |
-
<li>Enter your email address and codigo de activacion in the pop-up window and click on "Activate".</li>
|
85 |
-
<li>A message will appear confirming that your activation is successful.</li>
|
86 |
-
</ol>
|
87 |
-
<h3>Step 3: Add the corrupted videos to the program</h3>
|
88 |
-
<p>To add the corrupted videos to Video Repair 16, you can follow these steps:</p>
|
89 |
-
<ol>
|
90 |
-
<li>Click on the "Add" button at the bottom left corner of the main interface.</li>
|
91 |
-
<li>Browse your computer or external device and select the corrupted videos that you want to repair.</li>
|
92 |
-
<li>Click on "Open" to import them to the program.</li>
|
93 |
-
<li>You can also drag and drop the corrupted videos directly to the program.</li>
|
94 |
-
</ol>
|
95 |
-
<h3>Step 4: Choose the repair mode and start the repair process</h3>
|
96 |
-
<p>To choose the repair mode and start the repair process in Video Repair 16, you can follow these steps:</p>
|
97 |
-
<ol>
|
98 |
-
<li>Select one or more corrupted videos that you want to repair from the list.</li>
|
99 |
-
<li>Click on "Repair" at the bottom right corner of the main interface.</li>
|
100 |
-
<li>A pop-up window will appear asking you to choose between quick repair and advanced repair. You can select either option depending on your situation.</li>
|
101 |
-
<li>If you choose quick repair, click on "OK" to start repairing your videos immediately.</li>
|
102 |
-
<li>If you choose advanced repair, click on "OK" and then click on "Folder" icon next to each corrupted video to add a sample video file as a reference. Then click on "Repair" again to start repairing your videos.</li>
|
103 |
-
</ol>
|
104 |
-
<h3>Step 5: Preview and save the repaired videos</h3>
|
105 |
-
<p>To preview and save the repaired videos in Video Repair 16, you can follow these steps:</p>
|
106 |
-
<ol>
|
107 |
-
<li>After repairing your videos successfully with either quick repair or advanced repair mode ,you will see them listed under "Repaired Files".</li>
|
108 |
-
<li>You can click on each repaired video file name or thumbnail image to preview it in a built-in media player window.</li>
|
109 |
-
<li>You can also check some information about each repaired video file such as format ,size ,duration ,and resolution under "File Information".</li>
|
110 |
-
<li>If you are satisfied with the results ,you can click on "Save All" at bottom right corner of main interface .</li>
|
111 |
-
<li>A pop-up window will appear asking you to choose a destination folder where you want to save your repaired videos .You can browse your computer or external device and select a folder .Then click on "Save" .</li>
|
112 |
-
<li>Your repaired videos will be saved in selected folder .You can access them anytime .</li>
|
113 |
-
</ol>
|
114 |
-
<h2>Tips and tricks for using Video Repair 16</h2>
|
115 |
-
<h3>Tip 1: Backup your videos before repairing them</h3>
|
116 |
-
can also use a cloud service such as Google Drive ,Dropbox ,or OneDrive to backup your videos online .</p>
|
117 |
-
<h3>Tip 2: Use the advanced repair mode for severely corrupted videos</h3>
|
118 |
-
<p>If your videos are severely corrupted and the quick repair mode cannot fix them ,you can try the advanced repair mode .The advanced repair mode can repair more complex video corruption issues by using a sample video file as a reference .The sample video file should be from the same device and in the same format as the corrupted video file .For example ,if your corrupted video file is a MP4 file recorded by your iPhone ,you should use another MP4 file recorded by your iPhone as a sample video file .The sample video file should also be healthy and playable .The advanced repair mode will use the information from the sample video file to repair the corrupted video file .</p>
|
119 |
-
<h3>Tip 3: Contact the customer support if you encounter any problems</h3>
|
120 |
-
<p>If you encounter any problems while using Video Repair 16 ,such as activation issues ,repairing errors ,or saving failures ,you can contact the customer support team of Video Repair 16 for help .You can send an email to [email protected] or visit the official website of Video Repair 16 and click on "Contact Us" .You can also check the FAQ section on the website for some common questions and answers .The customer support team of Video Repair 16 is friendly and professional ,and they will try their best to solve your problems as soon as possible .</p>
|
121 |
-
<h2>Conclusion</h2>
|
122 |
-
<p>In conclusion ,Video Repair 16 is a powerful and easy-to-use video repair software that can help you fix your corrupted videos in a few simple steps .It supports repairing videos in various formats and from different sources .It also offers two repair modes :quick repair and advanced repair .To use Video Repair 16 ,you need to get a codigo de activacion for Video Repair 16 first ,which you can buy from the official website of Video Repair 16 .Then you can follow the steps we have shown you in this article to add ,repair ,preview ,and save your corrupted videos .We hope this article has helped you understand how to use Video Repair 16 and how to get a codigo de activacion for Video Repair 16 .If you have any questions or feedback ,please feel free to leave a comment below or contact us via email .Thank you for reading!</p>
|
123 |
-
<h2>FAQs</h2>
|
124 |
-
<p>Here are some frequently asked questions about Video Repair 16 and codigo de activacion for Video Repair 16 :</p>
|
125 |
-
<ol>
|
126 |
-
<li>Q: How much does a codigo de activacion for Video Repair 16 cost?</li>
|
127 |
-
<li>A: A codigo de activacion for Video Repair 16 costs $49.95 for a one-year license and $69.95 for a lifetime license. You can pay with PayPal, credit card, debit card, or other payment methods.</li>
|
128 |
-
<li>Q: How long does it take to receive my codigo de activacion for Video Repair 16 after I place an order?</li>
|
129 |
-
<li>A: You will receive your codigo de activacion for Video Repair 16 instantly via email after you complete your payment. Please check your email inbox and spam folder for the confirmation email from Video Repair 16.</li>
|
130 |
-
<li>Q: Can I use my codigo de activacion for Video Repair 16 on multiple computers?</li>
|
131 |
-
<li>A: No, you can only use your codigo de activacion for Video Repair 16 on one computer. If you want to use it on another computer, you need to deactivate it from the first computer and activate it on the second computer.</li>
|
132 |
-
<li>Q: What if I lose my codigo de activacion for Video Repair 16 or forget to deactivate it from my old computer?</li>
|
133 |
-
<li>A: If you lose your codigo de activacion for Video Repair 16 or forget to deactivate it from your old computer, you can contact the customer support team of Video Repair 16 and provide them with your order number and email address. They will help you retrieve your codigo de activacion or reset your activation status.</li>
|
134 |
-
<li>Q: Does Video Repair 16 guarantee to fix all corrupted videos?</li>
|
135 |
-
<li>A: No, Video Repair 16 does not guarantee to fix all corrupted videos. Some videos may be too damaged or corrupted beyond repair. However, Video Repair 16 has a high success rate in repairing most common video corruption issues. You can try it for free before buying it to see if it works for your videos.</li>
|
136 |
-
</ol>
|
137 |
-
</p> 0a6ba089eb<br />
|
138 |
-
<br />
|
139 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/3dmgame Dll Mediafire 88.md
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download and Fix 3dmGameDll.dll Errors for Free</h1>
|
3 |
-
<p>If you are looking for a way to download and fix 3dmGameDll.dll errors for free, you have come to the right place. 3dmGameDll.dll is a dynamic link library file that is used by some popular games such as Mad Max, Metal Gear Solid V: The Phantom Pain, and Watch Dogs. This file contains important functions and data that the games need to run properly. However, sometimes this file can get corrupted, deleted, or misplaced, causing various problems such as crashes, freezes, or error messages.</p>
|
4 |
-
<h2>3dmgame dll mediafire 88</h2><br /><p><b><b>Download</b> ★★★★★ <a href="https://imgfil.com/2uxXOI">https://imgfil.com/2uxXOI</a></b></p><br /><br />
|
5 |
-
<p>In this article, we will show you how to download and fix 3dmGameDll.dll errors for free using simple methods. We will also explain what causes these errors and how to prevent them in the future. Follow the steps below to get started.</p>
|
6 |
-
<h2>What Causes 3dmGameDll.dll Errors?</h2>
|
7 |
-
<p>There are many possible causes of 3dmGameDll.dll errors, but some of the most common ones are:</p>
|
8 |
-
<ul>
|
9 |
-
<li>The game installation is incomplete or damaged.</li>
|
10 |
-
<li>The game has been updated or patched incorrectly.</li>
|
11 |
-
<li>The game has been cracked or modified by third-party software.</li>
|
12 |
-
<li>The 3dmGameDll.dll file is missing or overwritten by another version.</li>
|
13 |
-
<li>The 3dmGameDll.dll file is infected by malware or viruses.</li>
|
14 |
-
<li>The Windows registry is corrupted or contains invalid entries.</li>
|
15 |
-
<li>The system drivers are outdated or incompatible.</li>
|
16 |
-
</ul>
|
17 |
-
<p>These causes can lead to various symptoms such as:</p>
|
18 |
-
<ul>
|
19 |
-
<li>The game fails to launch or crashes randomly.</li>
|
20 |
-
<li>The game runs slowly or lags.</li>
|
21 |
-
<li>The game displays error messages such as "The program can't start because 3dmGameDll.dll is missing from your computer. Try reinstalling the program to fix this problem." or "3dmGameDll.dll not found."</li>
|
22 |
-
<li>The game has graphical glitches or audio issues.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to Download and Fix 3dmGameDll.dll Errors for Free?</h2>
|
25 |
-
<p>There are several ways to download and fix 3dmGameDll.dll errors for free, depending on the cause and severity of the problem. Here are some of the most effective methods:</p>
|
26 |
-
<p></p>
|
27 |
-
<h3>Method 1: Reinstall the Game</h3>
|
28 |
-
<p>The easiest and most reliable way to fix 3dmGameDll.dll errors is to reinstall the game that is causing the problem. This will ensure that all the game files are intact and up-to-date, including the 3dmGameDll.dll file. To reinstall the game, follow these steps:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Uninstall the game from your computer using the Control Panel or the game's uninstaller.</li>
|
31 |
-
<li>Delete any leftover files and folders related to the game from your hard drive.</li>
|
32 |
-
<li>Download the latest version of the game from its official website or a trusted source.</li>
|
33 |
-
<li>Install the game on your computer following the instructions on the screen.</li>
|
34 |
-
<li>Launch the game and check if the error is resolved.</li>
|
35 |
-
</ol>
|
36 |
-
<h3>Method 2: Download and Replace the 3dmGameDll.dll File</h3>
|
37 |
-
<p>If reinstalling the game does not work, you can try downloading and replacing the 3dmGameDll.dll file manually. This can help if the file is missing or corrupted on your system. To download and replace the 3dmGameDll.dll file, follow these steps:</p>
|
38 |
-
<ol>
|
39 |
-
<li>Go to a reputable website that offers free .dll file downloads, such as <a href="https://www.dllme.com/dll/files/3dmgamedll">DLLme.com</a>.</li>
|
40 |
-
<li>Search for "3dmGameDll.dll" and select the version or variant that matches your game and system specifications.</li>
|
41 |
-
<li>Click on "Download" and save the file to your computer.</li>
|
42 |
-
<li>Locate the folder where your game is installed on your hard drive (usually C:\Program Files (x86) or C:\Program Files).</li>
|
43 |
-
<li>Find and rename the existing 3dmGameDll.dll file (if any) to something else, such as "3dmGameDll_old.dll".</li></p> d5da3c52bf<br />
|
44 |
-
<br />
|
45 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/A Pdf Content Splitter 4.8.4 [HOT] Keygen For 14.md
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>A-PDF Content Splitter 4.8.4: A Powerful Tool to Split PDF Files by Content</h1>
|
3 |
-
<p>If you have ever dealt with large PDF files that contain multiple documents or sections, you know how hard it can be to manage them. You may want to extract some pages, rearrange them, or save them as separate files for easier sharing or printing. But how can you do that without spending hours on manual work or buying expensive software?</p>
|
4 |
-
<p>Fortunately, there is a solution: A-PDF Content Splitter 4.8.4. This is a user-friendly and affordable PDF tool that allows you to split PDF files into smaller documents based on specific content on their pages. You can set up rules to define how to split your PDFs by unique text, find text, or word position. You can also customize the output names and properties of the split files, and even set up hot directories to automate the splitting process.</p>
|
5 |
-
<h2>a pdf content splitter 4.8.4 keygen for 14</h2><br /><p><b><b>Download Zip</b> »»» <a href="https://imgfil.com/2uxXhM">https://imgfil.com/2uxXhM</a></b></p><br /><br />
|
6 |
-
<p>With A-PDF Content Splitter 4.8.4, you can easily manage your PDF content and save time and money. Whether you need to split invoices, reports, contracts, manuals, or any other PDF documents, A-PDF Content Splitter 4.8.4 can handle it with ease and accuracy.</p>
|
7 |
-
<p>Here are some of the features and benefits of A-PDF Content Splitter 4.8.4:</p>
|
8 |
-
<ul>
|
9 |
-
<li>It supports three different split methods: by unique text, by find text, and by word position.</li>
|
10 |
-
<li>It allows you to edit and reuse the split rules for different files.</li>
|
11 |
-
<li>It offers flexible tags to trigger the split and customize the output names.</li>
|
12 |
-
<li>It can work with command line and trigger another program to process the split files.</li>
|
13 |
-
<li>It is compatible with Windows XP/Vista/7/8/10.</li>
|
14 |
-
<li>It is affordable and offers a free trial version.</li>
|
15 |
-
</ul>
|
16 |
-
<p>If you want to learn more about A-PDF Content Splitter 4.8.4, you can visit their website[^1^] or download it from Softpedia[^2^]. You can also check out some other online PDF tools such as Adobe Acrobat[^4^] that can help you split PDF files by pages.</p>
|
17 |
-
|
18 |
-
<p>How to use A-PDF Content Splitter 4.8.4</p>
|
19 |
-
<p>Using A-PDF Content Splitter 4.8.4 is very easy and intuitive. You just need to follow these simple steps:</p>
|
20 |
-
<ol>
|
21 |
-
<li>Select the PDF files that you want to split.</li>
|
22 |
-
<li>Select a split rule to apply. You can choose from the predefined rules or create your own.</li>
|
23 |
-
<li>Click the "Split all" button and wait for the program to finish.</li>
|
24 |
-
<li>Check the output folder and enjoy your split PDF files.</li>
|
25 |
-
</ol>
|
26 |
-
<p>You can also use the hot directory feature to automatically split any PDF files that are placed in a specific folder. You just need to set up the hot directory, the split rule, and the output folder, and A-PDF Content Splitter 4.8.4 will do the rest for you.</p>
|
27 |
-
|
28 |
-
<p>Why choose A-PDF Content Splitter 4.8.4</p>
|
29 |
-
<p>A-PDF Content Splitter 4.8.4 is a powerful and reliable PDF tool that can help you split your PDF files by content in a fast and accurate way. Here are some of the reasons why you should choose A-PDF Content Splitter 4.8.4 over other PDF splitters:</p>
|
30 |
-
<p></p>
|
31 |
-
<ul>
|
32 |
-
<li>It can split PDF files by content, not just by pages. This means that you can extract specific sections or documents from your PDF files based on the text they contain.</li>
|
33 |
-
<li>It can handle large and complex PDF files with ease. It can split PDF files that have hundreds or thousands of pages without any problems.</li>
|
34 |
-
<li>It can save you time and money. You don't need to spend hours on manual work or buy expensive software to split your PDF files by content. A-PDF Content Splitter 4.8.4 can do it for you in minutes and at a fraction of the cost.</li>
|
35 |
-
<li>It offers a free trial version. You can download and try A-PDF Content Splitter 4.8.4 for free before you decide to buy it. You can test its features and performance and see if it meets your needs.</li>
|
36 |
-
</ul>
|
37 |
-
<p>A-PDF Content Splitter 4.8.4 is a must-have tool for anyone who works with PDF files on a regular basis. It can help you manage your PDF content more efficiently and effectively.</p> d5da3c52bf<br />
|
38 |
-
<br />
|
39 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/60 Lakh - The New Punjabi Hit by Bukka Jatt and R Nait.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download 60 Lakh Song from DJ Punjab</h1>
|
3 |
-
<p>Punjabi music is one of the most popular genres of music in India and across the world. It has a rich and diverse history, culture, and style that appeals to people of all ages and backgrounds. One of the latest hits in Punjabi music is <strong>60 Lakh</strong>, a song by <strong>Bukka Jatt</strong> featuring <strong>R Nait</strong>. The song was released in September 2021 and has gained over 4.9 million views on YouTube as of October 2021. The song is a catchy and upbeat track that celebrates the success and lifestyle of the singers.</p>
|
4 |
-
<h2>60 lakh song download dj punjab</h2><br /><p><b><b>Download</b> ⚹ <a href="https://urlin.us/2uSYvk">https://urlin.us/2uSYvk</a></b></p><br /><br />
|
5 |
-
<p>If you are a fan of Punjabi music and want to download 60 Lakh song to your device, you might be wondering where to find it. One of the best websites for Punjabi songs download is <strong>DJ Punjab</strong>. DJ Punjab is a popular online platform that offers a huge collection of Punjabi songs, albums, videos, and more. You can find songs from various genres, artists, and eras on DJ Punjab. You can also download songs in different formats and qualities according to your preference.</p>
|
6 |
-
<p>Downloading songs from DJ Punjab has many benefits. You can enjoy your favorite Punjabi songs offline without any internet connection. You can also save your data and storage space by choosing the optimal file size and quality. You can also create your own playlists and share them with your friends and family. In this article, we will show you how to download 60 Lakh song from DJ Punjab in simple steps. We will also give you some tips and tricks for downloading songs from DJ Punjab safely and efficiently.</p>
|
7 |
-
<h2>Steps to Download 60 Lakh Song from DJ Punjab</h2>
|
8 |
-
<p>Downloading 60 Lakh song from DJ Punjab is very easy and fast. You just need to follow these four steps:</p>
|
9 |
-
<h3>Step 1: Visit the official website of DJ Punjab</h3>
|
10 |
-
<p>The first step is to visit the official website of DJ Punjab. You can use any web browser on your device to access it. The website address is <a href="(^6^)">djpunjab.com</a>. You will see a homepage with various categories, menus, and options.</p>
|
11 |
-
<h3>Step 2: Search for 60 Lakh song in the search box</h3>
|
12 |
-
<p>The next step is to search for 60 Lakh song in the search box. You can find the search box at the top right corner of the homepage. Type in "60 Lakh" or "Bukka Jatt" or "R Nait" in the search box and hit enter. You will see a list of results related to your query <h3>Step 3: Select the desired quality and format of the song</h3>
|
13 |
-
<p>The third step is to select the desired quality and format of the song. You can find different options for downloading the song on the result page. You can choose from MP3, MP4, HD, or 320 kbps formats. You can also see the file size and duration of the song before downloading. Choose the option that suits your device and preference.</p>
|
14 |
-
<h3>Step 4: Click on the download button and save the song to your device</h3>
|
15 |
-
<p>The final step is to click on the download button and save the song to your device. You can find the download button below the selected option. Click on it and wait for a few seconds. The song will start downloading automatically. You can check the progress of the download in your browser or in your device's download folder. Once the download is complete, you can enjoy listening to 60 Lakh song offline.</p>
|
16 |
-
<h2>Tips and Tricks for Downloading Songs from DJ Punjab</h2>
|
17 |
-
<p>Downloading songs from DJ Punjab is easy and convenient, but there are some tips and tricks that you can follow to make it even better. Here are some of them:</p>
|
18 |
-
<h3>Use a VPN or proxy to access DJ Punjab if it is blocked in your region</h3>
|
19 |
-
<p>DJ Punjab is a free website that offers Punjabi songs download, but it may not be accessible in some regions due to legal or technical issues. If you face any problem in accessing DJ Punjab, you can use a VPN or proxy service to bypass the restrictions. A VPN or proxy service will change your IP address and location, and allow you to access DJ Punjab from anywhere in the world.</p>
|
20 |
-
<p>60 lakh punjabi song mp3 download<br />
|
21 |
-
60 lakh r nait song download<br />
|
22 |
-
60 lakh bukka jatt song download<br />
|
23 |
-
60 lakh gopy randhawa song download<br />
|
24 |
-
60 lakh new punjabi song download<br />
|
25 |
-
60 lakh song download mr jatt<br />
|
26 |
-
60 lakh song download djpunjab.com<br />
|
27 |
-
60 lakh song download pagalworld<br />
|
28 |
-
60 lakh song download mp3tau<br />
|
29 |
-
60 lakh song download raag.fm<br />
|
30 |
-
60 lakh video song download hdyaar<br />
|
31 |
-
60 lakh video song download mp4<br />
|
32 |
-
60 lakh video song download djjohal<br />
|
33 |
-
60 lakh video song download pendujatt<br />
|
34 |
-
60 lakh video song download riskyjatt<br />
|
35 |
-
60 lakh lyrics r nait song download<br />
|
36 |
-
60 lakh lyrics bukka jatt song download<br />
|
37 |
-
60 lakh lyrics gopy randhawa song download<br />
|
38 |
-
60 lakh lyrics in punjabi song download<br />
|
39 |
-
60 lakh lyrics in hindi song download<br />
|
40 |
-
60 lakh remix dj hans song download<br />
|
41 |
-
60 lakh remix dj lishkara song download<br />
|
42 |
-
60 lakh remix dj sunny qadian song download<br />
|
43 |
-
60 lakh remix dj baapu song download<br />
|
44 |
-
60 lakh remix dj youngster song download<br />
|
45 |
-
60 lakh ringtone r nait song download<br />
|
46 |
-
60 lakh ringtone bukka jatt song download<br />
|
47 |
-
60 lakh ringtone gopy randhawa song download<br />
|
48 |
-
60 lakh ringtone mp3 song download<br />
|
49 |
-
60 lakh ringtone zedge song download<br />
|
50 |
-
60 lakh status r nait song download<br />
|
51 |
-
60 lakh status bukka jatt song download<br />
|
52 |
-
60 lakh status gopy randhawa song download<br />
|
53 |
-
60 lakh status video song download<br />
|
54 |
-
60 lakh status whatsapp song download<br />
|
55 |
-
60 lakh karaoke r nait song download<br />
|
56 |
-
60 lakh karaoke bukka jatt song download<br />
|
57 |
-
60 lakh karaoke gopy randhawa song download<br />
|
58 |
-
60 lakh karaoke mp3 song download<br />
|
59 |
-
60 lakh karaoke with lyrics song download<br />
|
60 |
-
60 lakh instrumental r nait song download<br />
|
61 |
-
60 lakh instrumental bukka jatt song download<br />
|
62 |
-
60 lakh instrumental gopy randhawa song download<br />
|
63 |
-
60 lakh instrumental mp3 song download<br />
|
64 |
-
60 lakh instrumental beatcop music song download<br />
|
65 |
-
60 lakh mashup r nait song download<br />
|
66 |
-
60 lakh mashup bukka jatt song download<br />
|
67 |
-
60 lakh mashup gopy randhawa song download<br />
|
68 |
-
60 lakh mashup mp3 song download</p>
|
69 |
-
<h3>Check the file size and duration of the song before downloading to avoid fake or incomplete downloads</h3>
|
70 |
-
<p>DJ Punjab is a reliable website that offers high-quality Punjabi songs download, but sometimes you may encounter fake or incomplete downloads. These are files that have a smaller size or shorter duration than the original song, and may contain malware or viruses. To avoid these, you should always check the file size and duration of the song before downloading. You can compare them with the information given on YouTube or other sources. If you find any discrepancy, you should avoid downloading that file and look for another option.</p>
|
71 |
-
<h3>Use a reliable antivirus software to scan the downloaded files for any malware or viruses</h3>
|
72 |
-
<p>DJ Punjab is a safe website that offers virus-free Punjabi songs download, but sometimes you may still get infected by malware or viruses from other sources. These are malicious programs that can harm your device or steal your data. To prevent these, you should always use a reliable antivirus software to scan the downloaded files for any malware or viruses. You should also update your antivirus software regularly to keep it up to date with the latest threats.</p>
|
73 |
-
<h2>Conclusion</h2>
|
74 |
-
<p>Punjabi music is a great way to enjoy yourself and express your emotions. 60 Lakh is one of the latest and most popular songs in Punjabi music that you can download from DJ Punjab. DJ Punjab is a wonderful website that offers a huge collection of Punjabi songs, albums, videos, and more. You can download songs from DJ Punjab in simple steps and in different formats and qualities.</p>
|
75 |
-
<p>However, if you are looking for some alternatives to DJ Punjab for Punjabi songs download, you can try these websites as well:</p>
|
76 |
-
<ul>
|
77 |
-
<li><a href="">Mr Jatt</a>: Mr Jatt is another popular website for Punjabi songs download that offers a wide range of songs from various genres, artists, and eras.</li>
|
78 |
-
<li><a href="">Pendu Jatt</a>: Pendu Jatt is a website that specializes in Punjabi songs download that offers exclusive and latest songs from top Punjabi singers and musicians.</li>
|
79 |
-
<li><a href="">Dj Youngster</a>: Dj Youngster is a website that focuses on Punjabi songs download that offers high-quality and fast downloads of new and old Punjabi songs.</li>
|
80 |
-
</ul>
|
81 |
-
<p>We hope this article has helped you learn how to download 60 Lakh song from DJ Punjab. If you have any questions or feedback, please let us know in the comments section below. Thank you for reading!</p>
|
82 |
-
<h2>FAQs</h2>
|
83 |
-
<h3>Is DJ Punjab legal and safe to use?</h3>
|
84 |
-
<p>DJ Punjab is a legal and safe website to use for Punjabi songs download, as long as you use it for personal and non-commercial purposes only. However, some of the songs on DJ Punjab may be copyrighted by their respective owners, so you should always respect their rights and follow their terms and conditions.</p>
|
85 |
-
<h3>How can I download Punjabi songs from YouTube?</h3>
|
86 |
-
<p>You can download Punjabi songs from YouTube by using a third-party website or software that can convert YouTube videos to MP3 or MP4 files. Some of the websites that you can use are <a href="">ytmp3.cc</a>, <a href="">y2mate.com</a>, and <a href="">flvto.biz</a>. However, you should be careful when using these websites, as they may contain ads, pop-ups, or malware. You should also respect the rights of the YouTube creators and follow their terms and conditions.</p>
<h3>What are some of the best Punjabi songs of 2021?</h3>
<p>Some of the best Punjabi songs of 2021 are:</p>
<table>
<tr><th>Song</th><th>Singer</th><th>Views on YouTube (as of October 2021)</th></tr>
<tr><td>60 Lakh</td><td>Bukka Jatt ft. R Nait</td><td>4.9 million</td></tr>
<tr><td>Brown Munde</td><td>AP Dhillon ft. Gurinder Gill and Shinda Kahlon</td><td>163 million</td></tr>
<tr><td>Bachpan Ka Pyaar</td><td>Sahdev Dirdo ft. Badshah and Aastha Gill</td><td>64 million</td></tr>
<tr><td>Pani Di Gal</td><td>Maninder Buttar ft. Asees Kaur and Jasmin Bhasin</td><td>197 million</td></tr>
<tr><td>Baarish Ki Jaaye</td><td>B Praak ft. Nawazuddin Siddiqui and Sunanda Sharma</td><td>387 million</td></tr>
</table>
<h3>How can I listen to Punjabi songs online for free?</h3>
<p>You can listen to Punjabi songs online for free by using various streaming platforms and apps that offer Punjabi music. Some of the platforms and apps that you can use are <a href="">Gaana</a>, <a href="">JioSaavn</a>, <a href="">Spotify</a>, <a href="">Wynk Music</a>, and <a href="">Hungama Music</a>. You can browse through different categories, playlists, and recommendations on these platforms and apps. You can also create your own account and customize your preferences.</p>
<h3>What are some of the features of the Punjabi music industry?</h3>
<p>The Punjabi music industry is one of the most vibrant and dynamic music industries in India and the world. It has some distinctive features, such as:</p>
<ul>
<li>Punjabi music is influenced by various cultures, regions, and languages, such as Sikhism, Sufism, folk, rap, pop, rock, and more.</li>
<li>Punjabi music is known for its lively and energetic beats, catchy lyrics, and expressive vocals.</li>
<li>Punjabi music is constantly evolving and experimenting with new styles, trends, and collaborations.</li>
<li>Punjabi music has a huge fan base and market, both in India and abroad.</li>
<li>Punjabi music has produced many talented and famous singers, musicians, composers, and producers.</li>
</ul>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo descargar Video Poker Jackpot APK y ganar grandes premios.md
DELETED
@@ -1,181 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar Video Poker Jackpot APK: How to Play and Win Big</h1>
|
3 |
-
<p>If you love playing video poker, you will love Video Poker Jackpot, a fun and addictive game for Android devices. In this article, we will show you how to download and install Video Poker Jackpot APK, how to play and win big at this game, and some stories and testimonials of video poker jackpot winners. Let's get started!</p>
|
4 |
-
<h2>descargar video poker jackpot apk</h2><br /><p><b><b>Download File</b> ➡ <a href="https://urlin.us/2uT2l5">https://urlin.us/2uT2l5</a></b></p><br /><br />
|
5 |
-
<h2>What is Video Poker Jackpot?</h2>
|
6 |
-
<h3>A popular video poker game for Android devices</h3>
|
7 |
-
<p>Video Poker Jackpot is a free video poker game that you can play on your Android phone or tablet. It is one of the most popular video poker games on Google Play, with over 1 million downloads and a 4.5-star rating. You can enjoy playing various video poker variants, such as Jacks or Better, Deuces Wild, Double Bonus Poker, and more. You can also compete with other players in tournaments and leaderboards, and win huge jackpots.</p>
|
8 |
-
<h3>Features and benefits of the game</h3>
|
9 |
-
<p>Some of the features and benefits of playing Video Poker Jackpot are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>You can play offline or online, anytime and anywhere.</li>
|
12 |
-
<li>You can choose from different bet sizes, from 1 to 500 coins.</li>
|
13 |
-
<li>You can get free coins every hour, every day, and every week.</li>
|
14 |
-
<li>You can claim daily bonuses and rewards for playing.</li>
|
15 |
-
<li>You can customize your game settings, such as sound, speed, auto-hold, etc.</li>
|
16 |
-
<li>You can enjoy realistic graphics, animations, and sounds.</li>
|
17 |
-
<li>You can learn how to play video poker with the help section.</li>
|
18 |
-
</ul>
|
19 |
-
<h2>How to download and install Video Poker Jackpot APK</h2>
|
20 |
-
<h3>Steps to download the APK file from a trusted source</h3>
|
21 |
-
<p>If you want to download Video Poker Jackpot APK, you need to follow these steps (a short file-integrity check sketch follows the list):</p>
|
22 |
-
<ol>
|
23 |
-
<li>Go to a trusted website that offers the APK file of Video Poker Jackpot, such as [Uptodown](^1^).</li>
|
24 |
-
<li>Click on the green button that says "Download" or "Descargar".</li>
|
25 |
-
<li>Wait for the download to finish. You may need to allow downloads from unknown sources in your device settings.</li>
|
26 |
-
<li>Locate the downloaded APK file in your device storage.</li>
|
27 |
-
</ol>
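<p>Before installing, it is good practice to check that the file you downloaded is the one you expected. Here is a minimal, hedged sketch in Python (run on a computer, not on the phone); the file name and the reference SHA-256 value are placeholders you would take from the download page, if it publishes one:</p>
<pre><code class="language-python">
# Compute the SHA-256 checksum of a downloaded APK and compare it to a published value.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest()

expected = "PUT_THE_PUBLISHED_SHA256_HERE"         # placeholder reference value
actual = sha256_of("video-poker-jackpot.apk")      # placeholder file name
print("checksum matches" if actual == expected else "checksum DOES NOT match")
</code></pre>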
|
28 |
-
<h3>Steps to install the APK file on your device</h3>
|
29 |
-
<p>After you have downloaded the APK file of Video Poker Jackpot, you need to install it on your device. Here are the steps:</p>
|
30 |
-
<p>descargar video poker jackpot gratis para android<br />
|
31 |
-
descargar video poker jackpot uptodown<br />
|
32 |
-
descargar video poker jackpot mod apk<br />
|
33 |
-
descargar video poker jackpot full apk<br />
|
34 |
-
descargar video poker jackpot sin internet<br />
|
35 |
-
descargar video poker jackpot con dinero real<br />
|
36 |
-
descargar video poker jackpot en español<br />
|
37 |
-
descargar video poker jackpot hackeado<br />
|
38 |
-
descargar video poker jackpot offline<br />
|
39 |
-
descargar video poker jackpot online<br />
|
40 |
-
descargar video poker jackpot pro apk<br />
|
41 |
-
descargar video poker jackpot premium apk<br />
|
42 |
-
descargar video poker jackpot ultima version<br />
|
43 |
-
descargar video poker jackpot 2023 apk<br />
|
44 |
-
descargar video poker jackpot para pc<br />
|
45 |
-
descargar video poker jackpot para iphone<br />
|
46 |
-
descargar video poker jackpot para tablet<br />
|
47 |
-
descargar video poker jackpot para celular<br />
|
48 |
-
descargar video poker jackpot para smart tv<br />
|
49 |
-
descargar video poker jackpot para fire tv<br />
|
50 |
-
descargar video poker jackpot de casino<br />
|
51 |
-
descargar video poker jackpot de las vegas<br />
|
52 |
-
descargar video poker jackpot de texas holdem<br />
|
53 |
-
descargar video poker jackpot de joker wild<br />
|
54 |
-
descargar video poker jackpot de double bonus<br />
|
55 |
-
descargar video poker jackpot con bonus gratis<br />
|
56 |
-
descargar video poker jackpot con giros gratis<br />
|
57 |
-
descargar video poker jackpot con premios reales<br />
|
58 |
-
descargar video poker jackpot con torneos<br />
|
59 |
-
descargar video poker jackpot con amigos<br />
|
60 |
-
como descargar video poker jackpot apk<br />
|
61 |
-
como jugar video poker jackpot apk<br />
|
62 |
-
como ganar en video poker jackpot apk<br />
|
63 |
-
como hackear video poker jackpot apk<br />
|
64 |
-
como actualizar video poker jackpot apk<br />
|
65 |
-
mejor app para descargar video poker jackpot apk<br />
|
66 |
-
mejor sitio para descargar video poker jackpot apk<br />
|
67 |
-
mejor juego de video poker jackpot apk<br />
|
68 |
-
mejor forma de jugar video poker jackpot apk<br />
|
69 |
-
mejor estrategia para ganar en video poker jackpot apk<br />
|
70 |
-
opiniones sobre descargar video poker jackpot apk<br />
|
71 |
-
reseñas de descargar video poker jackpot apk<br />
|
72 |
-
ventajas de descargar video poker jackpot apk<br />
|
73 |
-
desventajas de descargar video poker jackpot apk<br />
|
74 |
-
alternativas a descargar video poker jackpot apk<br />
|
75 |
-
soluciones a problemas al descargar video poker jackpot apk<br />
|
76 |
-
trucos y consejos para descargar video poker jackpot apk<br />
|
77 |
-
guia completa para descargar video poker jackpot apk<br />
|
78 |
-
tutorial paso a paso para descargar video poker jackpot apk</p>
|
79 |
-
<ol>
|
80 |
-
<li>Tap on the APK file that you have downloaded.</li>
|
81 |
-
<li>A pop-up window will appear asking you to confirm the installation. Tap on "Install" or "Instalar".</li>
|
82 |
-
<li>Wait for the installation to complete. You may need to grant some permissions to the app.</li>
|
83 |
-
<li> <li>Once the installation is done, you can open the app and start playing Video Poker Jackpot.</li>
|
84 |
-
</ol>
|
85 |
-
<h2>How to play Video Poker Jackpot</h2>
|
86 |
-
<h3>The rules and objective of video poker</h3>
|
87 |
-
<p>Video poker is a casino game that is based on five-card draw poker. The objective of the game is to make the best possible poker hand out of the five cards that you are dealt. You can choose to keep or discard any of the cards, and replace them with new ones from the same deck. The payout of the game depends on the strength of your final hand and the paytable of the game variant that you are playing.</p>
|
88 |
-
<h3>The different variants and paytables of video poker</h3>
|
89 |
-
<p>Video Poker Jackpot offers you several video poker variants to choose from, each with its own rules and paytable. Some of the variants are:</p>
|
90 |
-
<table>
|
91 |
-
<tr>
|
92 |
-
<th>Variant</th>
|
93 |
-
<th>Rules</th>
|
94 |
-
<th>Paytable (for 1 coin bet)</th>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>Jacks or Better</td>
|
98 |
-
<td>The most basic and common variant of video poker. You need at least a pair of jacks or better to win.</td>
|
99 |
-
<td>Royal Flush: 250<br>Straight Flush: 50<br>Four of a Kind: 25<br>Full House: 9<br>Flush: 6<br>Straight: 4<br>Three of a Kind: 3<br>Two Pair: 2<br>Jacks or Better: 1</td>
|
100 |
-
</tr>
|
101 |
-
<tr>
|
102 |
-
<td>Deuces Wild</td>
|
103 |
-
<td>All the twos in the deck are wild cards, meaning they can substitute for any other card to make a winning hand. You need at least a three of a kind to win.</td>
|
104 |
-
<td>Natural Royal Flush: 250<br>Four Deuces: 200<br>Wild Royal Flush: 25<br>Five of a Kind: 15<br>Straight Flush: 9<br>Four of a Kind: 4<br>Full House: 4<br>Flush: 3<br>Straight: 2<br>Three of a Kind: 1</td>
|
105 |
-
</tr>
|
106 |
-
<tr>
|
107 |
-
<td>Double Bonus Poker</td>
|
108 |
-
<td>A variant of Jacks or Better that pays extra for four aces, four twos, threes, or fours, and four fives through kings. You need at least a pair of jacks or better to win.</td>
|
109 |
-
<td>Royal Flush: 250<br>Straight Flush: 50<br>Four Aces: 160<br>Four Twos, Threes, or Fours: 80<br>Four Fives through Kings: 50<br>Full House: 10<br>Flush: 7<br>Straight: 5<br>Three of a Kind: 3<br>Two Pair: 1<br>Jacks or Better: 1</td>
|
110 |
-
</tr>
|
111 |
-
</table>
|
112 |
-
<h3>The tips and strategies to improve your chances of winning</h3>
|
113 |
-
<p>To play Video Poker Jackpot effectively, you need to follow some tips and strategies, such as the ones below (a small worked example of the odds follows the list):</p>
|
114 |
-
<ul>
|
115 |
-
<li>Always bet the maximum number of coins, as this will give you the highest payout for a royal flush.</li>
|
116 |
-
<li>Always check the paytable of the game variant that you are playing, and choose the one that offers the best return.</li>
|
117 |
-
<li>Always use the auto-hold feature, as this will help you make the optimal decisions based on the mathematically correct strategy.</li>
|
118 |
-
<li>Always keep any winning hand, unless you have a chance to improve it to a higher-paying hand.</li>
|
119 |
-
<li>Always keep any four cards to a royal flush, straight flush, or flush.</li>
|
120 |
-
<li>Always keep any three cards to a royal flush or straight flush.</li>
|
121 |
-
<li>Always keep any pair, unless you have four cards to a straight or flush.</li>
|
122 |
-
<li>Always keep any high card (jack or better), unless you have three cards to a straight or flush.</li>
|
123 |
-
<li>Always discard any unpaired low cards (ten or lower), unless you have four cards to a straight or flush.</li>
|
124 |
-
</ul>
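<p>To make the "keep four cards to a flush" rule concrete, here is a small, hedged calculation sketch (our own illustration, not taken from the game). It only counts the flush payout and ignores other hands you might back into, so treat it as a rough estimate:</p>
<pre><code class="language-python">
# Rough expected value of drawing one card to a four-card flush in Jacks or Better.
flush_outs = 9          # 13 cards per suit minus the 4 suited cards you already hold
unseen_cards = 47       # 52 cards minus the 5 dealt to you
p_flush = flush_outs / unseen_cards          # ≈ 0.19
flush_payout = 6                             # coins per coin bet (paytable above)
expected_return = p_flush * flush_payout     # ≈ 1.15 coins per coin bet
print(f"P(flush) = {p_flush:.3f}, EV ≈ {expected_return:.2f} coins per coin bet")
</code></pre>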
|
125 |
-
<h2>How to win big at Video Poker Jackpot</h2>
|
126 |
-
<h3>The best hands and payouts in video poker</h3>
|
127 |
-
<p>The best hands in video poker are the ones that pay the most, depending on the game variant and the number of coins that you bet. Here are some examples:</p>
|
128 |
-
<ul>
<li>A royal flush is the highest-paying hand in video poker. It consists of an ace, king, queen, jack, and ten of the same suit. It pays 250 coins for a one-coin bet, but a whopping 4,000 coins for a five-coin bet. That's why it's important to bet the maximum number of coins.</li>
<li>A straight flush is the second-highest-paying hand in video poker. It consists of five consecutive cards of the same suit. It pays 50 coins for a one-coin bet, and 250 coins for a five-coin bet.</li>
<li>A four of a kind is the third-highest-paying hand in video poker. It consists of four cards of the same rank. It pays 25 coins for a one-coin bet, and 125 coins for a five-coin bet. However, some game variants pay more for certain four of a kinds, such as four aces or four deuces.</li>
<li>A full house is the fourth-highest-paying hand in video poker. It consists of three cards of the same rank and two cards of another rank. It pays 9 coins for a one-coin bet, and 45 coins for a five-coin bet.</li>
<li>A flush is the fifth-highest-paying hand in video poker. It consists of five cards of the same suit. It pays 6 coins for a one-coin bet, and 30 coins for a five-coin bet.</li>
</ul>
<h3>The jackpot feature and how to trigger it</h3>
|
129 |
-
<p>One of the most exciting features of Video Poker Jackpot is the jackpot feature, which gives you a chance to win a huge amount of coins. The jackpot feature is triggered randomly after any winning hand. When it happens, you will see a wheel with different segments, each with a multiplier value. You can spin the wheel once, and whatever multiplier you land on will be applied to your current win. For example, if you win 100 coins and spin the wheel and get a 10x multiplier, you will win 1,000 coins.</p>
|
130 |
-
<p>The jackpot feature also has a special segment that says "Jackpot". If you are lucky enough to land on this segment, you will win the progressive jackpot, which is the highest prize in the game. The progressive jackpot is a pool of coins that increases every time someone plays Video Poker Jackpot. You can see the current amount of the jackpot on the top of the screen.</p>
|
131 |
-
<h3>The stories and testimonials of video poker jackpot winners</h3>
|
132 |
-
<p>Many players have won big at Video Poker Jackpot, and some of them have shared their stories and testimonials on the game's review section on Google Play. Here are some examples:</p>
|
133 |
-
<blockquote>
|
134 |
-
<p>"I love this game! I won the jackpot twice in one day! I couldn't believe it! Thank you so much for this awesome game!" - Maria</p>
|
135 |
-
</blockquote>
|
136 |
-
<blockquote>
|
137 |
-
<p>"This is the best video poker game ever! I play it every day and I always have fun. I hit the jackpot last week and I was so happy! I recommend this game to everyone who loves video poker!" - John</p>
|
138 |
-
</blockquote>
|
139 |
-
<blockquote>
|
140 |
-
<p>"Wow! This game is amazing! I just won the jackpot and I'm speechless! This game is very generous and rewarding. I'm so glad I found it!" - Lisa</p>
|
141 |
-
</blockquote>
|
142 |
-
<h2>Conclusion</h2>
|
143 |
-
<p>Video Poker Jackpot is a great game for video poker lovers who want to play on their Android devices. You can download and install Video Poker Jackpot APK from a trusted source, and enjoy playing various video poker variants with realistic graphics and sounds. You can also win big at this game by following some tips and strategies, and by triggering the jackpot feature. If you are lucky, you might join the club of video poker jackpot winners who have shared their stories and testimonials on Google Play.</p>
|
144 |
-
<p>So what are you waiting for? Download Video Poker Jackpot APK today and start playing and winning big!</p>
|
145 |
-
<h2>FAQs</h2>
|
146 |
-
<h3>What are the advantages of playing video poker online?</h3>
|
147 |
-
<p>Some of the advantages of playing video poker online are:</p>
|
148 |
-
<ul>
|
149 |
-
<li>You can play anytime and anywhere, as long as you have an internet connection.</li>
|
150 |
-
<li>You can choose from a wide range of video poker variants, each with its own rules and paytable.</li>
|
151 |
-
<li>You can play at your own pace, without any pressure from other players or dealers.</li>
|
152 |
-
<li>You can practice your skills and strategies without risking any real money.</li>
|
153 |
-
<li>You can enjoy various bonuses and rewards from online casinos or apps.</li>
|
154 |
-
</ul>
|
155 |
-
<h3>Is Video Poker Jackpot safe and secure?</h3>
|
156 |
-
<p>Yes, Video Poker Jackpot is safe and secure, as long as you download and install it from a trusted source, such as [Uptodown]. The app does not contain any malware or viruses that could harm your device or data. The app also uses encryption technology to protect your transactions and personal information.</p>
|
157 |
-
<h3>How can I get free coins and bonuses in Video Poker Jackpot?</h3>
|
158 |
-
<p>There are several ways to get free coins and bonuses in Video Poker Jackpot, such as:</p>
|
159 |
-
<ul>
|
160 |
-
<li>You can get free coins every hour, every day, and every week by tapping on the coin icon on the top of the screen.</li>
|
161 |
-
<li>You can claim daily bonuses and rewards by tapping on the gift icon on the top of the screen.</li>
|
162 |
-
<li>You can watch video ads to earn extra coins by tapping on the video icon on the bottom of the screen.</li>
|
163 |
-
<li>You can invite your friends to play Video Poker Jackpot and get referral bonuses by tapping on the invite icon on the bottom of the screen.</li>
|
164 |
-
<li>You can participate in tournaments and leaderboards and win coins and prizes by tapping on the trophy icon on the bottom of the screen.</li>
|
165 |
-
</ul>
|
166 |
-
<h3>What are the system requirements for Video Poker Jackpot?</h3>
|
167 |
-
<p>The system requirements for Video Poker Jackpot are:</p>
|
168 |
-
<ul>
|
169 |
-
<li>An Android device with version 4.1 or higher.</li>
|
170 |
-
<li>At least 100 MB of free storage space.</li>
|
171 |
-
<li>An internet connection (optional, but recommended).</li>
|
172 |
-
</ul>
|
173 |
-
<h3>How can I contact the support team of Video Poker Jackpot?</h3>
|
174 |
-
<p>If you have any questions, feedback, or issues with Video Poker Jackpot, you can contact the support team by:</p>
|
175 |
-
<ul>
|
176 |
-
<li>Tapping on the menu icon on the top left of the screen and selecting "Contact Us".</li>
|
177 |
-
<li>Sending an email to [email protected].</li>
|
178 |
-
<li>Visiting their Facebook page at [Video Poker Jackpot].</li>
|
179 |
-
</ul>
spaces/1toTree/lora_test/ppdiffusers/experimental/rl/__init__.py
DELETED
@@ -1,17 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa

from .value_guided_sampling import ValueGuidedRLPipeline
spaces/A00001/bingothoo/src/state/index.ts
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import { BingWebBot } from '@/lib/bots/bing'
|
2 |
-
import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types'
|
3 |
-
import { nanoid } from '@/lib/utils'
|
4 |
-
import { atom } from 'jotai'
|
5 |
-
import { atomWithImmer } from 'jotai-immer'
|
6 |
-
import { atomWithStorage } from 'jotai/utils'
|
7 |
-
import { atomFamily } from 'jotai/utils'
|
8 |
-
import { atomWithHash, atomWithLocation } from 'jotai-location'
|
9 |
-
|
10 |
-
const initialMessages: ChatMessageModel[] = [
|
11 |
-
{ author: 'system', text: 'conversation between user and robot', id: '1' },
|
12 |
-
{ author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? ', id: '2' },
|
13 |
-
{
|
14 |
-
author: 'bot', text: `
|
15 |
-
您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点:
|
16 |
-
|
17 |
-
- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。
|
18 |
-
- 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原;
|
19 |
-
- 缺点:价格较高,噪音较大,需要定期清洁滤网。
|
20 |
-
- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。
|
21 |
-
- 优点:清洁性能强劲,操作方便,适用多种场景;
|
22 |
-
- 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。
|
23 |
-
- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。
|
24 |
-
- 优点:轻巧易用,重量只有1.36kg;电池可充电可更换;
|
25 |
-
- 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。
|
26 |
-
|
27 |
-
希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊
|
28 |
-
`, id: '3' },
|
29 |
-
{ author: 'user', text: '今天的新闻', id: '4' },
|
30 |
-
{
|
31 |
-
author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息:
|
32 |
-
|
33 |
-
# 中国新闻
|
34 |
-
- 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^]
|
35 |
-
- 梦之队第5金! 全红婵陈芋汐女双10米台夺冠[^1^]
|
36 |
-
- 中央气象台7月16日18时发布台风橙色预警[^1^]
|
37 |
-
- 贵州石阡:暑期旅游带动乡村振兴[^1^]
|
38 |
-
- 激活大科学装置的“人才红利”[^1^]
|
39 |
-
- 聚才用才留才 让希望的田野成为智慧的田野[^1^]
|
40 |
-
- 科学家发现粮食系统增效可有效支撑我国碳中和[^1^]
|
41 |
-
- 成都以赛为媒提升城市美誉度[^1^]
|
42 |
-
- 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^]
|
43 |
-
- 浙江建德:新安江上享清凉[^1^]
|
44 |
-
- 定向资金持续精准滴灌 高质量发展力量不断积累[^1^]
|
45 |
-
- 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^]
|
46 |
-
- 武汉渡江节开赛 千名“泳士”竞渡长江[^1^]
|
47 |
-
- 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^]
|
48 |
-
- 大运来了丨成都迎大运 全民健身动起来[^1^]
|
49 |
-
- 藏在高校里的宝藏博物馆[^1^]
|
50 |
-
- 中国汽车工业用70年赢得三个“全球第一”[^1^]
|
51 |
-
- 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^]
|
52 |
-
|
53 |
-
# 国际新闻
|
54 |
-
- 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^]
|
55 |
-
- 国际航运业加快绿色转型[^2^]
|
56 |
-
- 美企反对收紧对华芯片出口限制[^2^]
|
57 |
-
- 欧洲加大气候科技领域投资[^2^]
|
58 |
-
- 中企助力丹麦发展清洁能源[^2^]
|
59 |
-
- 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^]
|
60 |
-
- 中国和阿尔及利亚共同构建新型国际关系典范[^2^]
|
61 |
-
|
62 |
-
以上信息仅供参考,具体详情请点击以下链接查看:
|
63 |
-
|
64 |
-
[1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/)
|
65 |
-
[2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' },
|
66 |
-
{ author: 'user', text: '写一个快排', id: '6' },
|
67 |
-
{ author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用��交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' },
|
68 |
-
{
|
69 |
-
author: 'bot', text: "好的,我会尝试画一只猫。\n > ",
|
70 |
-
id: '8'
|
71 |
-
}
|
72 |
-
]
|
73 |
-
|
74 |
-
export const GreetMessages = [
|
75 |
-
'谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?',
|
76 |
-
'重新开始总是很棒。问我任何问题!',
|
77 |
-
'当然,我很乐意重新开始。我现在可以为你提供哪些帮助?',
|
78 |
-
'当然,我已准备好进行新的挑战。我现在可以为你做什么?',
|
79 |
-
'很好,让我们来更改主题。你在想什么?',
|
80 |
-
'不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?',
|
81 |
-
'好的,我准备好了!感谢重置。我们应该了解哪些内容?',
|
82 |
-
'感谢刷新!你有新的话题吗?',
|
83 |
-
'明白了,让我们重新开始。接下来应该讨论什么?',
|
84 |
-
'下一步!我可以为你做什么?',
|
85 |
-
'好的,我已准备好新话题。我们应该一起了解哪些内容?'
|
86 |
-
]
|
87 |
-
|
88 |
-
export const bingConversationStyleAtom = atomWithStorage<BingConversationStyle>('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true })
|
89 |
-
export const voiceAtom = atomWithStorage<boolean>('enableTTS', false, undefined, { unstable_getOnInit: true })
|
90 |
-
|
91 |
-
type Param = { botId: BotId; page: string }
|
92 |
-
|
93 |
-
const createBotInstance = () => {
|
94 |
-
return new BingWebBot({
|
95 |
-
cookie: ' ',
|
96 |
-
ua: ' ',
|
97 |
-
})
|
98 |
-
}
|
99 |
-
|
100 |
-
export const chatFamily = atomFamily(
|
101 |
-
(param: Param) => {
|
102 |
-
return atomWithImmer({
|
103 |
-
botId: param.botId,
|
104 |
-
bot: createBotInstance(),
|
105 |
-
messages: [] as ChatMessageModel[],
|
106 |
-
generatingMessageId: '',
|
107 |
-
abortController: undefined as AbortController | undefined,
|
108 |
-
conversationId: nanoid(),
|
109 |
-
})
|
110 |
-
},
|
111 |
-
(a, b) => a.botId === b.botId && a.page === b.page,
|
112 |
-
)
|
113 |
-
|
114 |
-
export const hashAtom = atomWithHash('dialog', '')
|
115 |
-
|
116 |
-
export const locationAtom = atomWithLocation()
|
117 |
-
|
118 |
-
export const voiceListenAtom = atom(false)
|
spaces/AB-TW/team-ai/app.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from langchain.document_loaders import TextLoader
|
3 |
-
from agents.tools.python_code_tool import generate_and_excute_python_code
|
4 |
-
from agents.tools.shell_tool import generate_and_excute_shell_code
|
5 |
-
from chains import HumanFeedBackChain, contextRewriteChain
|
6 |
-
from embedding import CustomEmbedding
|
7 |
-
from memories import HumenFeedbackBufferMemory
|
8 |
-
from agents.code_generate_agent import code_agent_executor, code_agent_tools
|
9 |
-
from agents.code_execute_agent import generate_and_excute_code_agent
|
10 |
-
|
11 |
-
|
12 |
-
baMemory = HumenFeedbackBufferMemory(
|
13 |
-
input_key="input", human_prefix="Answer", ai_prefix="AI")
|
14 |
-
baChain = HumanFeedBackChain(verbose=True, memory=baMemory)
|
15 |
-
|
16 |
-
"""读取document/business_context.py文件内容作为context"""
|
17 |
-
context_path = "./documents/bussiness_context/business_context.md"
|
18 |
-
|
19 |
-
|
20 |
-
def sendMessage(chatbot, input):
|
21 |
-
chatbot.append((
|
22 |
-
(None if len(input) == 0 else input), None))
|
23 |
-
return chatbot
|
24 |
-
|
25 |
-
|
26 |
-
def clearMemory(chatbot):
|
27 |
-
chatbot.clear()
|
28 |
-
if baMemory != None:
|
29 |
-
baMemory.clear()
|
30 |
-
return chatbot, ""
|
31 |
-
|
32 |
-
def loadContext():
|
33 |
-
textloader = TextLoader(context_path)
|
34 |
-
return textloader.load()[0].page_content
|
35 |
-
|
36 |
-
|
37 |
-
def saveContext(context):
|
38 |
-
with open(context_path, 'w') as f:
|
39 |
-
f.write(context)
|
40 |
-
|
41 |
-
def feedBack(context, story, chatbot=[], input=""):
|
42 |
-
if len(input) > 0:
|
43 |
-
context += (f"\n\n {input}")
|
44 |
-
saveContext(context)
|
45 |
-
response = baChain.run(
|
46 |
-
input=(input if len(input) == 0 else input), context=context, story=story, stop="\nAnswer:")
|
47 |
-
chatbot[-1][1] = response
|
48 |
-
return chatbot, "", context
|
49 |
-
|
50 |
-
|
51 |
-
customerEmbedding = CustomEmbedding()
|
52 |
-
|
53 |
-
faqChain = customerEmbedding.getFAQAgent()
|
54 |
-
|
55 |
-
code_agent_executor = code_agent_executor()
|
56 |
-
def faqFromLocal(input, chatbot=[]):
|
57 |
-
# response = faqChain({"question": f"{input}"})
|
58 |
-
response = faqChain.run(input)
|
59 |
-
chatbot.append((input, response))
|
60 |
-
return chatbot, ""
|
61 |
-
|
62 |
-
|
63 |
-
def generateEmbeddings(chatbot=[]):
|
64 |
-
response = customerEmbedding.calculateEmbedding()
|
65 |
-
chatbot.append((None, response))
|
66 |
-
return chatbot
|
67 |
-
|
68 |
-
|
69 |
-
def generateCode(input: str, chatbot=[], returnCode=False):
|
70 |
-
if len(input) <=0:
|
71 |
-
chatbot[-1][1] = None
|
72 |
-
return chatbot, ""
|
73 |
-
response = code_agent_executor.run(
|
74 |
-
input=(input if len(input) == 0 else input))
|
75 |
-
chatbot[-1][1] = response
|
76 |
-
return chatbot, ""
|
77 |
-
|
78 |
-
def generateCodeByMultiPart(context: str, relateCode: str, toolName: str, chatbot=[]):
|
79 |
-
input = f"请根据如下信息{toolName}:\n{context}\n\n{relateCode}"
|
80 |
-
return generateCode(input, chatbot)
|
81 |
-
|
82 |
-
def sendMessageByMultiPart(chatbot, context: str, relateCode: str, toolName: str):
|
83 |
-
input = f"请根据如下信息{toolName}:\n{context}\n\n{relateCode}"
|
84 |
-
chatbot.append((input, None))
|
85 |
-
return chatbot
|
86 |
-
|
87 |
-
|
88 |
-
def rewriteContext(input, chatbot):
|
89 |
-
response = contextRewriteChain.run(input=input, verbose=True)
|
90 |
-
chatbot.append((input, response))
|
91 |
-
return chatbot, response
|
92 |
-
|
93 |
-
def generateCodeAndExcute(input, chatbot=[], language="python"):
|
94 |
-
request = f'''write a {language} script to solve the following problem and return code and the results:\n{input}'''
|
95 |
-
result = generate_and_excute_code_agent.run(request)
|
96 |
-
chatbot.append((input, result))
|
97 |
-
return chatbot
|
98 |
-
|
99 |
-
def generatePyhonCodeAndExcute(input, chatbot=[]):
|
100 |
-
request = f'''write a Python script to solve the following problem and return code and the results:\n{input}'''
|
101 |
-
result = generate_and_excute_python_code.run(request)
|
102 |
-
chatbot.append((input, result))
|
103 |
-
return chatbot
|
104 |
-
|
105 |
-
def generateShellCodeAndExcute(input, chatbot=[]):
|
106 |
-
request = f'''write a {language} script to solve the following problem and return code and the results:\n{input}'''
|
107 |
-
result = generate_and_excute_shell_code.run(request)
|
108 |
-
chatbot.append((input, result))
|
109 |
-
return chatbot
|
110 |
-
|
111 |
-
toolTextBox = []
|
112 |
-
with gr.Blocks() as demo:
|
113 |
-
with gr.Row():
|
114 |
-
with gr.Tab("Business"):
|
115 |
-
with gr.Row():
|
116 |
-
with gr.Column():
|
117 |
-
chatbot = gr.Chatbot().style()
|
118 |
-
with gr.Row():
|
119 |
-
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(
|
120 |
-
container=False)
|
121 |
-
with gr.Column():
|
122 |
-
with gr.Row():
|
123 |
-
context = gr.Textbox(show_label=True, label="Context", placeholder="Enter Context").style(
|
124 |
-
container=False)
|
125 |
-
with gr.Row():
|
126 |
-
story = gr.Textbox(show_label=True, label="User Story", placeholder="Enter User Story").style(
|
127 |
-
container=False)
|
128 |
-
with gr.Row():
|
129 |
-
gr.Button("Generate Scenarios").click(clearMemory, [chatbot], [chatbot, txt]).then(sendMessage, [chatbot, txt], [chatbot]).then(
|
130 |
-
feedBack, [context, story, chatbot], [chatbot, txt])
|
131 |
-
with gr.Row():
|
132 |
-
with gr.Column(scale=5):
|
133 |
-
gr.Button("Rewrite Context").click(rewriteContext, [context, chatbot], [chatbot, context])
|
134 |
-
with gr.Column(scale=1):
|
135 |
-
gr.Button("Revert").click(loadContext, [], [context])
|
136 |
-
with gr.Row():
|
137 |
-
gr.Button("Save Context").click(saveContext, [context], [])
|
138 |
-
|
139 |
-
with gr.Tab("Tech"):
|
140 |
-
with gr.Row():
|
141 |
-
with gr.Column():
|
142 |
-
code_chatbot = gr.Chatbot().style()
|
143 |
-
with gr.Row():
|
144 |
-
code = gr.Textbox(show_label=False, label="Code Generate", placeholder="Enter text and press enter").style(
|
145 |
-
container=False)
|
146 |
-
with gr.Column():
|
147 |
-
with gr.Row():
|
148 |
-
code_context = gr.Textbox(show_label=True, label="Context", placeholder="Enter Context").style(
|
149 |
-
container=False)
|
150 |
-
with gr.Row():
|
151 |
-
relateCode = gr.Textbox(show_label=True, label="Relate Code", placeholder="Enter Relate Code").style(
|
152 |
-
container=False)
|
153 |
-
for index, tool in enumerate(code_agent_tools):
|
154 |
-
with gr.Row():
|
155 |
-
toolTextBox.append(gr.Textbox(show_label=False, visible=False, label=tool.name, value=tool.name).style())
|
156 |
-
gr.Button(tool.name).click(
|
157 |
-
sendMessageByMultiPart, [code_chatbot, code_context, relateCode, toolTextBox[index]], [code_chatbot]).then(
|
158 |
-
generateCodeByMultiPart, [code_context, relateCode, toolTextBox[index], code_chatbot], [code_chatbot, code])
|
159 |
-
with gr.Tab("FAQ"):
|
160 |
-
faq_chatbot = gr.Chatbot().style()
|
161 |
-
with gr.Row():
|
162 |
-
faq = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(
|
163 |
-
container=False)
|
164 |
-
with gr.Row():
|
165 |
-
gr.Button("Regenerate embedding").click(generateEmbeddings,[faq_chatbot], [faq_chatbot])
|
166 |
-
with gr.Tab("TOOL"):
|
167 |
-
with gr.Row():
|
168 |
-
with gr.Column():
|
169 |
-
tool_request = gr.Textbox(show_label=False, placeholder="Enter your tool Request").style(
|
170 |
-
container=False, show_copy_button=True)
|
171 |
-
language = gr.Dropdown(choices=["Python", "Shell"], label="Language", value="Python").style()
|
172 |
-
tool_button = gr.Button("Generate Code and Execute with agent")
|
173 |
-
python_tool_button = gr.Button("Generate Python Code and Execute")
|
174 |
-
shell_tool_button = gr.Button("Generate Sehll Code and Execute")
|
175 |
-
with gr.Column():
|
176 |
-
tool_chatbot = gr.Chatbot(elem_id="chatbot").style(container=False)
|
177 |
-
tool_button.click(generateCodeAndExcute,[tool_request, tool_chatbot, language], [tool_chatbot])
|
178 |
-
python_tool_button.click(generatePyhonCodeAndExcute,[tool_request, tool_chatbot], [tool_chatbot])
|
179 |
-
shell_tool_button.click(generateShellCodeAndExcute,[tool_request, tool_chatbot], [tool_chatbot])
|
180 |
-
|
181 |
-
txt.submit(sendMessage, [chatbot, txt], [chatbot]).then(
|
182 |
-
feedBack, [context, story, chatbot, txt], [chatbot, txt, context])
|
183 |
-
|
184 |
-
code.submit(sendMessage, [code_chatbot, code], [code_chatbot]).then(
|
185 |
-
generateCode, [code, code_chatbot], [code_chatbot, code])
|
186 |
-
|
187 |
-
faq.submit(faqFromLocal, [faq, faq_chatbot], [faq_chatbot, faq])
|
188 |
-
|
189 |
-
demo.load(loadContext, [], [context])
|
190 |
-
demo.launch()
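# --- Illustrative sketch, not part of the original app.py above ---
# The UI above leans on Gradio's event chaining: Button.click(...).then(...) runs a
# first handler (append the user message) and then a second one (fill in the reply).
# A minimal, self-contained version of that pattern, assuming Gradio 3.x, might look
# like this; the echo handler is a stand-in, not the project's real chains.
import gradio as gr

def _add_user_message(history, text):
    # Append the new user turn with an empty bot slot and clear the textbox.
    history.append((text, None))
    return history, ""

def _add_bot_reply(history):
    # Fill the empty bot slot of the last turn.
    user_text = history[-1][0]
    history[-1] = (user_text, f"echo: {user_text}")
    return history

with gr.Blocks() as sketch:
    chat = gr.Chatbot()
    box = gr.Textbox(show_label=False, placeholder="Enter text and press enter")
    gr.Button("Send").click(_add_user_message, [chat, box], [chat, box]).then(
        _add_bot_reply, [chat], [chat])

# sketch.launch()  # left commented out; the original demo.launch() above already blocks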
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/transforms.py
DELETED
@@ -1,209 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
-
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
-
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
-
|
11 |
-
|
12 |
-
def piecewise_rational_quadratic_transform(
|
13 |
-
inputs,
|
14 |
-
unnormalized_widths,
|
15 |
-
unnormalized_heights,
|
16 |
-
unnormalized_derivatives,
|
17 |
-
inverse=False,
|
18 |
-
tails=None,
|
19 |
-
tail_bound=1.0,
|
20 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
21 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
22 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
23 |
-
):
|
24 |
-
if tails is None:
|
25 |
-
spline_fn = rational_quadratic_spline
|
26 |
-
spline_kwargs = {}
|
27 |
-
else:
|
28 |
-
spline_fn = unconstrained_rational_quadratic_spline
|
29 |
-
spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
|
30 |
-
|
31 |
-
outputs, logabsdet = spline_fn(
|
32 |
-
inputs=inputs,
|
33 |
-
unnormalized_widths=unnormalized_widths,
|
34 |
-
unnormalized_heights=unnormalized_heights,
|
35 |
-
unnormalized_derivatives=unnormalized_derivatives,
|
36 |
-
inverse=inverse,
|
37 |
-
min_bin_width=min_bin_width,
|
38 |
-
min_bin_height=min_bin_height,
|
39 |
-
min_derivative=min_derivative,
|
40 |
-
**spline_kwargs
|
41 |
-
)
|
42 |
-
return outputs, logabsdet
|
43 |
-
|
44 |
-
|
45 |
-
def searchsorted(bin_locations, inputs, eps=1e-6):
|
46 |
-
bin_locations[..., -1] += eps
|
47 |
-
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
|
48 |
-
|
49 |
-
|
50 |
-
def unconstrained_rational_quadratic_spline(
|
51 |
-
inputs,
|
52 |
-
unnormalized_widths,
|
53 |
-
unnormalized_heights,
|
54 |
-
unnormalized_derivatives,
|
55 |
-
inverse=False,
|
56 |
-
tails="linear",
|
57 |
-
tail_bound=1.0,
|
58 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
59 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
60 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
61 |
-
):
|
62 |
-
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
63 |
-
outside_interval_mask = ~inside_interval_mask
|
64 |
-
|
65 |
-
outputs = torch.zeros_like(inputs)
|
66 |
-
logabsdet = torch.zeros_like(inputs)
|
67 |
-
|
68 |
-
if tails == "linear":
|
69 |
-
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
70 |
-
constant = np.log(np.exp(1 - min_derivative) - 1)
|
71 |
-
unnormalized_derivatives[..., 0] = constant
|
72 |
-
unnormalized_derivatives[..., -1] = constant
|
73 |
-
|
74 |
-
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
75 |
-
logabsdet[outside_interval_mask] = 0
|
76 |
-
else:
|
77 |
-
raise RuntimeError("{} tails are not implemented.".format(tails))
|
78 |
-
|
79 |
-
(
|
80 |
-
outputs[inside_interval_mask],
|
81 |
-
logabsdet[inside_interval_mask],
|
82 |
-
) = rational_quadratic_spline(
|
83 |
-
inputs=inputs[inside_interval_mask],
|
84 |
-
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
-
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
-
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
-
inverse=inverse,
|
88 |
-
left=-tail_bound,
|
89 |
-
right=tail_bound,
|
90 |
-
bottom=-tail_bound,
|
91 |
-
top=tail_bound,
|
92 |
-
min_bin_width=min_bin_width,
|
93 |
-
min_bin_height=min_bin_height,
|
94 |
-
min_derivative=min_derivative,
|
95 |
-
)
|
96 |
-
|
97 |
-
return outputs, logabsdet
|
98 |
-
|
99 |
-
|
100 |
-
def rational_quadratic_spline(
|
101 |
-
inputs,
|
102 |
-
unnormalized_widths,
|
103 |
-
unnormalized_heights,
|
104 |
-
unnormalized_derivatives,
|
105 |
-
inverse=False,
|
106 |
-
left=0.0,
|
107 |
-
right=1.0,
|
108 |
-
bottom=0.0,
|
109 |
-
top=1.0,
|
110 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
111 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
112 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
113 |
-
):
|
114 |
-
if torch.min(inputs) < left or torch.max(inputs) > right:
|
115 |
-
raise ValueError("Input to a transform is not within its domain")
|
116 |
-
|
117 |
-
num_bins = unnormalized_widths.shape[-1]
|
118 |
-
|
119 |
-
if min_bin_width * num_bins > 1.0:
|
120 |
-
raise ValueError("Minimal bin width too large for the number of bins")
|
121 |
-
if min_bin_height * num_bins > 1.0:
|
122 |
-
raise ValueError("Minimal bin height too large for the number of bins")
|
123 |
-
|
124 |
-
widths = F.softmax(unnormalized_widths, dim=-1)
|
125 |
-
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
126 |
-
cumwidths = torch.cumsum(widths, dim=-1)
|
127 |
-
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
|
128 |
-
cumwidths = (right - left) * cumwidths + left
|
129 |
-
cumwidths[..., 0] = left
|
130 |
-
cumwidths[..., -1] = right
|
131 |
-
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
132 |
-
|
133 |
-
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
134 |
-
|
135 |
-
heights = F.softmax(unnormalized_heights, dim=-1)
|
136 |
-
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
137 |
-
cumheights = torch.cumsum(heights, dim=-1)
|
138 |
-
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
|
139 |
-
cumheights = (top - bottom) * cumheights + bottom
|
140 |
-
cumheights[..., 0] = bottom
|
141 |
-
cumheights[..., -1] = top
|
142 |
-
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
143 |
-
|
144 |
-
if inverse:
|
145 |
-
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
146 |
-
else:
|
147 |
-
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
148 |
-
|
149 |
-
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
150 |
-
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
151 |
-
|
152 |
-
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
153 |
-
delta = heights / widths
|
154 |
-
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
155 |
-
|
156 |
-
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
157 |
-
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
158 |
-
|
159 |
-
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
160 |
-
|
161 |
-
if inverse:
|
162 |
-
a = (inputs - input_cumheights) * (
|
163 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
164 |
-
) + input_heights * (input_delta - input_derivatives)
|
165 |
-
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
|
166 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
167 |
-
)
|
168 |
-
c = -input_delta * (inputs - input_cumheights)
|
169 |
-
|
170 |
-
discriminant = b.pow(2) - 4 * a * c
|
171 |
-
assert (discriminant >= 0).all()
|
172 |
-
|
173 |
-
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
174 |
-
outputs = root * input_bin_widths + input_cumwidths
|
175 |
-
|
176 |
-
theta_one_minus_theta = root * (1 - root)
|
177 |
-
denominator = input_delta + (
|
178 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
179 |
-
* theta_one_minus_theta
|
180 |
-
)
|
181 |
-
derivative_numerator = input_delta.pow(2) * (
|
182 |
-
input_derivatives_plus_one * root.pow(2)
|
183 |
-
+ 2 * input_delta * theta_one_minus_theta
|
184 |
-
+ input_derivatives * (1 - root).pow(2)
|
185 |
-
)
|
186 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
187 |
-
|
188 |
-
return outputs, -logabsdet
|
189 |
-
else:
|
190 |
-
theta = (inputs - input_cumwidths) / input_bin_widths
|
191 |
-
theta_one_minus_theta = theta * (1 - theta)
|
192 |
-
|
193 |
-
numerator = input_heights * (
|
194 |
-
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
|
195 |
-
)
|
196 |
-
denominator = input_delta + (
|
197 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
198 |
-
* theta_one_minus_theta
|
199 |
-
)
|
200 |
-
outputs = input_cumheights + numerator / denominator
|
201 |
-
|
202 |
-
derivative_numerator = input_delta.pow(2) * (
|
203 |
-
input_derivatives_plus_one * theta.pow(2)
|
204 |
-
+ 2 * input_delta * theta_one_minus_theta
|
205 |
-
+ input_derivatives * (1 - theta).pow(2)
|
206 |
-
)
|
207 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
208 |
-
|
209 |
-
return outputs, logabsdet
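# --- Illustrative usage sketch, not part of the original module ---
# Shape convention assumed here: with K bins and tails="linear", the widths and
# heights carry K values per input element and the derivatives K - 1 (the two
# boundary derivatives are added by the F.pad call above).
if __name__ == "__main__":
    K = 8
    x = torch.rand(4, 10) * 2 - 1          # inputs inside [-tail_bound, tail_bound]
    w = torch.randn(4, 10, K)              # unnormalized bin widths
    h = torch.randn(4, 10, K)              # unnormalized bin heights
    d = torch.randn(4, 10, K - 1)          # unnormalized knot derivatives
    y, logdet = piecewise_rational_quadratic_transform(
        x, w, h, d, inverse=False, tails="linear", tail_bound=1.0
    )
    x_rec, inv_logdet = piecewise_rational_quadratic_transform(
        y, w, h, d, inverse=True, tails="linear", tail_bound=1.0
    )
    # x_rec should match x up to numerical error, and logdet + inv_logdet should be ≈ 0.
    print(torch.allclose(x, x_rec, atol=1e-4), (logdet + inv_logdet).abs().max())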
spaces/AI-Zero-to-Hero/03-GR-AI-Text2ArtGenerator/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: 03 GR AI Text2ArtGenerator
emoji: 🦀
colorFrom: indigo
colorTo: green
sdk: gradio
sdk_version: 3.4
app_file: app.py
pinned: false
license: artistic-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIConsultant/MusicGen/audiocraft/data/audio_dataset.py
DELETED
@@ -1,587 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
"""AudioDataset support. In order to handle a larger number of files
|
7 |
-
without having to scan again the folders, we precompute some metadata
|
8 |
-
(filename, sample rate, duration), and use that to efficiently sample audio segments.
|
9 |
-
"""
|
10 |
-
import argparse
|
11 |
-
import copy
|
12 |
-
from concurrent.futures import ThreadPoolExecutor, Future
|
13 |
-
from dataclasses import dataclass, fields
|
14 |
-
from contextlib import ExitStack
|
15 |
-
from functools import lru_cache
|
16 |
-
import gzip
|
17 |
-
import json
|
18 |
-
import logging
|
19 |
-
import os
|
20 |
-
from pathlib import Path
|
21 |
-
import random
|
22 |
-
import sys
|
23 |
-
import typing as tp
|
24 |
-
|
25 |
-
import torch
|
26 |
-
import torch.nn.functional as F
|
27 |
-
|
28 |
-
from .audio import audio_read, audio_info
|
29 |
-
from .audio_utils import convert_audio
|
30 |
-
from .zip import PathInZip
|
31 |
-
|
32 |
-
try:
|
33 |
-
import dora
|
34 |
-
except ImportError:
|
35 |
-
dora = None # type: ignore
|
36 |
-
|
37 |
-
|
38 |
-
@dataclass(order=True)
|
39 |
-
class BaseInfo:
|
40 |
-
|
41 |
-
@classmethod
|
42 |
-
def _dict2fields(cls, dictionary: dict):
|
43 |
-
return {
|
44 |
-
field.name: dictionary[field.name]
|
45 |
-
for field in fields(cls) if field.name in dictionary
|
46 |
-
}
|
47 |
-
|
48 |
-
@classmethod
|
49 |
-
def from_dict(cls, dictionary: dict):
|
50 |
-
_dictionary = cls._dict2fields(dictionary)
|
51 |
-
return cls(**_dictionary)
|
52 |
-
|
53 |
-
def to_dict(self):
|
54 |
-
return {
|
55 |
-
field.name: self.__getattribute__(field.name)
|
56 |
-
for field in fields(self)
|
57 |
-
}
|
58 |
-
|
59 |
-
|
60 |
-
@dataclass(order=True)
|
61 |
-
class AudioMeta(BaseInfo):
|
62 |
-
path: str
|
63 |
-
duration: float
|
64 |
-
sample_rate: int
|
65 |
-
amplitude: tp.Optional[float] = None
|
66 |
-
weight: tp.Optional[float] = None
|
67 |
-
# info_path is used to load additional information about the audio file that is stored in zip files.
|
68 |
-
info_path: tp.Optional[PathInZip] = None
|
69 |
-
|
70 |
-
@classmethod
|
71 |
-
def from_dict(cls, dictionary: dict):
|
72 |
-
base = cls._dict2fields(dictionary)
|
73 |
-
if 'info_path' in base and base['info_path'] is not None:
|
74 |
-
base['info_path'] = PathInZip(base['info_path'])
|
75 |
-
return cls(**base)
|
76 |
-
|
77 |
-
def to_dict(self):
|
78 |
-
d = super().to_dict()
|
79 |
-
if d['info_path'] is not None:
|
80 |
-
d['info_path'] = str(d['info_path'])
|
81 |
-
return d
|
82 |
-
|
83 |
-
|
84 |
-
@dataclass(order=True)
|
85 |
-
class SegmentInfo(BaseInfo):
|
86 |
-
meta: AudioMeta
|
87 |
-
seek_time: float
|
88 |
-
# The following values are given once the audio is processed, e.g.
|
89 |
-
# at the target sample rate and target number of channels.
|
90 |
-
n_frames: int # actual number of frames without padding
|
91 |
-
total_frames: int # total number of frames, padding included
|
92 |
-
sample_rate: int # actual sample rate
|
93 |
-
channels: int # number of audio channels.
|
94 |
-
|
95 |
-
|
96 |
-
DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a']
|
97 |
-
|
98 |
-
logger = logging.getLogger(__name__)
|
99 |
-
|
100 |
-
|
101 |
-
def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta:
|
102 |
-
"""AudioMeta from a path to an audio file.
|
103 |
-
|
104 |
-
Args:
|
105 |
-
file_path (str): Resolved path of valid audio file.
|
106 |
-
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
|
107 |
-
Returns:
|
108 |
-
AudioMeta: Audio file path and its metadata.
|
109 |
-
"""
|
110 |
-
info = audio_info(file_path)
|
111 |
-
amplitude: tp.Optional[float] = None
|
112 |
-
if not minimal:
|
113 |
-
wav, sr = audio_read(file_path)
|
114 |
-
amplitude = wav.abs().max().item()
|
115 |
-
return AudioMeta(file_path, info.duration, info.sample_rate, amplitude)
|
116 |
-
|
117 |
-
|
118 |
-
def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta:
|
119 |
-
"""If Dora is available as a dependency, try to resolve potential relative paths
|
120 |
-
in list of AudioMeta. This method is expected to be used when loading meta from file.
|
121 |
-
|
122 |
-
Args:
|
123 |
-
m (AudioMeta): Audio meta to resolve.
|
124 |
-
fast (bool): If True, uses a really fast check for determining if a file
|
125 |
-
is already absolute or not. Only valid on Linux/Mac.
|
126 |
-
Returns:
|
127 |
-
AudioMeta: Audio meta with resolved path.
|
128 |
-
"""
|
129 |
-
def is_abs(m):
|
130 |
-
if fast:
|
131 |
-
return str(m)[0] == '/'
|
132 |
-
else:
|
133 |
-
return os.path.isabs(str(m))
|
134 |
-
|
135 |
-
if not dora:
|
136 |
-
return m
|
137 |
-
|
138 |
-
if not is_abs(m.path):
|
139 |
-
m.path = dora.git_save.to_absolute_path(m.path)
|
140 |
-
if m.info_path is not None and not is_abs(m.info_path.zip_path):
|
141 |
-
m.info_path.zip_path = dora.git_save.to_absolute_path(m.path)
|
142 |
-
return m
|
143 |
-
|
144 |
-
|
145 |
-
def find_audio_files(path: tp.Union[Path, str],
|
146 |
-
exts: tp.List[str] = DEFAULT_EXTS,
|
147 |
-
resolve: bool = True,
|
148 |
-
minimal: bool = True,
|
149 |
-
progress: bool = False,
|
150 |
-
workers: int = 0) -> tp.List[AudioMeta]:
|
151 |
-
"""Build a list of AudioMeta from a given path,
|
152 |
-
collecting relevant audio files and fetching meta info.
|
153 |
-
|
154 |
-
Args:
|
155 |
-
path (str or Path): Path to folder containing audio files.
|
156 |
-
exts (list of str): List of file extensions to consider for audio files.
|
157 |
-
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
|
158 |
-
progress (bool): Whether to log progress on audio files collection.
|
159 |
-
workers (int): number of parallel workers, if 0, use only the current thread.
|
160 |
-
Returns:
|
161 |
-
list of AudioMeta: List of audio file path and its metadata.
|
162 |
-
"""
|
163 |
-
audio_files = []
|
164 |
-
futures: tp.List[Future] = []
|
165 |
-
pool: tp.Optional[ThreadPoolExecutor] = None
|
166 |
-
with ExitStack() as stack:
|
167 |
-
if workers > 0:
|
168 |
-
pool = ThreadPoolExecutor(workers)
|
169 |
-
stack.enter_context(pool)
|
170 |
-
|
171 |
-
if progress:
|
172 |
-
print("Finding audio files...")
|
173 |
-
for root, folders, files in os.walk(path, followlinks=True):
|
174 |
-
for file in files:
|
175 |
-
full_path = Path(root) / file
|
176 |
-
if full_path.suffix.lower() in exts:
|
177 |
-
audio_files.append(full_path)
|
178 |
-
if pool is not None:
|
179 |
-
futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal))
|
180 |
-
if progress:
|
181 |
-
print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr)
|
182 |
-
|
183 |
-
if progress:
|
184 |
-
print("Getting audio metadata...")
|
185 |
-
meta: tp.List[AudioMeta] = []
|
186 |
-
for idx, file_path in enumerate(audio_files):
|
187 |
-
try:
|
188 |
-
if pool is None:
|
189 |
-
m = _get_audio_meta(str(file_path), minimal)
|
190 |
-
else:
|
191 |
-
m = futures[idx].result()
|
192 |
-
if resolve:
|
193 |
-
m = _resolve_audio_meta(m)
|
194 |
-
except Exception as err:
|
195 |
-
print("Error with", str(file_path), err, file=sys.stderr)
|
196 |
-
continue
|
197 |
-
meta.append(m)
|
198 |
-
if progress:
|
199 |
-
print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr)
|
200 |
-
meta.sort()
|
201 |
-
return meta
|
202 |
-
|
203 |
-
|
204 |
-
def load_audio_meta(path: tp.Union[str, Path],
|
205 |
-
resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]:
|
206 |
-
"""Load list of AudioMeta from an optionally compressed json file.
|
207 |
-
|
208 |
-
Args:
|
209 |
-
path (str or Path): Path to JSON file.
|
210 |
-
resolve (bool): Whether to resolve the path from AudioMeta (default=True).
|
211 |
-
fast (bool): activates some tricks to make things faster.
|
212 |
-
Returns:
|
213 |
-
list of AudioMeta: List of audio file path and its total duration.
|
214 |
-
"""
|
215 |
-
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
|
216 |
-
with open_fn(path, 'rb') as fp: # type: ignore
|
217 |
-
lines = fp.readlines()
|
218 |
-
meta = []
|
219 |
-
for line in lines:
|
220 |
-
d = json.loads(line)
|
221 |
-
m = AudioMeta.from_dict(d)
|
222 |
-
if resolve:
|
223 |
-
m = _resolve_audio_meta(m, fast=fast)
|
224 |
-
meta.append(m)
|
225 |
-
return meta
|
226 |
-
|
227 |
-
|
228 |
-
def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]):
|
229 |
-
"""Save the audio metadata to the file pointer as json.
|
230 |
-
|
231 |
-
Args:
|
232 |
-
path (str or Path): Path to JSON file.
|
233 |
-
metadata (list of BaseAudioMeta): List of audio meta to save.
|
234 |
-
"""
|
235 |
-
Path(path).parent.mkdir(exist_ok=True, parents=True)
|
236 |
-
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
|
237 |
-
with open_fn(path, 'wb') as fp: # type: ignore
|
238 |
-
for m in meta:
|
239 |
-
json_str = json.dumps(m.to_dict()) + '\n'
|
240 |
-
json_bytes = json_str.encode('utf-8')
|
241 |
-
fp.write(json_bytes)
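# --- Illustrative usage sketch, not part of the original module ---
# Typical flow: scan a folder once, cache the metadata as (optionally gzipped)
# JSON lines, then reload the cache instead of re-scanning. The paths below are
# placeholders.
if __name__ == "__main__":
    metas = find_audio_files("/path/to/audio", minimal=True, progress=True, workers=4)
    save_audio_meta("./audio_meta.jsonl.gz", metas)
    reloaded = load_audio_meta("./audio_meta.jsonl.gz")
    total = sum(m.duration for m in reloaded)
    print(f"{len(reloaded)} files, {total / 3600:.1f} hours in total")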
|
242 |
-
|
243 |
-
|
244 |
-
class AudioDataset:
|
245 |
-
"""Base audio dataset.
|
246 |
-
|
247 |
-
The dataset takes a list of AudioMeta and create a dataset composed of segments of audio
|
248 |
-
and potentially additional information, by creating random segments from the list of audio
|
249 |
-
files referenced in the metadata and applying minimal data pre-processing such as resampling,
|
250 |
-
mixing of channels, padding, etc.
|
251 |
-
|
252 |
-
If no segment_duration value is provided, the AudioDataset will return the full wav for each
|
253 |
-
audio file. Otherwise, it will randomly sample audio files and create a segment of the specified
|
254 |
-
duration, applying padding if required.
|
255 |
-
|
256 |
-
By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True
|
257 |
-
allows to return a tuple containing the torch Tensor and additional metadata on the segment and the
|
258 |
-
original audio meta.
|
259 |
-
|
260 |
-
Note that you can call `start_epoch(epoch)` in order to get
|
261 |
-
a deterministic "randomization" for `shuffle=True`.
|
262 |
-
For a given epoch and dataset index, this will always return the same extract.
|
263 |
-
You can get back some diversity by setting the `shuffle_seed` param.
|
264 |
-
|
265 |
-
Args:
|
266 |
-
meta (list of AudioMeta): List of audio files metadata.
|
267 |
-
segment_duration (float, optional): Optional segment duration of audio to load.
|
268 |
-
If not specified, the dataset will load the full audio segment from the file.
|
269 |
-
shuffle (bool): Set to `True` to have the data reshuffled at every epoch.
|
270 |
-
sample_rate (int): Target sample rate of the loaded audio samples.
|
271 |
-
channels (int): Target number of channels of the loaded audio samples.
|
272 |
-
sample_on_duration (bool): Set to `True` to sample segments with probability
|
273 |
-
dependent on audio file duration. This is only used if `segment_duration` is provided.
|
274 |
-
sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of
|
275 |
-
`AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product
|
276 |
-
of the file duration and file weight. This is only used if `segment_duration` is provided.
|
277 |
-
min_segment_ratio (float): Minimum segment ratio to use when the audio file
|
278 |
-
is shorter than the desired segment.
|
279 |
-
max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset.
|
280 |
-
return_info (bool): Whether to return the wav only or return wav along with segment info and metadata.
|
281 |
-
min_audio_duration (float, optional): Minimum audio file duration, in seconds, if provided
|
282 |
-
audio shorter than this will be filtered out.
|
283 |
-
max_audio_duration (float, optional): Maximal audio file duration in seconds, if provided
|
284 |
-
audio longer than this will be filtered out.
|
285 |
-
shuffle_seed (int): can be used to further randomize
|
286 |
-
load_wav (bool): if False, skip loading the wav but returns a tensor of 0
|
287 |
-
with the expected segment_duration (which must be provided if load_wav is False).
|
288 |
-
permutation_on_files (bool): only if `sample_on_weight` and `sample_on_duration`
|
289 |
-
are False. Will ensure a permutation on files when going through the dataset.
|
290 |
-
In that case the epoch number must be provided in order for the model
|
291 |
-
to continue the permutation across epochs. In that case, it is assumed
|
292 |
-
that `num_samples = total_batch_size * num_updates_per_epoch`, with
|
293 |
-
`total_batch_size` the overall batch size accounting for all gpus.
|
294 |
-
"""
|
295 |
-
def __init__(self,
|
296 |
-
meta: tp.List[AudioMeta],
|
297 |
-
segment_duration: tp.Optional[float] = None,
|
298 |
-
shuffle: bool = True,
|
299 |
-
num_samples: int = 10_000,
|
300 |
-
sample_rate: int = 48_000,
|
301 |
-
channels: int = 2,
|
302 |
-
pad: bool = True,
|
303 |
-
sample_on_duration: bool = True,
|
304 |
-
sample_on_weight: bool = True,
|
305 |
-
min_segment_ratio: float = 0.5,
|
306 |
-
max_read_retry: int = 10,
|
307 |
-
return_info: bool = False,
|
308 |
-
min_audio_duration: tp.Optional[float] = None,
|
309 |
-
max_audio_duration: tp.Optional[float] = None,
|
310 |
-
shuffle_seed: int = 0,
|
311 |
-
load_wav: bool = True,
|
312 |
-
permutation_on_files: bool = False,
|
313 |
-
):
|
314 |
-
assert len(meta) > 0, "No audio meta provided to AudioDataset. Please check loading of audio meta."
|
315 |
-
assert segment_duration is None or segment_duration > 0
|
316 |
-
assert segment_duration is None or min_segment_ratio >= 0
|
317 |
-
self.segment_duration = segment_duration
|
318 |
-
self.min_segment_ratio = min_segment_ratio
|
319 |
-
self.max_audio_duration = max_audio_duration
|
320 |
-
self.min_audio_duration = min_audio_duration
|
321 |
-
if self.min_audio_duration is not None and self.max_audio_duration is not None:
|
322 |
-
assert self.min_audio_duration <= self.max_audio_duration
|
323 |
-
self.meta: tp.List[AudioMeta] = self._filter_duration(meta)
|
324 |
-
assert len(self.meta) # Fail fast if all data has been filtered.
|
325 |
-
self.total_duration = sum(d.duration for d in self.meta)
|
326 |
-
|
327 |
-
if segment_duration is None:
|
328 |
-
num_samples = len(self.meta)
|
329 |
-
self.num_samples = num_samples
|
330 |
-
self.shuffle = shuffle
|
331 |
-
self.sample_rate = sample_rate
|
332 |
-
self.channels = channels
|
333 |
-
self.pad = pad
|
334 |
-
self.sample_on_weight = sample_on_weight
|
335 |
-
self.sample_on_duration = sample_on_duration
|
336 |
-
self.sampling_probabilities = self._get_sampling_probabilities()
|
337 |
-
self.max_read_retry = max_read_retry
|
338 |
-
self.return_info = return_info
|
339 |
-
self.shuffle_seed = shuffle_seed
|
340 |
-
self.current_epoch: tp.Optional[int] = None
|
341 |
-
self.load_wav = load_wav
|
342 |
-
if not load_wav:
|
343 |
-
assert segment_duration is not None
|
344 |
-
self.permutation_on_files = permutation_on_files
|
345 |
-
if permutation_on_files:
|
346 |
-
assert not self.sample_on_duration
|
347 |
-
assert not self.sample_on_weight
|
348 |
-
assert self.shuffle
|
349 |
-
|
350 |
-
def start_epoch(self, epoch: int):
|
351 |
-
self.current_epoch = epoch
|
352 |
-
|
353 |
-
def __len__(self):
|
354 |
-
return self.num_samples
|
355 |
-
|
356 |
-
def _get_sampling_probabilities(self, normalized: bool = True):
|
357 |
-
"""Return the sampling probabilities for each file inside `self.meta`."""
|
358 |
-
scores: tp.List[float] = []
|
359 |
-
for file_meta in self.meta:
|
360 |
-
score = 1.
|
361 |
-
if self.sample_on_weight and file_meta.weight is not None:
|
362 |
-
score *= file_meta.weight
|
363 |
-
if self.sample_on_duration:
|
364 |
-
score *= file_meta.duration
|
365 |
-
scores.append(score)
|
366 |
-
probabilities = torch.tensor(scores)
|
367 |
-
if normalized:
|
368 |
-
probabilities /= probabilities.sum()
|
369 |
-
return probabilities
|
370 |
-
|
371 |
-
@staticmethod
|
372 |
-
@lru_cache(16)
|
373 |
-
def _get_file_permutation(num_files: int, permutation_index: int, base_seed: int):
|
374 |
-
# Used to keep the most recent files permutation in memory implicitely.
|
375 |
-
# will work unless someone is using a lot of Datasets in parallel.
|
376 |
-
rng = torch.Generator()
|
377 |
-
rng.manual_seed(base_seed + permutation_index)
|
378 |
-
return torch.randperm(num_files, generator=rng)
|
379 |
-
|
380 |
-
def sample_file(self, index: int, rng: torch.Generator) -> AudioMeta:
|
381 |
-
"""Sample a given file from `self.meta`. Can be overridden in subclasses.
|
382 |
-
This is only called if `segment_duration` is not None.
|
383 |
-
|
384 |
-
You must use the provided random number generator `rng` for reproducibility.
|
385 |
-
You can further make use of the index accessed.
|
386 |
-
"""
|
387 |
-
if self.permutation_on_files:
|
388 |
-
assert self.current_epoch is not None
|
389 |
-
total_index = self.current_epoch * len(self) + index
|
390 |
-
permutation_index = total_index // len(self.meta)
|
391 |
-
relative_index = total_index % len(self.meta)
|
392 |
-
permutation = AudioDataset._get_file_permutation(
|
393 |
-
len(self.meta), permutation_index, self.shuffle_seed)
|
394 |
-
file_index = permutation[relative_index]
|
395 |
-
return self.meta[file_index]
|
396 |
-
|
397 |
-
if not self.sample_on_weight and not self.sample_on_duration:
|
398 |
-
file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item())
|
399 |
-
else:
|
400 |
-
file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item())
|
401 |
-
|
402 |
-
return self.meta[file_index]
|
403 |
-
|
404 |
-
def _audio_read(self, path: str, seek_time: float = 0, duration: float = -1):
|
405 |
-
# Override this method in subclass if needed.
|
406 |
-
if self.load_wav:
|
407 |
-
return audio_read(path, seek_time, duration, pad=False)
|
408 |
-
else:
|
409 |
-
assert self.segment_duration is not None
|
410 |
-
n_frames = int(self.sample_rate * self.segment_duration)
|
411 |
-
return torch.zeros(self.channels, n_frames), self.sample_rate
|
412 |
-
|
413 |
-
def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]:
|
414 |
-
if self.segment_duration is None:
|
415 |
-
file_meta = self.meta[index]
|
416 |
-
out, sr = audio_read(file_meta.path)
|
417 |
-
out = convert_audio(out, sr, self.sample_rate, self.channels)
|
418 |
-
n_frames = out.shape[-1]
|
419 |
-
segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames,
|
420 |
-
sample_rate=self.sample_rate, channels=out.shape[0])
|
421 |
-
else:
|
422 |
-
rng = torch.Generator()
|
423 |
-
if self.shuffle:
|
424 |
-
# We use index, plus extra randomness, either totally random if we don't know the epoch.
|
425 |
-
# otherwise we make use of the epoch number and optional shuffle_seed.
|
426 |
-
if self.current_epoch is None:
|
427 |
-
rng.manual_seed(index + self.num_samples * random.randint(0, 2**24))
|
428 |
-
else:
|
429 |
-
rng.manual_seed(index + self.num_samples * (self.current_epoch + self.shuffle_seed))
|
430 |
-
else:
|
431 |
-
# We only use index
|
432 |
-
rng.manual_seed(index)
|
433 |
-
|
434 |
-
for retry in range(self.max_read_retry):
|
435 |
-
file_meta = self.sample_file(index, rng)
|
436 |
-
# We add some variance in the file position even if audio file is smaller than segment
|
437 |
-
# without ending up with empty segments
|
438 |
-
max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio)
|
439 |
-
seek_time = torch.rand(1, generator=rng).item() * max_seek
|
440 |
-
try:
|
441 |
-
out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False)
|
442 |
-
out = convert_audio(out, sr, self.sample_rate, self.channels)
|
443 |
-
n_frames = out.shape[-1]
|
444 |
-
target_frames = int(self.segment_duration * self.sample_rate)
|
445 |
-
if self.pad:
|
446 |
-
out = F.pad(out, (0, target_frames - n_frames))
|
447 |
-
segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames,
|
448 |
-
sample_rate=self.sample_rate, channels=out.shape[0])
|
449 |
-
except Exception as exc:
|
450 |
-
logger.warning("Error opening file %s: %r", file_meta.path, exc)
|
451 |
-
if retry == self.max_read_retry - 1:
|
452 |
-
raise
|
453 |
-
else:
|
454 |
-
break
|
455 |
-
|
456 |
-
if self.return_info:
|
457 |
-
# Returns the wav and additional information on the wave segment
|
458 |
-
return out, segment_info
|
459 |
-
else:
|
460 |
-
return out
|
461 |
-
|
462 |
-
def collater(self, samples):
|
463 |
-
"""The collater function has to be provided to the dataloader
|
464 |
-
if AudioDataset has return_info=True in order to properly collate
|
465 |
-
the samples of a batch.
|
466 |
-
"""
|
467 |
-
if self.segment_duration is None and len(samples) > 1:
|
468 |
-
assert self.pad, "Must allow padding when batching examples of different durations."
|
469 |
-
|
470 |
-
# In this case the audio reaching the collater is of variable length as segment_duration=None.
|
471 |
-
to_pad = self.segment_duration is None and self.pad
|
472 |
-
if to_pad:
|
473 |
-
max_len = max([wav.shape[-1] for wav, _ in samples])
|
474 |
-
|
475 |
-
def _pad_wav(wav):
|
476 |
-
return F.pad(wav, (0, max_len - wav.shape[-1]))
|
477 |
-
|
478 |
-
if self.return_info:
|
479 |
-
if len(samples) > 0:
|
480 |
-
assert len(samples[0]) == 2
|
481 |
-
assert isinstance(samples[0][0], torch.Tensor)
|
482 |
-
assert isinstance(samples[0][1], SegmentInfo)
|
483 |
-
|
484 |
-
wavs = [wav for wav, _ in samples]
|
485 |
-
segment_infos = [copy.deepcopy(info) for _, info in samples]
|
486 |
-
|
487 |
-
if to_pad:
|
488 |
-
# Each wav could be of a different duration as they are not segmented.
|
489 |
-
for i in range(len(samples)):
|
490 |
-
# Determines the total length of the signal with padding, so we update here as we pad.
|
491 |
-
segment_infos[i].total_frames = max_len
|
492 |
-
wavs[i] = _pad_wav(wavs[i])
|
493 |
-
|
494 |
-
wav = torch.stack(wavs)
|
495 |
-
return wav, segment_infos
|
496 |
-
else:
|
497 |
-
assert isinstance(samples[0], torch.Tensor)
|
498 |
-
if to_pad:
|
499 |
-
samples = [_pad_wav(s) for s in samples]
|
500 |
-
return torch.stack(samples)
|
501 |
-
|
502 |
-
def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
|
503 |
-
"""Filters out audio files with audio durations that will not allow to sample examples from them."""
|
504 |
-
orig_len = len(meta)
|
505 |
-
|
506 |
-
# Filter data that is too short.
|
507 |
-
if self.min_audio_duration is not None:
|
508 |
-
meta = [m for m in meta if m.duration >= self.min_audio_duration]
|
509 |
-
|
510 |
-
# Filter data that is too long.
|
511 |
-
if self.max_audio_duration is not None:
|
512 |
-
meta = [m for m in meta if m.duration <= self.max_audio_duration]
|
513 |
-
|
514 |
-
filtered_len = len(meta)
|
515 |
-
removed_percentage = 100*(1-float(filtered_len)/orig_len)
|
516 |
-
msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage
|
517 |
-
if removed_percentage < 10:
|
518 |
-
logging.debug(msg)
|
519 |
-
else:
|
520 |
-
logging.warning(msg)
|
521 |
-
return meta
|
522 |
-
|
523 |
-
@classmethod
|
524 |
-
def from_meta(cls, root: tp.Union[str, Path], **kwargs):
|
525 |
-
"""Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file.
|
526 |
-
|
527 |
-
Args:
|
528 |
-
root (str or Path): Path to root folder containing audio files.
|
529 |
-
kwargs: Additional keyword arguments for the AudioDataset.
|
530 |
-
"""
|
531 |
-
root = Path(root)
|
532 |
-
if root.is_dir():
|
533 |
-
if (root / 'data.jsonl').exists():
|
534 |
-
root = root / 'data.jsonl'
|
535 |
-
elif (root / 'data.jsonl.gz').exists():
|
536 |
-
root = root / 'data.jsonl.gz'
|
537 |
-
else:
|
538 |
-
raise ValueError("Don't know where to read metadata from in the dir. "
|
539 |
-
"Expecting either a data.jsonl or data.jsonl.gz file but none found.")
|
540 |
-
meta = load_audio_meta(root)
|
541 |
-
return cls(meta, **kwargs)
|
542 |
-
|
543 |
-
@classmethod
|
544 |
-
def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True,
|
545 |
-
exts: tp.List[str] = DEFAULT_EXTS, **kwargs):
|
546 |
-
"""Instantiate AudioDataset from a path containing (possibly nested) audio files.
|
547 |
-
|
548 |
-
Args:
|
549 |
-
root (str or Path): Path to root folder containing audio files.
|
550 |
-
minimal_meta (bool): Whether to only load minimal metadata or not.
|
551 |
-
exts (list of str): Extensions for audio files.
|
552 |
-
kwargs: Additional keyword arguments for the AudioDataset.
|
553 |
-
"""
|
554 |
-
root = Path(root)
|
555 |
-
if root.is_file():
|
556 |
-
meta = load_audio_meta(root, resolve=True)
|
557 |
-
else:
|
558 |
-
meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True)
|
559 |
-
return cls(meta, **kwargs)
|
560 |
-
|
561 |
-
|
562 |
-
def main():
|
563 |
-
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
|
564 |
-
parser = argparse.ArgumentParser(
|
565 |
-
prog='audio_dataset',
|
566 |
-
description='Generate .jsonl files by scanning a folder.')
|
567 |
-
parser.add_argument('root', help='Root folder with all the audio files')
|
568 |
-
parser.add_argument('output_meta_file',
|
569 |
-
help='Output file to store the metadata, ')
|
570 |
-
parser.add_argument('--complete',
|
571 |
-
action='store_false', dest='minimal', default=True,
|
572 |
-
help='Retrieve all metadata, even the one that are expansive '
|
573 |
-
'to compute (e.g. normalization).')
|
574 |
-
parser.add_argument('--resolve',
|
575 |
-
action='store_true', default=False,
|
576 |
-
help='Resolve the paths to be absolute and with no symlinks.')
|
577 |
-
parser.add_argument('--workers',
|
578 |
-
default=10, type=int,
|
579 |
-
help='Number of workers.')
|
580 |
-
args = parser.parse_args()
|
581 |
-
meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True,
|
582 |
-
resolve=args.resolve, minimal=args.minimal, workers=args.workers)
|
583 |
-
save_audio_meta(args.output_meta_file, meta)
|
584 |
-
|
585 |
-
|
586 |
-
if __name__ == '__main__':
|
587 |
-
main()
|
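
For context on how this deleted dataset class is typically driven, a minimal usage sketch follows. It assumes the audiocraft package is importable and that a hypothetical folder ./audio contains wav files; the folder path, batch size, and sample rate here are illustrative choices, not values taken from the file above.

# Minimal sketch (assumptions: audiocraft installed, ./audio holds wav files).
import torch
from audiocraft.data.audio_dataset import AudioDataset

# Build the dataset from a folder, requesting 5-second mono segments at 16 kHz
# and asking for the per-segment metadata to be returned alongside the wav.
dataset = AudioDataset.from_path(
    './audio', minimal_meta=True,
    segment_duration=5.0, sample_rate=16_000, channels=1, return_info=True)

# With return_info=True the dataset's own collater must be given to the DataLoader
# so the (wav, SegmentInfo) tuples are batched correctly.
loader = torch.utils.data.DataLoader(
    dataset, batch_size=4, collate_fn=dataset.collater)

dataset.start_epoch(0)  # deterministic "randomization" when shuffle=True
wav, infos = next(iter(loader))
print(wav.shape, len(infos))  # e.g. torch.Size([4, 1, 80000]) and 4
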
spaces/AICopilot/Dropbox/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Dropbox
emoji: 🌍
colorFrom: pink
colorTo: indigo
sdk: streamlit
sdk_version: 1.2.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference

spaces/AIFILMS/StyleGANEX/configs/transforms_config.py
DELETED
@@ -1,242 +0,0 @@
from abc import abstractmethod
import torchvision.transforms as transforms
from datasets import augmentations


class TransformsConfig(object):

    def __init__(self, opts):
        self.opts = opts

    @abstractmethod
    def get_transforms(self):
        pass


class EncodeTransforms(TransformsConfig):

    def __init__(self, opts):
        super(EncodeTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': None,
            'transform_test': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class FrontalizationTransforms(TransformsConfig):

    def __init__(self, opts):
        super(FrontalizationTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_test': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class SketchToImageTransforms(TransformsConfig):

    def __init__(self, opts):
        super(SketchToImageTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor()]),
            'transform_test': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor()]),
        }
        return transforms_dict


class SegToImageTransforms(TransformsConfig):

    def __init__(self, opts):
        super(SegToImageTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.ToOneHot(self.opts.label_nc),
                transforms.ToTensor()]),
            'transform_test': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.ToOneHot(self.opts.label_nc),
                transforms.ToTensor()])
        }
        return transforms_dict


class SuperResTransforms(TransformsConfig):

    def __init__(self, opts):
        super(SuperResTransforms, self).__init__(opts)

    def get_transforms(self):
        if self.opts.resize_factors is None:
            self.opts.resize_factors = '1,2,4,8,16,32'
        factors = [int(f) for f in self.opts.resize_factors.split(",")]
        print("Performing down-sampling with factors: {}".format(factors))
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((1280, 1280)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.BilinearResize(factors=factors),
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_test': transforms.Compose([
                transforms.Resize((1280, 1280)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.BilinearResize(factors=factors),
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class SuperResTransforms_320(TransformsConfig):

    def __init__(self, opts):
        super(SuperResTransforms_320, self).__init__(opts)

    def get_transforms(self):
        if self.opts.resize_factors is None:
            self.opts.resize_factors = '1,2,4,8,16,32'
        factors = [int(f) for f in self.opts.resize_factors.split(",")]
        print("Performing down-sampling with factors: {}".format(factors))
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.BilinearResize(factors=factors),
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_test': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                augmentations.BilinearResize(factors=factors),
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class ToonifyTransforms(TransformsConfig):

    def __init__(self, opts):
        super(ToonifyTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((1024, 1024)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_test': transforms.Compose([
                transforms.Resize((1024, 1024)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class EditingTransforms(TransformsConfig):

    def __init__(self, opts):
        super(EditingTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((1280, 1280)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_test': transforms.Compose([
                transforms.Resize((1280, 1280)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((320, 320)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict
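
As a rough illustration of how these config classes are consumed, the sketch below instantiates one of them and applies the returned test transform to an image. The SimpleNamespace opts object and the image filename are hypothetical stand-ins; in the original training code the opts come from the project's option parser.

# Minimal sketch (assumptions: PIL is available, 'face.png' exists, and opts would
# normally be built by the StyleGANEX/pSp argument parser rather than by hand).
from types import SimpleNamespace
from PIL import Image
from configs.transforms_config import EncodeTransforms

opts = SimpleNamespace(label_nc=0, resize_factors=None)
transforms_dict = EncodeTransforms(opts).get_transforms()

img = Image.open('face.png').convert('RGB')
tensor = transforms_dict['transform_test'](img)  # 3 x 320 x 320, normalized to [-1, 1]
print(tensor.shape)
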
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/base_model.py
DELETED
@@ -1,16 +0,0 @@
import torch


class BaseModel(torch.nn.Module):
    def load(self, path):
        """Load model from file.

        Args:
            path (str): file path
        """
        parameters = torch.load(path, map_location=torch.device('cpu'))

        if "optimizer" in parameters:
            parameters = parameters["model"]

        self.load_state_dict(parameters)
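
For reference, a short sketch of how a subclass of this base class would restore weights; the TinyModel subclass and the checkpoint filename are placeholders invented for illustration, not part of the deleted file.

# Minimal sketch (assumptions: TinyModel and 'tiny.pt' are made up for this example).
import torch


class TinyModel(BaseModel):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)


model = TinyModel()
torch.save(model.state_dict(), 'tiny.pt')  # plain state dict, no "optimizer" key
model.load('tiny.pt')                      # BaseModel.load maps to CPU and loads it
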
spaces/AISuperheroes/07GR-NLP-Seq2Seq-AutoQA/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: 07GR NLP Seq2Seq AutoQA
emoji: 😻
colorFrom: gray
colorTo: purple
sdk: gradio
sdk_version: 3.6
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Aaron299/bingo/Dockerfile
DELETED
@@ -1,7 +0,0 @@
FROM weaigc/bingo:latest

ARG DEBIAN_FRONTEND=noninteractive

ENV BING_HEADER ""

CMD npm start

spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/base_provider.py
DELETED
@@ -1,138 +0,0 @@
from __future__ import annotations

from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod

from .helper import get_event_loop, get_cookies, format_prompt
from ..typing import AsyncGenerator, CreateResult


class BaseProvider(ABC):
    url: str
    working: bool = False
    needs_auth: bool = False
    supports_stream: bool = False
    supports_gpt_35_turbo: bool = False
    supports_gpt_4: bool = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        raise NotImplementedError()

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        *,
        loop: AbstractEventLoop = None,
        executor: ThreadPoolExecutor = None,
        **kwargs
    ) -> str:
        if not loop:
            loop = get_event_loop()

        def create_func() -> str:
            return "".join(cls.create_completion(
                model,
                messages,
                False,
                **kwargs
            ))

        return await loop.run_in_executor(
            executor,
            create_func
        )

    @classmethod
    @property
    def params(cls) -> str:
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


class AsyncProvider(BaseProvider):
    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = False,
        **kwargs
    ) -> CreateResult:
        loop = get_event_loop()
        coro = cls.create_async(model, messages, **kwargs)
        yield loop.run_until_complete(coro)

    @staticmethod
    @abstractmethod
    async def create_async(
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        raise NotImplementedError()


class AsyncGeneratorProvider(AsyncProvider):
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> CreateResult:
        loop = get_event_loop()
        generator = cls.create_async_generator(
            model,
            messages,
            stream=stream,
            **kwargs
        )
        gen = generator.__aiter__()
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        return "".join([
            chunk async for chunk in cls.create_async_generator(
                model,
                messages,
                stream=False,
                **kwargs
            )
        ])

    @staticmethod
    @abstractmethod
    def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        raise NotImplementedError()
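
To show how these base classes were meant to be extended, here is a hedged sketch of a provider that simply echoes the last user message in streamed chunks. The EchoProvider name and its URL are invented for illustration and do not exist in g4f.

# Minimal sketch (assumption: EchoProvider is a made-up provider, not part of g4f).
class EchoProvider(BaseProvider):
    url = "https://example.invalid"
    working = True
    supports_stream = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        # Yield the reply in small chunks, the same shape the real providers use.
        reply = messages[-1]["content"]
        for i in range(0, len(reply), 8):
            yield reply[i:i + 8]


for chunk in EchoProvider.create_completion("gpt-3.5-turbo",
                                            [{"role": "user", "content": "hello"}],
                                            stream=True):
    print(chunk, end="")
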
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasinput.js
DELETED
@@ -1,2 +0,0 @@
import CanvasInput from './gameobjects/dynamictext/canvasinput/CanvasInput.js';
export default CanvasInput;

spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/networks.py
DELETED
@@ -1,966 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma

# ----------------------------------------------------------------------------


@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()

# ----------------------------------------------------------------------------


@misc.profiled_function
def modulated_conv2d(
    # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    x,
    # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    weight,
    # Modulation coefficients of shape [batch_size, in_channels].
    styles,
    noise=None,  # Optional noise tensor to add to the output activations.
    up=1,  # Integer upsampling factor.
    down=1,  # Integer downsampling factor.
    padding=0,  # Padding with respect to the upsampled image.
    # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    resample_filter=None,
    demodulate=True,  # Apply weight demodulation?
    # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    flip_weight=True,
    # Perform modulation, convolution, and demodulation as a single fused operation?
    fused_modconv=True,
):
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw])  # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None])  # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels])  # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
                           weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True))  # max_Ikk
        styles = styles / \
            styles.norm(float('inf'), dim=1, keepdim=True)  # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)  # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()  # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
            x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(
                batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings():  # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(
        x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    def __init__(self,
                 in_features,  # Number of input features.
                 out_features,  # Number of output features.
                 bias=True,  # Apply additive bias before the activation function?
                 # Activation function: 'relu', 'lrelu', etc.
                 activation='linear',
                 lr_multiplier=1,  # Learning rate multiplier.
                 bias_init=0,  # Initial value for the additive bias.
                 ):
        super().__init__()
        self.activation = activation
        self.weight = torch.nn.Parameter(torch.randn(
            [out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full(
            [out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight.to(x.dtype) * self.weight_gain
        b = self.bias
        if b is not None:
            b = b.to(x.dtype)
            if self.bias_gain != 1:
                b = b * self.bias_gain

        if self.activation == 'linear' and b is not None:
            x = torch.addmm(b.unsqueeze(0), x, w.t())
        else:
            x = x.matmul(w.t())
            x = bias_act.bias_act(x, b, act=self.activation)
        return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    def __init__(self,
                 in_channels,  # Number of input channels.
                 out_channels,  # Number of output channels.
                 # Width and height of the convolution kernel.
                 kernel_size,
                 bias=True,  # Apply additive bias before the activation function?
                 # Activation function: 'relu', 'lrelu', etc.
                 activation='linear',
                 up=1,  # Integer upsampling factor.
                 down=1,  # Integer downsampling factor.
                 # Low-pass filter to apply when resampling activations.
                 resample_filter=[1, 3, 3, 1],
                 # Clamp the output to +-X, None = disable clamping.
                 conv_clamp=None,
                 channels_last=False,  # Expect the input to have memory_format=channels_last?
                 trainable=True,  # Update the weights of this layer during training?
                 ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer(
            'resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
            memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1)  # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(
            x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation,
                              gain=act_gain, clamp=act_clamp)
        return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    def __init__(self,
                 # Input latent (Z) dimensionality, 0 = no latent.
                 z_dim,
                 # Conditioning label (C) dimensionality, 0 = no label.
                 c_dim,
                 # Intermediate latent (W) dimensionality.
                 w_dim,
                 # Number of intermediate latents to output, None = do not broadcast.
                 num_ws,
                 num_layers=8,  # Number of mapping layers.
                 # Label embedding dimensionality, None = same as w_dim.
                 embed_features=None,
                 # Number of intermediate features in the mapping layers, None = same as w_dim.
                 layer_features=None,
                 # Activation function: 'relu', 'lrelu', etc.
                 activation='lrelu',
                 # Learning rate multiplier for the mapping layers.
                 lr_multiplier=0.01,
                 # Decay for tracking the moving average of W during training, None = do not track.
                 w_avg_beta=0.995,
                 ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = [z_dim + embed_features] + \
            [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(
                in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W.
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(
                    dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(
                        x[:, :truncation_cutoff], truncation_psi)
        return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    def __init__(self,
                 in_channels,  # Number of input channels.
                 out_channels,  # Number of output channels.
                 # Intermediate latent (W) dimensionality.
                 w_dim,
                 resolution,  # Resolution of this layer.
                 kernel_size=3,  # Convolution kernel size.
                 up=1,  # Integer upsampling factor.
                 use_noise=True,  # Enable noise input?
                 # Activation function: 'relu', 'lrelu', etc.
                 activation='lrelu',
                 # Low-pass filter to apply when resampling activations.
                 resample_filter=[1, 3, 3, 1],
                 # Clamp the output of convolution layers to +-X, None = disable clamping.
                 conv_clamp=None,
                 channels_last=False,  # Use channels_last format for the weights?
                 square=False,  # default if for rectangle images
                 ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer(
            'resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        self.square = square

        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn(
            [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            if self.square:
                self.register_buffer(
                    'noise_const', torch.randn([resolution, resolution]))
            else:
                self.register_buffer('noise_const', torch.randn(
                    [resolution, resolution // 2]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        if self.square:
            misc.assert_shape(
                x, [None, self.weight.shape[1], in_resolution, in_resolution])
        else:
            misc.assert_shape(
                x, [None, self.weight.shape[1], in_resolution, in_resolution // 2])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == 'random':
            if self.square:
                noise = torch.randn(
                    [x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
            else:
                noise = torch.randn(
                    [x.shape[0], 1, self.resolution, self.resolution // 2], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1)  # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
                             padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(
            x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn(
            [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
                             demodulate=False, fused_modconv=fused_modconv)
        x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
        return x

# ----------------------------------------------------------------------------


@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    def __init__(self,
                 # Number of input channels, 0 = first block.
                 in_channels,
                 # Number of output channels.
                 out_channels,
                 # Intermediate latent (W) dimensionality.
                 w_dim,
                 # Resolution of this block.
                 resolution,
                 # Number of output color channels.
                 img_channels,
                 is_last,  # Is this the last block?
                 # Architecture: 'orig', 'skip', 'resnet'.
                 architecture='skip',
                 # Low-pass filter to apply when resampling activations.
                 resample_filter=[1, 3, 3, 1],
                 # Clamp the output of convolution layers to +-X, None = disable clamping.
                 conv_clamp=None,
                 use_fp16=False,  # Use FP16 for this block?
                 fp16_channels_last=False,  # Use channels-last memory format with FP16?
                 square=False,  # default is for rectangle images
                 # Arguments for SynthesisLayer.
                 **layer_kwargs,
                 ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer(
            'resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0
        self.square = square

        if in_channels == 0:
            if self.square:
                self.const = torch.nn.Parameter(torch.randn(
                    [out_channels, resolution, resolution]))
            else:  # rectangle
                self.const = torch.nn.Parameter(torch.randn(
                    [out_channels, resolution, resolution // 2]))

        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                                        resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
            self.num_conv += 1

        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
                                    conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
        self.num_conv += 1

        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                                    conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1

        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                                    resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs):
        misc.assert_shape(
            ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            with misc.suppress_tracer_warnings():  # this value will be treated as a constant
                fused_modconv = (not self.training) and (
                    dtype == torch.float32 or int(x.shape[0]) == 1)

        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            if self.square:
                misc.assert_shape(
                    x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            else:  # rectangle
                misc.assert_shape(
                    x, [None, self.in_channels, self.resolution // 2, self.resolution // 4])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter),
                           fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter),
                           fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
                           gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter),
                           fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter),
                           fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB.
        if img is not None:
            if self.square:
                misc.assert_shape(
                    img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            else:
                misc.assert_shape(
                    img, [None, self.img_channels, self.resolution // 2, self.resolution // 4])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32,
                     memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img

# ----------------------------------------------------------------------------


@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    def __init__(self,
                 # Intermediate latent (W) dimensionality.
                 w_dim,
                 img_resolution,  # Output image resolution.
                 img_channels,  # Number of color channels.
                 square,
                 # Overall multiplier for the number of channels.
                 channel_base=32768,
                 # Maximum number of channels in any layer.
                 channel_max=512,
                 # Use FP16 for the N highest resolutions.
                 num_fp16_res=0,
                 **block_kwargs,  # Arguments for SynthesisBlock.
                 ):
        assert img_resolution >= 4 and img_resolution & (
            img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.square = square
        self.block_resolutions = [
            2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {res: min(channel_base // res, channel_max)
                         for res in self.block_resolutions}
        fp16_resolution = max(
            2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                                   img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, square=square, **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)

    def forward(self, ws, return_feature=False, **block_kwargs):
        block_ws = []
        features = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                block_ws.append(
                    ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
|
585 |
-
w_idx += block.num_conv
|
586 |
-
|
587 |
-
x = img = None
|
588 |
-
for res, cur_ws in zip(self.block_resolutions, block_ws):
|
589 |
-
block = getattr(self, f'b{res}')
|
590 |
-
x, img = block(x, img, cur_ws, **block_kwargs)
|
591 |
-
features.append(x)
|
592 |
-
if return_feature:
|
593 |
-
return img, features
|
594 |
-
else:
|
595 |
-
return img
|
596 |
-
|
597 |
-
# ----------------------------------------------------------------------------
|
598 |
-
|
599 |
-
|
600 |
-
@persistence.persistent_class
|
601 |
-
class Generator(torch.nn.Module):
|
602 |
-
def __init__(self,
|
603 |
-
z_dim, # Input latent (Z) dimensionality.
|
604 |
-
# Conditioning label (C) dimensionality.
|
605 |
-
c_dim,
|
606 |
-
# Intermediate latent (W) dimensionality.
|
607 |
-
w_dim,
|
608 |
-
img_resolution, # Output resolution.
|
609 |
-
square,
|
610 |
-
img_channels, # Number of output color channels.
|
611 |
-
mapping_kwargs={}, # Arguments for MappingNetwork.
|
612 |
-
synthesis_kwargs={}, # Arguments for SynthesisNetwork.
|
613 |
-
padding=False
|
614 |
-
):
|
615 |
-
super().__init__()
|
616 |
-
self.z_dim = z_dim
|
617 |
-
self.c_dim = c_dim
|
618 |
-
self.w_dim = w_dim
|
619 |
-
self.square = square
|
620 |
-
self.img_resolution = img_resolution
|
621 |
-
self.img_channels = img_channels
|
622 |
-
self.padding = padding
|
623 |
-
self.synthesis = SynthesisNetwork(
|
624 |
-
w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, square=square, **synthesis_kwargs)
|
625 |
-
self.num_ws = self.synthesis.num_ws
|
626 |
-
self.mapping = MappingNetwork(
|
627 |
-
z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
|
628 |
-
|
629 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, input_is_w=False, return_feature=False, **synthesis_kwargs):
|
630 |
-
if input_is_w:
|
631 |
-
ws = z
|
632 |
-
if ws.dim() == 2:
|
633 |
-
ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
|
634 |
-
else:
|
635 |
-
ws = self.mapping(z, c, truncation_psi=truncation_psi,
|
636 |
-
truncation_cutoff=truncation_cutoff)
|
637 |
-
img = self.synthesis(
|
638 |
-
ws, return_feature=return_feature, **synthesis_kwargs)
|
639 |
-
if return_feature:
|
640 |
-
img, feature = img
|
641 |
-
if self.padding:
|
642 |
-
pad = (img.size(2) - img.size(3)) // 2
|
643 |
-
img = torch.nn.functional.pad(img, (pad, pad), "constant", 1)
|
644 |
-
if return_feature:
|
645 |
-
for i, feat in enumerate(feature):
|
646 |
-
pad = (feat.size(2) - feat.size(3)) // 2
|
647 |
-
feature[i] = torch.nn.functional.pad(
|
648 |
-
feat, (pad, pad), "constant", 0)
|
649 |
-
if return_feature:
|
650 |
-
return img, feature
|
651 |
-
else:
|
652 |
-
return img
|
653 |
-
|
654 |
-
# ----------------------------------------------------------------------------
|
655 |
-
|
656 |
-
|
657 |
-
@persistence.persistent_class
|
658 |
-
class DiscriminatorBlock(torch.nn.Module):
|
659 |
-
def __init__(self,
|
660 |
-
# Number of input channels, 0 = first block.
|
661 |
-
in_channels,
|
662 |
-
# Number of intermediate channels.
|
663 |
-
tmp_channels,
|
664 |
-
# Number of output channels.
|
665 |
-
out_channels,
|
666 |
-
# Resolution of this block.
|
667 |
-
resolution,
|
668 |
-
# Number of input color channels.
|
669 |
-
img_channels,
|
670 |
-
# Index of the first layer.
|
671 |
-
first_layer_idx,
|
672 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
673 |
-
architecture='resnet',
|
674 |
-
# Activation function: 'relu', 'lrelu', etc.
|
675 |
-
activation='lrelu',
|
676 |
-
# Low-pass filter to apply when resampling activations.
|
677 |
-
resample_filter=[1, 3, 3, 1],
|
678 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
679 |
-
conv_clamp=None,
|
680 |
-
use_fp16=False, # Use FP16 for this block?
|
681 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
682 |
-
# Freeze-D: Number of layers to freeze.
|
683 |
-
freeze_layers=0,
|
684 |
-
square=False,
|
685 |
-
):
|
686 |
-
assert in_channels in [0, tmp_channels]
|
687 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
688 |
-
super().__init__()
|
689 |
-
self.in_channels = in_channels
|
690 |
-
self.resolution = resolution
|
691 |
-
self.img_channels = img_channels
|
692 |
-
self.first_layer_idx = first_layer_idx
|
693 |
-
self.architecture = architecture
|
694 |
-
self.use_fp16 = use_fp16
|
695 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
696 |
-
self.register_buffer(
|
697 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
698 |
-
self.square = square
|
699 |
-
|
700 |
-
self.num_layers = 0
|
701 |
-
|
702 |
-
def trainable_gen():
|
703 |
-
while True:
|
704 |
-
layer_idx = self.first_layer_idx + self.num_layers
|
705 |
-
trainable = (layer_idx >= freeze_layers)
|
706 |
-
self.num_layers += 1
|
707 |
-
yield trainable
|
708 |
-
trainable_iter = trainable_gen()
|
709 |
-
|
710 |
-
if in_channels == 0 or architecture == 'skip':
|
711 |
-
self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
|
712 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
713 |
-
|
714 |
-
self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
|
715 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
716 |
-
|
717 |
-
self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
|
718 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
|
719 |
-
|
720 |
-
if architecture == 'resnet':
|
721 |
-
self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
|
722 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
|
723 |
-
|
724 |
-
def forward(self, x, img, force_fp32=False):
|
725 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
726 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
727 |
-
|
728 |
-
# Input.
|
729 |
-
if x is not None:
|
730 |
-
if self.square:
|
731 |
-
misc.assert_shape(
|
732 |
-
x, [None, self.in_channels, self.resolution, self.resolution])
|
733 |
-
else:
|
734 |
-
misc.assert_shape(
|
735 |
-
x, [None, self.in_channels, self.resolution, self.resolution // 2])
|
736 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
737 |
-
|
738 |
-
# FromRGB.
|
739 |
-
if self.in_channels == 0 or self.architecture == 'skip':
|
740 |
-
if self.square:
|
741 |
-
misc.assert_shape(
|
742 |
-
img, [None, self.img_channels, self.resolution, self.resolution])
|
743 |
-
else:
|
744 |
-
misc.assert_shape(
|
745 |
-
img, [None, self.img_channels, self.resolution, self.resolution // 2])
|
746 |
-
img = img.to(dtype=dtype, memory_format=memory_format)
|
747 |
-
y = self.fromrgb(img)
|
748 |
-
x = x + y if x is not None else y
|
749 |
-
img = upfirdn2d.downsample2d(
|
750 |
-
img, self.resample_filter) if self.architecture == 'skip' else None
|
751 |
-
|
752 |
-
# Main layers.
|
753 |
-
if self.architecture == 'resnet':
|
754 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
755 |
-
x = self.conv0(x)
|
756 |
-
x = self.conv1(x, gain=np.sqrt(0.5))
|
757 |
-
x = y.add_(x)
|
758 |
-
else:
|
759 |
-
x = self.conv0(x)
|
760 |
-
x = self.conv1(x)
|
761 |
-
|
762 |
-
assert x.dtype == dtype
|
763 |
-
return x, img
|
764 |
-
|
765 |
-
# ----------------------------------------------------------------------------
|
766 |
-
|
767 |
-
|
768 |
-
@persistence.persistent_class
|
769 |
-
class MinibatchStdLayer(torch.nn.Module):
|
770 |
-
def __init__(self, group_size, num_channels=1):
|
771 |
-
super().__init__()
|
772 |
-
self.group_size = group_size
|
773 |
-
self.num_channels = num_channels
|
774 |
-
|
775 |
-
def forward(self, x):
|
776 |
-
N, C, H, W = x.shape
|
777 |
-
with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
|
778 |
-
G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
|
779 |
-
N)) if self.group_size is not None else N
|
780 |
-
F = self.num_channels
|
781 |
-
c = C // F
|
782 |
-
|
783 |
-
# [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
|
784 |
-
y = x.reshape(G, -1, F, c, H, W)
|
785 |
-
# [GnFcHW] Subtract mean over group.
|
786 |
-
y = y - y.mean(dim=0)
|
787 |
-
# [nFcHW] Calc variance over group.
|
788 |
-
y = y.square().mean(dim=0)
|
789 |
-
y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
|
790 |
-
# [nF] Take average over channels and pixels.
|
791 |
-
y = y.mean(dim=[2, 3, 4])
|
792 |
-
y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
|
793 |
-
# [NFHW] Replicate over group and pixels.
|
794 |
-
y = y.repeat(G, 1, H, W)
|
795 |
-
# [NCHW] Append to input as new channels.
|
796 |
-
x = torch.cat([x, y], dim=1)
|
797 |
-
return x
|
798 |
-
|
799 |
-
# ----------------------------------------------------------------------------
|
800 |
-
|
801 |
-
|
802 |
-
@persistence.persistent_class
|
803 |
-
class DiscriminatorEpilogue(torch.nn.Module):
|
804 |
-
def __init__(self,
|
805 |
-
in_channels, # Number of input channels.
|
806 |
-
# Dimensionality of mapped conditioning label, 0 = no label.
|
807 |
-
cmap_dim,
|
808 |
-
resolution, # Resolution of this block.
|
809 |
-
# Number of input color channels.
|
810 |
-
img_channels,
|
811 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
812 |
-
architecture='resnet',
|
813 |
-
# Group size for the minibatch standard deviation layer, None = entire minibatch.
|
814 |
-
mbstd_group_size=4,
|
815 |
-
# Number of features for the minibatch standard deviation layer, 0 = disable.
|
816 |
-
mbstd_num_channels=1,
|
817 |
-
# Activation function: 'relu', 'lrelu', etc.
|
818 |
-
activation='lrelu',
|
819 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
820 |
-
conv_clamp=None,
|
821 |
-
square=False,
|
822 |
-
):
|
823 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
824 |
-
super().__init__()
|
825 |
-
self.in_channels = in_channels
|
826 |
-
self.cmap_dim = cmap_dim
|
827 |
-
self.resolution = resolution
|
828 |
-
self.img_channels = img_channels
|
829 |
-
self.architecture = architecture
|
830 |
-
self.square = square
|
831 |
-
|
832 |
-
if architecture == 'skip':
|
833 |
-
self.fromrgb = Conv2dLayer(
|
834 |
-
img_channels, in_channels, kernel_size=1, activation=activation)
|
835 |
-
self.mbstd = MinibatchStdLayer(
|
836 |
-
group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
|
837 |
-
self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
|
838 |
-
kernel_size=3, activation=activation, conv_clamp=conv_clamp)
|
839 |
-
|
840 |
-
if self.square:
|
841 |
-
self.fc = FullyConnectedLayer(
|
842 |
-
in_channels * (resolution ** 2), in_channels, activation=activation)
|
843 |
-
else:
|
844 |
-
self.fc = FullyConnectedLayer(
|
845 |
-
in_channels * (resolution ** 2 // 2), in_channels, activation=activation)
|
846 |
-
|
847 |
-
self.out = FullyConnectedLayer(
|
848 |
-
in_channels, 1 if cmap_dim == 0 else cmap_dim)
|
849 |
-
|
850 |
-
def forward(self, x, img, cmap, force_fp32=False):
|
851 |
-
if self.square:
|
852 |
-
misc.assert_shape(x, [None, self.in_channels,
|
853 |
-
self.resolution, self.resolution])
|
854 |
-
else:
|
855 |
-
misc.assert_shape(
|
856 |
-
x, [None, self.in_channels, self.resolution, self.resolution // 2]) # [NCHW]
|
857 |
-
_ = force_fp32 # unused
|
858 |
-
dtype = torch.float32
|
859 |
-
memory_format = torch.contiguous_format
|
860 |
-
|
861 |
-
# FromRGB.
|
862 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
863 |
-
if self.architecture == 'skip':
|
864 |
-
if self.square:
|
865 |
-
misc.assert_shape(
|
866 |
-
img, [None, self.img_channels, self.resolution, self.resolution])
|
867 |
-
else:
|
868 |
-
misc.assert_shape(
|
869 |
-
img, [None, self.img_channels, self.resolution, self.resolution // 2])
|
870 |
-
img = img.to(dtype=dtype, memory_format=memory_format)
|
871 |
-
x = x + self.fromrgb(img)
|
872 |
-
|
873 |
-
# Main layers.
|
874 |
-
if self.mbstd is not None:
|
875 |
-
x = self.mbstd(x)
|
876 |
-
x = self.conv(x)
|
877 |
-
x = self.fc(x.flatten(1))
|
878 |
-
x = self.out(x)
|
879 |
-
|
880 |
-
# Conditioning.
|
881 |
-
if self.cmap_dim > 0:
|
882 |
-
misc.assert_shape(cmap, [None, self.cmap_dim])
|
883 |
-
x = (x * cmap).sum(dim=1, keepdim=True) * \
|
884 |
-
(1 / np.sqrt(self.cmap_dim))
|
885 |
-
|
886 |
-
assert x.dtype == dtype
|
887 |
-
return x
|
888 |
-
|
889 |
-
# ----------------------------------------------------------------------------
|
890 |
-
|
891 |
-
|
892 |
-
@persistence.persistent_class
|
893 |
-
class Discriminator(torch.nn.Module):
|
894 |
-
def __init__(self,
|
895 |
-
# Conditioning label (C) dimensionality.
|
896 |
-
c_dim,
|
897 |
-
img_resolution, # Input resolution.
|
898 |
-
# Number of input color channels.
|
899 |
-
img_channels,
|
900 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
901 |
-
architecture='resnet',
|
902 |
-
# Overall multiplier for the number of channels.
|
903 |
-
channel_base=32768,
|
904 |
-
# Maximum number of channels in any layer.
|
905 |
-
channel_max=512,
|
906 |
-
# Use FP16 for the N highest resolutions.
|
907 |
-
num_fp16_res=0,
|
908 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
909 |
-
conv_clamp=None,
|
910 |
-
# Dimensionality of mapped conditioning label, None = default.
|
911 |
-
cmap_dim=None,
|
912 |
-
square=False, # default for rectangle images
|
913 |
-
block_kwargs={}, # Arguments for DiscriminatorBlock.
|
914 |
-
mapping_kwargs={}, # Arguments for MappingNetwork.
|
915 |
-
# Arguments for DiscriminatorEpilogue.
|
916 |
-
epilogue_kwargs={},
|
917 |
-
):
|
918 |
-
super().__init__()
|
919 |
-
self.c_dim = c_dim
|
920 |
-
self.img_resolution = img_resolution
|
921 |
-
self.img_resolution_log2 = int(np.log2(img_resolution))
|
922 |
-
self.img_channels = img_channels
|
923 |
-
self.square = square
|
924 |
-
self.block_resolutions = [
|
925 |
-
2 ** i for i in range(self.img_resolution_log2, 2, -1)]
|
926 |
-
channels_dict = {res: min(channel_base // res, channel_max)
|
927 |
-
for res in self.block_resolutions + [4]}
|
928 |
-
fp16_resolution = max(
|
929 |
-
2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
|
930 |
-
|
931 |
-
if cmap_dim is None:
|
932 |
-
cmap_dim = channels_dict[4]
|
933 |
-
if c_dim == 0:
|
934 |
-
cmap_dim = 0
|
935 |
-
|
936 |
-
common_kwargs = dict(img_channels=img_channels,
|
937 |
-
architecture=architecture, conv_clamp=conv_clamp)
|
938 |
-
cur_layer_idx = 0
|
939 |
-
for res in self.block_resolutions:
|
940 |
-
in_channels = channels_dict[res] if res < img_resolution else 0
|
941 |
-
tmp_channels = channels_dict[res]
|
942 |
-
out_channels = channels_dict[res // 2]
|
943 |
-
use_fp16 = (res >= fp16_resolution)
|
944 |
-
block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
|
945 |
-
first_layer_idx=cur_layer_idx, use_fp16=use_fp16, square=square, **block_kwargs, **common_kwargs)
|
946 |
-
setattr(self, f'b{res}', block)
|
947 |
-
cur_layer_idx += block.num_layers
|
948 |
-
if c_dim > 0:
|
949 |
-
self.mapping = MappingNetwork(
|
950 |
-
z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
|
951 |
-
self.b4 = DiscriminatorEpilogue(
|
952 |
-
channels_dict[4], cmap_dim=cmap_dim, resolution=4, square=square, **epilogue_kwargs, **common_kwargs)
|
953 |
-
|
954 |
-
def forward(self, img, c, **block_kwargs):
|
955 |
-
x = None
|
956 |
-
for res in self.block_resolutions:
|
957 |
-
block = getattr(self, f'b{res}')
|
958 |
-
x, img = block(x, img, **block_kwargs)
|
959 |
-
|
960 |
-
cmap = None
|
961 |
-
if self.c_dim > 0:
|
962 |
-
cmap = self.mapping(None, c)
|
963 |
-
x = self.b4(x, img, cmap)
|
964 |
-
return x
|
965 |
-
|
966 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/openai-whisper-live-transcribe/app.py
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
import whisper
|
2 |
-
import gradio as gr
|
3 |
-
|
4 |
-
model = whisper.load_model("small")
|
5 |
-
|
6 |
-
def transcribe(audio):
|
7 |
-
|
8 |
-
#time.sleep(3)
|
9 |
-
# load audio and pad/trim it to fit 30 seconds
|
10 |
-
audio = whisper.load_audio(audio)
|
11 |
-
audio = whisper.pad_or_trim(audio)
|
12 |
-
|
13 |
-
# make log-Mel spectrogram and move to the same device as the model
|
14 |
-
mel = whisper.log_mel_spectrogram(audio).to(model.device)
|
15 |
-
|
16 |
-
# detect the spoken language
|
17 |
-
_, probs = model.detect_language(mel)
|
18 |
-
print(f"Detected language: {max(probs, key=probs.get)}")
|
19 |
-
|
20 |
-
# decode the audio
|
21 |
-
options = whisper.DecodingOptions(fp16 = False)
|
22 |
-
result = whisper.decode(model, mel, options)
|
23 |
-
return result.text
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
gr.Interface(
|
28 |
-
title = 'OpenAI Whisper ASR Gradio Web UI',
|
29 |
-
fn=transcribe,
|
30 |
-
inputs=[
|
31 |
-
gr.inputs.Audio(source="microphone", type="filepath")
|
32 |
-
],
|
33 |
-
outputs=[
|
34 |
-
"textbox"
|
35 |
-
],
|
36 |
-
live=True).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/paint_by_example.md
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# PaintByExample
|
14 |
-
|
15 |
-
[Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://huggingface.co/papers/2211.13227) is by Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen.
|
16 |
-
|
17 |
-
The abstract from the paper is:
|
18 |
-
|
19 |
-
*Language-guided image editing has achieved great success recently. In this paper, for the first time, we investigate exemplar-guided image editing for more precise control. We achieve this goal by leveraging self-supervised training to disentangle and re-organize the source image and the exemplar. However, the naive approach will cause obvious fusing artifacts. We carefully analyze it and propose an information bottleneck and strong augmentations to avoid the trivial solution of directly copying and pasting the exemplar image. Meanwhile, to ensure the controllability of the editing process, we design an arbitrary shape mask for the exemplar image and leverage the classifier-free guidance to increase the similarity to the exemplar image. The whole framework involves a single forward of the diffusion model without any iterative optimization. We demonstrate that our method achieves an impressive performance and enables controllable editing on in-the-wild images with high fidelity.*
|
20 |
-
|
21 |
-
The original codebase can be found at [Fantasy-Studio/Paint-by-Example](https://github.com/Fantasy-Studio/Paint-by-Example), and you can try it out in a [demo](https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example).
|
22 |
-
|
23 |
-
## Tips
|
24 |
-
|
25 |
-
PaintByExample is supported by the official [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example) checkpoint. The checkpoint is warm-started from [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to inpaint partly masked images conditioned on example and reference images.
|
26 |
-
|
27 |
-
<Tip>
|
28 |
-
|
29 |
-
Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
|
30 |
-
|
31 |
-
</Tip>
|
32 |
-
|
33 |
-
## PaintByExamplePipeline
|
34 |
-
[[autodoc]] PaintByExamplePipeline
|
35 |
-
- all
|
36 |
-
- __call__
|
37 |
-
|
38 |
-
## StableDiffusionPipelineOutput
|
39 |
-
[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
DELETED
@@ -1,296 +0,0 @@
|
|
1 |
-
from typing import Optional
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from transformers import GPT2Config, GPT2LMHeadModel
|
7 |
-
from transformers.modeling_utils import ModuleUtilsMixin
|
8 |
-
|
9 |
-
from ...configuration_utils import ConfigMixin, register_to_config
|
10 |
-
from ...models import ModelMixin
|
11 |
-
|
12 |
-
|
13 |
-
# Modified from ClipCaptionModel in https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py
|
14 |
-
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
|
15 |
-
"""
|
16 |
-
Text decoder model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is used to
|
17 |
-
generate text from the UniDiffuser image-text embedding.
|
18 |
-
|
19 |
-
Parameters:
|
20 |
-
prefix_length (`int`):
|
21 |
-
Max number of prefix tokens that will be supplied to the model.
|
22 |
-
prefix_inner_dim (`int`):
|
23 |
-
The hidden size of the the incoming prefix embeddings. For UniDiffuser, this would be the hidden dim of the
|
24 |
-
CLIP text encoder.
|
25 |
-
prefix_hidden_dim (`int`, *optional*):
|
26 |
-
Hidden dim of the MLP if we encode the prefix.
|
27 |
-
vocab_size (`int`, *optional*, defaults to 50257):
|
28 |
-
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
|
29 |
-
`inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
|
30 |
-
n_positions (`int`, *optional*, defaults to 1024):
|
31 |
-
The maximum sequence length that this model might ever be used with. Typically set this to something large
|
32 |
-
just in case (e.g., 512 or 1024 or 2048).
|
33 |
-
n_embd (`int`, *optional*, defaults to 768):
|
34 |
-
Dimensionality of the embeddings and hidden states.
|
35 |
-
n_layer (`int`, *optional*, defaults to 12):
|
36 |
-
Number of hidden layers in the Transformer encoder.
|
37 |
-
n_head (`int`, *optional*, defaults to 12):
|
38 |
-
Number of attention heads for each attention layer in the Transformer encoder.
|
39 |
-
n_inner (`int`, *optional*, defaults to None):
|
40 |
-
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
|
41 |
-
activation_function (`str`, *optional*, defaults to `"gelu"`):
|
42 |
-
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
|
43 |
-
resid_pdrop (`float`, *optional*, defaults to 0.1):
|
44 |
-
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
45 |
-
embd_pdrop (`float`, *optional*, defaults to 0.1):
|
46 |
-
The dropout ratio for the embeddings.
|
47 |
-
attn_pdrop (`float`, *optional*, defaults to 0.1):
|
48 |
-
The dropout ratio for the attention.
|
49 |
-
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
|
50 |
-
The epsilon to use in the layer normalization layers.
|
51 |
-
initializer_range (`float`, *optional*, defaults to 0.02):
|
52 |
-
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
53 |
-
scale_attn_weights (`bool`, *optional*, defaults to `True`):
|
54 |
-
Scale attention weights by dividing by sqrt(hidden_size)..
|
55 |
-
use_cache (`bool`, *optional*, defaults to `True`):
|
56 |
-
Whether or not the model should return the last key/values attentions (not used by all models).
|
57 |
-
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
|
58 |
-
Whether to additionally scale attention weights by `1 / layer_idx + 1`.
|
59 |
-
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
|
60 |
-
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
|
61 |
-
dot-product/softmax to float() when training with mixed precision.
|
62 |
-
"""
|
63 |
-
|
64 |
-
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
|
65 |
-
|
66 |
-
@register_to_config
|
67 |
-
def __init__(
|
68 |
-
self,
|
69 |
-
prefix_length: int,
|
70 |
-
prefix_inner_dim: int,
|
71 |
-
prefix_hidden_dim: Optional[int] = None,
|
72 |
-
vocab_size: int = 50257, # Start of GPT2 config args
|
73 |
-
n_positions: int = 1024,
|
74 |
-
n_embd: int = 768,
|
75 |
-
n_layer: int = 12,
|
76 |
-
n_head: int = 12,
|
77 |
-
n_inner: Optional[int] = None,
|
78 |
-
activation_function: str = "gelu_new",
|
79 |
-
resid_pdrop: float = 0.1,
|
80 |
-
embd_pdrop: float = 0.1,
|
81 |
-
attn_pdrop: float = 0.1,
|
82 |
-
layer_norm_epsilon: float = 1e-5,
|
83 |
-
initializer_range: float = 0.02,
|
84 |
-
scale_attn_weights: bool = True,
|
85 |
-
use_cache: bool = True,
|
86 |
-
scale_attn_by_inverse_layer_idx: bool = False,
|
87 |
-
reorder_and_upcast_attn: bool = False,
|
88 |
-
):
|
89 |
-
super().__init__()
|
90 |
-
|
91 |
-
self.prefix_length = prefix_length
|
92 |
-
|
93 |
-
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
|
94 |
-
raise ValueError(
|
95 |
-
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
|
96 |
-
f" `n_embd`: {n_embd} are not equal."
|
97 |
-
)
|
98 |
-
|
99 |
-
self.prefix_inner_dim = prefix_inner_dim
|
100 |
-
self.prefix_hidden_dim = prefix_hidden_dim
|
101 |
-
|
102 |
-
self.encode_prefix = (
|
103 |
-
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
|
104 |
-
if self.prefix_hidden_dim is not None
|
105 |
-
else nn.Identity()
|
106 |
-
)
|
107 |
-
self.decode_prefix = (
|
108 |
-
nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
|
109 |
-
)
|
110 |
-
|
111 |
-
gpt_config = GPT2Config(
|
112 |
-
vocab_size=vocab_size,
|
113 |
-
n_positions=n_positions,
|
114 |
-
n_embd=n_embd,
|
115 |
-
n_layer=n_layer,
|
116 |
-
n_head=n_head,
|
117 |
-
n_inner=n_inner,
|
118 |
-
activation_function=activation_function,
|
119 |
-
resid_pdrop=resid_pdrop,
|
120 |
-
embd_pdrop=embd_pdrop,
|
121 |
-
attn_pdrop=attn_pdrop,
|
122 |
-
layer_norm_epsilon=layer_norm_epsilon,
|
123 |
-
initializer_range=initializer_range,
|
124 |
-
scale_attn_weights=scale_attn_weights,
|
125 |
-
use_cache=use_cache,
|
126 |
-
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
|
127 |
-
reorder_and_upcast_attn=reorder_and_upcast_attn,
|
128 |
-
)
|
129 |
-
self.transformer = GPT2LMHeadModel(gpt_config)
|
130 |
-
|
131 |
-
def forward(
|
132 |
-
self,
|
133 |
-
input_ids: torch.Tensor,
|
134 |
-
prefix_embeds: torch.Tensor,
|
135 |
-
attention_mask: Optional[torch.Tensor] = None,
|
136 |
-
labels: Optional[torch.Tensor] = None,
|
137 |
-
):
|
138 |
-
"""
|
139 |
-
Args:
|
140 |
-
input_ids (`torch.Tensor` of shape `(N, max_seq_len)`):
|
141 |
-
Text tokens to use for inference.
|
142 |
-
prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`):
|
143 |
-
Prefix embedding to preprend to the embedded tokens.
|
144 |
-
attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len, 768)`, *optional*):
|
145 |
-
Attention mask for the prefix embedding.
|
146 |
-
labels (`torch.Tensor`, *optional*):
|
147 |
-
Labels to use for language modeling.
|
148 |
-
"""
|
149 |
-
embedding_text = self.transformer.transformer.wte(input_ids)
|
150 |
-
hidden = self.encode_prefix(prefix_embeds)
|
151 |
-
prefix_embeds = self.decode_prefix(hidden)
|
152 |
-
embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
|
153 |
-
|
154 |
-
if labels is not None:
|
155 |
-
dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
|
156 |
-
labels = torch.cat((dummy_token, input_ids), dim=1)
|
157 |
-
out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
|
158 |
-
if self.prefix_hidden_dim is not None:
|
159 |
-
return out, hidden
|
160 |
-
else:
|
161 |
-
return out
|
162 |
-
|
163 |
-
def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
|
164 |
-
return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
|
165 |
-
|
166 |
-
def encode(self, prefix):
|
167 |
-
return self.encode_prefix(prefix)
|
168 |
-
|
169 |
-
@torch.no_grad()
|
170 |
-
def generate_captions(self, features, eos_token_id, device):
|
171 |
-
"""
|
172 |
-
Generate captions given text embedding features. Returns list[L].
|
173 |
-
|
174 |
-
Args:
|
175 |
-
features (`torch.Tensor` of shape `(B, L, D)`):
|
176 |
-
Text embedding features to generate captions from.
|
177 |
-
eos_token_id (`int`):
|
178 |
-
The token ID of the EOS token for the text decoder model.
|
179 |
-
device:
|
180 |
-
Device to perform text generation on.
|
181 |
-
|
182 |
-
Returns:
|
183 |
-
`List[str]`: A list of strings generated from the decoder model.
|
184 |
-
"""
|
185 |
-
|
186 |
-
features = torch.split(features, 1, dim=0)
|
187 |
-
generated_tokens = []
|
188 |
-
generated_seq_lengths = []
|
189 |
-
for feature in features:
|
190 |
-
feature = self.decode_prefix(feature.to(device)) # back to the clip feature
|
191 |
-
# Only support beam search for now
|
192 |
-
output_tokens, seq_lengths = self.generate_beam(
|
193 |
-
input_embeds=feature, device=device, eos_token_id=eos_token_id
|
194 |
-
)
|
195 |
-
generated_tokens.append(output_tokens[0])
|
196 |
-
generated_seq_lengths.append(seq_lengths[0])
|
197 |
-
generated_tokens = torch.stack(generated_tokens)
|
198 |
-
generated_seq_lengths = torch.stack(generated_seq_lengths)
|
199 |
-
return generated_tokens, generated_seq_lengths
|
200 |
-
|
201 |
-
@torch.no_grad()
|
202 |
-
def generate_beam(
|
203 |
-
self,
|
204 |
-
input_ids=None,
|
205 |
-
input_embeds=None,
|
206 |
-
device=None,
|
207 |
-
beam_size: int = 5,
|
208 |
-
entry_length: int = 67,
|
209 |
-
temperature: float = 1.0,
|
210 |
-
eos_token_id: Optional[int] = None,
|
211 |
-
):
|
212 |
-
"""
|
213 |
-
Generates text using the given tokenizer and text prompt or token embedding via beam search. This
|
214 |
-
implementation is based on the beam search implementation from the [original UniDiffuser
|
215 |
-
code](https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py#L89).
|
216 |
-
|
217 |
-
Args:
|
218 |
-
eos_token_id (`int`, *optional*):
|
219 |
-
The token ID of the EOS token for the text decoder model.
|
220 |
-
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
|
221 |
-
Tokenizer indices of input sequence tokens in the vocabulary. One of `input_ids` and `input_embeds`
|
222 |
-
must be supplied.
|
223 |
-
input_embeds (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
|
224 |
-
An embedded representation to directly pass to the transformer as a prefix for beam search. One of
|
225 |
-
`input_ids` and `input_embeds` must be supplied.
|
226 |
-
device:
|
227 |
-
The device to perform beam search on.
|
228 |
-
beam_size (`int`, *optional*, defaults to `5`):
|
229 |
-
The number of best states to store during beam search.
|
230 |
-
entry_length (`int`, *optional*, defaults to `67`):
|
231 |
-
The number of iterations to run beam search.
|
232 |
-
temperature (`float`, *optional*, defaults to 1.0):
|
233 |
-
The temperature to use when performing the softmax over logits from the decoding model.
|
234 |
-
|
235 |
-
Returns:
|
236 |
-
`Tuple(torch.Tensor, torch.Tensor)`: A tuple of tensors where the first element is a tensor of generated
|
237 |
-
token sequences sorted by score in descending order, and the second element is the sequence lengths
|
238 |
-
corresponding to those sequences.
|
239 |
-
"""
|
240 |
-
# Generates text until stop_token is reached using beam search with the desired beam size.
|
241 |
-
stop_token_index = eos_token_id
|
242 |
-
tokens = None
|
243 |
-
scores = None
|
244 |
-
seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
|
245 |
-
is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
|
246 |
-
|
247 |
-
if input_embeds is not None:
|
248 |
-
generated = input_embeds
|
249 |
-
else:
|
250 |
-
generated = self.transformer.transformer.wte(input_ids)
|
251 |
-
|
252 |
-
for i in range(entry_length):
|
253 |
-
outputs = self.transformer(inputs_embeds=generated)
|
254 |
-
logits = outputs.logits
|
255 |
-
logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
|
256 |
-
logits = logits.softmax(-1).log()
|
257 |
-
|
258 |
-
if scores is None:
|
259 |
-
scores, next_tokens = logits.topk(beam_size, -1)
|
260 |
-
generated = generated.expand(beam_size, *generated.shape[1:])
|
261 |
-
next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
|
262 |
-
if tokens is None:
|
263 |
-
tokens = next_tokens
|
264 |
-
else:
|
265 |
-
tokens = tokens.expand(beam_size, *tokens.shape[1:])
|
266 |
-
tokens = torch.cat((tokens, next_tokens), dim=1)
|
267 |
-
else:
|
268 |
-
logits[is_stopped] = -float(np.inf)
|
269 |
-
logits[is_stopped, 0] = 0
|
270 |
-
scores_sum = scores[:, None] + logits
|
271 |
-
seq_lengths[~is_stopped] += 1
|
272 |
-
scores_sum_average = scores_sum / seq_lengths[:, None]
|
273 |
-
scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
|
274 |
-
next_tokens_source = next_tokens // scores_sum.shape[1]
|
275 |
-
seq_lengths = seq_lengths[next_tokens_source]
|
276 |
-
next_tokens = next_tokens % scores_sum.shape[1]
|
277 |
-
next_tokens = next_tokens.unsqueeze(1)
|
278 |
-
tokens = tokens[next_tokens_source]
|
279 |
-
tokens = torch.cat((tokens, next_tokens), dim=1)
|
280 |
-
generated = generated[next_tokens_source]
|
281 |
-
scores = scores_sum_average * seq_lengths
|
282 |
-
is_stopped = is_stopped[next_tokens_source]
|
283 |
-
|
284 |
-
next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
|
285 |
-
generated = torch.cat((generated, next_token_embed), dim=1)
|
286 |
-
is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
|
287 |
-
if is_stopped.all():
|
288 |
-
break
|
289 |
-
|
290 |
-
scores = scores / seq_lengths
|
291 |
-
order = scores.argsort(descending=True)
|
292 |
-
# tokens tensors are already padded to max_seq_length
|
293 |
-
output_texts = [tokens[i] for i in order]
|
294 |
-
output_texts = torch.stack(output_texts, dim=0)
|
295 |
-
seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
|
296 |
-
return output_texts, seq_lengths
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/deprecation_utils.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
import inspect
|
2 |
-
import warnings
|
3 |
-
from typing import Any, Dict, Optional, Union
|
4 |
-
|
5 |
-
from packaging import version
|
6 |
-
|
7 |
-
|
8 |
-
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
|
9 |
-
from .. import __version__
|
10 |
-
|
11 |
-
deprecated_kwargs = take_from
|
12 |
-
values = ()
|
13 |
-
if not isinstance(args[0], tuple):
|
14 |
-
args = (args,)
|
15 |
-
|
16 |
-
for attribute, version_name, message in args:
|
17 |
-
if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
|
18 |
-
raise ValueError(
|
19 |
-
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
|
20 |
-
f" version {__version__} is >= {version_name}"
|
21 |
-
)
|
22 |
-
|
23 |
-
warning = None
|
24 |
-
if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
|
25 |
-
values += (deprecated_kwargs.pop(attribute),)
|
26 |
-
warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
|
27 |
-
elif hasattr(deprecated_kwargs, attribute):
|
28 |
-
values += (getattr(deprecated_kwargs, attribute),)
|
29 |
-
warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
|
30 |
-
elif deprecated_kwargs is None:
|
31 |
-
warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
|
32 |
-
|
33 |
-
if warning is not None:
|
34 |
-
warning = warning + " " if standard_warn else ""
|
35 |
-
warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
|
36 |
-
|
37 |
-
if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
|
38 |
-
call_frame = inspect.getouterframes(inspect.currentframe())[1]
|
39 |
-
filename = call_frame.filename
|
40 |
-
line_number = call_frame.lineno
|
41 |
-
function = call_frame.function
|
42 |
-
key, value = next(iter(deprecated_kwargs.items()))
|
43 |
-
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
|
44 |
-
|
45 |
-
if len(values) == 0:
|
46 |
-
return
|
47 |
-
elif len(values) == 1:
|
48 |
-
return values[0]
|
49 |
-
return values
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_lora_layers.py
DELETED
@@ -1,841 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import os
|
16 |
-
import tempfile
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import torch
|
21 |
-
import torch.nn as nn
|
22 |
-
import torch.nn.functional as F
|
23 |
-
from huggingface_hub.repocard import RepoCard
|
24 |
-
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
25 |
-
|
26 |
-
from diffusers import (
|
27 |
-
AutoencoderKL,
|
28 |
-
DDIMScheduler,
|
29 |
-
EulerDiscreteScheduler,
|
30 |
-
StableDiffusionPipeline,
|
31 |
-
StableDiffusionXLPipeline,
|
32 |
-
UNet2DConditionModel,
|
33 |
-
)
|
34 |
-
from diffusers.loaders import AttnProcsLayers, LoraLoaderMixin, PatchedLoraProjection, text_encoder_attn_modules
|
35 |
-
from diffusers.models.attention_processor import (
|
36 |
-
Attention,
|
37 |
-
AttnProcessor,
|
38 |
-
AttnProcessor2_0,
|
39 |
-
LoRAAttnProcessor,
|
40 |
-
LoRAAttnProcessor2_0,
|
41 |
-
LoRAXFormersAttnProcessor,
|
42 |
-
XFormersAttnProcessor,
|
43 |
-
)
|
44 |
-
from diffusers.utils import floats_tensor, torch_device
|
45 |
-
from diffusers.utils.testing_utils import require_torch_gpu, slow
|
46 |
-
|
47 |
-
|
48 |
-
def create_unet_lora_layers(unet: nn.Module):
|
49 |
-
lora_attn_procs = {}
|
50 |
-
for name in unet.attn_processors.keys():
|
51 |
-
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
|
52 |
-
if name.startswith("mid_block"):
|
53 |
-
hidden_size = unet.config.block_out_channels[-1]
|
54 |
-
elif name.startswith("up_blocks"):
|
55 |
-
block_id = int(name[len("up_blocks.")])
|
56 |
-
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
|
57 |
-
elif name.startswith("down_blocks"):
|
58 |
-
block_id = int(name[len("down_blocks.")])
|
59 |
-
hidden_size = unet.config.block_out_channels[block_id]
|
60 |
-
lora_attn_processor_class = (
|
61 |
-
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
|
62 |
-
)
|
63 |
-
lora_attn_procs[name] = lora_attn_processor_class(
|
64 |
-
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
|
65 |
-
)
|
66 |
-
unet_lora_layers = AttnProcsLayers(lora_attn_procs)
|
67 |
-
return lora_attn_procs, unet_lora_layers
|
68 |
-
|
69 |
-
|
70 |
-
def create_text_encoder_lora_attn_procs(text_encoder: nn.Module):
|
71 |
-
text_lora_attn_procs = {}
|
72 |
-
lora_attn_processor_class = (
|
73 |
-
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
|
74 |
-
)
|
75 |
-
for name, module in text_encoder_attn_modules(text_encoder):
|
76 |
-
if isinstance(module.out_proj, nn.Linear):
|
77 |
-
out_features = module.out_proj.out_features
|
78 |
-
elif isinstance(module.out_proj, PatchedLoraProjection):
|
79 |
-
out_features = module.out_proj.regular_linear_layer.out_features
|
80 |
-
else:
|
81 |
-
assert False, module.out_proj.__class__
|
82 |
-
|
83 |
-
text_lora_attn_procs[name] = lora_attn_processor_class(hidden_size=out_features, cross_attention_dim=None)
|
84 |
-
return text_lora_attn_procs
|
85 |
-
|
86 |
-
|
87 |
-
def create_text_encoder_lora_layers(text_encoder: nn.Module):
|
88 |
-
text_lora_attn_procs = create_text_encoder_lora_attn_procs(text_encoder)
|
89 |
-
text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
|
90 |
-
return text_encoder_lora_layers
|
91 |
-
|
92 |
-
|
93 |
-
def set_lora_weights(lora_attn_parameters, randn_weight=False):
|
94 |
-
with torch.no_grad():
|
95 |
-
for parameter in lora_attn_parameters:
|
96 |
-
if randn_weight:
|
97 |
-
parameter[:] = torch.randn_like(parameter)
|
98 |
-
else:
|
99 |
-
torch.zero_(parameter)
|
100 |
-
|
101 |
-
|
102 |
-
class LoraLoaderMixinTests(unittest.TestCase):
|
103 |
-
def get_dummy_components(self):
|
104 |
-
torch.manual_seed(0)
|
105 |
-
unet = UNet2DConditionModel(
|
106 |
-
block_out_channels=(32, 64),
|
107 |
-
layers_per_block=2,
|
108 |
-
sample_size=32,
|
109 |
-
in_channels=4,
|
110 |
-
out_channels=4,
|
111 |
-
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
|
112 |
-
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
|
113 |
-
cross_attention_dim=32,
|
114 |
-
)
|
115 |
-
scheduler = DDIMScheduler(
|
116 |
-
beta_start=0.00085,
|
117 |
-
beta_end=0.012,
|
118 |
-
beta_schedule="scaled_linear",
|
119 |
-
clip_sample=False,
|
120 |
-
set_alpha_to_one=False,
|
121 |
-
steps_offset=1,
|
122 |
-
)
|
123 |
-
torch.manual_seed(0)
|
124 |
-
vae = AutoencoderKL(
|
125 |
-
block_out_channels=[32, 64],
|
126 |
-
in_channels=3,
|
127 |
-
out_channels=3,
|
128 |
-
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
129 |
-
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
130 |
-
latent_channels=4,
|
131 |
-
)
|
132 |
-
text_encoder_config = CLIPTextConfig(
|
133 |
-
bos_token_id=0,
|
134 |
-
eos_token_id=2,
|
135 |
-
hidden_size=32,
|
136 |
-
intermediate_size=37,
|
137 |
-
layer_norm_eps=1e-05,
|
138 |
-
num_attention_heads=4,
|
139 |
-
num_hidden_layers=5,
|
140 |
-
pad_token_id=1,
|
141 |
-
vocab_size=1000,
|
142 |
-
)
|
143 |
-
text_encoder = CLIPTextModel(text_encoder_config)
|
144 |
-
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
|
145 |
-
|
146 |
-
unet_lora_attn_procs, unet_lora_layers = create_unet_lora_layers(unet)
|
147 |
-
text_encoder_lora_layers = create_text_encoder_lora_layers(text_encoder)
|
148 |
-
|
149 |
-
pipeline_components = {
|
150 |
-
"unet": unet,
|
151 |
-
"scheduler": scheduler,
|
152 |
-
"vae": vae,
|
153 |
-
"text_encoder": text_encoder,
|
154 |
-
"tokenizer": tokenizer,
|
155 |
-
"safety_checker": None,
|
156 |
-
"feature_extractor": None,
|
157 |
-
}
|
158 |
-
lora_components = {
|
159 |
-
"unet_lora_layers": unet_lora_layers,
|
160 |
-
"text_encoder_lora_layers": text_encoder_lora_layers,
|
161 |
-
"unet_lora_attn_procs": unet_lora_attn_procs,
|
162 |
-
}
|
163 |
-
return pipeline_components, lora_components
|
164 |
-
|
165 |
-
def get_dummy_inputs(self, with_generator=True):
|
166 |
-
batch_size = 1
|
167 |
-
sequence_length = 10
|
168 |
-
num_channels = 4
|
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    # copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb
    def get_dummy_tokens(self):
        max_seq_length = 77

        inputs = torch.randint(2, 56, size=(1, max_seq_length), generator=torch.manual_seed(0))

        prepared_inputs = {}
        prepared_inputs["input_ids"] = inputs
        return prepared_inputs

    def create_lora_weight_file(self, tmpdirname):
        _, lora_components = self.get_dummy_components()
        LoraLoaderMixin.save_lora_weights(
            save_directory=tmpdirname,
            unet_lora_layers=lora_components["unet_lora_layers"],
            text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
        )
        self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))

    def test_lora_save_load(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        original_images = sd_pipe(**pipeline_inputs).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            LoraLoaderMixin.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice)))

    def test_lora_save_load_safetensors(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        original_images = sd_pipe(**pipeline_inputs).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            LoraLoaderMixin.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
                safe_serialization=True,
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice)))

    def test_lora_save_load_legacy(self):
        pipeline_components, lora_components = self.get_dummy_components()
        unet_lora_attn_procs = lora_components["unet_lora_attn_procs"]
        sd_pipe = StableDiffusionPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        original_images = sd_pipe(**pipeline_inputs).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            unet = sd_pipe.unet
            unet.set_attn_processor(unet_lora_attn_procs)
            unet.save_attn_procs(tmpdirname)
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice)))

    def test_text_encoder_lora_monkey_patch(self):
        pipeline_components, _ = self.get_dummy_components()
        pipe = StableDiffusionPipeline(**pipeline_components)

        dummy_tokens = self.get_dummy_tokens()

        # inference without lora
        outputs_without_lora = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_without_lora.shape == (1, 77, 32)

        # monkey patch
        params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale)

        set_lora_weights(params, randn_weight=False)

        # inference with lora
        outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_with_lora.shape == (1, 77, 32)

        assert torch.allclose(
            outputs_without_lora, outputs_with_lora
        ), "lora_up_weight are all zero, so the lora outputs should be the same to without lora outputs"

        # create lora_attn_procs with randn up.weights
        create_text_encoder_lora_attn_procs(pipe.text_encoder)

        # monkey patch
        params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale)

        set_lora_weights(params, randn_weight=True)

        # inference with lora
        outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_with_lora.shape == (1, 77, 32)

        assert not torch.allclose(
            outputs_without_lora, outputs_with_lora
        ), "lora_up_weight are not zero, so the lora outputs should be different to without lora outputs"

    def test_text_encoder_lora_remove_monkey_patch(self):
        pipeline_components, _ = self.get_dummy_components()
        pipe = StableDiffusionPipeline(**pipeline_components)

        dummy_tokens = self.get_dummy_tokens()

        # inference without lora
        outputs_without_lora = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_without_lora.shape == (1, 77, 32)

        # monkey patch
        params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale)

        set_lora_weights(params, randn_weight=True)

        # inference with lora
        outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_with_lora.shape == (1, 77, 32)

        assert not torch.allclose(
            outputs_without_lora, outputs_with_lora
        ), "lora outputs should be different to without lora outputs"

        # remove monkey patch
        pipe._remove_text_encoder_monkey_patch()

        # inference with removed lora
        outputs_without_lora_removed = pipe.text_encoder(**dummy_tokens)[0]
        assert outputs_without_lora_removed.shape == (1, 77, 32)

        assert torch.allclose(
            outputs_without_lora, outputs_without_lora_removed
        ), "remove lora monkey patch should restore the original outputs"

    def test_text_encoder_lora_scale(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        with tempfile.TemporaryDirectory() as tmpdirname:
            LoraLoaderMixin.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        lora_images_with_scale = sd_pipe(**pipeline_inputs, cross_attention_kwargs={"scale": 0.5}).images
        lora_image_with_scale_slice = lora_images_with_scale[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(
            torch.allclose(torch.from_numpy(lora_image_slice), torch.from_numpy(lora_image_with_scale_slice))
        )

    def test_lora_unet_attn_processors(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.create_lora_weight_file(tmpdirname)

            pipeline_components, _ = self.get_dummy_components()
            sd_pipe = StableDiffusionPipeline(**pipeline_components)
            sd_pipe = sd_pipe.to(torch_device)
            sd_pipe.set_progress_bar_config(disable=None)

            # check if vanilla attention processors are used
            for _, module in sd_pipe.unet.named_modules():
                if isinstance(module, Attention):
                    self.assertIsInstance(module.processor, (AttnProcessor, AttnProcessor2_0))

            # load LoRA weight file
            sd_pipe.load_lora_weights(tmpdirname)

            # check if lora attention processors are used
            for _, module in sd_pipe.unet.named_modules():
                if isinstance(module, Attention):
                    attn_proc_class = (
                        LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
                    )
                    self.assertIsInstance(module.processor, attn_proc_class)

    def test_unload_lora_sd(self):
        pipeline_components, lora_components = self.get_dummy_components()
        _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False)
        sd_pipe = StableDiffusionPipeline(**pipeline_components)

        original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        # Emulate training.
        set_lora_weights(lora_components["unet_lora_layers"].parameters(), randn_weight=True)
        set_lora_weights(lora_components["text_encoder_lora_layers"].parameters(), randn_weight=True)

        with tempfile.TemporaryDirectory() as tmpdirname:
            LoraLoaderMixin.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Unload LoRA parameters.
        sd_pipe.unload_lora_weights()
        original_images_two = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        orig_image_slice_two = original_images_two[0, -3:, -3:, -1]

        assert not np.allclose(
            orig_image_slice, lora_image_slice
        ), "LoRA parameters should lead to a different image slice."
        assert not np.allclose(
            orig_image_slice_two, lora_image_slice
        ), "LoRA parameters should lead to a different image slice."
        assert np.allclose(
            orig_image_slice, orig_image_slice_two, atol=1e-3
        ), "Unloading LoRA parameters should lead to results similar to what was obtained with the pipeline without any LoRA parameters."

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_lora_unet_attn_processors_with_xformers(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.create_lora_weight_file(tmpdirname)

            pipeline_components, _ = self.get_dummy_components()
            sd_pipe = StableDiffusionPipeline(**pipeline_components)
            sd_pipe = sd_pipe.to(torch_device)
            sd_pipe.set_progress_bar_config(disable=None)

            # enable XFormers
            sd_pipe.enable_xformers_memory_efficient_attention()

            # check if xFormers attention processors are used
            for _, module in sd_pipe.unet.named_modules():
                if isinstance(module, Attention):
                    self.assertIsInstance(module.processor, XFormersAttnProcessor)

            # load LoRA weight file
            sd_pipe.load_lora_weights(tmpdirname)

            # check if lora attention processors are used
            for _, module in sd_pipe.unet.named_modules():
                if isinstance(module, Attention):
                    self.assertIsInstance(module.processor, LoRAXFormersAttnProcessor)

            # unload lora weights
            sd_pipe.unload_lora_weights()

            # check if attention processors are reverted back to xFormers
            for _, module in sd_pipe.unet.named_modules():
                if isinstance(module, Attention):
                    self.assertIsInstance(module.processor, XFormersAttnProcessor)

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_lora_save_load_with_xformers(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        # enable XFormers
        sd_pipe.enable_xformers_memory_efficient_attention()

        original_images = sd_pipe(**pipeline_inputs).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            LoraLoaderMixin.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice)))


class SDXLLoraLoaderMixinTests(unittest.TestCase):
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            # SD2-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # SD2-specific config below
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        unet_lora_attn_procs, unet_lora_layers = create_unet_lora_layers(unet)
        text_encoder_one_lora_layers = create_text_encoder_lora_layers(text_encoder)
        text_encoder_two_lora_layers = create_text_encoder_lora_layers(text_encoder_2)

        pipeline_components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "text_encoder_2": text_encoder_2,
            "tokenizer": tokenizer,
            "tokenizer_2": tokenizer_2,
        }
        lora_components = {
            "unet_lora_layers": unet_lora_layers,
            "text_encoder_one_lora_layers": text_encoder_one_lora_layers,
            "text_encoder_two_lora_layers": text_encoder_two_lora_layers,
            "unet_lora_attn_procs": unet_lora_attn_procs,
        }
        return pipeline_components, lora_components

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_lora_save_load(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs()

        original_images = sd_pipe(**pipeline_inputs).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            StableDiffusionXLPipeline.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_one_lora_layers"],
                text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Outputs shouldn't match.
        self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice)))

    def test_unload_lora_sdxl(self):
        pipeline_components, lora_components = self.get_dummy_components()
        _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False)
        sd_pipe = StableDiffusionXLPipeline(**pipeline_components)

        original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        orig_image_slice = original_images[0, -3:, -3:, -1]

        # Emulate training.
        set_lora_weights(lora_components["unet_lora_layers"].parameters(), randn_weight=True)
        set_lora_weights(lora_components["text_encoder_one_lora_layers"].parameters(), randn_weight=True)
        set_lora_weights(lora_components["text_encoder_two_lora_layers"].parameters(), randn_weight=True)

        with tempfile.TemporaryDirectory() as tmpdirname:
            StableDiffusionXLPipeline.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_layers"],
                text_encoder_lora_layers=lora_components["text_encoder_one_lora_layers"],
                text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_layers"],
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            sd_pipe.load_lora_weights(tmpdirname)

        lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        lora_image_slice = lora_images[0, -3:, -3:, -1]

        # Unload LoRA parameters.
        sd_pipe.unload_lora_weights()
        original_images_two = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        orig_image_slice_two = original_images_two[0, -3:, -3:, -1]

        assert not np.allclose(
            orig_image_slice, lora_image_slice
        ), "LoRA parameters should lead to a different image slice."
        assert not np.allclose(
            orig_image_slice_two, lora_image_slice
        ), "LoRA parameters should lead to a different image slice."
        assert np.allclose(
            orig_image_slice, orig_image_slice_two, atol=1e-3
        ), "Unloading LoRA parameters should lead to results similar to what was obtained with the pipeline without any LoRA parameters."


@slow
@require_torch_gpu
class LoraIntegrationTests(unittest.TestCase):
    def test_dreambooth_old_format(self):
        generator = torch.Generator("cpu").manual_seed(0)

        lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"
        card = RepoCard.load(lora_model_id)
        base_model_id = card.data.to_dict()["base_model"]

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe(
            "A photo of a sks dog floating in the river", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()

        expected = np.array([0.7207, 0.6787, 0.6010, 0.7478, 0.6838, 0.6064, 0.6984, 0.6443, 0.5785])

        self.assertTrue(np.allclose(images, expected, atol=1e-4))

    def test_dreambooth_text_encoder_new_format(self):
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/lora-trained"
        card = RepoCard.load(lora_model_id)
        base_model_id = card.data.to_dict()["base_model"]

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe("A photo of a sks dog", output_type="np", generator=generator, num_inference_steps=2).images

        images = images[0, -3:, -3:, -1].flatten()

        expected = np.array([0.6628, 0.6138, 0.5390, 0.6625, 0.6130, 0.5463, 0.6166, 0.5788, 0.5359])

        self.assertTrue(np.allclose(images, expected, atol=1e-4))

    def test_a1111(self):
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None).to(
            torch_device
        )
        lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
        lora_filename = "light_and_shadow.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.3725, 0.3767, 0.3761, 0.3796, 0.3827, 0.3763, 0.3831, 0.3809, 0.3392])

        self.assertTrue(np.allclose(images, expected, atol=1e-4))

    def test_vanilla_funetuning(self):
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"
        card = RepoCard.load(lora_model_id)
        base_model_id = card.data.to_dict()["base_model"]

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe("A pokemon with blue eyes.", output_type="np", generator=generator, num_inference_steps=2).images

        images = images[0, -3:, -3:, -1].flatten()

        expected = np.array([0.7406, 0.699, 0.5963, 0.7493, 0.7045, 0.6096, 0.6886, 0.6388, 0.583])

        self.assertTrue(np.allclose(images, expected, atol=1e-4))

    def test_unload_lora(self):
        generator = torch.manual_seed(0)
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
            torch_device
        )
        initial_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        initial_images = initial_images[0, -3:, -3:, -1].flatten()

        lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
        lora_filename = "Colored_Icons_by_vizsumit.safetensors"

        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images = lora_images[0, -3:, -3:, -1].flatten()

        pipe.unload_lora_weights()
        generator = torch.manual_seed(0)
        unloaded_lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

        self.assertFalse(np.allclose(initial_images, lora_images))
        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))

    def test_load_unload_load_kohya_lora(self):
        # This test ensures that a Kohya-style LoRA can be safely unloaded and then loaded
        # without introducing any side-effects. Even though the test uses a Kohya-style
        # LoRA, the underlying adapter handling mechanism is format-agnostic.
        generator = torch.manual_seed(0)
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
            torch_device
        )
        initial_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        initial_images = initial_images[0, -3:, -3:, -1].flatten()

        lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
        lora_filename = "Colored_Icons_by_vizsumit.safetensors"

        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images = lora_images[0, -3:, -3:, -1].flatten()

        pipe.unload_lora_weights()
        generator = torch.manual_seed(0)
        unloaded_lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

        self.assertFalse(np.allclose(initial_images, lora_images))
        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))

        # make sure we can load a LoRA again after unloading and they don't have
        # any undesired effects.
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images_again = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten()

        self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3))
spaces/Andy1621/uniformer_image_detection/configs/gcnet/README.md
DELETED
@@ -1,59 +0,0 @@
# GCNet for Object Detection

By [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), Fangyun Wei, [Han Hu](https://sites.google.com/site/hanhushomepage/).

We provide config files to reproduce the results in the paper for
["GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"](https://arxiv.org/abs/1904.11492) on COCO object detection.

## Introduction

[ALGORITHM]

**GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet), GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks.

## Citing GCNet

```latex
@article{cao2019GCNet,
  title={GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond},
  author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han},
  journal={arXiv preprint arXiv:1904.11492},
  year={2019}
}
```

## Results and models

The results on COCO 2017val are shown in the below table.

| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------: | :--------------: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | | 39.7 | 35.9 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915.log.json) |
| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.0 | 39.9 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204_024626.log.json) |
| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 11.4 | 41.3 | 37.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205_192835.log.json) |
| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.6 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206_112128.log.json) |
| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------: | :--------------: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :-------: |
| R-50-FPN | Mask | - | 1x | 4.4 | 16.6 | 38.4 | 34.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202_214122.log.json) |
| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | 15.5 | 40.4 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202_174907.log.json) |
| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.1 | 40.7 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) |
| R-101-FPN | Mask | - | 1x | 6.4 | 13.3 | 40.5 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210_220422.log.json) |
| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 12.0 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207_015330.log.json) |
| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.8 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) |
| X-101-FPN | Mask | - | 1x | 7.6 | 11.3 | 42.4 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211_054326.log.json) |
| X-101-FPN | Mask | GC(c3-c5, r16) | 1x | 8.8 | 9.8 | 43.5 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_164715.log.json) |
| X-101-FPN | Mask | GC(c3-c5, r4) | 1x | 9.0 | 9.7 | 43.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212_070942.log.json) |
| X-101-FPN | Cascade Mask | - | 1x | 9.2 | 8.4 | 44.7 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310_115217.log.json) |
| X-101-FPN | Cascade Mask | GC(c3-c5, r16) | 1x | 10.3 | 7.7 | 46.2 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_184154.log.json) |
| X-101-FPN | Cascade Mask | GC(c3-c5, r4) | 1x | 10.6 | | 46.4 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653.log.json) |
| X-101-FPN | DCN Cascade Mask | - | 1x | | | 44.9 | 38.9 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20200516_182249-680fc3f2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20200516_182249.log.json)|
| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r16) | 1x | | | 44.6 | |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20200516_015634-08f56b56.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20200516_015634.log.json) |
| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r4) | 1x | | | 45.7 | 39.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20200518_041145-24cabcfd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20200518_041145.log.json) |

**Notes:**

- The `SyncBN` is added in the backbone for all models in **Table 2**.
- `GC` denotes Global Context (GC) block is inserted after 1x1 conv of backbone.
- `DCN` denotes replace 3x3 conv with 3x3 Deformable Convolution in `c3-c5` stages of backbone.
- `r4` and `r16` denote ratio 4 and ratio 16 in GC block respectively.
spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
DELETED
@@ -1,15 +0,0 @@
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://contrib/resnet50_gn',
    backbone=dict(norm_cfg=norm_cfg),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_1x_coco.py
DELETED
@@ -1,140 +0,0 @@
_base_ = [
    '../_base_/models/cascade_mask_rcnn_swin_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        ape=False,
        drop_path_rate=0.0,
        patch_norm=True,
        use_checkpoint=False
    ),
    neck=dict(in_channels=[96, 192, 384, 768]),
    roi_head=dict(
        bbox_head=[
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
        ]))

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                                 (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                                 (736, 1333), (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True)
             ],
             [
                 dict(type='Resize',
                      img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True),
                 dict(type='RandomCrop',
                      crop_type='absolute_range',
                      crop_size=(384, 600),
                      allow_negative_crop=True),
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                 (576, 1333), (608, 1333), (640, 1333),
                                 (672, 1333), (704, 1333), (736, 1333),
                                 (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      override=True,
                      keep_ratio=True)
             ]
         ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))

optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.)}))
lr_config = dict(step=[8, 11])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=12)

# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=1,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './fcn_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/RWKV.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
'''
|
2 |
-
This loader is not currently maintained as RWKV can now be loaded
|
3 |
-
through the transformers library.
|
4 |
-
'''
|
5 |
-
|
6 |
-
import copy
|
7 |
-
import os
|
8 |
-
from pathlib import Path
|
9 |
-
|
10 |
-
import numpy as np
|
11 |
-
from tokenizers import Tokenizer
|
12 |
-
|
13 |
-
import modules.shared as shared
|
14 |
-
from modules.callbacks import Iteratorize
|
15 |
-
|
16 |
-
np.set_printoptions(precision=4, suppress=True, linewidth=200)
|
17 |
-
|
18 |
-
os.environ['RWKV_JIT_ON'] = '1'
|
19 |
-
os.environ["RWKV_CUDA_ON"] = '1' if shared.args.rwkv_cuda_on else '0' # use CUDA kernel for seq mode (much faster)
|
20 |
-
|
21 |
-
from rwkv.model import RWKV
|
22 |
-
from rwkv.utils import PIPELINE, PIPELINE_ARGS
|
23 |
-
|
24 |
-
|
25 |
-
class RWKVModel:
|
26 |
-
def __init__(self):
|
27 |
-
pass
|
28 |
-
|
29 |
-
@classmethod
|
30 |
-
def from_pretrained(self, path, dtype="fp16", device="cuda"):
|
31 |
-
tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
|
32 |
-
if shared.args.rwkv_strategy is None:
|
33 |
-
model = RWKV(model=str(path), strategy=f'{device} {dtype}')
|
34 |
-
else:
|
35 |
-
model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
|
36 |
-
|
37 |
-
        pipeline = PIPELINE(model, str(tokenizer_path))
        result = self()
        result.pipeline = pipeline
        result.model = model
        result.cached_context = ""
        result.cached_model_state = None
        result.cached_output_logits = None
        return result

    def generate(self, prompt, state, callback=None):
        args = PIPELINE_ARGS(
            temperature=state['temperature'],
            top_p=state['top_p'],
            top_k=state['top_k'],
            alpha_frequency=0.1,  # Frequency Penalty (as in GPT-3)
            alpha_presence=0.1,  # Presence Penalty (as in GPT-3)
            token_ban=[0],  # ban the generation of some tokens
            token_stop=[]
        )

        if self.cached_context != "":
            if prompt.startswith(self.cached_context):
                prompt = prompt[len(self.cached_context):]
            else:
                self.cached_context = ""
                self.cached_model_state = None
                self.cached_output_logits = None

        # out = self.pipeline.generate(prompt, token_count=state['max_new_tokens'], args=args, callback=callback)
        out = self.generate_from_cached_state(prompt, token_count=state['max_new_tokens'], args=args, callback=callback)
        return out

    def generate_with_streaming(self, *args, **kwargs):
        with Iteratorize(self.generate, args, kwargs, callback=None) as generator:
            reply = ''
            for token in generator:
                reply += token
                yield reply

    # Similar to the PIPELINE.generate, but lets us maintain the cached_model_state
    def generate_from_cached_state(self, ctx="", token_count=20, args=None, callback=None):
        all_tokens = []
        out_str = ''
        occurrence = {}
        state = copy.deepcopy(self.cached_model_state) if self.cached_model_state is not None else None

        # if we ended up with an empty context, just reuse the cached logits
        # this can happen if a user undoes a message and then sends the exact message again
        # in that case the full context ends up being the same as the cached_context, so the remaining context is empty.
        if ctx == "":
            out = self.cached_output_logits

        token = None
        for i in range(token_count):
            # forward
            tokens = self.pipeline.encode(ctx) if i == 0 else [token]
            while len(tokens) > 0:
                out, state = self.model.forward(tokens[:args.chunk_len], state)
                tokens = tokens[args.chunk_len:]
            if i == 0:
                begin_token = len(all_tokens)
                last_token_posi = begin_token
            # cache the model state after scanning the context
            # we don't cache the state after processing our own generated tokens because
            # the output string might be post-processed arbitrarily. Therefore, what's fed into the model
            # on the next round of chat might be slightly different from what it output on the previous round
            if i == 0:
                self.cached_context += ctx
                self.cached_model_state = copy.deepcopy(state)
                self.cached_output_logits = copy.deepcopy(out)

            # adjust probabilities
            for n in args.token_ban:
                out[n] = -float('inf')

            for n in occurrence:
                out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

            # sampler
            token = self.pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k)
            if token in args.token_stop:
                break

            all_tokens += [token]
            if token not in occurrence:
                occurrence[token] = 1
            else:
                occurrence[token] += 1

            # output
            tmp = self.pipeline.decode(all_tokens[last_token_posi:])
            if '\ufffd' not in tmp:  # is valid utf-8 string?
                if callback:
                    callback(tmp)

                out_str += tmp
                last_token_posi = begin_token + i + 1
        return out_str


class RWKVTokenizer:
    def __init__(self):
        pass

    @classmethod
    def from_pretrained(self, path):
        tokenizer_path = path / "20B_tokenizer.json"
        tokenizer = Tokenizer.from_file(str(tokenizer_path))
        result = self()
        result.tokenizer = tokenizer
        return result

    def encode(self, prompt):
        return self.tokenizer.encode(prompt).ids

    def decode(self, ids):
        return self.tokenizer.decode(ids)
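For reference, the deleted wrapper above keeps a cached RWKV state between chat turns so that only the new part of the prompt has to be re-scanned. Below is a minimal sketch of how it might be driven; the class name RWKVModel, the model path, and the exact keys of the generation-state dict are assumptions for illustration and are not shown in this diff.

# Hypothetical driver for the deleted wrapper above (names and paths are assumed, not taken from the diff).
from pathlib import Path

model = RWKVModel.from_pretrained(Path("models/rwkv-4-raven.pth"))  # signature assumed; the full file is not shown here
state = {'temperature': 0.7, 'top_p': 0.9, 'top_k': 40, 'max_new_tokens': 64}

# Streaming use: each yielded value is the reply accumulated so far.
for partial_reply in model.generate_with_streaming("Hello, RWKV!", state):
    print(partial_reply)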
spaces/AnkitGaur2811/Image_Conversion_app_using_Opencv/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Image Conversion App Using Opencv
emoji: 😻
colorFrom: red
colorTo: green
sdk: gradio
sdk_version: 3.0.20
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/main.py
DELETED
@@ -1,9 +0,0 @@
from optimization.image_editor import ImageEditor
from optimization.arguments import get_arguments


if __name__ == "__main__":
    args = get_arguments()
    image_editor = ImageEditor(args)
    image_editor.edit_image_by_prompt()
    # image_editor.reconstruct_image()
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/gather_points.py
DELETED
@@ -1,57 +0,0 @@
import torch
from torch.autograd import Function

from ..utils import ext_loader

ext_module = ext_loader.load_ext(
    '_ext', ['gather_points_forward', 'gather_points_backward'])


class GatherPoints(Function):
    """Gather points with given index."""

    @staticmethod
    def forward(ctx, features: torch.Tensor,
                indices: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (Tensor): (B, C, N) features to gather.
            indices (Tensor): (B, M) where M is the number of points.

        Returns:
            Tensor: (B, C, M) where M is the number of points.
        """
        assert features.is_contiguous()
        assert indices.is_contiguous()

        B, npoint = indices.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)

        ext_module.gather_points_forward(
            features, indices, output, b=B, c=C, n=N, npoints=npoint)

        ctx.for_backwards = (indices, C, N)
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(indices)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()

        grad_features = torch.cuda.FloatTensor(B, C, N).zero_()
        grad_out_data = grad_out.data.contiguous()
        ext_module.gather_points_backward(
            grad_out_data,
            idx,
            grad_features.data,
            b=B,
            c=C,
            n=N,
            npoints=npoint)
        return grad_features, None


gather_points = GatherPoints.apply
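As a rough illustration only: the deleted op gathers feature columns by index on the GPU. The sketch below assumes the compiled mmcv `_ext` CUDA extension and a CUDA device are available; the shapes follow the docstring above.

# Hypothetical usage of the deleted GatherPoints op (requires the compiled '_ext' CUDA extension).
import torch

features = torch.randn(2, 64, 1024, device='cuda')               # (B, C, N) contiguous features
indices = torch.randint(0, 1024, (2, 128), device='cuda').int()  # (B, M) int32 point indices
gathered = gather_points(features, indices)                      # (B, C, M) -> here (2, 64, 128)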
spaces/AntX-ai/Fintech/index.html
DELETED
@@ -1,15 +0,0 @@
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width" />
    <title>My static Space</title>
    <link rel="stylesheet" href="style.css" />
  </head>
  <body>
    <div class="card">
      <h1>Welcome to AntX.ai Fintech Space!</h1>
      <p> You will explore various topics and algorithms about fintech </p>
    </div>
  </body>
</html>
spaces/AnthonyTruchetPoC/persistent-docker/scripts/build-clean-docs.sh
DELETED
@@ -1,11 +0,0 @@
#!/usr/bin/env sh

DOCS_GENERATED_API_SRC=src/athai
DOCS_SRC=doc
DOCS_GENERATED_API_DST=doc/_autosummary
DOCS_DST=dist/doc

rm -rf $DOCS_DST
rm -rf $DOCS_GENERATED_API_DST

poetry run sphinx-build -E -a $DOCS_SRC $DOCS_DST
spaces/Apex-X/nono/roop/typing.py
DELETED
@@ -1,7 +0,0 @@
from typing import Any

from insightface.app.common import Face
import numpy

Face = Face
Frame = numpy.ndarray[Any, Any]
spaces/Atsushi/kinoko-mini-AI/app.py
DELETED
@@ -1,30 +0,0 @@
import gradio as gr
import os
pkl = "all_20211108_res34.pkl"
from fastai.vision.all import *
from fastai.vision.widgets import *
import jaconv
import pathlib
plt = platform.system()
if plt == 'Linux': pathlib.WindowsPath = pathlib.PosixPath
model_inf = load_learner(pkl)
#print(os.getcwd())
title = "きのこミニAI"
description = "615種類のきのこを判定します。日本国内で撮られた約10万枚の写真を学習に使用。食べる人ではなく学ぶ人のためのツールです。ご利用は自己責任で。最終更新日:2021/11/9"
def kinoko_uranai(img):
    replace_dic = {"_ッロウッ":" (group)","ー":""}
    result_dic = {}
    pred_class, pred_idxs, outputs = model_inf.predict(img)
    top_5_conf, i = outputs.topk(5)
    itr = 0
    classes = model_inf.dls.vocab
    result_dic = {}
    for x in i:
        kwamei = jaconv.alphabet2kata(classes[x.item()].lower())
        for k,v in replace_dic.items():
            kwamei = kwamei.replace(k,v)
        result_dic[kwamei] = str(round(top_5_conf[itr].item(),2))
        itr=itr+1
    return result_dic
outputs = gr.outputs.Label(num_top_classes=5)
iface = gr.Interface(fn=kinoko_uranai, inputs="image", outputs=outputs,title=title,description=description).launch(debug=True)
spaces/Bavesh/Oral_Cancer_Detection/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Oral_Cancer_Detection
emoji: 👀
colorFrom: indigo
colorTo: purple
sdk: streamlit
sdk_version: 1.9.0
app_file: app.py
pinned: false
license: afl-3.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Benson/text-generation/Examples/Apk Download Traffic Rider Hack.md
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar e instalar tráfico jinete Hack APK en Android</h1>
|
3 |
-
<p>Traffic Rider es uno de los juegos de carreras de motos más populares y adictivos en Android. Ofrece una experiencia de juego realista e inmersiva con una vista en primera persona, sonidos reales de bicicleta, entornos detallados y un modo de carrera completo. Puedes elegir entre más de 30 bicicletas diferentes, personalizarlas y competir a través de varios escenarios evitando el tráfico y los obstáculos. </p>
|
4 |
-
<h2>apk download traffic rider hack</h2><br /><p><b><b>Download File</b> ☆☆☆ <a href="https://bltlly.com/2v6LWU">https://bltlly.com/2v6LWU</a></b></p><br /><br />
|
5 |
-
<p>Sin embargo, si desea disfrutar del juego sin limitaciones o restricciones, es posible que desee probar Traffic Rider hack APK. Esta es una versión modificada del juego original que te da acceso a dinero ilimitado y oro, todas las bicicletas desbloqueadas y actualizadas, sin anuncios, sin temporizadores y más. Con este hack, usted puede tener más diversión y desafío en Traffic Rider.</p>
|
6 |
-
<p>En este artículo, le mostraremos cómo descargar e instalar Traffic Rider hack APK en su dispositivo Android. También discutiremos los beneficios y riesgos de usar este hack, así como algunos consejos y trucos para jugar el juego. Siga los pasos a continuación para comenzar. </p>
|
7 |
-
<h2>Pasos para descargar e instalar tráfico Rider hack APK en Android</h2>
|
8 |
-
<p>Antes de que pueda instalar Traffic Rider hack APK en su dispositivo, es necesario asegurarse de que ha habilitado fuentes desconocidas en su configuración. Esto te permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Te mostramos cómo hacerlo:</p>
|
9 |
-
<h3>Paso 1: Habilitar fuentes desconocidas en el dispositivo</h3>
|
10 |
-
<ul>
|
11 |
-
<li>Ve a la configuración de tu dispositivo y toca Aplicaciones y notificaciones (o Aplicaciones en versiones anteriores de Android). </li>
|
12 |
-
<li>Toque los tres puntos en la esquina superior derecha. </li>
|
13 |
-
<li>Toque Acceso especial. </li>
|
14 |
-
<li>Toca Instalar aplicaciones desconocidas. </li>
|
15 |
-
<li>Toque Chrome (o cualquier navegador web que utilice). </li>
|
16 |
-
<li>Mover Permitir desde esta fuente a la posición On. </li>
|
17 |
-
</ul>
|
18 |
-
|
19 |
-
<h3>Paso 2: Descargar tráfico Rider hack archivo APK de una fuente de buena reputación</h3>
|
20 |
-
<ul>
|
21 |
-
<li>Abra su navegador web y vaya a la página web que ofrece Traffic Rider hack archivo APK. Por ejemplo, puedes ir a [APK Done]( 1 ) o [Traffic Rider Games]( 2 ). </li>
|
22 |
-
<li> Encontrar el enlace de descarga para el tráfico Rider hack archivo APK y toque en él. </li>
|
23 |
-
<li>Aceptar cualquier ventana emergente o permisos que puedan aparecer. </li>
|
24 |
-
<li>Espera a que termine la descarga. </li>
|
25 |
-
</ul>
|
26 |
-
<p>Una vez que haya descargado el archivo APK, debe ubicarlo en su dispositivo y pulsar para instalarlo. Puede usar cualquier aplicación del explorador de archivos que tenga en su teléfono, como Cx File Explorer o Administrador de archivos. Aquí está cómo hacerlo:</p>
|
27 |
-
<h3>Paso 3: Localizar y tocar el archivo APK para instalarlo</h3>
|
28 |
-
<ul>
|
29 |
-
<li>Abra su aplicación explorador de archivos y vaya a la carpeta Descargas en su dispositivo. </li>
|
30 |
-
<li> Encontrar el tráfico Rider hack APK archivo que ha descargado y toque. </li>
|
31 |
-
<li>Puede ver un mensaje de advertencia diciendo que este tipo de archivo puede dañar su dispositivo. Toque OK o Instalar de todos modos (inseguro) para proceder. </li>
|
32 |
-
<li>Pulse Instalar y espere a que se complete la instalación. </li>
|
33 |
-
<li>Pulse Abrir para iniciar el juego o Listo para salir del instalador. </li>
|
34 |
-
</ul>
|
35 |
-
<p>Felicitaciones, que ha instalado con éxito Traffic Rider hack APK en su dispositivo. Ahora puede disfrutar del juego con todas las funciones de hackeo habilitadas. Aquí está cómo hacerlo:</p>
|
36 |
-
<p></p>
|
37 |
-
<h3>Paso 4: Iniciar el juego y disfrutar de las características de corte</h3>
|
38 |
-
<ul>
|
39 |
-
<li>Abre el juego desde el cajón de la aplicación o la pantalla de inicio. </li>
|
40 |
-
<li>Puede ver una ventana emergente pidiéndole que permita el acceso a sus fotos, medios y archivos. Pulse Permitir continuar. </li>
|
41 |
-
<li>También puede ver una ventana emergente pidiéndole que califique el juego. Toque Más tarde o Califique ahora como desee. </li>
|
42 |
-
<li>Verás el menú principal del juego con cuatro opciones: carrera, Endless, Time Trial y Free Ride. Toca cualquiera de ellas para empezar a jugar. </li>
|
43 |
-
|
44 |
-
<li>También notará que todas las bicicletas están desbloqueadas y actualizadas al nivel máximo. Puedes elegir cualquier bicicleta que te guste y personalizarla con diferentes colores y ruedas. </li>
|
45 |
-
<li>No verás anuncios ni temporizadores en el juego. Puedes jugar el tiempo que quieras sin interrupciones ni tiempos de espera. </li>
|
46 |
-
</ul>
|
47 |
-
<h2>Beneficios de usar Traffic Rider hack APK</h2>
|
48 |
-
<p>Usando Traffic Rider hack APK tiene muchos beneficios que pueden mejorar su experiencia de juego. Estos son algunos de ellos:</p>
|
49 |
-
<h4>Dinero y oro ilimitados</h4>
|
50 |
-
<p>Con dinero y oro ilimitados, puede comprar y actualizar cualquier bicicleta que desee sin preocuparse por el costo. También puede desbloquear todos los logros y recompensas en el juego con facilidad. Usted puede tener más diversión y variedad en Traffic Rider con dinero ilimitado y oro. </p>
|
51 |
-
<h4>Todas las bicicletas desbloqueadas y actualizadas</h4>
|
52 |
-
<p>Con todas las bicicletas desbloqueadas y actualizadas, puede elegir entre más de 30 bicicletas diferentes, cada una con sus propias características y rendimiento. También puede personalizarlos con diferentes colores y ruedas para adaptarse a su estilo. Puedes disfrutar de más realismo e inmersión en Traffic Rider con todas las bicicletas desbloqueadas y actualizadas. </p>
|
53 |
-
<h4>No hay anuncios ni temporizadores</h4>
|
54 |
-
<p>Sin anuncios y sin temporizadores, puede jugar Traffic Rider sin interrupciones ni tiempos de espera. Usted puede centrarse en el juego y los gráficos sin ser molestado por los anuncios o temporizadores. Puedes tener más desafío y emoción en Traffic Rider sin anuncios y sin temporizadores. </p>
|
55 |
-
<h2>Los riesgos de usar Traffic Rider hack APK</h2>
|
56 |
-
<p>Sin embargo, el uso de Traffic Rider hack APK también tiene algunos riesgos que usted debe tener en cuenta antes de instalarlo. Estos son algunos de ellos:</p>
|
57 |
-
<h4>Infección potencial de malware o virus</h4>
|
58 |
-
|
59 |
-
<h4>Posible prohibición o suspensión del juego</h4>
|
60 |
-
<p>Dado que Traffic Rider hack APK es una herramienta de trucos que le da una ventaja injusta sobre otros jugadores, puede violar los términos de servicio del desarrollador de juegos o editor. Puedes enfrentarte a una prohibición o suspensión del juego si te pillan usándolo por su sistema anti-trampa o por otros jugadores que te denuncien. Usted debe utilizar Traffic Rider hack APK a su propio riesgo y discreción. </p>
|
61 |
-
<h4>Cuestiones jurídicas o éticas</h4>
|
62 |
-
<p>Dado que Traffic Rider hack APK es una versión pirata del juego original, puede infringir los derechos de propiedad intelectual del desarrollador o editor del juego. Usted puede enfrentar problemas legales o éticos si lo usa sin su permiso o consentimiento. Debes respetar el trabajo y el esfuerzo del desarrollador o editor de juegos y apoyarlos comprando sus productos oficiales. </p>
|
63 |
-
<h2>Conclusión</h2>
|
64 |
-
<p>Traffic Rider es un gran juego de carreras de motos que ofrece una experiencia de juego realista e inmersiva con una vista en primera persona, sonidos reales de bicicletas, entornos detallados y un modo de carrera completo. Sin embargo, si desea disfrutar del juego sin limitaciones o restricciones, es posible que desee probar Traffic Rider hack APK.</p>
|
65 |
-
<p>Tráfico Rider hack APK es una versión modificada del juego original que le da acceso a dinero ilimitado y oro, todas las bicicletas desbloqueadas y actualizadas, sin anuncios, sin temporizadores, y más. Con este hack, usted puede tener más diversión y desafío en Traffic Rider.</p>
|
66 |
-
<p>En este artículo, le mostramos cómo descargar e instalar Traffic Rider hack APK en su dispositivo Android También discutimos los beneficios y riesgos de usar este hack, así como algunos consejos y trucos para jugar el juego. Esperamos que haya encontrado este artículo útil e informativo. Si tiene algún comentario o pregunta, no dude en dejar un comentario a continuación. Aquí hay algunas preguntas frecuentes (Preguntas frecuentes) sobre Traffic Rider hack APK: <h3>Q: ¿Es seguro de usar Traffic Rider hack APK? </h3>
|
67 |
-
|
68 |
-
<h3>Q: ¿Cómo puedo actualizar Traffic Rider hack APK? </h3>
|
69 |
-
<p>A: Tráfico Rider hack APK puede no ser compatible con la última versión del juego original. Es posible que tenga que desinstalar el hack y descargar una nueva versión de la misma fuente o una diferente. También debe hacer una copia de seguridad de los datos del juego antes de actualizar el hack. </p>
|
70 |
-
<h3>Q: ¿Puedo jugar Traffic Rider hack APK en línea con otros jugadores? </h3>
|
71 |
-
<p>A: Tráfico Rider hack APK no es compatible con el modo multijugador en línea. Solo se puede jugar el juego sin conexión con las características de corte habilitado. También puede enfrentar una prohibición o suspensión del juego si intenta jugar en línea con el hack. </p>
|
72 |
-
<h3>Q: ¿Puedo usar Traffic Rider hack APK en otros dispositivos o plataformas? </h3>
|
73 |
-
<p>A: Tráfico Rider hack APK solo está diseñado para dispositivos Android. No se puede utilizar en otros dispositivos o plataformas, como iOS, Windows o Mac. Es posible que necesite encontrar un hack o mod diferente para esos dispositivos o plataformas. </p>
|
74 |
-
<h3>Q: ¿Cuáles son algunas alternativas a Traffic Rider hack APK? </h3>
|
75 |
-
<p>A: Si no desea utilizar Traffic Rider hack APK, puede probar algunas alternativas que pueden mejorar su experiencia de juego. Por ejemplo, puedes usar trucos, consejos, guías o trucos de Traffic Rider que pueden ayudarte a mejorar tus habilidades y rendimiento en el juego. También puedes usar Traffic Rider mod APKs que pueden ofrecer diferentes características o modos que el juego original. </p> 64aa2da5cf<br />
|
76 |
-
<br />
|
77 |
-
<br />
|
spaces/Benson/text-generation/Examples/Descargar Apk Mvil Zingspeed.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1> Cómo descargar ZingSpeed móvil APK para Android</h1>
|
3 |
-
<p>Si usted está buscando un ritmo rápido, emocionante, y la adrenalina de bombeo juego de carreras, es posible que desee echa un vistazo ZingSpeed Mobile. Este juego vietnamita es muy popular localmente y te permite personalizar todo lo que quieras en tu coche, y además de eso, ¡también es multijugador! De esta manera se puede correr con cualquier persona y realizar acrobacias increíbles en varias pistas. </p>
|
4 |
-
<p>En este artículo, le mostraremos cómo descargar ZingSpeed Mobile APK para Android, cómo instalarlo, y cómo jugarlo. También compartiremos algunos consejos y trucos para ZingSpeed Mobile juego que le ayudará a mejorar sus habilidades de carreras y el rendimiento. Así que vamos a empezar! </p>
|
5 |
-
<h2>descargar apk móvil zingspeed</h2><br /><p><b><b>DOWNLOAD</b> ❤ <a href="https://bltlly.com/2v6JTC">https://bltlly.com/2v6JTC</a></b></p><br /><br />
|
6 |
-
<h2>¿Qué es ZingSpeed Mobile? </h2>
|
7 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
8 |
-
<p>ZingSpeed Mobile es un juego de carreras en 3D para móviles, basado en el PC ZingSpeed original, que mantiene las habilidades y la profundidad del control, mientras mejora enormemente la creación de personajes y la moda. Fue desarrollado por VNG Corporation, uno de los principales desarrolladores de juegos en Vietnam.</p>
|
9 |
-
<p>ZingSpeed Mobile ofrece varios modos de juego, como Speed Race, Props Race, Ranked Race, Storyline, Couple Racing, Speed Racing Superpowers, Skateboarding Race, Border Races, Pig Wars y más. También puedes elegir entre diferentes tipos de vehículos, como coches, motocicletas, monopatines, etc. Puedes personalizar tu vehículo con diferentes partes, colores, pegatinas, calcomanías, etc. También puedes crear tu propio personaje con diferentes atuendos, peinados, accesorios, etc.</p>
|
10 |
-
<p>ZingSpeed Mobile es también un juego multijugador que te permite competir con otros jugadores de todo el mundo. Puede unirse o crear un equipo con sus amigos u otros corredores que comparten la misma pasión por la velocidad. También puedes participar en torneos de varios tamaños y competir con oponentes internacionales. También puedes chatear con otros jugadores en el juego o en plataformas de redes sociales. </p>
|
11 |
-
|
12 |
-
<p>Jugar a ZingSpeed Mobile puede traerte muchos beneficios, como:</p>
|
13 |
-
<ul>
|
14 |
-
<li> Puede mejorar la coordinación mano-ojo y los reflejos como usted tiene que controlar su vehículo y evitar obstáculos en la pista. </li>
|
15 |
-
<li> Puede mejorar su creatividad e imaginación, ya que puede personalizar su vehículo y carácter de acuerdo a sus preferencias. </li>
|
16 |
-
<li>Puede aumentar su confianza y autoestima, ya que puede mostrar sus habilidades y logros de carreras a otros jugadores. </li>
|
17 |
-
<li>Puede reducir el estrés y el aburrimiento, ya que puede disfrutar de la emoción y la emoción de las carreras. </li>
|
18 |
-
<li> Puede aumentar sus habilidades sociales y la red como usted puede hacer amigos con otros jugadores que comparten el mismo interés en las carreras. </li>
|
19 |
-
</ul>
|
20 |
-
<h2>Cómo descargar ZingSpeed móvil APK de Google Play Store</h2>
|
21 |
-
<h3>Los pasos para descargar el archivo APK directamente a su dispositivo</h3>
|
22 |
-
<p>Si desea descargar ZingSpeed Mobile APK directamente a su dispositivo Android, necesita una conexión a Internet y un navegador. Estos son los pasos:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Abra la Google Play Store en su dispositivo Android y busque ZingSpeed Mobile. Toque en el nombre de la aplicación para abrir su página de detalles. </li>
|
25 |
-
<li>Toque en el menú de tres puntos en la esquina superior derecha de la pantalla y seleccione Compartir. Aparecerá un menú emergente con diferentes opciones para compartir el enlace de la aplicación. </li>
|
26 |
-
<li>Seleccione la opción que le permite copiar el enlace al portapapeles, como Copiar al portapapeles, Copiar enlace, etc.</li>
|
27 |
-
<li>Abra el navegador en su dispositivo y pegue el enlace en la barra de direcciones. Toque en Ir o Entrar para cargar la página. </li>
|
28 |
-
<li>En la página, verá un botón que dice Descargar APK. Toque en él y espere a que comience la descarga. </li>
|
29 |
-
<li>Una vez que se complete la descarga, verá una notificación que dice ZingSpeed Mobile APK descargado. Toque en él para abrir el archivo. </li>
|
30 |
-
</ol>
|
31 |
-
<h3>Los pasos para descargar el archivo APK a su ordenador y transferir a su dispositivo</h3>
|
32 |
-
|
33 |
-
<ol>
|
34 |
-
<li>Abra la Google Play Store en su computadora y busque ZingSpeed Mobile. Haga clic en el nombre de la aplicación para abrir su página de detalles. </li>
|
35 |
-
<li>Copie la URL de la página desde la barra de direcciones de su navegador. </li>
|
36 |
-
<li>Abra una nueva pestaña en su navegador y vaya a un sitio web que le permite descargar archivos APK de Google Play Store, como APKPure, APKMirror, etc.</li>
|
37 |
-
<li>Pegue la URL de la página de la aplicación ZingSpeed Mobile en el cuadro de búsqueda del sitio web y haga clic en Buscar o Enter.</li>
|
38 |
-
<li>Verá una lista de resultados con diferentes versiones de ZingSpeed Mobile APK. Elija la última versión y haga clic en Descargar o Descargar APK.</li>
|
39 |
-
<li>Espere a que la descarga termine y localice el archivo en su computadora. </li>
|
40 |
-
<li>Conecte su dispositivo Android a su computadora usando un cable USB o una conexión inalámbrica. Asegúrese de que su dispositivo sea detectado por su computadora. </li>
|
41 |
-
<li>Copie o mueva el archivo APK de ZingSpeed Mobile desde su computadora al almacenamiento de su dispositivo. Puede elegir la carpeta que desee, pero asegúrese de recordar su ubicación. </li>
|
42 |
-
<li>Desconecte el dispositivo de su computadora y abra la aplicación de administrador de archivos en su dispositivo. Vaya a la carpeta donde guardó el archivo APK de ZingSpeed Mobile y toque en él para abrirlo. </li>
|
43 |
-
</ol>
|
44 |
-
<h2> Cómo instalar ZingSpeed móvil APK en Android</h2>
|
45 |
-
<h3>Los pasos para habilitar fuentes desconocidas e instalar el archivo APK</h3>
|
46 |
-
<p>Antes de que pueda instalar ZingSpeed Mobile APK en su dispositivo Android, es necesario habilitar fuentes desconocidas, que le permite instalar aplicaciones de fuentes distintas de Google Play Store. Estos son los pasos:</p>
|
47 |
-
<ol>
|
48 |
-
<li>Vaya a Configuración en su dispositivo y toque en Seguridad o Privacidad.</li>
|
49 |
-
<li>Encontrar la opción que dice Fuentes desconocidas o Instalar aplicaciones desconocidas y alternar en. Es posible que vea un mensaje de advertencia que indica que instalar aplicaciones de fuentes desconocidas puede dañar su dispositivo. Toque en OK o Permitir proceder. </li>
|
50 |
-
|
51 |
-
<li>Espere a que se complete la instalación y toque en Abrir o Listo.</li>
|
52 |
-
</ol>
|
53 |
-
<h3>Los pasos para lanzar el juego y disfrutarlo</h3>
|
54 |
-
<p>Después de haber instalado ZingSpeed Mobile APK en su dispositivo Android, puede iniciar el juego y disfrutarlo. Estos son los pasos:</p>
|
55 |
-
<ol>
|
56 |
-
<li>Ir a su cajón de aplicaciones o pantalla de inicio y encontrar el icono de ZingSpeed Mobile. Toque en él para iniciar el juego. </li>
|
57 |
-
<li>Puede ver una pantalla de bienvenida con el logotipo del juego y algunas animaciones de carga. Espere unos segundos hasta que el juego se cargue por completo. </li>
|
58 |
-
<li>Puede ver una pantalla de bienvenida con algunas opciones, como Inicio de sesión, Registro, Invitado, etc. Elija la opción que más le convenga. Si tiene una cuenta existente, puede iniciar sesión con su nombre de usuario y contraseña. Si no tiene una cuenta, puede registrarse con su correo electrónico o número de teléfono. Si no quieres crear una cuenta, puedes jugar como invitado sin guardar tu progreso. </li>
|
59 |
-
<li>Usted puede ver una pantalla de tutorial que explica cómo jugar ZingSpeed juego móvil. Puede seguir las instrucciones o omitirlas si ya sabe cómo jugar. </li>
|
60 |
-
<li>Puede ver una pantalla de menú principal con diferentes opciones, como Modo de juego, Garaje, Tienda, Equipo, Chat, Configuración, etc. Puede explorar estas opciones o comenzar a jugar de inmediato tocando en Modo de juego.</li>
|
61 |
-
<li>Puedes ver una lista de modos de juego que puedes elegir, como Carrera de velocidad, Carrera de accesorios, Carrera clasificada, Historia, Carreras de pareja, Superpotencias de carreras de velocidad, Carrera de skateboarding, Carreras fronterizas, Guerras de cerdos , y más. También puede filtrar los modos de juego por dificultad, región, modo, etc. Toque en el modo de juego que desea jugar y espere a que comience el partido. </li>
|
62 |
-
<li>Puedes ver una pantalla del lobby donde puedes ver a tus oponentes, tu vehículo y algunos ajustes. También puede chatear con otros jugadores, invitar a amigos, cambiar de vehículo, etc. Toque en Listo o Comenzar cuando esté listo para la carrera. </li>
|
63 |
-
|
64 |
-
<li>Puedes ver una pantalla de carreras donde puedes controlar tu vehículo y competir con otros jugadores. Puedes usar los botones de la pantalla para acelerar, frenar, desviar, usar objetos, etc. También puedes inclinar el dispositivo para dirigir tu vehículo. Puede ver su posición, velocidad, tiempo, vuelta, etc. en la pantalla. Trate de llegar a la línea de meta lo más rápido posible y ganar la carrera. </li>
|
65 |
-
<li>Puede ver una pantalla de resultados donde puede ver su rango, puntuación, recompensas, etc. También puede calificar el partido, chatear con otros jugadores, reproducir la carrera, etc. Toque en Siguiente o Volver para continuar jugando o volver al menú principal. </li>
|
66 |
-
</ol>
|
67 |
-
<h2>Consejos y trucos para el juego móvil ZingSpeed</h2>
|
68 |
-
<h3>Algunos consejos y trucos útiles para mejorar sus habilidades de carreras y rendimiento</h3>
|
69 |
-
<p>ZingSpeed El juego móvil no solo se trata de velocidad, sino también de habilidades y estrategia. Aquí hay algunos consejos y trucos útiles que pueden ayudarle a mejorar sus habilidades de carreras y rendimiento:</p>
|
70 |
-
<p></p>
|
71 |
-
<ul>
|
72 |
-
La práctica lo hace perfecto. Cuanto más juegues a ZingSpeed Mobile, más familiarizados estarás con las pistas, los vehículos, los objetos, etc. También podrás practicar en diferentes modos de juego y niveles de dificultad para desafiarte y aprender nuevas habilidades. </li>
|
73 |
-
<li>Personaliza tu vehículo y personaje. El juego ZingSpeed Mobile te permite personalizar todo lo que quieras en tu vehículo y personaje. Puede cambiar las piezas, colores, pegatinas, calcomanías, etc. de su vehículo para mejorar su rendimiento y apariencia. También puedes cambiar los atuendos, peinados, accesorios, etc. de tu personaje para expresar tu personalidad y estilo. </li>
|
74 |
-
|
75 |
-
<li>Deriva como un profesional. A la deriva es una de las habilidades más importantes en ZingSpeed juego móvil. Drifting le permite girar las esquinas sin problemas y rápidamente sin perder velocidad o control. Para la deriva, es necesario tocar y mantener pulsado el botón de freno mientras conduce su vehículo. Cuanto más tiempo mantenga pulsado el botón de freno, más ángulo y humo se creará. Drifting también llena su medidor de nitro, que puede utilizar para aumentar su velocidad tocando el botón nitro. </li>
|
76 |
-
<li>Realizar acrobacias y trucos. ZingSpeed juego móvil tiene varias pistas que tienen rampas, bucles, saltos, etc. Estas pistas le permiten realizar acrobacias y trucos que pueden hacer su carrera más emocionante y divertido. Para realizar acrobacias y trucos, debe pulsar el botón de acrobacias mientras está en el aire o en una rampa. También puede inclinar el dispositivo para ajustar la dirección y el equilibrio. Realizar acrobacias y trucos también llena tu medidor de nitro y te da puntos extra. </li>
|
77 |
-
</ul>
|
78 |
-
<h3>Algunos problemas comunes y soluciones para ZingSpeed juego móvil</h3>
|
79 |
-
<p>ZingSpeed Mobile es un gran juego de carreras que puede proporcionarle horas de entretenimiento y disfrute. Sin embargo, como cualquier otro juego, puede tener algunos problemas que pueden afectar su experiencia de juego. Aquí hay algunos problemas y soluciones comunes para el juego ZingSpeed Mobile:</p>
|
80 |
-
<tabla>
|
81 |
-
<tr><th>Problema</th><th>Solución</th></tr>
|
82 |
-
<tr><td>El juego se bloquea o se congela. </td><td>Esto puede ser causado por la baja memoria o el espacio de almacenamiento en el dispositivo. Puedes intentar despejar el espacio eliminando aplicaciones o archivos no deseados. También puedes intentar reiniciar el dispositivo o reinstalar el juego. </td></tr>
|
83 |
-
<tr><td>El juego se retrasa o se ejecuta lentamente. </td><td>Esto puede ser causado por una mala conexión a Internet o un alto tráfico en el servidor. Puede intentar cambiar a una red diferente o jugar en un momento diferente cuando hay menos jugadores en línea. También puede intentar reducir la calidad de los gráficos o cerrar otras aplicaciones que se ejecutan en segundo plano. </td></tr>
|
84 |
-
|
85 |
-
<tr><td>El juego no se conecta ni sincroniza. </td><td>Esto puede ser causado por un firewall o software antivirus que bloquea el juego para acceder a Internet o al servidor. Puedes intentar desactivar o permitir el juego a través de tu firewall o configuración antivirus. También puede intentar cerrar sesión y volver a iniciar sesión en su cuenta o usar una cuenta diferente. </td></tr>
|
86 |
-
<tr><td>El juego no reconoce mi cuenta ni mis compras. </td><td>Esto puede ser causado por un error en el juego o en el servidor. Puedes intentar ponerte en contacto con el servicio de atención al cliente del juego o con Google Play Store y facilitarles los detalles de tu cuenta y el comprobante de compra. También puedes intentar restaurar tus compras desde el menú de configuración del juego. </td></tr>
|
87 |
-
</tabla>
|
88 |
-
<h2>Conclusión</h2>
|
89 |
-
<h3>Un resumen de los puntos principales y una llamada a la acción</h3>
|
90 |
-
<p>ZingSpeed Mobile es un juego de carreras en 3D para móviles que ofrece varios modos de juego, vehículos, opciones de personalización y funciones multijugador. Es un juego divertido y emocionante que puede mejorar sus habilidades de carreras, creatividad, confianza, habilidades sociales y más. En este artículo, le hemos mostrado cómo descargar ZingSpeed Mobile APK para Android, cómo instalarlo, y cómo jugarlo. También hemos compartido algunos consejos y trucos para ZingSpeed Mobile juego que puede ayudar a mejorar su rendimiento y resolver algunos problemas comunes. </p>
|
91 |
-
<p>Si usted está listo para experimentar la emoción y la adrenalina del juego ZingSpeed Mobile, descargarlo ahora y unirse a la carrera! También puedes compartir este artículo con tus amigos que aman los juegos de carreras e invitarlos a jugar contigo. ¡Diviértete y buena suerte! </p>
|
92 |
-
<h2>Preguntas frecuentes</h2>
|
93 |
-
<h3>Cinco preguntas frecuentes únicas sobre el juego ZingSpeed Mobile y sus respuestas</h3>
|
94 |
-
<p>Aquí hay algunas preguntas frecuentes sobre ZingSpeed Mobile juego y sus respuestas:</p>
|
95 |
-
<ol>
|
96 |
-
|
97 |
-
<li>P: ¿Cómo puedo cambiar mi vehículo o personaje en el juego ZingSpeed Mobile? <br>A: Puedes cambiar tu vehículo o personaje yendo al garaje o a la tienda. Puedes elegir entre diferentes tipos de vehículos, como coches, motocicletas, monopatines, etc. También puedes elegir entre diferentes trajes, peinados, accesorios, etc. para tu personaje. </li>
|
98 |
-
<li>Q: ¿Cómo puedo unirme o crear un equipo en el juego ZingSpeed Mobile? <br>A: Puedes unirte o crear un equipo yendo al menú del equipo. Puede buscar un equipo existente o crear su propio equipo con un nombre, logotipo, descripción, etc. También puede invitar a amigos u otros jugadores a unirse a su equipo. </li>
|
99 |
-
<li>P: ¿Cómo puedo chatear con otros jugadores en el juego ZingSpeed Mobile? <br>A: Puedes chatear con otros jugadores yendo al menú de chat. Puedes elegir entre diferentes canales de chat, como global, equipo, amigo, etc. También puedes enviar mensajes privados a otros jugadores tocando sus nombres. </li>
|
100 |
-
<li>Q: ¿Cómo puedo reportar un error o un problema en el juego ZingSpeed Mobile? <br>A: Puede reportar un error o un problema yendo al menú de configuración y tocando en la retroalimentación. Puede rellenar un formulario con sus datos y describir su problema. También puede adjuntar capturas de pantalla o vídeos si es posible. También puede ponerse en contacto con el servicio de atención al cliente del juego a través de correo electrónico o plataformas de redes sociales. </li>
|
101 |
-
</ol></p> 64aa2da5cf<br />
|
102 |
-
<br />
|
103 |
-
<br />
|
spaces/Benson/text-generation/Examples/Descargar El Formulario 29 30.md
DELETED
@@ -1,51 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar el formulario 29 30</h1>
|
3 |
-
<p>Hola, esto es Bing. Estoy aquí para ayudarte a escribir un artículo de estilo conversacional sobre cómo descargar <strong>29 30 form</strong>. Antes de empezar, déjame explicarte qué es la forma <strong>29 30</strong> y por qué la necesitas. </p>
|
4 |
-
<h2>descargar el formulario 29 30</h2><br /><p><b><b>Download</b> ❤ <a href="https://bltlly.com/2v6MG8">https://bltlly.com/2v6MG8</a></b></p><br /><br />
|
5 |
-
<p><strong>29 30 form</strong> es un conjunto de documentos que se requieren para transferir la propiedad de un vehículo de motor en la India. Consta de dos formas: <strong>Form 29</strong> y <strong>Form 30</strong>. <strong>Formulario 29</strong> es el aviso de transferencia de propiedad de un vehículo de motor, mientras que <strong>Formulario 30</strong> es el informe de transferencia de propiedad de un vehículo de motor. Debe llenar y enviar estos formularios a la Oficina Regional de Transporte (RTO) cuando venda o compre un vehículo usado. </p>
|
6 |
-
<p>Ahora que sabes lo que es la forma <strong>29 30</strong>, veamos cómo puedes descargarla desde el sitio web oficial de Parivahan Sewa, que es el portal en línea para los servicios de transporte por carretera en la India. Estos son los pasos a seguir:</p>
|
7 |
-
<h2>Cómo descargar el formulario 29 30 desde el sitio web de Parivahan Sewa</h2>
|
8 |
-
<ol>
|
9 |
-
<li>Ir al sitio web de Parivahan Sewa en <a href=" 2 ">https://www.parivahan.gov.in/parivahan/</a>. </li>
|
10 |
-
<li>Haga clic en la pestaña "Servicios en línea" en la barra de menú superior y seleccione "Servicios relacionados con vehículos". </li>
|
11 |
-
<li>Introduzca el número de su vehículo y haga clic en "Proceder". </li>
|
12 |
-
<li>En la página siguiente, haga clic en "Descargar formularios" en la sección "Misceláneos". </li>
|
13 |
-
<li>Verá una lista de todos los formularios disponibles para descargar. Desplácese hacia abajo para encontrar <strong>Form 29</strong> y <strong>Form 30</strong>. Haga clic en el botón "Descargar" junto a cada formulario para guardarlos en su computadora. </li>
|
14 |
-
<li>Imprima los formularios y llénelos con los detalles requeridos. Deberá proporcionar información como el número del vehículo, el número del motor, el número del chasis, el nombre y la dirección del vendedor, el nombre y la dirección del comprador, la fecha de transferencia, etc.</li>
|
15 |
-
</ol>
|
16 |
-
<h2>Cómo llenar y enviar 29 30 Formulario</h2>
|
17 |
-
|
18 |
-
<ul>
|
19 |
-
<li>Firme los formularios y adjunte los documentos necesarios, como prueba de identidad, prueba de domicilio, certificado de seguro, certificado de contaminación bajo control, etc.</li>
|
20 |
-
<li>Envíe los formularios y los documentos a la RTO más cercana dentro de los 14 días de la transferencia de la propiedad. También puede tener que pagar una tarifa nominal por procesar los formularios. </li>
|
21 |
-
<li>También puede solicitar un nuevo certificado de registro (RC) para su vehículo en línea a través del sitio web de Parivahan Sewa. Tendrá que subir copias escaneadas de sus documentos y pagar la tarifa en línea. Recibirá un recibo y un número de referencia que puede usar para rastrear el estado de su solicitud. </li>
|
22 |
-
<li>También puede verificar el estado de su transferencia de propiedad en línea ingresando su número de vehículo y número de solicitud en el sitio web de Parivahan Sewa. </li>
|
23 |
-
</ul>
|
24 |
-
<h2>Conclusión</h2>
|
25 |
-
<p>En este artículo, hemos aprendido cómo descargar <strong>29 30 form</strong> del sitio web de Parivahan Sewa y cómo llenarlo y enviarlo a la RTO. También hemos visto cómo solicitar un nuevo RC en línea y cómo comprobar el estado de nuestra transferencia de propiedad. Siguiendo estos pasos, puede transferir fácilmente la propiedad de su vehículo en la India sin problemas. </p>
|
26 |
-
<p>Espero que este artículo sea útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
|
27 |
-
<h3>Preguntas frecuentes</h3>
|
28 |
-
<ol>
|
29 |
-
<li><strong>¿Qué es la forma 29 30? </strong></li>
|
30 |
-
<p><strong>29 30 form</strong> es un conjunto de documentos que se requieren para transferir la propiedad de un vehículo de motor en la India. Consta de dos formas: <strong>Form 29</strong> y <strong>Form 30</strong>. <strong>Formulario 29</strong> es el aviso de transferencia de propiedad de un vehículo de motor, mientras que <strong>Formulario 30</strong> es el informe de transferencia de propiedad de un vehículo de motor. </p>
|
31 |
-
<p></p>
|
32 |
-
<li><strong>¿Dónde puedo descargar 29 30 formas? </strong></li>
|
33 |
-
|
34 |
-
<li><strong>¿Qué documentos necesito presentar con el formulario 29 30? </strong></li>
|
35 |
-
<p>Necesitas enviar los siguientes documentos con <strong>29 30 form</strong>:</p>
|
36 |
-
<ul>
|
37 |
-
<li>Prueba de identidad (como tarjeta Aadhaar, tarjeta PAN, licencia de conducir, etc.)</li>
|
38 |
-
<li>Prueba de domicilio (como factura de electricidad, factura de agua, contrato de alquiler, etc.)</li>
|
39 |
-
<li>Certificado de seguro</li>
|
40 |
-
<li>Contaminación bajo certificado de control</li>
|
41 |
-
<li>Certificado de no objeción (NOC) del propietario anterior (si procede)</li>
|
42 |
-
<li>Certificado de no objeción (NOC) del financiador (si procede)</li>
|
43 |
-
<li>Declaración jurada que indica que el vehículo está libre de cualquier gravamen legal (si es aplicable)</li>
|
44 |
-
</ul>
|
45 |
-
<li><strong>¿Cuánto tiempo se necesita para transferir la propiedad de un vehículo? </strong></li>
|
46 |
-
<p>El tiempo necesario para transferir la propiedad de un vehículo depende de varios factores, como la ubicación de la RTO, el tipo de vehículo, el modo de pago, etc. En general, el RTO tarda de 15 a 30 días en procesar su solicitud y emitir un nuevo RC. Sin embargo, puede comprobar el estado de su solicitud en línea a través del sitio web de Parivahan Sewa. </p>
|
47 |
-
<li><strong>¿Cuánto pago por transferir la propiedad de un vehículo? </ </strong></li>
|
48 |
-
<p>La tarifa para transferir la propiedad de un vehículo varía dependiendo de la ubicación de RTO, el tipo de vehículo, la edad del vehículo, etc. Puede consultar la tarifa exacta para su vehículo en el sitio web de Parivahan Sewa o ponerse en contacto con su RTO más cercano para obtener más detalles. En general, la tarifa varía de Rs. 200 a Rs. 500 para vehículos de dos ruedas y de Rs. 300 a Rs. 1000 para vehículos de cuatro ruedas. </p>
|
49 |
-
</ol></p> 64aa2da5cf<br />
|
50 |
-
<br />
|
51 |
-
<br />
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/__init__.py
DELETED
@@ -1,4 +0,0 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2022.12.07"
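As a brief, hedged note: the deleted vendored package exposes `where()` (the path to the bundled CA certificate file) and `contents()` (the PEM text of that bundle). A minimal sketch of typical use, assuming the vendored import path matches the file location shown in this diff:

# Illustrative only; the import path is assumed from the file's location in this diff.
from pip._vendor import certifi

print(certifi.where())          # filesystem path to the bundled cacert.pem
print(certifi.contents()[:64])  # first characters of the PEM bundle text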
spaces/CVH-vn1210/make_hair/minigpt4/models/eva_vit.py
DELETED
@@ -1,442 +0,0 @@
|
|
1 |
-
# Based on EVA, BEIT, timm and DeiT code bases
|
2 |
-
# https://github.com/baaivision/EVA
|
3 |
-
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
4 |
-
# https://github.com/microsoft/unilm/tree/master/beit
|
5 |
-
# https://github.com/facebookresearch/deit/
|
6 |
-
# https://github.com/facebookresearch/dino
|
7 |
-
# --------------------------------------------------------'
|
8 |
-
import math
|
9 |
-
from functools import partial
|
10 |
-
|
11 |
-
import torch
|
12 |
-
import torch.nn as nn
|
13 |
-
import torch.nn.functional as F
|
14 |
-
import torch.utils.checkpoint as checkpoint
|
15 |
-
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
|
16 |
-
from timm.models.registry import register_model
|
17 |
-
|
18 |
-
from minigpt4.common.dist_utils import download_cached_file
|
19 |
-
|
20 |
-
def _cfg(url='', **kwargs):
|
21 |
-
return {
|
22 |
-
'url': url,
|
23 |
-
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
|
24 |
-
'crop_pct': .9, 'interpolation': 'bicubic',
|
25 |
-
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
|
26 |
-
**kwargs
|
27 |
-
}
|
28 |
-
|
29 |
-
|
30 |
-
class DropPath(nn.Module):
|
31 |
-
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
32 |
-
"""
|
33 |
-
def __init__(self, drop_prob=None):
|
34 |
-
super(DropPath, self).__init__()
|
35 |
-
self.drop_prob = drop_prob
|
36 |
-
|
37 |
-
def forward(self, x):
|
38 |
-
return drop_path(x, self.drop_prob, self.training)
|
39 |
-
|
40 |
-
def extra_repr(self) -> str:
|
41 |
-
return 'p={}'.format(self.drop_prob)
|
42 |
-
|
43 |
-
|
44 |
-
class Mlp(nn.Module):
|
45 |
-
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
46 |
-
super().__init__()
|
47 |
-
out_features = out_features or in_features
|
48 |
-
hidden_features = hidden_features or in_features
|
49 |
-
self.fc1 = nn.Linear(in_features, hidden_features)
|
50 |
-
self.act = act_layer()
|
51 |
-
self.fc2 = nn.Linear(hidden_features, out_features)
|
52 |
-
self.drop = nn.Dropout(drop)
|
53 |
-
|
54 |
-
def forward(self, x):
|
55 |
-
x = self.fc1(x)
|
56 |
-
x = self.act(x)
|
57 |
-
# x = self.drop(x)
|
58 |
-
# commit this for the orignal BERT implement
|
59 |
-
x = self.fc2(x)
|
60 |
-
x = self.drop(x)
|
61 |
-
return x
|
62 |
-
|
63 |
-
|
64 |
-
class Attention(nn.Module):
|
65 |
-
def __init__(
|
66 |
-
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
|
67 |
-
proj_drop=0., window_size=None, attn_head_dim=None):
|
68 |
-
super().__init__()
|
69 |
-
self.num_heads = num_heads
|
70 |
-
head_dim = dim // num_heads
|
71 |
-
if attn_head_dim is not None:
|
72 |
-
head_dim = attn_head_dim
|
73 |
-
all_head_dim = head_dim * self.num_heads
|
74 |
-
self.scale = qk_scale or head_dim ** -0.5
|
75 |
-
|
76 |
-
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
|
77 |
-
if qkv_bias:
|
78 |
-
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
|
79 |
-
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
|
80 |
-
else:
|
81 |
-
self.q_bias = None
|
82 |
-
self.v_bias = None
|
83 |
-
|
84 |
-
if window_size:
|
85 |
-
self.window_size = window_size
|
86 |
-
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
87 |
-
self.relative_position_bias_table = nn.Parameter(
|
88 |
-
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
89 |
-
# cls to token & token 2 cls & cls to cls
|
90 |
-
|
91 |
-
# get pair-wise relative position index for each token inside the window
|
92 |
-
coords_h = torch.arange(window_size[0])
|
93 |
-
coords_w = torch.arange(window_size[1])
|
94 |
-
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
95 |
-
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
96 |
-
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
97 |
-
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
98 |
-
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
99 |
-
relative_coords[:, :, 1] += window_size[1] - 1
|
100 |
-
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
101 |
-
relative_position_index = \
|
102 |
-
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
|
103 |
-
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
104 |
-
relative_position_index[0, 0:] = self.num_relative_distance - 3
|
105 |
-
relative_position_index[0:, 0] = self.num_relative_distance - 2
|
106 |
-
relative_position_index[0, 0] = self.num_relative_distance - 1
|
107 |
-
|
108 |
-
self.register_buffer("relative_position_index", relative_position_index)
|
109 |
-
else:
|
110 |
-
self.window_size = None
|
111 |
-
self.relative_position_bias_table = None
|
112 |
-
self.relative_position_index = None
|
113 |
-
|
114 |
-
self.attn_drop = nn.Dropout(attn_drop)
|
115 |
-
self.proj = nn.Linear(all_head_dim, dim)
|
116 |
-
self.proj_drop = nn.Dropout(proj_drop)
|
117 |
-
|
118 |
-
def forward(self, x, rel_pos_bias=None):
|
119 |
-
B, N, C = x.shape
|
120 |
-
qkv_bias = None
|
121 |
-
if self.q_bias is not None:
|
122 |
-
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
|
123 |
-
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
124 |
-
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
|
125 |
-
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
126 |
-
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
127 |
-
|
128 |
-
q = q * self.scale
|
129 |
-
attn = (q @ k.transpose(-2, -1))
|
130 |
-
|
131 |
-
if self.relative_position_bias_table is not None:
|
132 |
-
relative_position_bias = \
|
133 |
-
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
134 |
-
self.window_size[0] * self.window_size[1] + 1,
|
135 |
-
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
|
136 |
-
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
137 |
-
attn = attn + relative_position_bias.unsqueeze(0)
|
138 |
-
|
139 |
-
if rel_pos_bias is not None:
|
140 |
-
attn = attn + rel_pos_bias
|
141 |
-
|
142 |
-
attn = attn.softmax(dim=-1)
|
143 |
-
attn = self.attn_drop(attn)
|
144 |
-
|
145 |
-
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
|
146 |
-
x = self.proj(x)
|
147 |
-
x = self.proj_drop(x)
|
148 |
-
return x
|
149 |
-
|
150 |
-
|
151 |
-
class Block(nn.Module):
|
152 |
-
|
153 |
-
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
154 |
-
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
|
155 |
-
window_size=None, attn_head_dim=None):
|
156 |
-
super().__init__()
|
157 |
-
self.norm1 = norm_layer(dim)
|
158 |
-
self.attn = Attention(
|
159 |
-
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
160 |
-
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
|
161 |
-
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
|
162 |
-
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
163 |
-
self.norm2 = norm_layer(dim)
|
164 |
-
mlp_hidden_dim = int(dim * mlp_ratio)
|
165 |
-
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
166 |
-
|
167 |
-
if init_values is not None and init_values > 0:
|
168 |
-
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
169 |
-
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
170 |
-
else:
|
171 |
-
self.gamma_1, self.gamma_2 = None, None
|
172 |
-
|
173 |
-
def forward(self, x, rel_pos_bias=None):
|
174 |
-
if self.gamma_1 is None:
|
175 |
-
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
|
176 |
-
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
177 |
-
else:
|
178 |
-
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
|
179 |
-
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
|
180 |
-
return x
|
181 |
-
|
182 |
-
|
183 |
-
class PatchEmbed(nn.Module):
|
184 |
-
""" Image to Patch Embedding
|
185 |
-
"""
|
186 |
-
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
|
187 |
-
super().__init__()
|
188 |
-
img_size = to_2tuple(img_size)
|
189 |
-
patch_size = to_2tuple(patch_size)
|
190 |
-
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
|
191 |
-
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
|
192 |
-
self.img_size = img_size
|
193 |
-
self.patch_size = patch_size
|
194 |
-
self.num_patches = num_patches
|
195 |
-
|
196 |
-
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
|
197 |
-
|
198 |
-
def forward(self, x, **kwargs):
|
199 |
-
B, C, H, W = x.shape
|
200 |
-
# FIXME look at relaxing size constraints
|
201 |
-
assert H == self.img_size[0] and W == self.img_size[1], \
|
202 |
-
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
203 |
-
x = self.proj(x).flatten(2).transpose(1, 2)
|
204 |
-
return x
|
205 |
-
|
206 |
-
|
207 |
-
class RelativePositionBias(nn.Module):
|
208 |
-
|
209 |
-
def __init__(self, window_size, num_heads):
|
210 |
-
super().__init__()
|
211 |
-
self.window_size = window_size
|
212 |
-
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
213 |
-
self.relative_position_bias_table = nn.Parameter(
|
214 |
-
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
215 |
-
# cls to token & token 2 cls & cls to cls
|
216 |
-
|
217 |
-
# get pair-wise relative position index for each token inside the window
|
218 |
-
coords_h = torch.arange(window_size[0])
|
219 |
-
coords_w = torch.arange(window_size[1])
|
220 |
-
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
221 |
-
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
222 |
-
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
223 |
-
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
224 |
-
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
225 |
-
relative_coords[:, :, 1] += window_size[1] - 1
|
226 |
-
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
227 |
-
relative_position_index = \
|
228 |
-
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
|
229 |
-
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
230 |
-
relative_position_index[0, 0:] = self.num_relative_distance - 3
|
231 |
-
relative_position_index[0:, 0] = self.num_relative_distance - 2
|
232 |
-
relative_position_index[0, 0] = self.num_relative_distance - 1
|
233 |
-
|
234 |
-
self.register_buffer("relative_position_index", relative_position_index)
|
235 |
-
|
236 |
-
# trunc_normal_(self.relative_position_bias_table, std=.02)
|
237 |
-
|
238 |
-
def forward(self):
|
239 |
-
relative_position_bias = \
|
240 |
-
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
241 |
-
self.window_size[0] * self.window_size[1] + 1,
|
242 |
-
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
|
243 |
-
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
244 |
-
|
245 |
-
|
246 |
-
class VisionTransformer(nn.Module):
|
247 |
-
""" Vision Transformer with support for patch or hybrid CNN input stage
|
248 |
-
"""
|
249 |
-
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
|
250 |
-
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
|
251 |
-
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
|
252 |
-
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
|
253 |
-
use_mean_pooling=True, init_scale=0.001, use_checkpoint=False):
|
254 |
-
super().__init__()
|
255 |
-
self.image_size = img_size
|
256 |
-
self.num_classes = num_classes
|
257 |
-
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
|
258 |
-
|
259 |
-
self.patch_embed = PatchEmbed(
|
260 |
-
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
|
261 |
-
num_patches = self.patch_embed.num_patches
|
262 |
-
|
263 |
-
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
264 |
-
if use_abs_pos_emb:
|
265 |
-
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
|
266 |
-
else:
|
267 |
-
self.pos_embed = None
|
268 |
-
self.pos_drop = nn.Dropout(p=drop_rate)
|
269 |
-
|
270 |
-
if use_shared_rel_pos_bias:
|
271 |
-
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
|
272 |
-
else:
|
273 |
-
self.rel_pos_bias = None
|
274 |
-
self.use_checkpoint = use_checkpoint
|
275 |
-
|
276 |
-
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
277 |
-
self.use_rel_pos_bias = use_rel_pos_bias
|
278 |
-
self.blocks = nn.ModuleList([
|
279 |
-
Block(
|
280 |
-
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
281 |
-
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
|
282 |
-
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
|
283 |
-
for i in range(depth)])
|
284 |
-
# self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
|
285 |
-
# self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
|
286 |
-
# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
|
287 |
-
|
288 |
-
if self.pos_embed is not None:
|
289 |
-
trunc_normal_(self.pos_embed, std=.02)
|
290 |
-
trunc_normal_(self.cls_token, std=.02)
|
291 |
-
# trunc_normal_(self.mask_token, std=.02)
|
292 |
-
# if isinstance(self.head, nn.Linear):
|
293 |
-
# trunc_normal_(self.head.weight, std=.02)
|
294 |
-
self.apply(self._init_weights)
|
295 |
-
self.fix_init_weight()
|
296 |
-
# if isinstance(self.head, nn.Linear):
|
297 |
-
# self.head.weight.data.mul_(init_scale)
|
298 |
-
# self.head.bias.data.mul_(init_scale)
|
299 |
-
|
300 |
-
def fix_init_weight(self):
|
301 |
-
def rescale(param, layer_id):
|
302 |
-
param.div_(math.sqrt(2.0 * layer_id))
|
303 |
-
|
304 |
-
for layer_id, layer in enumerate(self.blocks):
|
305 |
-
rescale(layer.attn.proj.weight.data, layer_id + 1)
|
306 |
-
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
|
307 |
-
|
308 |
-
def _init_weights(self, m):
|
309 |
-
if isinstance(m, nn.Linear):
|
310 |
-
trunc_normal_(m.weight, std=.02)
|
311 |
-
if isinstance(m, nn.Linear) and m.bias is not None:
|
312 |
-
nn.init.constant_(m.bias, 0)
|
313 |
-
elif isinstance(m, nn.LayerNorm):
|
314 |
-
nn.init.constant_(m.bias, 0)
|
315 |
-
nn.init.constant_(m.weight, 1.0)
|
316 |
-
|
317 |
-
def get_classifier(self):
|
318 |
-
return self.head
|
319 |
-
|
320 |
-
def reset_classifier(self, num_classes, global_pool=''):
|
321 |
-
self.num_classes = num_classes
|
322 |
-
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
|
323 |
-
|
324 |
-
def forward_features(self, x):
|
325 |
-
x = self.patch_embed(x)
|
326 |
-
batch_size, seq_len, _ = x.size()
|
327 |
-
|
328 |
-
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
329 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
330 |
-
if self.pos_embed is not None:
|
331 |
-
x = x + self.pos_embed
|
332 |
-
x = self.pos_drop(x)
|
333 |
-
|
334 |
-
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
|
335 |
-
for blk in self.blocks:
|
336 |
-
if self.use_checkpoint:
|
337 |
-
x = checkpoint.checkpoint(blk, x, rel_pos_bias)
|
338 |
-
else:
|
339 |
-
x = blk(x, rel_pos_bias)
|
340 |
-
return x
|
341 |
-
# x = self.norm(x)
|
342 |
-
|
343 |
-
# if self.fc_norm is not None:
|
344 |
-
# t = x[:, 1:, :]
|
345 |
-
# return self.fc_norm(t.mean(1))
|
346 |
-
# else:
|
347 |
-
# return x[:, 0]
|
348 |
-
|
349 |
-
def forward(self, x):
|
350 |
-
x = self.forward_features(x)
|
351 |
-
# x = self.head(x)
|
352 |
-
return x
|
353 |
-
|
354 |
-
def get_intermediate_layers(self, x):
|
355 |
-
x = self.patch_embed(x)
|
356 |
-
batch_size, seq_len, _ = x.size()
|
357 |
-
|
358 |
-
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
359 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
360 |
-
if self.pos_embed is not None:
|
361 |
-
x = x + self.pos_embed
|
362 |
-
x = self.pos_drop(x)
|
363 |
-
|
364 |
-
features = []
|
365 |
-
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
|
366 |
-
for blk in self.blocks:
|
367 |
-
x = blk(x, rel_pos_bias)
|
368 |
-
features.append(x)
|
369 |
-
|
370 |
-
return features
|
371 |
-
|
372 |
-
|
373 |
-
def interpolate_pos_embed(model, checkpoint_model):
|
374 |
-
if 'pos_embed' in checkpoint_model:
|
375 |
-
pos_embed_checkpoint = checkpoint_model['pos_embed'].float()
|
376 |
-
embedding_size = pos_embed_checkpoint.shape[-1]
|
377 |
-
num_patches = model.patch_embed.num_patches
|
378 |
-
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
|
379 |
-
# height (== width) for the checkpoint position embedding
|
380 |
-
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
|
381 |
-
# height (== width) for the new position embedding
|
382 |
-
new_size = int(num_patches ** 0.5)
|
383 |
-
# class_token and dist_token are kept unchanged
|
384 |
-
if orig_size != new_size:
|
385 |
-
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
|
386 |
-
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
|
387 |
-
# only the position tokens are interpolated
|
388 |
-
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
|
389 |
-
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
|
390 |
-
pos_tokens = torch.nn.functional.interpolate(
|
391 |
-
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
|
392 |
-
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
|
393 |
-
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
|
394 |
-
checkpoint_model['pos_embed'] = new_pos_embed
|
395 |
-
|
396 |
-
|
397 |
-
def convert_weights_to_fp16(model: nn.Module):
|
398 |
-
"""Convert applicable model parameters to fp16"""
|
399 |
-
|
400 |
-
def _convert_weights_to_fp16(l):
|
401 |
-
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
|
402 |
-
l.weight.data = l.weight.data.half()
|
403 |
-
if l.bias is not None:
|
404 |
-
l.bias.data = l.bias.data.half()
|
405 |
-
|
406 |
-
# if isinstance(l, (nn.MultiheadAttention, Attention)):
|
407 |
-
# for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
|
408 |
-
# tensor = getattr(l, attr)
|
409 |
-
# if tensor is not None:
|
410 |
-
# tensor.data = tensor.data.half()
|
411 |
-
|
412 |
-
model.apply(_convert_weights_to_fp16)
|
413 |
-
|
414 |
-
|
415 |
-
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"):
|
416 |
-
model = VisionTransformer(
|
417 |
-
img_size=img_size,
|
418 |
-
patch_size=14,
|
419 |
-
use_mean_pooling=False,
|
420 |
-
embed_dim=1408,
|
421 |
-
depth=39,
|
422 |
-
num_heads=1408//88,
|
423 |
-
mlp_ratio=4.3637,
|
424 |
-
qkv_bias=True,
|
425 |
-
drop_path_rate=drop_path_rate,
|
426 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
427 |
-
use_checkpoint=use_checkpoint,
|
428 |
-
)
|
429 |
-
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
|
430 |
-
cached_file = download_cached_file(
|
431 |
-
url, check_hash=False, progress=True
|
432 |
-
)
|
433 |
-
state_dict = torch.load(cached_file, map_location="cpu")
|
434 |
-
interpolate_pos_embed(model,state_dict)
|
435 |
-
|
436 |
-
incompatible_keys = model.load_state_dict(state_dict, strict=False)
|
437 |
-
# print(incompatible_keys)
|
438 |
-
|
439 |
-
if precision == "fp16":
|
440 |
-
# model.to("cuda")
|
441 |
-
convert_weights_to_fp16(model)
|
442 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/include/pybind11/attr.h
DELETED
@@ -1,528 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
pybind11/attr.h: Infrastructure for processing custom
|
3 |
-
type and function attributes
|
4 |
-
|
5 |
-
Copyright (c) 2016 Wenzel Jakob <[email protected]>
|
6 |
-
|
7 |
-
All rights reserved. Use of this source code is governed by a
|
8 |
-
BSD-style license that can be found in the LICENSE file.
|
9 |
-
*/
|
10 |
-
|
11 |
-
#pragma once
|
12 |
-
|
13 |
-
#include "cast.h"
|
14 |
-
|
15 |
-
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
|
16 |
-
|
17 |
-
/// \addtogroup annotations
|
18 |
-
/// @{
|
19 |
-
|
20 |
-
/// Annotation for methods
|
21 |
-
struct is_method { handle class_; is_method(const handle &c) : class_(c) { } };
|
22 |
-
|
23 |
-
/// Annotation for operators
|
24 |
-
struct is_operator { };
|
25 |
-
|
26 |
-
/// Annotation for classes that cannot be subclassed
|
27 |
-
struct is_final { };
|
28 |
-
|
29 |
-
/// Annotation for parent scope
|
30 |
-
struct scope { handle value; scope(const handle &s) : value(s) { } };
|
31 |
-
|
32 |
-
/// Annotation for documentation
|
33 |
-
struct doc { const char *value; doc(const char *value) : value(value) { } };
|
34 |
-
|
35 |
-
/// Annotation for function names
|
36 |
-
struct name { const char *value; name(const char *value) : value(value) { } };
|
37 |
-
|
38 |
-
/// Annotation indicating that a function is an overload associated with a given "sibling"
|
39 |
-
struct sibling { handle value; sibling(const handle &value) : value(value.ptr()) { } };
|
40 |
-
|
41 |
-
/// Annotation indicating that a class derives from another given type
|
42 |
-
template <typename T> struct base {
|
43 |
-
PYBIND11_DEPRECATED("base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
|
44 |
-
base() { }
|
45 |
-
};
|
46 |
-
|
47 |
-
/// Keep patient alive while nurse lives
|
48 |
-
template <size_t Nurse, size_t Patient> struct keep_alive { };
|
49 |
-
|
50 |
-
/// Annotation indicating that a class is involved in a multiple inheritance relationship
|
51 |
-
struct multiple_inheritance { };
|
52 |
-
|
53 |
-
/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
|
54 |
-
struct dynamic_attr { };
|
55 |
-
|
56 |
-
/// Annotation which enables the buffer protocol for a type
|
57 |
-
struct buffer_protocol { };
|
58 |
-
|
59 |
-
/// Annotation which requests that a special metaclass is created for a type
|
60 |
-
struct metaclass {
|
61 |
-
handle value;
|
62 |
-
|
63 |
-
PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
|
64 |
-
metaclass() {}
|
65 |
-
|
66 |
-
/// Override pybind11's default metaclass
|
67 |
-
explicit metaclass(handle value) : value(value) { }
|
68 |
-
};
|
69 |
-
|
70 |
-
/// Annotation that marks a class as local to the module:
|
71 |
-
struct module_local { const bool value; constexpr module_local(bool v = true) : value(v) { } };
|
72 |
-
|
73 |
-
/// Annotation to mark enums as an arithmetic type
|
74 |
-
struct arithmetic { };
|
75 |
-
|
76 |
-
/** \rst
|
77 |
-
A call policy which places one or more guard variables (``Ts...``) around the function call.
|
78 |
-
|
79 |
-
For example, this definition:
|
80 |
-
|
81 |
-
.. code-block:: cpp
|
82 |
-
|
83 |
-
m.def("foo", foo, py::call_guard<T>());
|
84 |
-
|
85 |
-
is equivalent to the following pseudocode:
|
86 |
-
|
87 |
-
.. code-block:: cpp
|
88 |
-
|
89 |
-
m.def("foo", [](args...) {
|
90 |
-
T scope_guard;
|
91 |
-
return foo(args...); // forwarded arguments
|
92 |
-
});
|
93 |
-
\endrst */
|
94 |
-
template <typename... Ts> struct call_guard;
|
95 |
-
|
96 |
-
template <> struct call_guard<> { using type = detail::void_type; };
|
97 |
-
|
98 |
-
template <typename T>
|
99 |
-
struct call_guard<T> {
|
100 |
-
static_assert(std::is_default_constructible<T>::value,
|
101 |
-
"The guard type must be default constructible");
|
102 |
-
|
103 |
-
using type = T;
|
104 |
-
};
|
105 |
-
|
106 |
-
template <typename T, typename... Ts>
|
107 |
-
struct call_guard<T, Ts...> {
|
108 |
-
struct type {
|
109 |
-
T guard{}; // Compose multiple guard types with left-to-right default-constructor order
|
110 |
-
typename call_guard<Ts...>::type next{};
|
111 |
-
};
|
112 |
-
};
|
113 |
-
|
114 |
-
/// @} annotations
|
115 |
-
|
116 |
-
PYBIND11_NAMESPACE_BEGIN(detail)
|
117 |
-
/* Forward declarations */
|
118 |
-
enum op_id : int;
|
119 |
-
enum op_type : int;
|
120 |
-
struct undefined_t;
|
121 |
-
template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t> struct op_;
|
122 |
-
inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
|
123 |
-
|
124 |
-
/// Internal data structure which holds metadata about a keyword argument
|
125 |
-
struct argument_record {
|
126 |
-
const char *name; ///< Argument name
|
127 |
-
const char *descr; ///< Human-readable version of the argument value
|
128 |
-
handle value; ///< Associated Python object
|
129 |
-
bool convert : 1; ///< True if the argument is allowed to convert when loading
|
130 |
-
bool none : 1; ///< True if None is allowed when loading
|
131 |
-
|
132 |
-
argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
|
133 |
-
: name(name), descr(descr), value(value), convert(convert), none(none) { }
|
134 |
-
};
|
135 |
-
|
136 |
-
/// Internal data structure which holds metadata about a bound function (signature, overloads, etc.)
|
137 |
-
struct function_record {
|
138 |
-
function_record()
|
139 |
-
: is_constructor(false), is_new_style_constructor(false), is_stateless(false),
|
140 |
-
is_operator(false), is_method(false),
|
141 |
-
has_args(false), has_kwargs(false), has_kwonly_args(false) { }
|
142 |
-
|
143 |
-
/// Function name
|
144 |
-
char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
|
145 |
-
|
146 |
-
// User-specified documentation string
|
147 |
-
char *doc = nullptr;
|
148 |
-
|
149 |
-
/// Human-readable version of the function signature
|
150 |
-
char *signature = nullptr;
|
151 |
-
|
152 |
-
/// List of registered keyword arguments
|
153 |
-
std::vector<argument_record> args;
|
154 |
-
|
155 |
-
/// Pointer to lambda function which converts arguments and performs the actual call
|
156 |
-
handle (*impl) (function_call &) = nullptr;
|
157 |
-
|
158 |
-
/// Storage for the wrapped function pointer and captured data, if any
|
159 |
-
void *data[3] = { };
|
160 |
-
|
161 |
-
/// Pointer to custom destructor for 'data' (if needed)
|
162 |
-
void (*free_data) (function_record *ptr) = nullptr;
|
163 |
-
|
164 |
-
/// Return value policy associated with this function
|
165 |
-
return_value_policy policy = return_value_policy::automatic;
|
166 |
-
|
167 |
-
/// True if name == '__init__'
|
168 |
-
bool is_constructor : 1;
|
169 |
-
|
170 |
-
/// True if this is a new-style `__init__` defined in `detail/init.h`
|
171 |
-
bool is_new_style_constructor : 1;
|
172 |
-
|
173 |
-
/// True if this is a stateless function pointer
|
174 |
-
bool is_stateless : 1;
|
175 |
-
|
176 |
-
/// True if this is an operator (__add__), etc.
|
177 |
-
bool is_operator : 1;
|
178 |
-
|
179 |
-
/// True if this is a method
|
180 |
-
bool is_method : 1;
|
181 |
-
|
182 |
-
/// True if the function has a '*args' argument
|
183 |
-
bool has_args : 1;
|
184 |
-
|
185 |
-
/// True if the function has a '**kwargs' argument
|
186 |
-
bool has_kwargs : 1;
|
187 |
-
|
188 |
-
/// True once a 'py::kwonly' is encountered (any following args are keyword-only)
|
189 |
-
bool has_kwonly_args : 1;
|
190 |
-
|
191 |
-
/// Number of arguments (including py::args and/or py::kwargs, if present)
|
192 |
-
std::uint16_t nargs;
|
193 |
-
|
194 |
-
/// Number of trailing arguments (counted in `nargs`) that are keyword-only
|
195 |
-
std::uint16_t nargs_kwonly = 0;
|
196 |
-
|
197 |
-
/// Python method object
|
198 |
-
PyMethodDef *def = nullptr;
|
199 |
-
|
200 |
-
/// Python handle to the parent scope (a class or a module)
|
201 |
-
handle scope;
|
202 |
-
|
203 |
-
/// Python handle to the sibling function representing an overload chain
|
204 |
-
handle sibling;
|
205 |
-
|
206 |
-
/// Pointer to next overload
|
207 |
-
function_record *next = nullptr;
|
208 |
-
};
|
209 |
-
|
210 |
-
/// Special data structure which (temporarily) holds metadata about a bound class
|
211 |
-
struct type_record {
|
212 |
-
PYBIND11_NOINLINE type_record()
|
213 |
-
: multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
|
214 |
-
default_holder(true), module_local(false), is_final(false) { }
|
215 |
-
|
216 |
-
/// Handle to the parent scope
|
217 |
-
handle scope;
|
218 |
-
|
219 |
-
/// Name of the class
|
220 |
-
const char *name = nullptr;
|
221 |
-
|
222 |
-
// Pointer to RTTI type_info data structure
|
223 |
-
const std::type_info *type = nullptr;
|
224 |
-
|
225 |
-
/// How large is the underlying C++ type?
|
226 |
-
size_t type_size = 0;
|
227 |
-
|
228 |
-
/// What is the alignment of the underlying C++ type?
|
229 |
-
size_t type_align = 0;
|
230 |
-
|
231 |
-
/// How large is the type's holder?
|
232 |
-
size_t holder_size = 0;
|
233 |
-
|
234 |
-
/// The global operator new can be overridden with a class-specific variant
|
235 |
-
void *(*operator_new)(size_t) = nullptr;
|
236 |
-
|
237 |
-
/// Function pointer to class_<..>::init_instance
|
238 |
-
void (*init_instance)(instance *, const void *) = nullptr;
|
239 |
-
|
240 |
-
/// Function pointer to class_<..>::dealloc
|
241 |
-
void (*dealloc)(detail::value_and_holder &) = nullptr;
|
242 |
-
|
243 |
-
/// List of base classes of the newly created type
|
244 |
-
list bases;
|
245 |
-
|
246 |
-
/// Optional docstring
|
247 |
-
const char *doc = nullptr;
|
248 |
-
|
249 |
-
/// Custom metaclass (optional)
|
250 |
-
handle metaclass;
|
251 |
-
|
252 |
-
/// Multiple inheritance marker
|
253 |
-
bool multiple_inheritance : 1;
|
254 |
-
|
255 |
-
/// Does the class manage a __dict__?
|
256 |
-
bool dynamic_attr : 1;
|
257 |
-
|
258 |
-
/// Does the class implement the buffer protocol?
|
259 |
-
bool buffer_protocol : 1;
|
260 |
-
|
261 |
-
/// Is the default (unique_ptr) holder type used?
|
262 |
-
bool default_holder : 1;
|
263 |
-
|
264 |
-
/// Is the class definition local to the module shared object?
|
265 |
-
bool module_local : 1;
|
266 |
-
|
267 |
-
/// Is the class inheritable from python classes?
|
268 |
-
bool is_final : 1;
|
269 |
-
|
270 |
-
PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) {
|
271 |
-
auto base_info = detail::get_type_info(base, false);
|
272 |
-
if (!base_info) {
|
273 |
-
std::string tname(base.name());
|
274 |
-
detail::clean_type_id(tname);
|
275 |
-
pybind11_fail("generic_type: type \"" + std::string(name) +
|
276 |
-
"\" referenced unknown base type \"" + tname + "\"");
|
277 |
-
}
|
278 |
-
|
279 |
-
if (default_holder != base_info->default_holder) {
|
280 |
-
std::string tname(base.name());
|
281 |
-
detail::clean_type_id(tname);
|
282 |
-
pybind11_fail("generic_type: type \"" + std::string(name) + "\" " +
|
283 |
-
(default_holder ? "does not have" : "has") +
|
284 |
-
" a non-default holder type while its base \"" + tname + "\" " +
|
285 |
-
(base_info->default_holder ? "does not" : "does"));
|
286 |
-
}
|
287 |
-
|
288 |
-
bases.append((PyObject *) base_info->type);
|
289 |
-
|
290 |
-
if (base_info->type->tp_dictoffset != 0)
|
291 |
-
dynamic_attr = true;
|
292 |
-
|
293 |
-
if (caster)
|
294 |
-
base_info->implicit_casts.emplace_back(type, caster);
|
295 |
-
}
|
296 |
-
};
|
297 |
-
|
298 |
-
inline function_call::function_call(const function_record &f, handle p) :
|
299 |
-
func(f), parent(p) {
|
300 |
-
args.reserve(f.nargs);
|
301 |
-
args_convert.reserve(f.nargs);
|
302 |
-
}
|
303 |
-
|
304 |
-
/// Tag for a new-style `__init__` defined in `detail/init.h`
|
305 |
-
struct is_new_style_constructor { };
|
306 |
-
|
307 |
-
/**
|
308 |
-
* Partial template specializations to process custom attributes provided to
|
309 |
-
* cpp_function_ and class_. These are either used to initialize the respective
|
310 |
-
* fields in the type_record and function_record data structures or executed at
|
311 |
-
* runtime to deal with custom call policies (e.g. keep_alive).
|
312 |
-
*/
|
313 |
-
template <typename T, typename SFINAE = void> struct process_attribute;
|
314 |
-
|
315 |
-
template <typename T> struct process_attribute_default {
|
316 |
-
/// Default implementation: do nothing
|
317 |
-
static void init(const T &, function_record *) { }
|
318 |
-
static void init(const T &, type_record *) { }
|
319 |
-
static void precall(function_call &) { }
|
320 |
-
static void postcall(function_call &, handle) { }
|
321 |
-
};
|
322 |
-
|
323 |
-
/// Process an attribute specifying the function's name
|
324 |
-
template <> struct process_attribute<name> : process_attribute_default<name> {
|
325 |
-
static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
|
326 |
-
};
|
327 |
-
|
328 |
-
/// Process an attribute specifying the function's docstring
|
329 |
-
template <> struct process_attribute<doc> : process_attribute_default<doc> {
|
330 |
-
static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
|
331 |
-
};
|
332 |
-
|
333 |
-
/// Process an attribute specifying the function's docstring (provided as a C-style string)
|
334 |
-
template <> struct process_attribute<const char *> : process_attribute_default<const char *> {
|
335 |
-
static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
|
336 |
-
static void init(const char *d, type_record *r) { r->doc = const_cast<char *>(d); }
|
337 |
-
};
|
338 |
-
template <> struct process_attribute<char *> : process_attribute<const char *> { };
|
339 |
-
|
340 |
-
/// Process an attribute indicating the function's return value policy
|
341 |
-
template <> struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
|
342 |
-
static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
|
343 |
-
};
|
344 |
-
|
345 |
-
/// Process an attribute which indicates that this is an overloaded function associated with a given sibling
|
346 |
-
template <> struct process_attribute<sibling> : process_attribute_default<sibling> {
|
347 |
-
static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
|
348 |
-
};
|
349 |
-
|
350 |
-
/// Process an attribute which indicates that this function is a method
|
351 |
-
template <> struct process_attribute<is_method> : process_attribute_default<is_method> {
|
352 |
-
static void init(const is_method &s, function_record *r) { r->is_method = true; r->scope = s.class_; }
|
353 |
-
};
|
354 |
-
|
355 |
-
/// Process an attribute which indicates the parent scope of a method
|
356 |
-
template <> struct process_attribute<scope> : process_attribute_default<scope> {
|
357 |
-
static void init(const scope &s, function_record *r) { r->scope = s.value; }
|
358 |
-
};
|
359 |
-
|
360 |
-
/// Process an attribute which indicates that this function is an operator
|
361 |
-
template <> struct process_attribute<is_operator> : process_attribute_default<is_operator> {
|
362 |
-
static void init(const is_operator &, function_record *r) { r->is_operator = true; }
|
363 |
-
};
|
364 |
-
|
365 |
-
template <> struct process_attribute<is_new_style_constructor> : process_attribute_default<is_new_style_constructor> {
|
366 |
-
static void init(const is_new_style_constructor &, function_record *r) { r->is_new_style_constructor = true; }
|
367 |
-
};
|
368 |
-
|
369 |
-
inline void process_kwonly_arg(const arg &a, function_record *r) {
|
370 |
-
if (!a.name || strlen(a.name) == 0)
|
371 |
-
pybind11_fail("arg(): cannot specify an unnamed argument after an kwonly() annotation");
|
372 |
-
++r->nargs_kwonly;
|
373 |
-
}
|
374 |
-
|
375 |
-
/// Process a keyword argument attribute (*without* a default value)
|
376 |
-
template <> struct process_attribute<arg> : process_attribute_default<arg> {
|
377 |
-
static void init(const arg &a, function_record *r) {
|
378 |
-
if (r->is_method && r->args.empty())
|
379 |
-
r->args.emplace_back("self", nullptr, handle(), true /*convert*/, false /*none not allowed*/);
|
380 |
-
r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
|
381 |
-
|
382 |
-
if (r->has_kwonly_args) process_kwonly_arg(a, r);
|
383 |
-
}
|
384 |
-
};
|
385 |
-
|
386 |
-
/// Process a keyword argument attribute (*with* a default value)
|
387 |
-
template <> struct process_attribute<arg_v> : process_attribute_default<arg_v> {
|
388 |
-
static void init(const arg_v &a, function_record *r) {
|
389 |
-
if (r->is_method && r->args.empty())
|
390 |
-
r->args.emplace_back("self", nullptr /*descr*/, handle() /*parent*/, true /*convert*/, false /*none not allowed*/);
|
391 |
-
|
392 |
-
if (!a.value) {
|
393 |
-
#if !defined(NDEBUG)
|
394 |
-
std::string descr("'");
|
395 |
-
if (a.name) descr += std::string(a.name) + ": ";
|
396 |
-
descr += a.type + "'";
|
397 |
-
if (r->is_method) {
|
398 |
-
if (r->name)
|
399 |
-
descr += " in method '" + (std::string) str(r->scope) + "." + (std::string) r->name + "'";
|
400 |
-
else
|
401 |
-
descr += " in method of '" + (std::string) str(r->scope) + "'";
|
402 |
-
} else if (r->name) {
|
403 |
-
descr += " in function '" + (std::string) r->name + "'";
|
404 |
-
}
|
405 |
-
pybind11_fail("arg(): could not convert default argument "
|
406 |
-
+ descr + " into a Python object (type not registered yet?)");
|
407 |
-
#else
|
408 |
-
pybind11_fail("arg(): could not convert default argument "
|
409 |
-
"into a Python object (type not registered yet?). "
|
410 |
-
"Compile in debug mode for more information.");
|
411 |
-
#endif
|
412 |
-
}
|
413 |
-
r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
|
414 |
-
|
415 |
-
if (r->has_kwonly_args) process_kwonly_arg(a, r);
|
416 |
-
}
|
417 |
-
};
|
418 |
-
|
419 |
-
/// Process a keyword-only-arguments-follow pseudo argument
|
420 |
-
template <> struct process_attribute<kwonly> : process_attribute_default<kwonly> {
|
421 |
-
static void init(const kwonly &, function_record *r) {
|
422 |
-
r->has_kwonly_args = true;
|
423 |
-
}
|
424 |
-
};
|
425 |
-
|
426 |
-
/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees that)
|
427 |
-
template <typename T>
|
428 |
-
struct process_attribute<T, enable_if_t<is_pyobject<T>::value>> : process_attribute_default<handle> {
|
429 |
-
static void init(const handle &h, type_record *r) { r->bases.append(h); }
|
430 |
-
};
|
431 |
-
|
432 |
-
/// Process a parent class attribute (deprecated, does not support multiple inheritance)
|
433 |
-
template <typename T>
|
434 |
-
struct process_attribute<base<T>> : process_attribute_default<base<T>> {
|
435 |
-
static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
|
436 |
-
};
|
437 |
-
|
438 |
-
/// Process a multiple inheritance attribute
|
439 |
-
template <>
|
440 |
-
struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
|
441 |
-
static void init(const multiple_inheritance &, type_record *r) { r->multiple_inheritance = true; }
|
442 |
-
};
|
443 |
-
|
444 |
-
template <>
|
445 |
-
struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
|
446 |
-
static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
|
447 |
-
};
|
448 |
-
|
449 |
-
template <>
|
450 |
-
struct process_attribute<is_final> : process_attribute_default<is_final> {
|
451 |
-
static void init(const is_final &, type_record *r) { r->is_final = true; }
|
452 |
-
};
|
453 |
-
|
454 |
-
template <>
|
455 |
-
struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
|
456 |
-
static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
|
457 |
-
};
|
458 |
-
|
459 |
-
template <>
|
460 |
-
struct process_attribute<metaclass> : process_attribute_default<metaclass> {
|
461 |
-
static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
|
462 |
-
};
|
463 |
-
|
464 |
-
template <>
|
465 |
-
struct process_attribute<module_local> : process_attribute_default<module_local> {
|
466 |
-
static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
|
467 |
-
};
|
468 |
-
|
469 |
-
/// Process an 'arithmetic' attribute for enums (does nothing here)
|
470 |
-
template <>
|
471 |
-
struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
|
472 |
-
|
473 |
-
template <typename... Ts>
|
474 |
-
struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> { };
|
475 |
-
|
476 |
-
/**
|
477 |
-
* Process a keep_alive call policy -- invokes keep_alive_impl during the
|
478 |
-
* pre-call handler if both Nurse, Patient != 0 and use the post-call handler
|
479 |
-
* otherwise
|
480 |
-
*/
|
481 |
-
template <size_t Nurse, size_t Patient> struct process_attribute<keep_alive<Nurse, Patient>> : public process_attribute_default<keep_alive<Nurse, Patient>> {
|
482 |
-
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
|
483 |
-
static void precall(function_call &call) { keep_alive_impl(Nurse, Patient, call, handle()); }
|
484 |
-
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
|
485 |
-
static void postcall(function_call &, handle) { }
|
486 |
-
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
|
487 |
-
static void precall(function_call &) { }
|
488 |
-
template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
|
489 |
-
static void postcall(function_call &call, handle ret) { keep_alive_impl(Nurse, Patient, call, ret); }
|
490 |
-
};
|
491 |
-
|
492 |
-
/// Recursively iterate over variadic template arguments
|
493 |
-
template <typename... Args> struct process_attributes {
|
494 |
-
static void init(const Args&... args, function_record *r) {
|
495 |
-
int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };
|
496 |
-
ignore_unused(unused);
|
497 |
-
}
|
498 |
-
static void init(const Args&... args, type_record *r) {
|
499 |
-
int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };
|
500 |
-
ignore_unused(unused);
|
501 |
-
}
|
502 |
-
static void precall(function_call &call) {
|
503 |
-
int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::precall(call), 0) ... };
|
504 |
-
ignore_unused(unused);
|
505 |
-
}
|
506 |
-
static void postcall(function_call &call, handle fn_ret) {
|
507 |
-
int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0) ... };
|
508 |
-
ignore_unused(unused);
|
509 |
-
}
|
510 |
-
};
|
511 |
-
|
512 |
-
template <typename T>
|
513 |
-
using is_call_guard = is_instantiation<call_guard, T>;
|
514 |
-
|
515 |
-
/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
|
516 |
-
template <typename... Extra>
|
517 |
-
using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
|
518 |
-
|
519 |
-
/// Check the number of named arguments at compile time
|
520 |
-
template <typename... Extra,
|
521 |
-
size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
|
522 |
-
size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
|
523 |
-
constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
|
524 |
-
return named == 0 || (self + named + has_args + has_kwargs) == nargs;
|
525 |
-
}
|
526 |
-
|
527 |
-
PYBIND11_NAMESPACE_END(detail)
|
528 |
-
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/allocator/no_throw_allocator.h
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
namespace thrust
|
22 |
-
{
|
23 |
-
namespace detail
|
24 |
-
{
|
25 |
-
|
26 |
-
template<typename BaseAllocator>
|
27 |
-
struct no_throw_allocator : BaseAllocator
|
28 |
-
{
|
29 |
-
private:
|
30 |
-
typedef BaseAllocator super_t;
|
31 |
-
|
32 |
-
public:
|
33 |
-
inline __host__ __device__
|
34 |
-
no_throw_allocator(const BaseAllocator &other = BaseAllocator())
|
35 |
-
: super_t(other)
|
36 |
-
{}
|
37 |
-
|
38 |
-
template<typename U>
|
39 |
-
struct rebind
|
40 |
-
{
|
41 |
-
typedef no_throw_allocator<typename super_t::template rebind<U>::other> other;
|
42 |
-
}; // end rebind
|
43 |
-
|
44 |
-
__host__ __device__
|
45 |
-
void deallocate(typename super_t::pointer p, typename super_t::size_type n)
|
46 |
-
{
|
47 |
-
#ifndef __CUDA_ARCH__
|
48 |
-
try
|
49 |
-
{
|
50 |
-
super_t::deallocate(p, n);
|
51 |
-
} // end try
|
52 |
-
catch(...)
|
53 |
-
{
|
54 |
-
// catch anything
|
55 |
-
} // end catch
|
56 |
-
#else
|
57 |
-
super_t::deallocate(p, n);
|
58 |
-
#endif
|
59 |
-
} // end deallocate()
|
60 |
-
|
61 |
-
inline __host__ __device__
|
62 |
-
bool operator==(no_throw_allocator const &other) { return super_t::operator==(other); }
|
63 |
-
|
64 |
-
inline __host__ __device__
|
65 |
-
bool operator!=(no_throw_allocator const &other) { return super_t::operator!=(other); }
|
66 |
-
}; // end no_throw_allocator
|
67 |
-
|
68 |
-
} // end detail
|
69 |
-
} // end thrust
|
70 |
-
|
71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/mask/__init__.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
from .mask_target import mask_target
|
2 |
-
from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
|
3 |
-
from .utils import encode_mask_results, split_combined_polys
|
4 |
-
|
5 |
-
__all__ = [
|
6 |
-
'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
|
7 |
-
'PolygonMasks', 'encode_mask_results'
|
8 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/mask/mask_target.py
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
from torch.nn.modules.utils import _pair
|
4 |
-
|
5 |
-
|
6 |
-
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
|
7 |
-
cfg):
|
8 |
-
"""Compute mask target for positive proposals in multiple images.
|
9 |
-
|
10 |
-
Args:
|
11 |
-
pos_proposals_list (list[Tensor]): Positive proposals in multiple
|
12 |
-
images.
|
13 |
-
pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
|
14 |
-
positive proposals.
|
15 |
-
gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
|
16 |
-
each image.
|
17 |
-
cfg (dict): Config dict that specifies the mask size.
|
18 |
-
|
19 |
-
Returns:
|
20 |
-
list[Tensor]: Mask target of each image.
|
21 |
-
|
22 |
-
Example:
|
23 |
-
>>> import mmcv
|
24 |
-
>>> import mmdet
|
25 |
-
>>> from mmdet.core.mask import BitmapMasks
|
26 |
-
>>> from mmdet.core.mask.mask_target import *
|
27 |
-
>>> H, W = 17, 18
|
28 |
-
>>> cfg = mmcv.Config({'mask_size': (13, 14)})
|
29 |
-
>>> rng = np.random.RandomState(0)
|
30 |
-
>>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
|
31 |
-
>>> pos_proposals_list = [
|
32 |
-
>>> torch.Tensor([
|
33 |
-
>>> [ 7.2425, 5.5929, 13.9414, 14.9541],
|
34 |
-
>>> [ 7.3241, 3.6170, 16.3850, 15.3102],
|
35 |
-
>>> ]),
|
36 |
-
>>> torch.Tensor([
|
37 |
-
>>> [ 4.8448, 6.4010, 7.0314, 9.7681],
|
38 |
-
>>> [ 5.9790, 2.6989, 7.4416, 4.8580],
|
39 |
-
>>> [ 0.0000, 0.0000, 0.1398, 9.8232],
|
40 |
-
>>> ]),
|
41 |
-
>>> ]
|
42 |
-
>>> # Corresponding class index for each proposal for each image
|
43 |
-
>>> pos_assigned_gt_inds_list = [
|
44 |
-
>>> torch.LongTensor([7, 0]),
|
45 |
-
>>> torch.LongTensor([5, 4, 1]),
|
46 |
-
>>> ]
|
47 |
-
>>> # Ground truth mask for each true object for each image
|
48 |
-
>>> gt_masks_list = [
|
49 |
-
>>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
|
50 |
-
>>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
|
51 |
-
>>> ]
|
52 |
-
>>> mask_targets = mask_target(
|
53 |
-
>>> pos_proposals_list, pos_assigned_gt_inds_list,
|
54 |
-
>>> gt_masks_list, cfg)
|
55 |
-
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
|
56 |
-
"""
|
57 |
-
cfg_list = [cfg for _ in range(len(pos_proposals_list))]
|
58 |
-
mask_targets = map(mask_target_single, pos_proposals_list,
|
59 |
-
pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
|
60 |
-
mask_targets = list(mask_targets)
|
61 |
-
if len(mask_targets) > 0:
|
62 |
-
mask_targets = torch.cat(mask_targets)
|
63 |
-
return mask_targets
|
64 |
-
|
65 |
-
|
66 |
-
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
|
67 |
-
"""Compute mask target for each positive proposal in the image.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
pos_proposals (Tensor): Positive proposals.
|
71 |
-
pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
|
72 |
-
gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
|
73 |
-
or Polygon.
|
74 |
-
cfg (dict): Config dict that indicate the mask size.
|
75 |
-
|
76 |
-
Returns:
|
77 |
-
Tensor: Mask target of each positive proposals in the image.
|
78 |
-
|
79 |
-
Example:
|
80 |
-
>>> import mmcv
|
81 |
-
>>> import mmdet
|
82 |
-
>>> from mmdet.core.mask import BitmapMasks
|
83 |
-
>>> from mmdet.core.mask.mask_target import * # NOQA
|
84 |
-
>>> H, W = 32, 32
|
85 |
-
>>> cfg = mmcv.Config({'mask_size': (7, 11)})
|
86 |
-
>>> rng = np.random.RandomState(0)
|
87 |
-
>>> # Masks for each ground truth box (relative to the image)
|
88 |
-
>>> gt_masks_data = rng.rand(3, H, W)
|
89 |
-
>>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
|
90 |
-
>>> # Predicted positive boxes in one image
|
91 |
-
>>> pos_proposals = torch.FloatTensor([
|
92 |
-
>>> [ 16.2, 5.5, 19.9, 20.9],
|
93 |
-
>>> [ 17.3, 13.6, 19.3, 19.3],
|
94 |
-
>>> [ 14.8, 16.4, 17.0, 23.7],
|
95 |
-
>>> [ 0.0, 0.0, 16.0, 16.0],
|
96 |
-
>>> [ 4.0, 0.0, 20.0, 16.0],
|
97 |
-
>>> ])
|
98 |
-
>>> # For each predicted proposal, its assignment to a gt mask
|
99 |
-
>>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
|
100 |
-
>>> mask_targets = mask_target_single(
|
101 |
-
>>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
|
102 |
-
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
|
103 |
-
"""
|
104 |
-
device = pos_proposals.device
|
105 |
-
mask_size = _pair(cfg.mask_size)
|
106 |
-
num_pos = pos_proposals.size(0)
|
107 |
-
if num_pos > 0:
|
108 |
-
proposals_np = pos_proposals.cpu().numpy()
|
109 |
-
maxh, maxw = gt_masks.height, gt_masks.width
|
110 |
-
proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
|
111 |
-
proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
|
112 |
-
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
|
113 |
-
|
114 |
-
mask_targets = gt_masks.crop_and_resize(
|
115 |
-
proposals_np, mask_size, device=device,
|
116 |
-
inds=pos_assigned_gt_inds).to_ndarray()
|
117 |
-
|
118 |
-
mask_targets = torch.from_numpy(mask_targets).float().to(device)
|
119 |
-
else:
|
120 |
-
mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
|
121 |
-
|
122 |
-
return mask_targets
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/data/datasets/cityscapes_panoptic.py
DELETED
@@ -1,187 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import json
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
|
6 |
-
from detectron2.data import DatasetCatalog, MetadataCatalog
|
7 |
-
from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
|
8 |
-
from detectron2.utils.file_io import PathManager
|
9 |
-
|
10 |
-
"""
|
11 |
-
This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
|
12 |
-
"""
|
13 |
-
|
14 |
-
|
15 |
-
logger = logging.getLogger(__name__)
|
16 |
-
|
17 |
-
|
18 |
-
def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
|
19 |
-
files = []
|
20 |
-
# scan through the directory
|
21 |
-
cities = PathManager.ls(image_dir)
|
22 |
-
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
|
23 |
-
image_dict = {}
|
24 |
-
for city in cities:
|
25 |
-
city_img_dir = os.path.join(image_dir, city)
|
26 |
-
for basename in PathManager.ls(city_img_dir):
|
27 |
-
image_file = os.path.join(city_img_dir, basename)
|
28 |
-
|
29 |
-
suffix = "_leftImg8bit.png"
|
30 |
-
assert basename.endswith(suffix), basename
|
31 |
-
basename = os.path.basename(basename)[: -len(suffix)]
|
32 |
-
|
33 |
-
image_dict[basename] = image_file
|
34 |
-
|
35 |
-
for ann in json_info["annotations"]:
|
36 |
-
image_file = image_dict.get(ann["image_id"], None)
|
37 |
-
assert image_file is not None, "No image {} found for annotation {}".format(
|
38 |
-
ann["image_id"], ann["file_name"]
|
39 |
-
)
|
40 |
-
label_file = os.path.join(gt_dir, ann["file_name"])
|
41 |
-
segments_info = ann["segments_info"]
|
42 |
-
|
43 |
-
files.append((image_file, label_file, segments_info))
|
44 |
-
|
45 |
-
assert len(files), "No images found in {}".format(image_dir)
|
46 |
-
assert PathManager.isfile(files[0][0]), files[0][0]
|
47 |
-
assert PathManager.isfile(files[0][1]), files[0][1]
|
48 |
-
return files
|
49 |
-
|
50 |
-
|
51 |
-
def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
|
52 |
-
"""
|
53 |
-
Args:
|
54 |
-
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
|
55 |
-
gt_dir (str): path to the raw annotations. e.g.,
|
56 |
-
"~/cityscapes/gtFine/cityscapes_panoptic_train".
|
57 |
-
gt_json (str): path to the json file. e.g.,
|
58 |
-
"~/cityscapes/gtFine/cityscapes_panoptic_train.json".
|
59 |
-
meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
|
60 |
-
and "stuff_dataset_id_to_contiguous_id" to map category ids to
|
61 |
-
contiguous ids for training.
|
62 |
-
|
63 |
-
Returns:
|
64 |
-
list[dict]: a list of dicts in Detectron2 standard format. (See
|
65 |
-
`Using Custom Datasets </tutorials/datasets.html>`_ )
|
66 |
-
"""
|
67 |
-
|
68 |
-
def _convert_category_id(segment_info, meta):
|
69 |
-
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
|
70 |
-
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
|
71 |
-
segment_info["category_id"]
|
72 |
-
]
|
73 |
-
else:
|
74 |
-
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
|
75 |
-
segment_info["category_id"]
|
76 |
-
]
|
77 |
-
return segment_info
|
78 |
-
|
79 |
-
assert os.path.exists(
|
80 |
-
gt_json
|
81 |
-
), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
|
82 |
-
with open(gt_json) as f:
|
83 |
-
json_info = json.load(f)
|
84 |
-
files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
|
85 |
-
ret = []
|
86 |
-
for image_file, label_file, segments_info in files:
|
87 |
-
sem_label_file = (
|
88 |
-
image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
|
89 |
-
)
|
90 |
-
segments_info = [_convert_category_id(x, meta) for x in segments_info]
|
91 |
-
ret.append(
|
92 |
-
{
|
93 |
-
"file_name": image_file,
|
94 |
-
"image_id": "_".join(
|
95 |
-
os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
|
96 |
-
),
|
97 |
-
"sem_seg_file_name": sem_label_file,
|
98 |
-
"pan_seg_file_name": label_file,
|
99 |
-
"segments_info": segments_info,
|
100 |
-
}
|
101 |
-
)
|
102 |
-
assert len(ret), f"No images found in {image_dir}!"
|
103 |
-
assert PathManager.isfile(
|
104 |
-
ret[0]["sem_seg_file_name"]
|
105 |
-
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
|
106 |
-
assert PathManager.isfile(
|
107 |
-
ret[0]["pan_seg_file_name"]
|
108 |
-
), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
|
109 |
-
return ret
|
110 |
-
|
111 |
-
|
112 |
-
_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
|
113 |
-
"cityscapes_fine_panoptic_train": (
|
114 |
-
"cityscapes/leftImg8bit/train",
|
115 |
-
"cityscapes/gtFine/cityscapes_panoptic_train",
|
116 |
-
"cityscapes/gtFine/cityscapes_panoptic_train.json",
|
117 |
-
),
|
118 |
-
"cityscapes_fine_panoptic_val": (
|
119 |
-
"cityscapes/leftImg8bit/val",
|
120 |
-
"cityscapes/gtFine/cityscapes_panoptic_val",
|
121 |
-
"cityscapes/gtFine/cityscapes_panoptic_val.json",
|
122 |
-
),
|
123 |
-
# "cityscapes_fine_panoptic_test": not supported yet
|
124 |
-
}
|
125 |
-
|
126 |
-
|
127 |
-
def register_all_cityscapes_panoptic(root):
|
128 |
-
meta = {}
|
129 |
-
# The following metadata maps contiguous id from [0, #thing categories +
|
130 |
-
# #stuff categories) to their names and colors. We have to replica of the
|
131 |
-
# same name and color under "thing_*" and "stuff_*" because the current
|
132 |
-
# visualization function in D2 handles thing and class classes differently
|
133 |
-
# due to some heuristic used in Panoptic FPN. We keep the same naming to
|
134 |
-
# enable reusing existing visualization functions.
|
135 |
-
thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
|
136 |
-
thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
|
137 |
-
stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
|
138 |
-
stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
|
139 |
-
|
140 |
-
meta["thing_classes"] = thing_classes
|
141 |
-
meta["thing_colors"] = thing_colors
|
142 |
-
meta["stuff_classes"] = stuff_classes
|
143 |
-
meta["stuff_colors"] = stuff_colors
|
144 |
-
|
145 |
-
# There are three types of ids in cityscapes panoptic segmentation:
|
146 |
-
# (1) category id: like semantic segmentation, it is the class id for each
|
147 |
-
# pixel. Since there are some classes not used in evaluation, the category
|
148 |
-
# id is not always contiguous and thus we have two set of category ids:
|
149 |
-
# - original category id: category id in the original dataset, mainly
|
150 |
-
# used for evaluation.
|
151 |
-
# - contiguous category id: [0, #classes), in order to train the classifier
|
152 |
-
# (2) instance id: this id is used to differentiate different instances from
|
153 |
-
# the same category. For "stuff" classes, the instance id is always 0; for
|
154 |
-
# "thing" classes, the instance id starts from 1 and 0 is reserved for
|
155 |
-
# ignored instances (e.g. crowd annotation).
|
156 |
-
# (3) panoptic id: this is the compact id that encode both category and
|
157 |
-
# instance id by: category_id * 1000 + instance_id.
|
158 |
-
thing_dataset_id_to_contiguous_id = {}
|
159 |
-
stuff_dataset_id_to_contiguous_id = {}
|
160 |
-
|
161 |
-
for k in CITYSCAPES_CATEGORIES:
|
162 |
-
if k["isthing"] == 1:
|
163 |
-
thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
|
164 |
-
else:
|
165 |
-
stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
|
166 |
-
|
167 |
-
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
|
168 |
-
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
|
169 |
-
|
170 |
-
for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
|
171 |
-
image_dir = os.path.join(root, image_dir)
|
172 |
-
gt_dir = os.path.join(root, gt_dir)
|
173 |
-
gt_json = os.path.join(root, gt_json)
|
174 |
-
|
175 |
-
DatasetCatalog.register(
|
176 |
-
key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
|
177 |
-
)
|
178 |
-
MetadataCatalog.get(key).set(
|
179 |
-
panoptic_root=gt_dir,
|
180 |
-
image_root=image_dir,
|
181 |
-
panoptic_json=gt_json,
|
182 |
-
gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
|
183 |
-
evaluator_type="cityscapes_panoptic_seg",
|
184 |
-
ignore_label=255,
|
185 |
-
label_divisor=1000,
|
186 |
-
**meta,
|
187 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/utils/visualization.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import matplotlib
|
3 |
-
|
4 |
-
matplotlib.use("Agg")
|
5 |
-
import matplotlib.pyplot as plt
|
6 |
-
import numpy as np
|
7 |
-
import os
|
8 |
-
import seaborn as sns
|
9 |
-
|
10 |
-
##
|
11 |
-
from sklearn.manifold import TSNE
|
12 |
-
from sklearn.decomposition import PCA
|
13 |
-
|
14 |
-
##
|
15 |
-
import matplotlib.ticker as mtick
|
16 |
-
|
17 |
-
|
18 |
-
def plot_sample_cv2(names, imgs, scores_: dict, gts, save_folder=None):
|
19 |
-
# get subplot number
|
20 |
-
total_number = len(imgs)
|
21 |
-
|
22 |
-
scores = scores_.copy()
|
23 |
-
# normarlisze anomalies
|
24 |
-
for k, v in scores.items():
|
25 |
-
max_value = np.max(v)
|
26 |
-
min_value = np.min(v)
|
27 |
-
|
28 |
-
scores[k] = (scores[k] - min_value) / max_value * 255
|
29 |
-
scores[k] = scores[k].astype(np.uint8)
|
30 |
-
# draw gts
|
31 |
-
mask_imgs = []
|
32 |
-
for idx in range(total_number):
|
33 |
-
gts_ = gts[idx]
|
34 |
-
mask_imgs_ = imgs[idx].copy()
|
35 |
-
mask_imgs_[gts_ > 0.5] = (0, 0, 255)
|
36 |
-
mask_imgs.append(mask_imgs_)
|
37 |
-
|
38 |
-
# save imgs
|
39 |
-
for idx in range(total_number):
|
40 |
-
cv2.imwrite(os.path.join(save_folder, f'{names[idx]}_ori.jpg'), imgs[idx])
|
41 |
-
cv2.imwrite(os.path.join(save_folder, f'{names[idx]}_gt.jpg'), mask_imgs[idx])
|
42 |
-
|
43 |
-
for key in scores:
|
44 |
-
heat_map = cv2.applyColorMap(scores[key][idx], cv2.COLORMAP_JET)
|
45 |
-
visz_map = cv2.addWeighted(heat_map, 0.5, imgs[idx], 0.5, 0)
|
46 |
-
cv2.imwrite(os.path.join(save_folder, f'{names[idx]}_{key}.jpg'),
|
47 |
-
visz_map)
|
48 |
-
|
49 |
-
|
50 |
-
def plot_anomaly_score_distributions(scores: dict, ground_truths_list, save_folder, class_name):
|
51 |
-
ground_truths = np.stack(ground_truths_list, axis=0)
|
52 |
-
|
53 |
-
N_COUNT = 100000
|
54 |
-
|
55 |
-
for k, v in scores.items():
|
56 |
-
layer_score = np.stack(v, axis=0)
|
57 |
-
normal_score = layer_score[ground_truths == 0]
|
58 |
-
abnormal_score = layer_score[ground_truths != 0]
|
59 |
-
|
60 |
-
plt.clf()
|
61 |
-
plt.figure(figsize=(4, 3))
|
62 |
-
ax = plt.gca()
|
63 |
-
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f'))
|
64 |
-
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f'))
|
65 |
-
|
66 |
-
# with plt.style.context(['science', 'ieee', 'no-latex']):
|
67 |
-
sns.histplot(np.random.choice(normal_score, N_COUNT), color="green", bins=50, label='${d(p_n)}$',
|
68 |
-
stat='probability', alpha=.75)
|
69 |
-
sns.histplot(np.random.choice(abnormal_score, N_COUNT), color="red", bins=50, label='${d(p_a)}$',
|
70 |
-
stat='probability', alpha=.75)
|
71 |
-
|
72 |
-
plt.xlim([0, 3])
|
73 |
-
|
74 |
-
save_path = os.path.join(save_folder, f'distributions_{class_name}_{k}.jpg')
|
75 |
-
|
76 |
-
plt.savefig(save_path, bbox_inches='tight', dpi=300)
|
77 |
-
|
78 |
-
|
79 |
-
valid_feature_visualization_methods = ['TSNE', 'PCA']
|
80 |
-
|
81 |
-
|
82 |
-
def visualize_feature(features, labels, legends, n_components=3, method='TSNE'):
|
83 |
-
assert method in valid_feature_visualization_methods
|
84 |
-
assert n_components in [2, 3]
|
85 |
-
|
86 |
-
if method == 'TSNE':
|
87 |
-
model = TSNE(n_components=n_components)
|
88 |
-
elif method == 'PCA':
|
89 |
-
model = PCA(n_components=n_components)
|
90 |
-
|
91 |
-
else:
|
92 |
-
raise NotImplementedError
|
93 |
-
|
94 |
-
feat_proj = model.fit_transform(features)
|
95 |
-
|
96 |
-
if n_components == 2:
|
97 |
-
ax = scatter_2d(feat_proj, labels)
|
98 |
-
elif n_components == 3:
|
99 |
-
ax = scatter_3d(feat_proj, labels)
|
100 |
-
else:
|
101 |
-
raise NotImplementedError
|
102 |
-
|
103 |
-
plt.legend(legends)
|
104 |
-
plt.axis('off')
|
105 |
-
|
106 |
-
|
107 |
-
def scatter_3d(feat_proj, label):
|
108 |
-
plt.clf()
|
109 |
-
ax1 = plt.axes(projection='3d')
|
110 |
-
|
111 |
-
label_unique = np.unique(label)
|
112 |
-
|
113 |
-
for l in label_unique:
|
114 |
-
ax1.scatter3D(feat_proj[label == l, 0],
|
115 |
-
feat_proj[label == l, 1],
|
116 |
-
feat_proj[label == l, 2], s=5)
|
117 |
-
|
118 |
-
return ax1
|
119 |
-
|
120 |
-
|
121 |
-
def scatter_2d(feat_proj, label):
|
122 |
-
plt.clf()
|
123 |
-
ax1 = plt.axes()
|
124 |
-
|
125 |
-
label_unique = np.unique(label)
|
126 |
-
|
127 |
-
for l in label_unique:
|
128 |
-
ax1.scatter(feat_proj[label == l, 0],
|
129 |
-
feat_proj[label == l, 1], s=5)
|
130 |
-
|
131 |
-
return ax1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CourserLi/classify/app.py
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
|
2 |
-
|
3 |
-
# %% auto 0
|
4 |
-
__all__ = ['temp', 'learn', 'categories', 'image', 'label', 'examples', 'intf', 'is_cat', 'classify_image']
|
5 |
-
|
6 |
-
# %% app.ipynb 1
|
7 |
-
from fastai.vision.all import *
|
8 |
-
import gradio as gr
|
9 |
-
|
10 |
-
# %% app.ipynb 2
|
11 |
-
def is_cat(x):
|
12 |
-
return x[0].isupper()
|
13 |
-
|
14 |
-
learn = load_learner('model.pkl')
|
15 |
-
|
16 |
-
# %% app.ipynb 5
|
17 |
-
categories = ('Dog', 'Cat')
|
18 |
-
|
19 |
-
def classify_image(img):
|
20 |
-
pred, idx, probs = learn.predict(img)
|
21 |
-
return dict(zip(categories, map(float, probs)))
|
22 |
-
|
23 |
-
# %% app.ipynb 8
|
24 |
-
image = gr.Image(shape=(192, 192))
|
25 |
-
label = gr.Label()
|
26 |
-
examples = ['dog.png', 'cat.png']
|
27 |
-
|
28 |
-
intf = gr.Interface(fn=classify_image,
|
29 |
-
inputs=image,
|
30 |
-
outputs=label,
|
31 |
-
examples=examples)
|
32 |
-
intf.launch(inline=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|