Commit 72cad55
Parent(s): 6b53fb4
Update parquet files (step 43 of 476)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackingpatching Alternatives A Comprehensive Guide on Where and How to Download Cracked Software.md +0 -65
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crysis RPCS3 Amazing Gaming Experience.md +0 -34
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fortress Forever V1.11 Torrent ((LINK)).md +0 -28
- spaces/1gistliPinn/ChatGPT4/Examples/Chello Divas 2015 Gujarati Movie Download !!LINK!!.md +0 -6
- spaces/1line/AutoGPT/autogpt/commands/__init__.py +0 -0
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Painter-Roadkill-LINK.md +0 -68
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Beach Buggy Racing 3 for PC and Race Against Tropical Rivals.md +0 -153
- spaces/1phancelerku/anime-remove-background/Comware MIB-H3C What You Need to Know About H3C SNMP MIB.md +0 -110
- spaces/1phancelerku/anime-remove-background/Download Sniper 3D Full Mod APK and Become the Top Sniper in the World.md +0 -89
- spaces/2ndelement/voicevox/voicevox_engine/preset/PresetManager.py +0 -188
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/biaobei_sing/preprocess.py +0 -16
- spaces/AIGuardians/SummarizeWikipediaDocument/inference.py +0 -3
- spaces/Ali-Maq/Calorie_Calculator/README.md +0 -13
- spaces/Aloento/9Nine-PITS/text/frontend/normalizer/normalizer.py +0 -35
- spaces/Aloento/9Nine-VITS/attentions.py +0 -250
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/__init__.py +0 -9
- spaces/Amrrs/DragGan-Inversion/PTI/utils/data_utils.py +0 -34
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_versatile_diffusion_to_diffusers.py +0 -791
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py +0 -749
- spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py +0 -11
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_model.py +0 -174
- spaces/AnishKumbhar/DogDiseasePredictor/README.md +0 -11
- spaces/Arnx/MusicGenXvAKN/app.py +0 -407
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/__main__.py +0 -47
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py +0 -97
- spaces/AtomdffAI/wechatgpt4atom/scripts/start.sh +0 -16
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/backbone/vit.py +0 -538
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/_framework_compat.py +0 -55
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/extern/__init__.py +0 -76
- spaces/BigSalmon/GPT2_Most_Probable/README.md +0 -38
- spaces/Bumpeet/faceTracking/README.md +0 -42
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/GETTING_STARTED.md +0 -80
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/replace.h +0 -23
- spaces/CVPR/WALT/walt/datasets/__init__.py +0 -29
- spaces/ChrisPreston/meaqua/README.md +0 -12
- spaces/CikeyQI/meme-api/meme_generator/memes/fanatic/__init__.py +0 -36
- spaces/Codecooker/rvcapi/src/infer_pack/attentions.py +0 -417
- spaces/CofAI/chat.v1/config.py +0 -9
- spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/__init__.py +0 -31
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/termui.py +0 -784
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/ModifyUpload-c89cfce3.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_cache_assets.py +0 -135
- spaces/Datasculptor/DescriptionGPT/detic/data/custom_build_augmentation.py +0 -51
- spaces/Datasculptor/StyleGAN-NADA/e4e/models/encoders/helpers.py +0 -140
- spaces/DhruvShek/chatlm/README.md +0 -13
- spaces/Docfile/open_llm_leaderboard/README.md +0 -14
- spaces/DragGan/DragGan-Inversion/stylegan_human/openpose/src/util.py +0 -106
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackingpatching Alternatives A Comprehensive Guide on Where and How to Download Cracked Software.md
DELETED
@@ -1,65 +0,0 @@
-<br />
-<h1>Crackingpatching Alternatives: The Best Sites to Download Cracked Software</h1>
-<p>Crackingpatching is a popular website that offers cracked versions of various software for free. However, it is not the only option available for those who want to download cracked software. There are many other websites that provide similar services, but with different features, advantages, and disadvantages.</p>
-<h2>crackingpatching alternatives</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://byltly.com/2uKwgj">https://byltly.com/2uKwgj</a></b></p><br /><br />
-<p>In this article, we will explore some of the best crackingpatching alternatives that you can use to download cracked software safely and easily. We will compare them based on their content, quality, security, and usability. We will also provide some tips and warnings on how to use cracked software responsibly and legally.</p>
-<h2>What is Cracked Software?</h2>
-<p>Cracked software is software that has been modified or hacked to bypass or remove its original protection mechanisms, such as license keys, activation codes, or digital rights management (DRM). Cracked software is usually distributed for free or at a lower price than the original software.</p>
-<p>Some of the reasons why people use cracked software are:</p>
-<ul>
-<li>To save money and avoid paying for expensive software</li>
-<li>To access premium features or functions that are not available in the free or trial versions of the software</li>
-<li>To test or evaluate the software before buying it</li>
-<li>To use the software without any restrictions or limitations</li>
-</ul>
-<p>However, using cracked software also comes with some risks and drawbacks, such as:</p>
-<p></p>
-<ul>
-<li>Violating the intellectual property rights and terms of service of the software developers or publishers</li>
-<li>Exposing your device and data to malware, viruses, spyware, ransomware, or other threats that may be hidden in the cracked software</li>
-<li>Compromising the performance, stability, compatibility, and security of your device and other programs</li>
-<li>Missing out on updates, patches, bug fixes, and technical support from the original software providers</li>
-<li>Facing legal consequences or penalties if caught using cracked software</li>
-</ul>
-<h2>What are Crackingpatching Alternatives?</h2>
-<p>Crackingpatching alternatives are websites that offer cracked versions of various software for free. They are similar to crackingpatching in terms of their content and purpose, but they may differ in their quality, security, and usability.</p>
-<p>Some of the factors that you should consider when choosing a crackingpatching alternative are:</p>
-<ul>
-<li>The variety and availability of the software that you are looking for</li>
-<li>The quality and reliability of the cracked software that you download</li>
-<li>The safety and security of the website and the cracked software that you download</li>
-<li>The ease and convenience of navigating and using the website and downloading the cracked software</li>
-<li>The reputation and credibility of the website and its sources</li>
-</ul>
-
-<h2>The Best Crackingpatching Alternatives</h2>
-
-<p>Here are some of the best crackingpatching alternatives that you can use to download cracked software:</p>
-
-<h3>Getintopc</h3>
-
-<p>Getintopc is one of the most popular and trusted websites for downloading cracked software. It offers a wide range of software categories, such as operating systems, antivirus, office tools, graphics, multimedia, games, and more. It also provides detailed information and screenshots for each software. It has a simple and user-friendly interface that allows you to easily find and download the software that you need.</p>
-
-<p>Pros:</p>
-
-<ul>
-<li>Offers a large variety of high-quality and updated cracked software</li>
-<li>Provides fast and direct download links without any surveys or ads</li>
-<li>Has a clean and safe website that does not contain any malware or viruses</li>
-<li>Has a responsive and helpful customer support team that can assist you with any issues or queries</li>
-</ul>
-
-<p>Cons:</p>
-
-<ul>
-<li>Does not have a search function or a filter option to narrow down your choices</li>
-<li>Does not have a comment section or a rating system to get feedback from other users</li>
-<li>Does not have a forum or a community where you can interact with other users or request for specific software</li>
-</ul>
-
-<h3>Oceanofgames</h3>
-
-<p>Oceanofgames is another popular and trusted website for downloading cracked software. It specializes in offering cracked versions of various games for different platforms, such as PC, Mac, Linux,
-Android, iOS,</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crysis RPCS3 Amazing Gaming Experience.md
DELETED
@@ -1,34 +0,0 @@
-<br />
-<h1>How to Play Crysis on RPCS3 - The Ultimate Guide</h1>
-<p>Crysis is a legendary first-person shooter game that was released for PC in 2007 and later ported to PlayStation 3 in 2011. The game is known for its stunning graphics, immersive gameplay, and challenging difficulty. But what if you want to play Crysis on your PC using RPCS3, the best PS3 emulator for Windows, Linux, macOS and FreeBSD? In this article, we will show you how to do that step by step.</p>
-<h2>What is RPCS3?</h2>
-<p>RPCS3 is a multi-platform open-source Sony PlayStation 3 emulator and debugger written in C++ for Windows, Linux, macOS and FreeBSD made possible with the power of reverse engineering. It can run PS3 games and applications with varying degrees of compatibility and performance. You can check the compatibility list of RPCS3 to see which games are playable, ingame, intro, loadable, or nothing.</p>
-<h2>crysis rpcs3</h2><br /><p><b><b>Download File</b> ↔ <a href="https://byltly.com/2uKwWs">https://byltly.com/2uKwWs</a></b></p><br /><br />
-<h2>How to Download and Install RPCS3?</h2>
-<p>To download and install RPCS3, you need to follow these steps:</p>
-<ol>
-<li>Go to the official website of RPCS3 and click on the Download button.</li>
-<li>Select your operating system and download the latest release or preview build of RPCS3.</li>
-<li>Extract the downloaded file to a folder of your choice.</li>
-<li>Run rpcs3.exe (Windows) or rpcs3 (Linux/macOS/FreeBSD) to launch the emulator.</li>
-<li>Follow the instructions on the Quickstart guide to set up the emulator. You will need a PS3 firmware file, a PS3 controller or a compatible gamepad, and some PS3 games or applications.</li>
-</ol>
-<h2>How to Play Crysis on RPCS3?</h2>
-<p>To play Crysis on RPCS3, you need to follow these steps:</p>
-<ol>
-<li>Make sure you have a digital copy of Crysis for PS3. You can buy it from the PlayStation Store or use a backup tool to dump it from your PS3 console. The game ID should be NPEB00575, NPUB30302, or NPJB00150.</li>
-<li>Copy the game folder to the dev_hdd0/game folder of your RPCS3 installation. Alternatively, you can use the File > Install .pkg option in RPCS3 to install the game from a .pkg file.</li>
-<li>Launch RPCS3 and select Crysis from the game list. Right-click on it and choose Configure to adjust the game-specific settings. No options that deviate from RPCS3's default settings are recommended for this title.</li>
-<li>Double-click on Crysis to start the game. Enjoy!</li>
-</ol>
-<h2>Troubleshooting and Tips</h2>
-<p>If you encounter any issues or problems while playing Crysis on RPCS3, here are some tips that might help you:</p>
-<ul>
-<li>Make sure you have the latest version of RPCS3 and update it regularly. You can use the Help > Check for Updates option in RPCS3 to do that.</li>
-<li>Make sure you have a powerful PC that meets the minimum requirements of RPCS3. You can check the system requirements of RPCS3 here.</li>
-<li>Make sure you have a compatible graphics card and driver that supports Vulkan or OpenGL. You can check the compatibility list of graphics cards and drivers here.</li>
-<li>Make sure you have a valid PS3 firmware file and install it correctly. You can download the firmware file from here and follow the instructions here to install it.</li>
-<li>Make sure you have a valid PS3 controller or a compatible gamepad that works with RPCS3. You can check the compatibility list of controllers and gamepads here and follow the instructions here to configure them.</li>
-<li>If you experience any graphical glitches, audio issues, or performance drops while playing Crysis on RPCS3, try changing the renderer, resolution scale, frame limit, or other settings in RPCS3's configuration menu. You can also check the forums or wiki pages of RPCS3 for more tips and solutions</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fortress Forever V1.11 Torrent ((LINK)).md
DELETED
@@ -1,28 +0,0 @@
-
-<h1>How to Download and Install Fortress Forever v1.11</h1>
-<p>Fortress Forever is a Team Fortress mod for Half-life 2's Source Engine that aims to please both old and new TF players. It is a free and independent project that has been created entirely from scratch.</p>
-<h2>Fortress Forever v1.11 torrent</h2><br /><p><b><b>Download</b> ---> <a href="https://byltly.com/2uKzdO">https://byltly.com/2uKzdO</a></b></p><br /><br />
-<p>If you want to download and install Fortress Forever v1.11, you will need a torrent client such as BitTorrent or uTorrent. You can find the torrent file for Fortress Forever v1.11 <a href="https://forums.fortress-forever.com/showthread.php?t=14010">here</a>. This torrent contains the full installer for Fortress Forever v1.11, which is about 1.2 GB in size.</p>
-<p>Once you have downloaded the torrent file, open it with your torrent client and start downloading the game files. The download speed may vary depending on your internet connection and the number of seeders and leechers. Please keep seeding after you finish downloading to help other players get the game faster.</p>
-<p>After the download is complete, run the installer and follow the instructions on the screen. You will need to have Half-life 2 installed on your computer before installing Fortress Forever. The installer will automatically detect your Steam folder and install the game there.</p>
-<p></p>
-<p>Once the installation is done, you can launch Fortress Forever from your Steam library or from the desktop shortcut. You can also check for updates and patches on the official <a href="https://www.fortress-forever.com/">website</a> or on the <a href="https://www.gamefront.com/games/half-life-2/category/fortress-forever">GameFront</a> page.</p>
-<p>Enjoy playing Fortress Forever v1.11!</p>
-
-<h2>What is Fortress Forever?</h2>
-<p>Fortress Forever is a mod for Half-life 2 that recreates the classic gameplay of Team Fortress Classic, a popular mod for Half-life 1. It features nine classes with different weapons and abilities, such as the Scout, the Soldier, the Pyro, the Demoman, the Heavy, the Medic, the Engineer, the Spy and the Sniper. Each class has its own strengths and weaknesses and can contribute to the team in different ways.</p>
-<p>The game offers various game modes and maps to play on, such as Capture the Flag, Attack/Defend, Territory Control and more. The game also supports custom maps and mods made by the community. You can find and download them from sites like <a href="https://www.gamebanana.com/games/102">GameBanana</a> or <a href="https://www.moddb.com/mods/fortress-forever">ModDB</a>.</p>
-<p>Fortress Forever is a fast-paced and fun game that requires teamwork and strategy to win. It also has a steep learning curve and a high skill ceiling, which makes it challenging and rewarding for both new and veteran players. If you are looking for a game that combines nostalgia and innovation, Fortress Forever is the game for you.</p>
-
-<h2>Why Download Fortress Forever v1.11?</h2>
-<p>Fortress Forever v1.11 is the latest version of the game as of April 2023. It includes many improvements and fixes over the previous versions, such as:</p>
-<ul>
-<li>New maps: ff_cornfield_b3, ff_dustbowl_classic_b2, ff_monkey_l_b5</li>
-<li>New models: New player models for all classes, new weapon models for some classes</li>
-<li>New sounds: New sounds for weapons, explosions, footsteps and more</li>
-<li>New features: Added blur effect option to menu, added some missing map files</li>
-<li>Bug fixes: Fixed various crashes, exploits, glitches and balance issues</li>
-</ul>
-<p>By downloading Fortress Forever v1.11 torrent, you can get the latest version of the game in a fast and convenient way. You can also help other players get the game faster by seeding the torrent after you finish downloading. You can also check for future updates and patches on the official <a href="https://www.fortress-forever.com/">website</a> or on the <a href="https://www.gamefront.com/games/half-life-2/category/fortress-forever">GameFront</a> page.</p> 7b8c122e87<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Chello Divas 2015 Gujarati Movie Download !!LINK!!.md
DELETED
@@ -1,6 +0,0 @@
-<h2>chello divas 2015 gujarati movie download</h2><br /><p><b><b>Download</b> ⚡ <a href="https://imgfil.com/2uxXj2">https://imgfil.com/2uxXj2</a></b></p><br /><br />
-
-Gujarati movie 2015 download chello divas movieinstmank. The movie revolves around the lives of eight friends and their journey of growing up while they face ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1line/AutoGPT/autogpt/commands/__init__.py
DELETED
File without changes
spaces/1pelhydcardo/ChatGPT-prompt-generator/Painter-Roadkill-LINK.md
DELETED
@@ -1,68 +0,0 @@
-## Painter Roadkill
-
-
-
-
-
-
-
-
-
-**Download File ->>> [https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txjjP&sa=D&sntz=1&usg=AOvVaw22Mu7KnVtmRVO\_5qemKMu3](https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txjjP&sa=D&sntz=1&usg=AOvVaw22Mu7KnVtmRVO\_5qemKMu3)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Ryan Roadkill: The Artist Who Turns Dead Animals into Stunning Artworks
-
-
-
-Ryan Roadkill is a British artist who uses roadkill as his main source of inspiration. He creates striking paintings and prints that depict the animals he finds on the roads, often in a stylized and surreal way. He says he wants to give them a second life and celebrate their beauty and spirit.
-
-
-
-Roadkill has been working as an artist since 2015, when he quit his job as a graphic designer and started painting full-time. He says he was always fascinated by wildlife and nature, but also by the darker aspects of life and death. He says he sees roadkill as a symbol of the clash between man and nature, and the fragility of life.
-
-
-
-He uses acrylics, spray paint, ink, and collage to create his artworks, which range from realistic portraits to abstract compositions. He often adds elements of pop culture, such as logos, slogans, or icons, to create a contrast between the natural and the artificial. He also experiments with different techniques and materials, such as gold leaf, resin, or neon lights.
-
-
-
-Roadkill has exhibited his works in various galleries and fairs around the world, such as Signari Gallery in London, Art Basel in Miami, or Scope in New York. He has also collaborated with brands like Vans, Converse, or Harley Davidson. He says he hopes his art can raise awareness about the plight of wildlife and the need for conservation.
-
-
-
-He says he is always on the lookout for new roadkill specimens to use as reference for his paintings. He says he respects the animals he finds and treats them with care. He says he does not kill any animals himself, nor does he use endangered or protected species. He says he only uses what he finds by chance on his travels.
-
-
-
-Roadkill is one of the most original and innovative digital artists of our time. His art is a unique blend of beauty and horror, of life and death, of nature and culture. He challenges us to look at roadkill in a different way, and to appreciate the hidden wonders of the animal kingdom.
-
-
-
-Roadkill says he draws inspiration from various sources, such as comic books, movies, music, or tattoos. He says he is influenced by artists like Frank Frazetta, Robert Williams, or Ed Roth. He says he likes to mix different styles and genres, such as fantasy, horror, sci-fi, or western. He says he enjoys creating his own characters and stories, and giving them a twist.
-
-
-
-Roadkill says he works mostly digitally, using programs like Photoshop or Illustrator. He says he likes the flexibility and speed of digital tools, but he also tries to keep a traditional feel to his art. He says he uses a lot of textures, brushes, and filters to create a hand-made look. He says he also likes to print his works on different materials, such as wood, metal, or canvas.
-
-
-
-Roadkill says he has a loyal fan base that supports his art and collects his prints. He says he sells his works online through his website and social media platforms. He says he also does commissions and custom projects for clients who want something special. He says he is always open to new challenges and opportunities.
-
-dfd1c89656
-
-
-
-
-
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Beach Buggy Racing 3 for PC and Race Against Tropical Rivals.md
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Beach Buggy Racing 3: A Fun and Exciting Kart-Racing Game for PC</h1>
|
3 |
-
<p>If you are looking for a fun and exciting kart-racing game for your PC, you should check out Beach Buggy Racing 3. This is the official sequel to the popular Beach Buggy Racing series, which has over 100 million downloads worldwide. In this game, you can drive into an action-packed, surprise-filled world of off-road kart racing mayhem. You can race against a field of rival drivers, each with unique personalities and special abilities. You can also build a collection of crazy powerups, like Dodgeball Frenzy, Fireball, and Oil Slick. You can also unlock and upgrade a variety of cars, from dune buggies to monster trucks to lunar rovers. You can also test your skills in 6 different game modes on 15 imaginative 3D race tracks, against a pack of tropical-loving rivals with a serious case of road rage. This game is not just a great looking 3D racing game, it's an epic battle with spectacular physics-based gameplay.</p>
|
4 |
-
<h2>beach buggy racing 3 download for pc</h2><br /><p><b><b>Download Zip</b> ☆☆☆ <a href="https://urlin.us/2uST1n">https://urlin.us/2uST1n</a></b></p><br /><br />
|
5 |
-
<h2>What is Beach Buggy Racing 3?</h2>
|
6 |
-
<h3>The sequel to the popular Beach Buggy Racing series</h3>
|
7 |
-
<p>Beach Buggy Racing 3 is the official sequel to Beach Buggy Blitz and Beach Buggy Racing, the free driving games with over 30 million players worldwide. These games are developed by Vector Unit, a studio that specializes in racing games for mobile devices and consoles. Vector Unit has also created other hit games like Riptide GP, Hydro Thunder Hurricane, and MouseBot.</p>
|
8 |
-
<h3>A kart-racing game with amazing graphics, powerups, and tracks</h3>
|
9 |
-
<p>Beach Buggy Racing 3 is a kart-racing game that features amazing graphics, powerups, and tracks. The game uses a custom 3D engine that delivers stunning visuals and realistic physics. The game also has a variety of powerups that you can use to boost your speed, attack your opponents, or defend yourself. Some of the powerups include Fireball, Dodgeball Frenzy, Oil Slick, Confusion Spell, Teleportation, and more. The game also has 15 spectacular race tracks that are packed with hidden shortcuts and surprises. You can explore dinosaur-infested jungles, lava-spewing volcanoes, beautiful beaches, and mysterious swamps.</p>
|
10 |
-
<h3>A multiplayer game with split-screen and online modes</h3>
|
11 |
-
<p>Beach Buggy Racing 3 is also a multiplayer game that supports split-screen and online modes. You can race shoulder-to-shoulder with up to 4 friends on your PC using a TV or a monitor. You can also compete with your friends on leaderboards, earn achievements, and sync your progress across multiple devices using your Google account. You can also play online with other players from around the world in various modes like Quick Race, Tournament, Daily Challenge, and more.</p>
|
12 |
-
<h2>How to download Beach Buggy Racing 3 for PC?</h2>
|
13 |
-
<h3>The official way: using the Microsoft Store app</h3>
|
14 |
-
<p>The official way to download Beach Buggy Racing 3 for PC is to use the Microsoft Store app on your Windows device. This app allows you to browse, buy, and download games and apps from the Microsoft Store. To use this app, you need to have a Microsoft account and an internet connection. Here are the steps to download Beach Buggy Racing 3 for PC using the Microsoft Store app:</p>
|
15 |
-
<p>beach buggy racing 3 pc game free download<br />
|
16 |
-
beach buggy racing 3 for windows 10 download<br />
|
17 |
-
beach buggy racing 3 online play on pc<br />
|
18 |
-
beach buggy racing 3 full version download for pc<br />
|
19 |
-
beach buggy racing 3 microsoft store download<br />
|
20 |
-
beach buggy racing 3 pc game system requirements<br />
|
21 |
-
beach buggy racing 3 softonic download for pc<br />
|
22 |
-
beach buggy racing 3 offline installer for pc<br />
|
23 |
-
beach buggy racing 3 apk download for pc<br />
|
24 |
-
beach buggy racing 3 pc game review<br />
|
25 |
-
beach buggy racing 3 cheats and hacks for pc<br />
|
26 |
-
beach buggy racing 3 multiplayer mode on pc<br />
|
27 |
-
beach buggy racing 3 best cars and powerups for pc<br />
|
28 |
-
beach buggy racing 3 tips and tricks for pc<br />
|
29 |
-
beach buggy racing 3 mod apk download for pc<br />
|
30 |
-
beach buggy racing 3 latest update download for pc<br />
|
31 |
-
beach buggy racing 3 vector unit download for pc<br />
|
32 |
-
beach buggy racing 3 kart-racing island adventure for pc<br />
|
33 |
-
beach buggy racing 3 unlock all tracks and characters for pc<br />
|
34 |
-
beach buggy racing 3 how to play with controller on pc<br />
|
35 |
-
beach buggy racing 3 windows store download link<br />
|
36 |
-
beach buggy racing 3 free download without ads for pc<br />
|
37 |
-
beach buggy racing 3 high-speed fun in the sun for pc<br />
|
38 |
-
beach buggy racing 3 how to install on pc<br />
|
39 |
-
beach buggy racing 3 gameplay video for pc<br />
|
40 |
-
beach buggy racing 3 download size and speed for pc<br />
|
41 |
-
beach buggy racing 3 graphics and sound quality for pc<br />
|
42 |
-
beach buggy racing 3 how to upgrade and customize cars on pc<br />
|
43 |
-
beach buggy racing 3 how to use special abilities on pc<br />
|
44 |
-
beach buggy racing 3 how to earn gems and coins on pc<br />
|
45 |
-
beach buggy racing 3 how to get unlimited tickets on pc<br />
|
46 |
-
beach buggy racing 3 how to beat boss characters on pc<br />
|
47 |
-
beach buggy racing 3 how to unlock split-screen multiplayer on pc<br />
|
48 |
-
beach buggy racing 3 how to change language and settings on pc<br />
|
49 |
-
beach buggy racing 3 how to connect with facebook and google+ on pc<br />
|
50 |
-
beach buggy racing 3 how to report bugs and issues on pc<br />
|
51 |
-
beach buggy racing 3 how to contact developers and support on pc<br />
|
52 |
-
beach buggy racing 3 alternatives and similar games for pc<br />
|
53 |
-
beach buggy racing 3 ratings and reviews from users on pc<br />
|
54 |
-
beach buggy racing 3 frequently asked questions and answers for pc</p>
|
55 |
-
<ol>
|
56 |
-
<li>Open the Microsoft Store app on your PC. You can find it on your Start menu or search for it using the search box on the taskbar.</li>
|
57 |
-
<li>On the top right corner of the app, click on the sign-in button and enter your Microsoft account credentials. If you don't have a Microsoft account, you can create one for free.</li>
|
58 |
-
<li>On the search box of the app, type "Beach Buggy Racing 3" and hit enter. You will see the game's page on the app.</li>
|
59 |
-
<li>On the game's page, click on the "Get" button to download the game. You may need to enter your password or use a PIN or fingerprint to confirm your purchase. The game is free to download and play, but it may offer in-app purchases.</li>
|
60 |
-
<li>Wait for the game to download and install on your PC. You can see the progress on the "Downloads and updates" section of the app.</li>
|
61 |
-
<li>Once the game is installed, you can launch it from the app or from your Start menu. Enjoy playing Beach Buggy Racing 3 on your PC!</li>
|
62 |
-
</ol>
|
63 |
-
<h3>The alternative way: using an Android emulator</h3>
|
64 |
-
<p>The alternative way to download Beach Buggy Racing 3 for PC is to use an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. There are many Android emulators available online, such as BlueStacks, NoxPlayer, LDPlayer, and more. To use an Android emulator, you need to have a Google account and an internet connection. Here are the steps to download Beach Buggy Racing 3 for PC using an Android emulator:</p>
|
65 |
-
<ol>
|
66 |
-
<li>Download and install an Android emulator of your choice on your PC. You can find them on their official websites or other trusted sources.</li>
|
67 |
-
<li>Launch the Android emulator on your PC and sign in with your Google account. If you don't have a Google account, you can create one for free.</li>
|
68 |
-
<li>Open the Google Play Store app on the emulator and search for "Beach Buggy Racing 3". You will see the game's page on the app.</li>
|
69 |
-
<li>Click on the "Install" button to download and install the game on the emulator. You may need to accept some permissions and terms of service.</li>
|
70 |
-
<li>Wait for the game to download and install on the emulator. You can see the progress on the notification bar of the emulator.</li>
|
71 |
-
<li>Once the game is installed, you can launch it from the app drawer or home screen of the emulator. Enjoy playing Beach Buggy Racing 3 on your PC!</li>
|
72 |
-
</ol>
|
73 |
-
<h2>What are the features of Beach Buggy Racing 3?</h2>
|
74 |
-
<h3>Cool cars to customize and upgrade</h3>
|
75 |
-
<p>Beach Buggy Racing 3 has a variety of cool cars that you can customize and upgrade. You can choose from over 40 vehicles, ranging from dune buggies to monster trucks to lunar rovers. You can also change their colors, decals, wheels, and more. You can also upgrade their performance by improving their speed, acceleration, handling, and durability. You can earn coins and gems by playing the game or watching ads, or you can buy them with real money.</p>
|
76 |
-
<h3>Tons of amazing powerups to use and collect</h3>
|
77 |
-
<p>Beach Buggy Racing 3 also has tons of amazing powerups that you can use and collect. You can find them on the race tracks or buy them with coins or gems. Some of the powerups include:</p>
|
78 |
-
<ul>
|
79 |
-
<li>Fireball: Launches a fiery projectile that explodes on impact and sets nearby racers on fire.</li>
|
80 |
-
<li>Dodgeball Frenzy: Fires a barrage of dodgeballs that bounce around and hit other racers.</li>
|
81 |
-
<li>Oil Slick: Spills a slippery puddle of oil that makes other racers spin out.</li>
|
82 |
-
<li>Confusion Spell: Casts a spell that reverses other racers' controls.</li>
|
83 |
-
<li>Teleportation: Teleports you to a random position ahead of other racers.</li>
|
84 |
-
<li>And many more!</li>
|
85 |
-
</ul>
|
86 |
-
<h3>15 spectacular race tracks to explore and master</h3>
|
87 |
-
<p>Beach Buggy Racing 3 also has 15 spectacular race tracks that you can explore and master. Each track has its own theme, layout, obstacles, shortcuts, and secrets. You can race in different environments, such as:</p>
|
88 |
-
<ul>
|
89 |
-
<li>Dinosaur-infested jungles: Watch out for giant dinosaurs that may stomp or bite you.</li>
|
90 |
-
<li>Lava-spewing volcanoes: Avoid falling into lava pits or getting hit by flying rocks.</li>
|
91 |
-
<li>Beautiful beaches: Enjoy the sun, sand, and waves, but beware of crabs and seagulls.</li>
|
92 |
-
<li>Mysterious swamps: Navigate through foggy marshes and spooky graveyards, but don't get caught by the swamp monster.</li>
|
93 |
-
<li>And many more!</li>
|
94 |
-
</ul>
|
95 |
-
<h3>A team of racers with unique special powers</h3>
|
96 |
-
<p>Beach Buggy Racing 3 also has a team of racers with unique special powers. You can recruit and play as 12 different characters, each with their own personality and backstory. You can also unlock their special powers, which can give you an edge in the races. Some of the characters and their powers include:</p>
|
97 |
-
<ul>
|
98 |
-
<li>Rez: A hacker who can hack other racers' powerups and use them against them.</li>
|
99 |
-
<li>Beat Bot: A robot who can transform into a jet and fly over obstacles.</li>
|
100 |
-
<li>Tiki: A tribal warrior who can summon a giant tiki head that crushes other racers.</li>
|
101 |
-
<li>Lola: A pop star who can charm other racers with her music and make them follow her.</li>
|
102 |
-
<li>And many more!</li>
|
103 |
-
</ul>
|
104 |
-
<h3>6 different game modes to challenge your skills</h3>
|
105 |
-
<p>Beach Buggy Racing 3 also has 6 different game modes to challenge your skills. You can play these modes solo or with your friends. Some of the game modes include:</p>
|
106 |
-
<ul>
|
107 |
-
<li>Quick Race: A simple race where you can choose your car, track, and difficulty level.</li>
|
108 |
-
<li>Tournament: A series of races where you compete for trophies and prizes.</li>
|
109 |
-
<li>Daily Challenge: A special race that changes every day and offers rewards for completing it.</li>
|
110 |
-
<li>Adventure: A story mode where you follow the adventures of Rez and his team as they race across the world.</li>
|
111 |
-
<li>Time Trial: A mode where you try to beat the clock and set new records.</li>
|
112 |
-
<li>Boss Battle: A mode where you face off against powerful boss characters and try to defeat them.</li>
|
113 |
-
</ul>
|
114 |
-
<h2>Conclusion</h2>
|
115 |
-
<p>Beach Buggy Racing 3 is a fun and exciting kart-racing game for PC that offers a lot of features and content. You can download it for free from the Microsoft Store app or use an Android emulator to play it on your PC. You can customize and upgrade your cars, use and collect powerups, explore and master race tracks, recruit and play as racers with special powers, and challenge yourself in different game modes. If you are looking for a kart-racing game that is easy to play but hard to master, you should give Beach Buggy Racing 3 a try. You will not regret it!</p>
|
116 |
-
<h2>FAQs</h2>
|
117 |
-
<ol>
|
118 |
-
<li>What are the system requirements for Beach Buggy Racing 3 on PC?</li>
|
119 |
-
<p>The minimum system requirements for Beach Buggy Racing 3 on PC are:</p>
|
120 |
-
<ul>
|
121 |
-
<li>OS: Windows 10 version 18362.0 or higher</li>
|
122 |
-
<li>Processor: Intel Core i3-2100 or equivalent</li>
|
123 |
-
<li>Memory: 4 GB RAM</li>
|
124 |
-
<li>Graphics: NVIDIA GeForce GT 730 or equivalent</li>
|
125 |
-
<li>DirectX: Version 11</li>
|
126 |
-
<li>Storage: 1 GB available space</li>
|
127 |
-
</ul>
|
128 |
-
<li>How do I control my car in Beach Buggy Racing 3 on PC?</li>
|
129 |
-
<p>You can control your car in Beach Buggy Racing 3 on PC using your keyboard, mouse, or gamepad. The default controls are:</p>
|
130 |
-
<table border="1">
|
131 |
-
<tr><th>Action</th><th>Keyboard</th><th>Mouse</th><th>Gamepad</th></tr>
|
132 |
-
<tr><td>Steer left/right</td><td>A/D keys</td><td>Move mouse left/right</td><td>Left stick or D-pad left/right</td></tr>
|
133 |
-
<tr><td>Accelerate/brake</td><td>W/S keys</td><td>Left/right mouse buttons</td><td>A/B buttons or right trigger/left trigger</td></tr>
|
134 |
-
<tr><td>Use powerup</td><td>E key</td><td>Middle mouse button or scroll wheel click</td><td>X button or right bumper</td></tr>
|
135 |
-
<tr><td>Pause/resume game</td><td>P key or Esc key</td><td>N/A</td><td>Start button or Menu button</td></tr> </table>
|
136 |
-
<li>How do I change the language of Beach Buggy Racing 3 on PC?</li>
|
137 |
-
<p>You can change the language of Beach Buggy Racing 3 on PC by following these steps:</p>
|
138 |
-
<ol>
|
139 |
-
<li>Launch the game and go to the main menu.</li>
|
140 |
-
<li>Click on the gear icon on the top right corner of the screen to open the settings menu.</li>
|
141 |
-
<li>Click on the flag icon on the bottom left corner of the screen to open the language menu.</li>
|
142 |
-
<li>Select the language you want to use from the list of available languages.</li>
|
143 |
-
<li>Click on the back arrow on the top left corner of the screen to return to the settings menu.</li>
|
144 |
-
<li>Click on the back arrow again to return to the main menu.</li>
|
145 |
-
<li>Enjoy playing Beach Buggy Racing 3 in your preferred language!</li>
|
146 |
-
</ol>
|
147 |
-
<li>How do I unlock more cars, powerups, and tracks in Beach Buggy Racing 3 on PC?</li>
|
148 |
-
<p>You can unlock more cars, powerups, and tracks in Beach Buggy Racing 3 on PC by playing the game and earning stars, coins, and gems. Stars are awarded for completing races and challenges. Coins are earned by racing, watching ads, or buying them with real money. Gems are earned by completing achievements, daily challenges, or buying them with real money. You can use stars, coins, and gems to unlock and upgrade cars, powerups, and tracks in the garage menu.</p>
|
149 |
-
<li>How do I play with my friends in Beach Buggy Racing 3 on PC?</li>
|
150 |
-
<p>You can play with your friends in Beach Buggy Racing 3 on PC in two ways: split-screen mode or online mode. Split-screen mode allows you to play with up to 4 friends on your PC using a TV or a monitor. You need to have enough gamepads or keyboards and mice for each player. Online mode allows you to play with other players from around the world in various modes like Quick Race, Tournament, Daily Challenge, and more. You need to have an internet connection and a Google account to play online.</p>
|
151 |
-
<h2></h2></p> 197e85843d<br />
|
152 |
-
<br />
|
153 |
-
<br />
spaces/1phancelerku/anime-remove-background/Comware MIB-H3C What You Need to Know About H3C SNMP MIB.md
DELETED
@@ -1,110 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Introduction</h1>
|
3 |
-
<p>If you are looking for a way to manage your network devices from different vendors, platforms, and technologies, you might want to consider using Simple Network Management Protocol (SNMP). SNMP is an Internet standard protocol that allows you to collect and organize information about managed devices on IP networks and modify that information to change device behavior. </p>
|
4 |
-
<h2>h3c snmp mib download</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://jinyurl.com/2uNQx1">https://jinyurl.com/2uNQx1</a></b></p><br /><br />
|
5 |
-
<p>One of the key components of SNMP is Management Information Base (MIB), which is a database that defines the information that an SNMP manager can request from an agent on a managed device. MIB contains variables that describe the device status, configuration, performance, and other attributes. </p>
|
6 |
-
<p>In this article, we will introduce you to H3C SNMP MIB, which is a collection of MIB files for H3C products, such as switches, routers, firewalls, wireless devices, and more. We will explain the benefits of using H3C SNMP MIB, how to download it from the official website of H3C, how to install and use it on your network management system, and some tips and best practices for optimizing your network management with H3C SNMP MIB.</p>
|
7 |
-
<h1>Benefits of using H3C SNMP MIB</h1>
|
8 |
-
<p>H3C SNMP MIB provides you with a comprehensive set of information about your H3C devices on the network. By using H3C SNMP MIB, you can:</p>
|
9 |
-
<h2>Monitor and manage network devices</h2>
|
10 |
-
<p>H3C SNMP MIB allows you to monitor the status, performance, traffic, errors, events, and other metrics of your H3C devices. You can also manage your devices remotely by setting their configuration parameters, such as IP address, port number, security settings, etc. You can also receive notifications from your devices when certain events or thresholds are triggered, such as interface state change, CPU usage spike, authentication failure, etc. </p>
|
11 |
-
<h2>Configure and troubleshoot network issues</h2>
|
12 |
-
<p>H3C SNMP MIB enables you to configure your network devices according to your needs and preferences. You can modify the values of the MIB objects to change the behavior of your devices. For example, you can enable or disable certain features or functions, adjust the thresholds or intervals for monitoring or reporting, etc. You can also troubleshoot network issues by querying the MIB objects to identify the root cause of the problem. For example, you can check the error counters or statistics to locate the source of the fault. </p>
|
13 |
-
<h2>Enhance network security and performance</h2>
|
14 |
-
<p>H3C SNMP MIB helps you to enhance your network security and performance by providing you with various options for securing and optimizing your network communication. You can choose different versions of SNMP protocol (v1, v2c, or v3) depending on your security requirements. You can also use different access control modes (community-based or user-based) to restrict access to your MIB objects. You can also use encryption and authentication mechanisms to protect your data transmission. Moreover, you can use different levels of security strength (low or high) to comply with FIPS standards. </p>
|
15 |
-
<h1>How to download H3C SNMP MIB</h1>
|
16 |
-
<p>If you want to use H3C SNMP MIB on your network management system, you need to download it from the official website of H3C. Here are the steps to follow:</p>
|
17 |
-
<h2>Visit the official website of H3C</h2>
|
18 |
-
<p>Go the benefits of using H3C SNMP MIB, such as monitoring and managing network devices, configuring and troubleshooting network issues, and enhancing network security and performance. We have also shown you how to download H3C SNMP MIB from the official website of H3C, how to install and use H3C SNMP MIB on your network management system, and some tips and best practices for optimizing your network management with H3C SNMP MIB.</p>
|
19 |
-
<p>h3c comware mib download<br />
|
20 |
-
h3c software download center<br />
|
21 |
-
h3c snmp configuration guide<br />
|
22 |
-
h3c cloud computing mib<br />
|
23 |
-
h3c switch snmp oid<br />
|
24 |
-
h3c wireless snmp trap<br />
|
25 |
-
h3c network management mib<br />
|
26 |
-
h3c security mib download<br />
|
27 |
-
h3c router snmp agent<br />
|
28 |
-
h3c server snmp monitoring<br />
|
29 |
-
h3c adnet intelligent terminal mib<br />
|
30 |
-
h3c cloudnet license server download<br />
|
31 |
-
h3c snmp v3 configuration example<br />
|
32 |
-
h3c switch mib reference manual<br />
|
33 |
-
h3c wireless controller snmp mib<br />
|
34 |
-
h3c network management software download<br />
|
35 |
-
h3c security device snmp settings<br />
|
36 |
-
h3c router mib file download<br />
|
37 |
-
h3c server management software mib<br />
|
38 |
-
h3c adnet terminal snmp configuration<br />
|
39 |
-
h3c cloudnet license server installation guide<br />
|
40 |
-
h3c snmp v2 configuration commands<br />
|
41 |
-
h3c switch snmp community string<br />
|
42 |
-
h3c wireless access point snmp mib<br />
|
43 |
-
h3c network management system mib<br />
|
44 |
-
h3c security firewall snmp download<br />
|
45 |
-
h3c router snmp trap configuration<br />
|
46 |
-
h3c server hardware monitoring mib<br />
|
47 |
-
h3c adnet terminal management software download<br />
|
48 |
-
h3c cloudnet license server user manual<br />
|
49 |
-
h3c snmp v1 configuration tutorial<br />
|
50 |
-
h3c switch snmp enable command<br />
|
51 |
-
h3c wireless bridge snmp mib<br />
|
52 |
-
h3c network management platform mib<br />
|
53 |
-
h3c security vpn gateway snmp download<br />
|
54 |
-
h3c router snmp get command<br />
|
55 |
-
h3c server firmware update mib<br />
|
56 |
-
h3c adnet terminal firmware download<br />
|
57 |
-
h3c cloudnet license server troubleshooting guide<br />
|
58 |
-
h3c snmp authentication configuration steps<br />
|
59 |
-
h3c switch snmp version command<br />
|
60 |
-
h3c wireless controller cluster snmp mib<br />
|
61 |
-
h3c network management tool mib<br />
|
62 |
-
h3c security ips device snmp download<br />
|
63 |
-
h3c router snmp set command<br />
|
64 |
-
h3c server bios settings mib<br />
|
65 |
-
h3c adnet terminal software upgrade guide<br />
|
66 |
-
h3c cloudnet license server backup and restore guide</p>
|
67 |
-
<p>We hope that this article has helped you to understand and use H3C SNMP MIB better. If you have any questions or feedback, please feel free to contact us or leave a comment below. We would love to hear from you and help you with your network management needs.</p>
|
68 |
-
<h1>FAQs</h1>
|
69 |
-
<p>Here are some frequently asked questions about H3C SNMP MIB:</p>
|
70 |
-
<h2>What is the difference between H3C SNMP MIB and standard SNMP MIB?</h2>
|
71 |
-
<p>H3C SNMP MIB is a collection of MIB files that are specific to H3C products, while standard SNMP MIB is a collection of MIB files that are common to all SNMP devices. H3C SNMP MIB contains more information and features than standard SNMP MIB, such as device model, serial number, firmware version, hardware status, etc. H3C SNMP MIB also supports more functions and commands than standard SNMP MIB, such as device backup, restore, upgrade, reboot, etc. </p>
|
72 |
-
<h2>Where can I find the description and definition of the MIB objects in H3C SNMP MIB?</h2>
|
73 |
-
<p>You can find the description and definition of the MIB objects in H3C SNMP MIB in the documentation file that you downloaded from the H3C website along with the MIB file. The documentation file is usually named as "H3C-PRODUCT-MIB-User Guide.pdf" or "H3C-PRODUCT-MIB-Reference.pdf", where PRODUCT is the name of your product. The documentation file contains the syntax, semantics, access mode, status, default value, range, units, description, and example of each MIB object in H3C SNMP MIB. You can also use your SNMP manager software to view the description and definition of the MIB objects in H3C SNMP MIB. </p>
|
74 |
-
<h2>How can I update or upgrade my H3C SNMP MIB?</h2>
|
75 |
-
<p>You can update or upgrade your H3C SNMP MIB by downloading the latest version of the MIB file and documentation from the H3C website and replacing the old version on your network management system. You may also need to recompile or revalidate the new version of the MIB file before using it. You can check the release notes or change log of the new version of the MIB file and documentation to see what changes or improvements have been made. You can also contact the technical support team of H3C for assistance or guidance on updating or upgrading your H3C SNMP MIB. </p>
|
76 |
-
<h2>How can I troubleshoot or resolve any issues or errors related to H3C SNMP MIB?</h2>
|
77 |
-
<p>If you encounter any issues or errors related to H3C SNMP MIB, you can try the following steps to troubleshoot or resolve them:</p>
|
78 |
-
<ul>
|
79 |
-
<li>Check if your network devices are compatible with H3C SNMP MIB and support the same version of SNMP protocol as your network management system.</li>
|
80 |
-
<li>Check if your network devices are configured correctly with the IP address, port number, community string, user name, password, and other parameters required for SNMP communication.</li>
|
81 |
-
<li>Check if your network devices are reachable and responsive from your network management system via ping or traceroute commands.</li>
|
82 |
-
<li>Check if your network devices have enabled SNMP service and allowed access from your network management system via firewall or access control list rules.</li>
|
83 |
-
<li>Check if your network management system has loaded the correct version of the MIB file and documentation for your network devices.</li>
|
84 |
-
<li>Check if your network management system has sufficient resources (memory, CPU, disk space, etc.) to run the SNMP manager software and process the data from your network devices.</li>
|
85 |
-
<li>Check if your network management system has a stable and reliable network connection with your network devices.</li>
|
86 |
-
<li>Check if there are any conflicts or errors in the syntax or semantics of the MIB objects in H3C SNMP MIB.</li>
|
87 |
-
<li>Check if there are any bugs or glitches in the software or firmware of your network devices or network management system that may affect the functionality or performance of H3C SNMP MIB.</li>
|
88 |
-
<li>Contact the technical support team of H3C for further assistance or guidance on troubleshooting or resolving any issues or errors related to H3C SNMP MIB.</li>
|
89 |
-
</ul>
|
90 |
-
<h2>What are some tips and best practices for using H3C SNMP MIB?</h2>
|
91 |
-
<p>Here are some tips and best practices for using H3C SNMP MIB effectively and efficiently:</p>
|
92 |
-
<ul>
|
93 |
-
<li>Read the documentation file of H3C SNMP MIB carefully and thoroughly to understand the structure, function, and usage of the MIB objects in H3C SNMP MIB.</li>
|
94 |
-
<li>Use the latest version of the MIB file and documentation for your network devices and keep them updated regularly to ensure compatibility and functionality.</li>
|
95 |
-
<li>Use a reliable and secure SNMP manager software that supports H3C SNMP MIB and has a user-friendly interface and features.</li>
|
96 |
-
<li>Use the appropriate version of SNMP protocol (v1, v2c, or v3) and access control mode (community-based or user-based) for your network devices and network management system according to your security needs and preferences.</li>
|
97 |
-
<li>Use encryption and authentication mechanisms to protect your data transmission and prevent unauthorized access to your MIB objects.</li>
|
98 |
-
<li>Use different levels of security strength (low or high) to comply with FIPS standards if required by your organization or industry.</li>
|
99 |
-
<li>Use a consistent and meaningful naming convention for your community strings, user names, passwords, and other parameters for your network devices and network management system.</li>
|
100 |
-
<li>Use a logical and hierarchical structure for organizing your MIB objects in H3C SNMP MIB and use descriptive labels or comments for each MIB object.</li>
|
101 |
-
<li>Use the appropriate data type, range, units, and default value for each MIB object in H3C SNMP MIB and avoid using invalid or unreasonable values that may cause errors or conflicts.</li>
|
102 |
-
<li>Use the appropriate access mode (read-only, read-write, or not-accessible) for each MIB object in H3C SNMP MIB and avoid modifying the values of the read-only or not-accessible MIB objects that may cause errors or conflicts.</li>
|
103 |
-
<li>Use the appropriate status (mandatory, optional, obsolete, or deprecated) for each MIB object in H3C SNMP MIB and avoid using the obsolete or deprecated MIB objects that may cause errors or conflicts.</li>
|
104 |
-
<li>Use the appropriate syntax (scalar, table, row, column, index, etc.) for each MIB object in H3C SNMP MIB and follow the rules and conventions for defining and using them.</li>
|
105 |
-
<li>Use the appropriate semantics (object identifier, object name, object description, etc.) for each MIB object in H3C SNMP MIB and follow the rules and conventions for defining and using them.</li>
|
106 |
-
<li>Use the appropriate commands (get, get-next, get-bulk, set, trap, inform, etc.) for querying and setting the MIB objects in H3C SNMP MIB and follow the rules and conventions for using them.</li>
|
107 |
-
<li>Use the appropriate tools (report generator, graph generator, chart generator, alert generator, notification generator, etc.) for analyzing and presenting the data collected from your network devices via H3C SNMP MIB.</li>
|
108 |
-
</ul></p> 401be4b1e0<br />
|
109 |
-
<br />
|
110 |
-
<br />
spaces/1phancelerku/anime-remove-background/Download Sniper 3D Full Mod APK and Become the Top Sniper in the World.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Sniper 3D Full Mod Apk: A Fun and Action-Packed FPS Game</h1>
|
3 |
-
<p>If you are looking for a thrilling and addictive shooting game, then you should try Sniper 3D. This is a fun action-packed multiplayer FPS game that offers players versatile play modes for endless hours of free multiplayer fun. You can download the Sniper 3D FPS assassin game for free to engage in online FPS multiplayer warfare. Enjoy the ultimate fun experience now with this free online multiplayer FPS assassin game.</p>
|
4 |
-
<h2>sniper 3d full mod apk</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://jinyurl.com/2uNK15">https://jinyurl.com/2uNK15</a></b></p><br /><br />
|
5 |
-
<h2>What is Sniper 3D?</h2>
|
6 |
-
<p>Sniper 3D is a popular shooting game that lets you become a professional sniper and take down your enemies with precision and skill. You can use various weapons and gadgets to complete different missions and challenges. You can also customize your sniper with different skins, scopes, silencers, and more. You can play solo or join other players in online multiplayer mode. You can also compete with other snipers in PvP battles and rank up on the leaderboard.</p>
|
7 |
-
<h3>Features of Sniper 3D</h3>
|
8 |
-
<h4>Stunning graphics and realistic sound effects</h4>
|
9 |
-
<p>One of the best things about Sniper 3D is its amazing graphics and sound effects. The game has high-quality 3D graphics that make you feel like you are in a real battlefield. The game also has realistic sound effects that enhance the immersion and excitement of the game. You can hear the gunshots, explosions, screams, and more as you play the game.</p>
|
10 |
-
<h4>Various weapons and upgrades to choose from</h4>
|
11 |
-
<p>Another great feature of Sniper 3D is its wide range of weapons and upgrades that you can use to improve your performance. You can choose from different types of guns, such as rifles, shotguns, pistols, machine guns, and more. You can also upgrade your weapons with different attachments, such as scopes, silencers, magazines, barrels, stocks, and more. You can also buy new weapons and items with the money you earn from completing missions.</p>
|
12 |
-
<h4>Multiple game modes and missions to complete</h4>
|
13 |
-
<p>Sniper 3D also offers multiple game modes and missions that you can enjoy. You can play the campaign mode, where you have to complete various missions and objectives in different locations. You can also play the special ops mode, where you have to face more challenging scenarios and enemies. You can also play the zombie mode, where you have to survive waves of zombies and kill them with your weapons. You can also play the daily missions, where you can earn extra rewards and bonuses.</p>
|
14 |
-
<h4>Online multiplayer and PvP battles</h4>
|
15 |
-
<p>If you want to test your skills against other players, you can join the online multiplayer mode of Sniper 3D. You can team up with other snipers or play solo in different modes, such as team deathmatch, free for all, capture the flag, king of the hill, and more. You can also challenge other snipers in PvP battles and see who is the best sniper in the world. You can also chat with other players and make friends or enemies.</p>
|
16 |
-
<h2>Why download Sniper 3D Full Mod Apk?</h2>
|
17 |
-
<p>If you want to enjoy Sniper 3D without any limitations or restrictions, you should download Sniper 3D Full Mod Apk. This is a modified version of the original game that gives you access to unlimited money and diamonds, all weapons and items unlocked, no ads, no root required, and more <h3>Benefits of Sniper 3D Full Mod Apk</h3>
|
18 |
-
<p>By downloading Sniper 3D Full Mod Apk, you can enjoy the following benefits:</p>
|
19 |
-
<h4>Unlimited money and diamonds</h4>
|
20 |
-
<p>With Sniper 3D Full Mod Apk, you don't have to worry about running out of money or diamonds. You can use them to buy and upgrade any weapon or item you want. You can also use them to unlock premium features and rewards. You can also use them to skip missions or ads if you want. You can have unlimited fun and freedom with Sniper 3D Full Mod Apk.</p>
|
21 |
-
<h4>All weapons and items unlocked</h4>
|
22 |
-
<p>With Sniper 3D Full Mod Apk, you don't have to wait or grind to unlock new weapons and items. You can access all of them from the start. You can choose from hundreds of guns, gadgets, and accessories to customize your sniper. You can also try different combinations and strategies to suit your play style and preferences. You can have the best arsenal and equipment with Sniper 3D Full Mod Apk.</p>
|
23 |
-
<h4>No ads and no root required</h4>
|
24 |
-
<p>With Sniper 3D Full Mod Apk, you don't have to deal with annoying ads that interrupt your gameplay. You can play the game without any distractions or interruptions. You can also play the game without rooting your device. You don't have to risk damaging your device or losing your warranty. You can play the game safely and smoothly with Sniper 3D Full Mod Apk.</p>
|
25 |
-
<p>sniper 3d gun shooter mod apk unlimited money<br />
|
26 |
-
sniper 3d assassin mod apk download for android<br />
|
27 |
-
sniper 3d mod apk latest version 2023<br />
|
28 |
-
sniper 3d hack mod apk free download<br />
|
29 |
-
sniper 3d mod apk offline no ads<br />
|
30 |
-
sniper 3d mod apk unlimited diamonds and coins<br />
|
31 |
-
sniper 3d mod apk all guns unlocked<br />
|
32 |
-
sniper 3d mod apk unlimited energy and tokens<br />
|
33 |
-
sniper 3d mod apk premium unlocked<br />
|
34 |
-
sniper 3d mod apk android 1 com<br />
|
35 |
-
sniper 3d mod apk rexdl com<br />
|
36 |
-
sniper 3d mod apk happymod com<br />
|
37 |
-
sniper 3d mod apk revdl com<br />
|
38 |
-
sniper 3d mod apk apkpure com<br />
|
39 |
-
sniper 3d mod apk mob.org<br />
|
40 |
-
sniper 3d mod apk no root required<br />
|
41 |
-
sniper 3d mod apk anti ban<br />
|
42 |
-
sniper 3d mod apk online multiplayer<br />
|
43 |
-
sniper 3d mod apk unlimited everything<br />
|
44 |
-
sniper 3d mod apk high damage<br />
|
45 |
-
sniper 3d mod apk mega mod<br />
|
46 |
-
sniper 3d mod apk vip features<br />
|
47 |
-
sniper 3d mod apk new update<br />
|
48 |
-
sniper 3d mod apk best graphics<br />
|
49 |
-
sniper 3d mod apk realistic physics<br />
|
50 |
-
sniper 3d mod apk fun games for free<br />
|
51 |
-
sniper 3d mod apk world of snipers<br />
|
52 |
-
sniper 3d mod apk pvp mode<br />
|
53 |
-
sniper 3d mod apk zombie mode<br />
|
54 |
-
sniper 3d mod apk special ops mode<br />
|
55 |
-
sniper 3d mod apk silent assassin mode<br />
|
56 |
-
sniper 3d mod apk contract killer mode<br />
|
57 |
-
sniper 3d mod apk city hunter mode<br />
|
58 |
-
sniper 3d mod apk ghost warrior mode<br />
|
59 |
-
sniper 3d mod apk elite shooter mode<br />
|
60 |
-
sniper 3d mod apk pro shooter mode<br />
|
61 |
-
sniper 3d mod apk master shooter mode<br />
|
62 |
-
sniper 3d mod apk legendary shooter mode<br />
|
63 |
-
sniper 3d mod apk ultimate shooter mode<br />
|
64 |
-
sniper 3d mod apk super shooter mode<br />
|
65 |
-
sniper 3d mod apk extreme shooter mode<br />
|
66 |
-
sniper 3d mod apk epic shooter mode<br />
|
67 |
-
sniper 3d mod apk awesome shooter mode<br />
|
68 |
-
sniper 3d mod apk amazing shooter mode<br />
|
69 |
-
sniper 3d mod apk fantastic shooter mode<br />
|
70 |
-
sniper 3d mod apk incredible shooter mode<br />
|
71 |
-
sniper 3d mod apk marvelous shooter mode<br />
|
72 |
-
sniper 3d mod apk wonderful shooter mode<br />
|
73 |
-
sniper 3d mod apk brilliant shooter mode</p>
|
74 |
-
<h3>How to download and install Sniper 3D Full Mod Apk?</h3>
|
75 |
-
<p>If you want to download and install Sniper 3D Full Mod Apk, you can follow these simple steps:</p>
|
76 |
-
<h4>Step 1: Download the apk file from a trusted source</h4>
|
77 |
-
<p>The first step is to download the apk file of Sniper 3D Full Mod Apk from a reliable and secure source. You can use the link below to download the latest version of the mod apk file. Make sure you have enough storage space on your device before downloading the file.</p>
|
78 |
-
<p><a href="">Download Sniper 3D Full Mod Apk here</a></p>
|
79 |
-
<h4>Step 2: Enable unknown sources on your device settings</h4>
|
80 |
-
<p>The next step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may see a warning message, but you can ignore it and proceed.</p>
|
81 |
-
<h4>Step 3: Install the apk file and launch the game</h4>
|
82 |
-
<p>The final step is to install the apk file and launch the game. To do this, locate the downloaded apk file on your device storage, then tap on it and follow the instructions. Once the installation is complete, you can open the game and enjoy Sniper 3D Full Mod Apk.</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Sniper 3D is a fun and action-packed FPS game that offers players versatile play modes for endless hours of free multiplayer fun. You can download the Sniper 3D FPS assassin game for free to engage in online FPS multiplayer warfare. Enjoy the ultimate fun experience now with this free online multiplayer FPS assassin game.</p>
|
85 |
-
<p>If you want to enjoy Sniper 3D without any limitations or restrictions, you should download Sniper 3D Full Mod Apk. This is a modified version of the original game that gives you access to unlimited money and diamonds, all weapons and items unlocked, no ads, no root required, and more. You can download Sniper 3D Full Mod Apk from the link below and follow the steps to install it on your device.</p>
|
86 |
-
<p>We hope this article was helpful for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
87 |
-
FAQs Q: Is Sniper 3D Full Mod Apk safe to use? A: Yes, Sniper 3D Full Mod Apk is safe to use as long as you download it from a trusted source. We have tested the mod apk file and found no viruses or malware in it. Q: Is Sniper 3D Full Mod Apk compatible with my device? A: Sniper 3D Full Mod Apk is compatible with most Android devices that run on Android 4.1 or higher. However, some devices may not support some features or functions of the game. Q: How do I update Sniper 3D Full Mod Apk? A: To update Sniper 3D Full Mod Apk, you need to download the latest version of the mod apk file from the same source that you downloaded it from before. Then, you need to install the new version over the old one. You may need to enable unknown sources again if you have disabled it. Q: How do I uninstall Sniper 3D Full Mod Apk? A: To uninstall Sniper 3D Full Mod Apk, you need to go to your device settings, then apps, then Sniper 3D, and tap on uninstall. You may also need to delete the apk file from your device storage if you want to free up some space. Q: What are some tips and tricks for playing Sniper 3D? A: Some tips and tricks for playing Sniper 3D are: - Aim for the head or the heart for instant kills and bonus rewards. - Use the zoom and focus buttons to adjust your view and accuracy. - Use the radar and the map to locate your enemies and objectives. - Use the wind indicator and the bullet drop to adjust your shots accordingly. - Use the thermal vision and the night vision to see through obstacles and darkness. - Use the gadgets and items to enhance your performance and survival. - Complete the daily missions and achievements to earn extra money and diamonds. - Join a clan or create your own to team up with other players and share resources.</p> 401be4b1e0<br />
|
88 |
-
<br />
|
89 |
-
<br />
spaces/2ndelement/voicevox/voicevox_engine/preset/PresetManager.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
from typing import List
|
3 |
-
|
4 |
-
import yaml
|
5 |
-
from pydantic import ValidationError, parse_obj_as
|
6 |
-
|
7 |
-
from .Preset import Preset
|
8 |
-
from .PresetError import PresetError
|
9 |
-
|
10 |
-
|
11 |
-
class PresetManager:
|
12 |
-
def __init__(
|
13 |
-
self,
|
14 |
-
preset_path: Path,
|
15 |
-
):
|
16 |
-
self.presets = []
|
17 |
-
self.last_modified_time = 0
|
18 |
-
self.preset_path = preset_path
|
19 |
-
|
20 |
-
def load_presets(self):
|
21 |
-
"""
|
22 |
-
プリセットのYAMLファイルを読み込む
|
23 |
-
|
24 |
-
Returns
|
25 |
-
-------
|
26 |
-
ret: List[Preset]
|
27 |
-
プリセットのリスト
|
28 |
-
"""
|
29 |
-
|
30 |
-
# 設定ファイルのタイムスタンプを確認
|
31 |
-
try:
|
32 |
-
_last_modified_time = self.preset_path.stat().st_mtime
|
33 |
-
if _last_modified_time == self.last_modified_time:
|
34 |
-
return self.presets
|
35 |
-
except OSError:
|
36 |
-
raise PresetError("プリセットの設定ファイルが見つかりません")
|
37 |
-
|
38 |
-
with open(self.preset_path, mode="r", encoding="utf-8") as f:
|
39 |
-
obj = yaml.safe_load(f)
|
40 |
-
if obj is None:
|
41 |
-
raise PresetError("プリセットの設定ファイルが空の内容です")
|
42 |
-
|
43 |
-
try:
|
44 |
-
_presets = parse_obj_as(List[Preset], obj)
|
45 |
-
except ValidationError:
|
46 |
-
raise PresetError("プリセットの設定ファイルにミスがあります")
|
47 |
-
|
48 |
-
# idが一意か確認
|
49 |
-
if len([preset.id for preset in _presets]) != len(
|
50 |
-
{preset.id for preset in _presets}
|
51 |
-
):
|
52 |
-
raise PresetError("プリセットのidに重複があります")
|
53 |
-
|
54 |
-
self.presets = _presets
|
55 |
-
self.last_modified_time = _last_modified_time
|
56 |
-
return self.presets
|
57 |
-
|
58 |
-
def add_preset(self, preset: Preset):
|
59 |
-
"""
|
60 |
-
YAMLファイルに新規のプリセットを追加する
|
61 |
-
|
62 |
-
Parameters
|
63 |
-
----------
|
64 |
-
preset : Preset
|
65 |
-
追加するプリセットを渡す
|
66 |
-
|
67 |
-
Returns
|
68 |
-
-------
|
69 |
-
ret: int
|
70 |
-
追加したプリセットのプリセットID
|
71 |
-
"""
|
72 |
-
|
73 |
-
# 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す
|
74 |
-
self.load_presets()
|
75 |
-
|
76 |
-
# IDが0未満、または存在するIDなら新しいIDを決定し、配列に追加
|
77 |
-
if preset.id < 0 or preset.id in {preset.id for preset in self.presets}:
|
78 |
-
preset.id = max([preset.id for preset in self.presets]) + 1
|
79 |
-
self.presets.append(preset)
|
80 |
-
|
81 |
-
# ファイルに書き込み
|
82 |
-
try:
|
83 |
-
with open(self.preset_path, mode="w", encoding="utf-8") as f:
|
84 |
-
yaml.safe_dump(
|
85 |
-
[preset.dict() for preset in self.presets],
|
86 |
-
f,
|
87 |
-
allow_unicode=True,
|
88 |
-
sort_keys=False,
|
89 |
-
)
|
90 |
-
except Exception as err:
|
91 |
-
self.presets.pop()
|
92 |
-
if isinstance(err, FileNotFoundError):
|
93 |
-
raise PresetError("プリセットの設定ファイルに書き込み失敗しました")
|
94 |
-
else:
|
95 |
-
raise err
|
96 |
-
|
97 |
-
return preset.id
|
98 |
-
|
99 |
-
def update_preset(self, preset: Preset):
|
100 |
-
"""
|
101 |
-
YAMLファイルのプリセットを更新する
|
102 |
-
|
103 |
-
Parameters
|
104 |
-
----------
|
105 |
-
preset : Preset
|
106 |
-
更新するプリセットを渡す
|
107 |
-
|
108 |
-
Returns
|
109 |
-
-------
|
110 |
-
ret: int
|
111 |
-
更新したプリセットのプリセットID
|
112 |
-
"""
|
113 |
-
|
114 |
-
# 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す
|
115 |
-
self.load_presets()
|
116 |
-
|
117 |
-
# IDが存在するか探索
|
118 |
-
prev_preset = (-1, None)
|
119 |
-
for i in range(len(self.presets)):
|
120 |
-
if self.presets[i].id == preset.id:
|
121 |
-
prev_preset = (i, self.presets[i])
|
122 |
-
self.presets[i] = preset
|
123 |
-
break
|
124 |
-
else:
|
125 |
-
raise PresetError("更新先のプリセットが存在しません")
|
126 |
-
|
127 |
-
# ファイルに書き込み
|
128 |
-
try:
|
129 |
-
with open(self.preset_path, mode="w", encoding="utf-8") as f:
|
130 |
-
yaml.safe_dump(
|
131 |
-
[preset.dict() for preset in self.presets],
|
132 |
-
f,
|
133 |
-
allow_unicode=True,
|
134 |
-
sort_keys=False,
|
135 |
-
)
|
136 |
-
except Exception as err:
|
137 |
-
if prev_preset != (-1, None):
|
138 |
-
self.presets[prev_preset[0]] = prev_preset[1]
|
139 |
-
if isinstance(err, FileNotFoundError):
|
140 |
-
raise PresetError("プリセットの設定ファイルに書き込み失敗しました")
|
141 |
-
else:
|
142 |
-
raise err
|
143 |
-
|
144 |
-
return preset.id
|
145 |
-
|
146 |
-
def delete_preset(self, id: int):
|
147 |
-
"""
|
148 |
-
YAMLファイルのプリセットを削除する
|
149 |
-
|
150 |
-
Parameters
|
151 |
-
----------
|
152 |
-
id: int
|
153 |
-
削除するプリセットのプリセットIDを渡す
|
154 |
-
|
155 |
-
Returns
|
156 |
-
-------
|
157 |
-
ret: int
|
158 |
-
削除したプリセットのプリセットID
|
159 |
-
"""
|
160 |
-
|
161 |
-
# 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す
|
162 |
-
self.load_presets()
|
163 |
-
|
164 |
-
# IDが存在するか探索
|
165 |
-
buf = None
|
166 |
-
buf_index = -1
|
167 |
-
for i in range(len(self.presets)):
|
168 |
-
if self.presets[i].id == id:
|
169 |
-
buf = self.presets.pop(i)
|
170 |
-
buf_index = i
|
171 |
-
break
|
172 |
-
else:
|
173 |
-
raise PresetError("削除対象のプリセットが存在しません")
|
174 |
-
|
175 |
-
# ファイルに書き込み
|
176 |
-
try:
|
177 |
-
with open(self.preset_path, mode="w", encoding="utf-8") as f:
|
178 |
-
yaml.safe_dump(
|
179 |
-
[preset.dict() for preset in self.presets],
|
180 |
-
f,
|
181 |
-
allow_unicode=True,
|
182 |
-
sort_keys=False,
|
183 |
-
)
|
184 |
-
except FileNotFoundError:
|
185 |
-
self.presets.insert(buf_index, buf)
|
186 |
-
raise PresetError("プリセットの設定ファイルに書き込み失敗しました")
|
187 |
-
|
188 |
-
return id
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/__init__.py
DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_speech/egs/datasets/audio/biaobei_sing/preprocess.py
DELETED
@@ -1,16 +0,0 @@
-from data_gen.tts.base_preprocess import BasePreprocessor
-import re
-
-
-class BiaobeiPreprocess(BasePreprocessor):
-    def meta_data(self):
-        input_dir = self.raw_data_dir
-        with open(f"{input_dir}/ProsodyLabeling/000001-010000.txt", encoding='utf-8') as f:
-            bb_lines = f.readlines()[::2]
-        for l_idx, l in (enumerate([re.sub("\#\d+", "", l.split('\t')[1].strip()) for l in bb_lines])):
-            item_name = f'{l_idx + 1:06d}'
-            wav_fn = f"{input_dir}/wav/{l_idx + 1:06d}.wav"
-            yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': l}
-
-if __name__ == "__main__":
-    BiaobeiPreprocess().process()
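A small illustration of the prosody-label clean-up that meta_data() performs on each Biaobei label line; the sample line below is a made-up stand-in for one entry of 000001-010000.txt.

# Strip the "#1".."#4" prosody markers from a tab-separated label line, as in meta_data().
import re

raw = "000001\t卡尔普#2陪外孙#1玩滑梯#4。"          # hypothetical label line
text = re.sub("\#\d+", "", raw.split('\t')[1].strip())
print(text)  # 卡尔普陪外孙玩滑梯。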
spaces/AIGuardians/SummarizeWikipediaDocument/inference.py
DELETED
@@ -1,3 +0,0 @@
-from transformers import AutoModelForSeq2SeqLM
-
-model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model")
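A hedged sketch of how such a seq2seq checkpoint is typically paired with its tokenizer for summarization; "sgugger/my-awesome-model" is simply the checkpoint name used in the deleted file, and the input text is a placeholder.

# Sketch only: load matching tokenizer and generate a short summary with transformers.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model")
tokenizer = AutoTokenizer.from_pretrained("sgugger/my-awesome-model")

inputs = tokenizer("Wikipedia article text to summarize...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_new_tokens=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))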
spaces/Ali-Maq/Calorie_Calculator/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Calorie Calculator
-emoji: 🔥
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Aloento/9Nine-PITS/text/frontend/normalizer/normalizer.py
DELETED
@@ -1,35 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import re
-from builtins import str as unicode
-
-import unicodedata
-
-from text.frontend.normalizer.numbers import normalize_numbers
-
-
-def normalize(sentence):
-    """ Normalize English text.
-    """
-    # preprocessing
-    sentence = unicode(sentence)
-    sentence = normalize_numbers(sentence)
-    sentence = ''.join(
-        char for char in unicodedata.normalize('NFD', sentence)
-        if unicodedata.category(char) != 'Mn')  # Strip accents
-    sentence = sentence.lower()
-    sentence = re.sub(r"[^ a-z'.,?!\-]", "", sentence)
-    sentence = sentence.replace("i.e.", "that is")
-    sentence = sentence.replace("e.g.", "for example")
-    return sentence
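An example call of normalize() from the deleted module above; the exact expansion of the numeral depends on normalize_numbers(), which is not shown in this diff, so the commented output is only indicative.

# Indicative example of normalize(); numeral expansion depends on normalize_numbers().
from text.frontend.normalizer.normalizer import normalize

print(normalize("Héllo, World! It is e.g. 2 o'clock."))
# roughly: "hello, world! it is for example two o'clock."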
spaces/Aloento/9Nine-VITS/attentions.py
DELETED
@@ -1,250 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
import commons
|
8 |
-
from modules import LayerNorm
|
9 |
-
|
10 |
-
|
11 |
-
class Encoder(nn.Module):
|
12 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
13 |
-
super().__init__()
|
14 |
-
self.hidden_channels = hidden_channels
|
15 |
-
self.filter_channels = filter_channels
|
16 |
-
self.n_heads = n_heads
|
17 |
-
self.n_layers = n_layers
|
18 |
-
self.kernel_size = kernel_size
|
19 |
-
self.p_dropout = p_dropout
|
20 |
-
self.window_size = window_size
|
21 |
-
|
22 |
-
self.drop = nn.Dropout(p_dropout)
|
23 |
-
self.attn_layers = nn.ModuleList()
|
24 |
-
self.norm_layers_1 = nn.ModuleList()
|
25 |
-
self.ffn_layers = nn.ModuleList()
|
26 |
-
self.norm_layers_2 = nn.ModuleList()
|
27 |
-
for i in range(self.n_layers):
|
28 |
-
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
29 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
30 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
31 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
32 |
-
|
33 |
-
def forward(self, x, x_mask):
|
34 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
35 |
-
x = x * x_mask
|
36 |
-
for i in range(self.n_layers):
|
37 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
38 |
-
y = self.drop(y)
|
39 |
-
x = self.norm_layers_1[i](x + y)
|
40 |
-
|
41 |
-
y = self.ffn_layers[i](x, x_mask)
|
42 |
-
y = self.drop(y)
|
43 |
-
x = self.norm_layers_2[i](x + y)
|
44 |
-
x = x * x_mask
|
45 |
-
return x
|
46 |
-
|
47 |
-
|
48 |
-
class MultiHeadAttention(nn.Module):
|
49 |
-
def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
|
50 |
-
super().__init__()
|
51 |
-
assert channels % n_heads == 0
|
52 |
-
|
53 |
-
self.channels = channels
|
54 |
-
self.out_channels = out_channels
|
55 |
-
self.n_heads = n_heads
|
56 |
-
self.p_dropout = p_dropout
|
57 |
-
self.window_size = window_size
|
58 |
-
self.heads_share = heads_share
|
59 |
-
self.block_length = block_length
|
60 |
-
self.proximal_bias = proximal_bias
|
61 |
-
self.proximal_init = proximal_init
|
62 |
-
self.attn = None
|
63 |
-
|
64 |
-
self.k_channels = channels // n_heads
|
65 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
66 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
67 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
68 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
69 |
-
self.drop = nn.Dropout(p_dropout)
|
70 |
-
|
71 |
-
if window_size is not None:
|
72 |
-
n_heads_rel = 1 if heads_share else n_heads
|
73 |
-
rel_stddev = self.k_channels ** -0.5
|
74 |
-
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
75 |
-
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
76 |
-
|
77 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
78 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
79 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
80 |
-
if proximal_init:
|
81 |
-
with torch.no_grad():
|
82 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
83 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
84 |
-
|
85 |
-
def forward(self, x, c, attn_mask=None):
|
86 |
-
q = self.conv_q(x)
|
87 |
-
k = self.conv_k(c)
|
88 |
-
v = self.conv_v(c)
|
89 |
-
|
90 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
91 |
-
|
92 |
-
x = self.conv_o(x)
|
93 |
-
return x
|
94 |
-
|
95 |
-
def attention(self, query, key, value, mask=None):
|
96 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
97 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
98 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
99 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
100 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
101 |
-
|
102 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
103 |
-
if self.window_size is not None:
|
104 |
-
assert t_s == t_t, "Relative attention is only available for self-attention."
|
105 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
106 |
-
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
|
107 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
108 |
-
scores = scores + scores_local
|
109 |
-
if self.proximal_bias:
|
110 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
111 |
-
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
112 |
-
if mask is not None:
|
113 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
114 |
-
if self.block_length is not None:
|
115 |
-
assert t_s == t_t, "Local attention is only available for self-attention."
|
116 |
-
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
|
117 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
118 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
119 |
-
p_attn = self.drop(p_attn)
|
120 |
-
output = torch.matmul(p_attn, value)
|
121 |
-
if self.window_size is not None:
|
122 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
123 |
-
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
|
124 |
-
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
|
125 |
-
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
126 |
-
return output, p_attn
|
127 |
-
|
128 |
-
def _matmul_with_relative_values(self, x, y):
|
129 |
-
"""
|
130 |
-
x: [b, h, l, m]
|
131 |
-
y: [h or 1, m, d]
|
132 |
-
ret: [b, h, l, d]
|
133 |
-
"""
|
134 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
135 |
-
return ret
|
136 |
-
|
137 |
-
def _matmul_with_relative_keys(self, x, y):
|
138 |
-
"""
|
139 |
-
x: [b, h, l, d]
|
140 |
-
y: [h or 1, m, d]
|
141 |
-
ret: [b, h, l, m]
|
142 |
-
"""
|
143 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
144 |
-
return ret
|
145 |
-
|
146 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
147 |
-
max_relative_position = 2 * self.window_size + 1
|
148 |
-
# Pad first before slice to avoid using cond ops.
|
149 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
150 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
151 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
152 |
-
if pad_length > 0:
|
153 |
-
padded_relative_embeddings = F.pad(
|
154 |
-
relative_embeddings,
|
155 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
|
156 |
-
else:
|
157 |
-
padded_relative_embeddings = relative_embeddings
|
158 |
-
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
|
159 |
-
return used_relative_embeddings
|
160 |
-
|
161 |
-
def _relative_position_to_absolute_position(self, x):
|
162 |
-
"""
|
163 |
-
x: [b, h, l, 2*l-1]
|
164 |
-
ret: [b, h, l, l]
|
165 |
-
"""
|
166 |
-
batch, heads, length, _ = x.size()
|
167 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
168 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
169 |
-
|
170 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
171 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
172 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
|
173 |
-
|
174 |
-
# Reshape and slice out the padded elements.
|
175 |
-
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
|
176 |
-
return x_final
|
177 |
-
|
178 |
-
def _absolute_position_to_relative_position(self, x):
|
179 |
-
"""
|
180 |
-
x: [b, h, l, l]
|
181 |
-
ret: [b, h, l, 2*l-1]
|
182 |
-
"""
|
183 |
-
batch, heads, length, _ = x.size()
|
184 |
-
# padd along column
|
185 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
|
186 |
-
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
|
187 |
-
# add 0's in the beginning that will skew the elements after reshape
|
188 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
189 |
-
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
190 |
-
return x_final
|
191 |
-
|
192 |
-
def _attention_bias_proximal(self, length):
|
193 |
-
"""Bias for self-attention to encourage attention to close positions.
|
194 |
-
Args:
|
195 |
-
length: an integer scalar.
|
196 |
-
Returns:
|
197 |
-
a Tensor with shape [1, 1, length, length]
|
198 |
-
"""
|
199 |
-
r = torch.arange(length, dtype=torch.float32)
|
200 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
201 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
202 |
-
|
203 |
-
|
204 |
-
class FFN(nn.Module):
|
205 |
-
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
|
206 |
-
super().__init__()
|
207 |
-
self.in_channels = in_channels
|
208 |
-
self.out_channels = out_channels
|
209 |
-
self.filter_channels = filter_channels
|
210 |
-
self.kernel_size = kernel_size
|
211 |
-
self.p_dropout = p_dropout
|
212 |
-
self.activation = activation
|
213 |
-
self.causal = causal
|
214 |
-
|
215 |
-
if causal:
|
216 |
-
self.padding = self._causal_padding
|
217 |
-
else:
|
218 |
-
self.padding = self._same_padding
|
219 |
-
|
220 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
221 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
222 |
-
self.drop = nn.Dropout(p_dropout)
|
223 |
-
|
224 |
-
def forward(self, x, x_mask):
|
225 |
-
x = self.conv_1(self.padding(x * x_mask))
|
226 |
-
if self.activation == "gelu":
|
227 |
-
x = x * torch.sigmoid(1.702 * x)
|
228 |
-
else:
|
229 |
-
x = torch.relu(x)
|
230 |
-
x = self.drop(x)
|
231 |
-
x = self.conv_2(self.padding(x * x_mask))
|
232 |
-
return x * x_mask
|
233 |
-
|
234 |
-
def _causal_padding(self, x):
|
235 |
-
if self.kernel_size == 1:
|
236 |
-
return x
|
237 |
-
pad_l = self.kernel_size - 1
|
238 |
-
pad_r = 0
|
239 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
240 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
241 |
-
return x
|
242 |
-
|
243 |
-
def _same_padding(self, x):
|
244 |
-
if self.kernel_size == 1:
|
245 |
-
return x
|
246 |
-
pad_l = (self.kernel_size - 1) // 2
|
247 |
-
pad_r = self.kernel_size // 2
|
248 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
249 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
250 |
-
return x
|
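A shape-level usage sketch of the Encoder defined in the deleted attentions.py above; it assumes the repository's commons and modules helpers are importable alongside attentions.py, and the channel counts and sequence length are arbitrary example values.

# Sketch only: run the relative-attention Encoder on a dummy [batch, channels, time] tensor.
import torch
from attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6,
              kernel_size=3, p_dropout=0.1)
x = torch.randn(1, 192, 100)       # [batch, hidden_channels, time]
x_mask = torch.ones(1, 1, 100)     # 1 where frames are valid
out = enc(x, x_mask)               # same shape as x: [1, 192, 100]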
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/__init__.py
DELETED
@@ -1,9 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-from .util import EasyDict, make_cache_dir_path
spaces/Amrrs/DragGan-Inversion/PTI/utils/data_utils.py
DELETED
@@ -1,34 +0,0 @@
-import os
-
-from PIL import Image
-
-IMG_EXTENSIONS = [
-    '.jpg', '.JPG', '.jpeg', '.JPEG',
-    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
-]
-
-
-def is_image_file(filename):
-    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def tensor2im(var):
-    # var shape: (3, H, W)
-    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
-    var = ((var + 1) / 2)
-    var[var < 0] = 0
-    var[var > 1] = 1
-    var = var * 255
-    return Image.fromarray(var.astype('uint8'))
-
-
-def make_dataset(dir):
-    images = []
-    assert os.path.isdir(dir), '%s is not a valid directory' % dir
-    for root, _, fnames in sorted(os.walk(dir)):
-        for fname in fnames:
-            if is_image_file(fname):
-                path = os.path.join(root, fname)
-                fname = fname.split('.')[0]
-                images.append((fname, path))
-    return images
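A quick usage sketch for the two helpers above; the import path is inferred from the file location in this repository, and "./images" is a placeholder folder that must exist for make_dataset() to pass its assertion.

# Hypothetical usage of the deleted data_utils helpers; folder and file names are placeholders.
import torch
from PTI.utils.data_utils import make_dataset, tensor2im

pairs = make_dataset("./images")          # [(name_without_extension, full_path), ...]
print(pairs[:3])

fake = torch.rand(3, 256, 256) * 2 - 1    # tensor in [-1, 1], as tensor2im expects
tensor2im(fake).save("preview.png")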
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_versatile_diffusion_to_diffusers.py
DELETED
@@ -1,791 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
""" Conversion script for the Versatile Stable Diffusion checkpoints. """
|
16 |
-
|
17 |
-
import argparse
|
18 |
-
from argparse import Namespace
|
19 |
-
|
20 |
-
import torch
|
21 |
-
from transformers import (
|
22 |
-
CLIPImageProcessor,
|
23 |
-
CLIPTextModelWithProjection,
|
24 |
-
CLIPTokenizer,
|
25 |
-
CLIPVisionModelWithProjection,
|
26 |
-
)
|
27 |
-
|
28 |
-
from diffusers import (
|
29 |
-
AutoencoderKL,
|
30 |
-
DDIMScheduler,
|
31 |
-
DPMSolverMultistepScheduler,
|
32 |
-
EulerAncestralDiscreteScheduler,
|
33 |
-
EulerDiscreteScheduler,
|
34 |
-
LMSDiscreteScheduler,
|
35 |
-
PNDMScheduler,
|
36 |
-
UNet2DConditionModel,
|
37 |
-
VersatileDiffusionPipeline,
|
38 |
-
)
|
39 |
-
from diffusers.pipelines.versatile_diffusion.modeling_text_unet import UNetFlatConditionModel
|
40 |
-
|
41 |
-
|
42 |
-
SCHEDULER_CONFIG = Namespace(
|
43 |
-
**{
|
44 |
-
"beta_linear_start": 0.00085,
|
45 |
-
"beta_linear_end": 0.012,
|
46 |
-
"timesteps": 1000,
|
47 |
-
"scale_factor": 0.18215,
|
48 |
-
}
|
49 |
-
)
|
50 |
-
|
51 |
-
IMAGE_UNET_CONFIG = Namespace(
|
52 |
-
**{
|
53 |
-
"input_channels": 4,
|
54 |
-
"model_channels": 320,
|
55 |
-
"output_channels": 4,
|
56 |
-
"num_noattn_blocks": [2, 2, 2, 2],
|
57 |
-
"channel_mult": [1, 2, 4, 4],
|
58 |
-
"with_attn": [True, True, True, False],
|
59 |
-
"num_heads": 8,
|
60 |
-
"context_dim": 768,
|
61 |
-
"use_checkpoint": True,
|
62 |
-
}
|
63 |
-
)
|
64 |
-
|
65 |
-
TEXT_UNET_CONFIG = Namespace(
|
66 |
-
**{
|
67 |
-
"input_channels": 768,
|
68 |
-
"model_channels": 320,
|
69 |
-
"output_channels": 768,
|
70 |
-
"num_noattn_blocks": [2, 2, 2, 2],
|
71 |
-
"channel_mult": [1, 2, 4, 4],
|
72 |
-
"second_dim": [4, 4, 4, 4],
|
73 |
-
"with_attn": [True, True, True, False],
|
74 |
-
"num_heads": 8,
|
75 |
-
"context_dim": 768,
|
76 |
-
"use_checkpoint": True,
|
77 |
-
}
|
78 |
-
)
|
79 |
-
|
80 |
-
AUTOENCODER_CONFIG = Namespace(
|
81 |
-
**{
|
82 |
-
"double_z": True,
|
83 |
-
"z_channels": 4,
|
84 |
-
"resolution": 256,
|
85 |
-
"in_channels": 3,
|
86 |
-
"out_ch": 3,
|
87 |
-
"ch": 128,
|
88 |
-
"ch_mult": [1, 2, 4, 4],
|
89 |
-
"num_res_blocks": 2,
|
90 |
-
"attn_resolutions": [],
|
91 |
-
"dropout": 0.0,
|
92 |
-
}
|
93 |
-
)
|
94 |
-
|
95 |
-
|
96 |
-
def shave_segments(path, n_shave_prefix_segments=1):
|
97 |
-
"""
|
98 |
-
Removes segments. Positive values shave the first segments, negative shave the last segments.
|
99 |
-
"""
|
100 |
-
if n_shave_prefix_segments >= 0:
|
101 |
-
return ".".join(path.split(".")[n_shave_prefix_segments:])
|
102 |
-
else:
|
103 |
-
return ".".join(path.split(".")[:n_shave_prefix_segments])
|
104 |
-
|
105 |
-
|
106 |
-
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
|
107 |
-
"""
|
108 |
-
Updates paths inside resnets to the new naming scheme (local renaming)
|
109 |
-
"""
|
110 |
-
mapping = []
|
111 |
-
for old_item in old_list:
|
112 |
-
new_item = old_item.replace("in_layers.0", "norm1")
|
113 |
-
new_item = new_item.replace("in_layers.2", "conv1")
|
114 |
-
|
115 |
-
new_item = new_item.replace("out_layers.0", "norm2")
|
116 |
-
new_item = new_item.replace("out_layers.3", "conv2")
|
117 |
-
|
118 |
-
new_item = new_item.replace("emb_layers.1", "time_emb_proj")
|
119 |
-
new_item = new_item.replace("skip_connection", "conv_shortcut")
|
120 |
-
|
121 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
122 |
-
|
123 |
-
mapping.append({"old": old_item, "new": new_item})
|
124 |
-
|
125 |
-
return mapping
|
126 |
-
|
127 |
-
|
128 |
-
def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
|
129 |
-
"""
|
130 |
-
Updates paths inside resnets to the new naming scheme (local renaming)
|
131 |
-
"""
|
132 |
-
mapping = []
|
133 |
-
for old_item in old_list:
|
134 |
-
new_item = old_item
|
135 |
-
|
136 |
-
new_item = new_item.replace("nin_shortcut", "conv_shortcut")
|
137 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
138 |
-
|
139 |
-
mapping.append({"old": old_item, "new": new_item})
|
140 |
-
|
141 |
-
return mapping
|
142 |
-
|
143 |
-
|
144 |
-
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
|
145 |
-
"""
|
146 |
-
Updates paths inside attentions to the new naming scheme (local renaming)
|
147 |
-
"""
|
148 |
-
mapping = []
|
149 |
-
for old_item in old_list:
|
150 |
-
new_item = old_item
|
151 |
-
|
152 |
-
# new_item = new_item.replace('norm.weight', 'group_norm.weight')
|
153 |
-
# new_item = new_item.replace('norm.bias', 'group_norm.bias')
|
154 |
-
|
155 |
-
# new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
|
156 |
-
# new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
|
157 |
-
|
158 |
-
# new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
159 |
-
|
160 |
-
mapping.append({"old": old_item, "new": new_item})
|
161 |
-
|
162 |
-
return mapping
|
163 |
-
|
164 |
-
|
165 |
-
def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
|
166 |
-
"""
|
167 |
-
Updates paths inside attentions to the new naming scheme (local renaming)
|
168 |
-
"""
|
169 |
-
mapping = []
|
170 |
-
for old_item in old_list:
|
171 |
-
new_item = old_item
|
172 |
-
|
173 |
-
new_item = new_item.replace("norm.weight", "group_norm.weight")
|
174 |
-
new_item = new_item.replace("norm.bias", "group_norm.bias")
|
175 |
-
|
176 |
-
new_item = new_item.replace("q.weight", "query.weight")
|
177 |
-
new_item = new_item.replace("q.bias", "query.bias")
|
178 |
-
|
179 |
-
new_item = new_item.replace("k.weight", "key.weight")
|
180 |
-
new_item = new_item.replace("k.bias", "key.bias")
|
181 |
-
|
182 |
-
new_item = new_item.replace("v.weight", "value.weight")
|
183 |
-
new_item = new_item.replace("v.bias", "value.bias")
|
184 |
-
|
185 |
-
new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
|
186 |
-
new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
|
187 |
-
|
188 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
189 |
-
|
190 |
-
mapping.append({"old": old_item, "new": new_item})
|
191 |
-
|
192 |
-
return mapping
|
193 |
-
|
194 |
-
|
195 |
-
def assign_to_checkpoint(
|
196 |
-
paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
|
197 |
-
):
|
198 |
-
"""
|
199 |
-
This does the final conversion step: take locally converted weights and apply a global renaming
|
200 |
-
to them. It splits attention layers, and takes into account additional replacements
|
201 |
-
that may arise.
|
202 |
-
|
203 |
-
Assigns the weights to the new checkpoint.
|
204 |
-
"""
|
205 |
-
assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
|
206 |
-
|
207 |
-
# Splits the attention layers into three variables.
|
208 |
-
if attention_paths_to_split is not None:
|
209 |
-
for path, path_map in attention_paths_to_split.items():
|
210 |
-
old_tensor = old_checkpoint[path]
|
211 |
-
channels = old_tensor.shape[0] // 3
|
212 |
-
|
213 |
-
target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
|
214 |
-
|
215 |
-
num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
|
216 |
-
|
217 |
-
old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
|
218 |
-
query, key, value = old_tensor.split(channels // num_heads, dim=1)
|
219 |
-
|
220 |
-
checkpoint[path_map["query"]] = query.reshape(target_shape)
|
221 |
-
checkpoint[path_map["key"]] = key.reshape(target_shape)
|
222 |
-
checkpoint[path_map["value"]] = value.reshape(target_shape)
|
223 |
-
|
224 |
-
for path in paths:
|
225 |
-
new_path = path["new"]
|
226 |
-
|
227 |
-
# These have already been assigned
|
228 |
-
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
|
229 |
-
continue
|
230 |
-
|
231 |
-
# Global renaming happens here
|
232 |
-
new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
|
233 |
-
new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
|
234 |
-
new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
|
235 |
-
|
236 |
-
if additional_replacements is not None:
|
237 |
-
for replacement in additional_replacements:
|
238 |
-
new_path = new_path.replace(replacement["old"], replacement["new"])
|
239 |
-
|
240 |
-
# proj_attn.weight has to be converted from conv 1D to linear
|
241 |
-
if "proj_attn.weight" in new_path:
|
242 |
-
checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
|
243 |
-
elif path["old"] in old_checkpoint:
|
244 |
-
checkpoint[new_path] = old_checkpoint[path["old"]]
|
245 |
-
|
246 |
-
|
247 |
-
def conv_attn_to_linear(checkpoint):
|
248 |
-
keys = list(checkpoint.keys())
|
249 |
-
attn_keys = ["query.weight", "key.weight", "value.weight"]
|
250 |
-
for key in keys:
|
251 |
-
if ".".join(key.split(".")[-2:]) in attn_keys:
|
252 |
-
if checkpoint[key].ndim > 2:
|
253 |
-
checkpoint[key] = checkpoint[key][:, :, 0, 0]
|
254 |
-
elif "proj_attn.weight" in key:
|
255 |
-
if checkpoint[key].ndim > 2:
|
256 |
-
checkpoint[key] = checkpoint[key][:, :, 0]
|
257 |
-
|
258 |
-
|
259 |
-
def create_image_unet_diffusers_config(unet_params):
|
260 |
-
"""
|
261 |
-
Creates a config for the diffusers based on the config of the VD model.
|
262 |
-
"""
|
263 |
-
|
264 |
-
block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult]
|
265 |
-
|
266 |
-
down_block_types = []
|
267 |
-
resolution = 1
|
268 |
-
for i in range(len(block_out_channels)):
|
269 |
-
block_type = "CrossAttnDownBlock2D" if unet_params.with_attn[i] else "DownBlock2D"
|
270 |
-
down_block_types.append(block_type)
|
271 |
-
if i != len(block_out_channels) - 1:
|
272 |
-
resolution *= 2
|
273 |
-
|
274 |
-
up_block_types = []
|
275 |
-
for i in range(len(block_out_channels)):
|
276 |
-
block_type = "CrossAttnUpBlock2D" if unet_params.with_attn[-i - 1] else "UpBlock2D"
|
277 |
-
up_block_types.append(block_type)
|
278 |
-
resolution //= 2
|
279 |
-
|
280 |
-
if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks):
|
281 |
-
raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.")
|
282 |
-
|
283 |
-
config = {
|
284 |
-
"sample_size": None,
|
285 |
-
"in_channels": unet_params.input_channels,
|
286 |
-
"out_channels": unet_params.output_channels,
|
287 |
-
"down_block_types": tuple(down_block_types),
|
288 |
-
"up_block_types": tuple(up_block_types),
|
289 |
-
"block_out_channels": tuple(block_out_channels),
|
290 |
-
"layers_per_block": unet_params.num_noattn_blocks[0],
|
291 |
-
"cross_attention_dim": unet_params.context_dim,
|
292 |
-
"attention_head_dim": unet_params.num_heads,
|
293 |
-
}
|
294 |
-
|
295 |
-
return config
|
296 |
-
|
297 |
-
|
298 |
-
def create_text_unet_diffusers_config(unet_params):
|
299 |
-
"""
|
300 |
-
Creates a config for the diffusers based on the config of the VD model.
|
301 |
-
"""
|
302 |
-
|
303 |
-
block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult]
|
304 |
-
|
305 |
-
down_block_types = []
|
306 |
-
resolution = 1
|
307 |
-
for i in range(len(block_out_channels)):
|
308 |
-
block_type = "CrossAttnDownBlockFlat" if unet_params.with_attn[i] else "DownBlockFlat"
|
309 |
-
down_block_types.append(block_type)
|
310 |
-
if i != len(block_out_channels) - 1:
|
311 |
-
resolution *= 2
|
312 |
-
|
313 |
-
up_block_types = []
|
314 |
-
for i in range(len(block_out_channels)):
|
315 |
-
block_type = "CrossAttnUpBlockFlat" if unet_params.with_attn[-i - 1] else "UpBlockFlat"
|
316 |
-
up_block_types.append(block_type)
|
317 |
-
resolution //= 2
|
318 |
-
|
319 |
-
if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks):
|
320 |
-
raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.")
|
321 |
-
|
322 |
-
config = {
|
323 |
-
"sample_size": None,
|
324 |
-
"in_channels": (unet_params.input_channels, 1, 1),
|
325 |
-
"out_channels": (unet_params.output_channels, 1, 1),
|
326 |
-
"down_block_types": tuple(down_block_types),
|
327 |
-
"up_block_types": tuple(up_block_types),
|
328 |
-
"block_out_channels": tuple(block_out_channels),
|
329 |
-
"layers_per_block": unet_params.num_noattn_blocks[0],
|
330 |
-
"cross_attention_dim": unet_params.context_dim,
|
331 |
-
"attention_head_dim": unet_params.num_heads,
|
332 |
-
}
|
333 |
-
|
334 |
-
return config
|
335 |
-
|
336 |
-
|
337 |
-
def create_vae_diffusers_config(vae_params):
|
338 |
-
"""
|
339 |
-
Creates a config for the diffusers based on the config of the VD model.
|
340 |
-
"""
|
341 |
-
|
342 |
-
block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
|
343 |
-
down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
|
344 |
-
up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
|
345 |
-
|
346 |
-
config = {
|
347 |
-
"sample_size": vae_params.resolution,
|
348 |
-
"in_channels": vae_params.in_channels,
|
349 |
-
"out_channels": vae_params.out_ch,
|
350 |
-
"down_block_types": tuple(down_block_types),
|
351 |
-
"up_block_types": tuple(up_block_types),
|
352 |
-
"block_out_channels": tuple(block_out_channels),
|
353 |
-
"latent_channels": vae_params.z_channels,
|
354 |
-
"layers_per_block": vae_params.num_res_blocks,
|
355 |
-
}
|
356 |
-
return config
|
357 |
-
|
358 |
-
|
359 |
-
def create_diffusers_scheduler(original_config):
|
360 |
-
schedular = DDIMScheduler(
|
361 |
-
num_train_timesteps=original_config.model.params.timesteps,
|
362 |
-
beta_start=original_config.model.params.linear_start,
|
363 |
-
beta_end=original_config.model.params.linear_end,
|
364 |
-
beta_schedule="scaled_linear",
|
365 |
-
)
|
366 |
-
return schedular
|
367 |
-
|
368 |
-
|
369 |
-
def convert_vd_unet_checkpoint(checkpoint, config, unet_key, extract_ema=False):
|
370 |
-
"""
|
371 |
-
Takes a state dict and a config, and returns a converted checkpoint.
|
372 |
-
"""
|
373 |
-
|
374 |
-
# extract state_dict for UNet
|
375 |
-
unet_state_dict = {}
|
376 |
-
keys = list(checkpoint.keys())
|
377 |
-
|
378 |
-
# at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
|
379 |
-
if sum(k.startswith("model_ema") for k in keys) > 100:
|
380 |
-
print("Checkpoint has both EMA and non-EMA weights.")
|
381 |
-
if extract_ema:
|
382 |
-
print(
|
383 |
-
"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
|
384 |
-
" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
|
385 |
-
)
|
386 |
-
for key in keys:
|
387 |
-
if key.startswith("model.diffusion_model"):
|
388 |
-
flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
|
389 |
-
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
|
390 |
-
else:
|
391 |
-
print(
|
392 |
-
"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
|
393 |
-
" weights (usually better for inference), please make sure to add the `--extract_ema` flag."
|
394 |
-
)
|
395 |
-
|
396 |
-
for key in keys:
|
397 |
-
if key.startswith(unet_key):
|
398 |
-
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
|
399 |
-
|
400 |
-
new_checkpoint = {}
|
401 |
-
|
402 |
-
new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["model.diffusion_model.time_embed.0.weight"]
|
403 |
-
new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["model.diffusion_model.time_embed.0.bias"]
|
404 |
-
new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["model.diffusion_model.time_embed.2.weight"]
|
405 |
-
new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["model.diffusion_model.time_embed.2.bias"]
|
406 |
-
|
407 |
-
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
|
408 |
-
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
|
409 |
-
|
410 |
-
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
|
411 |
-
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
|
412 |
-
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
|
413 |
-
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
|
414 |
-
|
415 |
-
# Retrieves the keys for the input blocks only
|
416 |
-
num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
|
417 |
-
input_blocks = {
|
418 |
-
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
|
419 |
-
for layer_id in range(num_input_blocks)
|
420 |
-
}
|
421 |
-
|
422 |
-
# Retrieves the keys for the middle blocks only
|
423 |
-
num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
|
424 |
-
middle_blocks = {
|
425 |
-
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
|
426 |
-
for layer_id in range(num_middle_blocks)
|
427 |
-
}
|
428 |
-
|
429 |
-
# Retrieves the keys for the output blocks only
|
430 |
-
num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
|
431 |
-
output_blocks = {
|
432 |
-
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
|
433 |
-
for layer_id in range(num_output_blocks)
|
434 |
-
}
|
435 |
-
|
436 |
-
for i in range(1, num_input_blocks):
|
437 |
-
block_id = (i - 1) // (config["layers_per_block"] + 1)
|
438 |
-
layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
|
439 |
-
|
440 |
-
resnets = [
|
441 |
-
key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
|
442 |
-
]
|
443 |
-
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
|
444 |
-
|
445 |
-
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
|
446 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
|
447 |
-
f"input_blocks.{i}.0.op.weight"
|
448 |
-
)
|
449 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
|
450 |
-
f"input_blocks.{i}.0.op.bias"
|
451 |
-
)
|
452 |
-
elif f"input_blocks.{i}.0.weight" in unet_state_dict:
|
453 |
-
# text_unet uses linear layers in place of downsamplers
|
454 |
-
shape = unet_state_dict[f"input_blocks.{i}.0.weight"].shape
|
455 |
-
if shape[0] != shape[1]:
|
456 |
-
continue
|
457 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.weight"] = unet_state_dict.pop(
|
458 |
-
f"input_blocks.{i}.0.weight"
|
459 |
-
)
|
460 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.bias"] = unet_state_dict.pop(
|
461 |
-
f"input_blocks.{i}.0.bias"
|
462 |
-
)
|
463 |
-
|
464 |
-
paths = renew_resnet_paths(resnets)
|
465 |
-
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
466 |
-
assign_to_checkpoint(
|
467 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
468 |
-
)
|
469 |
-
|
470 |
-
if len(attentions):
|
471 |
-
paths = renew_attention_paths(attentions)
|
472 |
-
meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
|
473 |
-
assign_to_checkpoint(
|
474 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
475 |
-
)
|
476 |
-
|
477 |
-
resnet_0 = middle_blocks[0]
|
478 |
-
attentions = middle_blocks[1]
|
479 |
-
resnet_1 = middle_blocks[2]
|
480 |
-
|
481 |
-
resnet_0_paths = renew_resnet_paths(resnet_0)
|
482 |
-
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
|
483 |
-
|
484 |
-
resnet_1_paths = renew_resnet_paths(resnet_1)
|
485 |
-
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
|
486 |
-
|
487 |
-
attentions_paths = renew_attention_paths(attentions)
|
488 |
-
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
|
489 |
-
assign_to_checkpoint(
|
490 |
-
attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
491 |
-
)
|
492 |
-
|
493 |
-
for i in range(num_output_blocks):
|
494 |
-
block_id = i // (config["layers_per_block"] + 1)
|
495 |
-
layer_in_block_id = i % (config["layers_per_block"] + 1)
|
496 |
-
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
|
497 |
-
output_block_list = {}
|
498 |
-
|
499 |
-
for layer in output_block_layers:
|
500 |
-
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
|
501 |
-
if layer_id in output_block_list:
|
502 |
-
output_block_list[layer_id].append(layer_name)
|
503 |
-
else:
|
504 |
-
output_block_list[layer_id] = [layer_name]
|
505 |
-
|
506 |
-
if len(output_block_list) > 1:
|
507 |
-
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
|
508 |
-
attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
|
509 |
-
|
510 |
-
paths = renew_resnet_paths(resnets)
|
511 |
-
|
512 |
-
meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
513 |
-
assign_to_checkpoint(
|
514 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
515 |
-
)
|
516 |
-
|
517 |
-
if ["conv.weight", "conv.bias"] in output_block_list.values():
|
518 |
-
index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
|
519 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
|
520 |
-
f"output_blocks.{i}.{index}.conv.weight"
|
521 |
-
]
|
522 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
|
523 |
-
f"output_blocks.{i}.{index}.conv.bias"
|
524 |
-
]
|
525 |
-
# Clear attentions as they have been attributed above.
|
526 |
-
if len(attentions) == 2:
|
527 |
-
attentions = []
|
528 |
-
elif f"output_blocks.{i}.1.weight" in unet_state_dict:
|
529 |
-
# text_unet uses linear layers in place of upsamplers
|
530 |
-
shape = unet_state_dict[f"output_blocks.{i}.1.weight"].shape
|
531 |
-
if shape[0] != shape[1]:
|
532 |
-
continue
|
533 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop(
|
534 |
-
f"output_blocks.{i}.1.weight"
|
535 |
-
)
|
536 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop(
|
537 |
-
f"output_blocks.{i}.1.bias"
|
538 |
-
)
|
539 |
-
# Clear attentions as they have been attributed above.
|
540 |
-
if len(attentions) == 2:
|
541 |
-
attentions = []
|
542 |
-
elif f"output_blocks.{i}.2.weight" in unet_state_dict:
|
543 |
-
# text_unet uses linear layers in place of upsamplers
|
544 |
-
shape = unet_state_dict[f"output_blocks.{i}.2.weight"].shape
|
545 |
-
if shape[0] != shape[1]:
|
546 |
-
continue
|
547 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop(
|
548 |
-
f"output_blocks.{i}.2.weight"
|
549 |
-
)
|
550 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop(
|
551 |
-
f"output_blocks.{i}.2.bias"
|
552 |
-
)
|
553 |
-
|
554 |
-
if len(attentions):
|
555 |
-
paths = renew_attention_paths(attentions)
|
556 |
-
meta_path = {
|
557 |
-
"old": f"output_blocks.{i}.1",
|
558 |
-
"new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
|
559 |
-
}
|
560 |
-
assign_to_checkpoint(
|
561 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
562 |
-
)
|
563 |
-
else:
|
564 |
-
resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
|
565 |
-
for path in resnet_0_paths:
|
566 |
-
old_path = ".".join(["output_blocks", str(i), path["old"]])
|
567 |
-
new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
|
568 |
-
|
569 |
-
new_checkpoint[new_path] = unet_state_dict[old_path]
|
570 |
-
|
571 |
-
return new_checkpoint
|
572 |
-
|
573 |
-
|
574 |
-
def convert_vd_vae_checkpoint(checkpoint, config):
|
575 |
-
# extract state dict for VAE
|
576 |
-
vae_state_dict = {}
|
577 |
-
keys = list(checkpoint.keys())
|
578 |
-
for key in keys:
|
579 |
-
vae_state_dict[key] = checkpoint.get(key)
|
580 |
-
|
581 |
-
new_checkpoint = {}
|
582 |
-
|
583 |
-
new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
|
584 |
-
new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
|
585 |
-
new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
|
586 |
-
new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
|
587 |
-
new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
|
588 |
-
new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
|
589 |
-
|
590 |
-
new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
|
591 |
-
new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
|
592 |
-
new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
|
593 |
-
new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
|
594 |
-
new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
|
595 |
-
new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
|
596 |
-
|
597 |
-
new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
|
598 |
-
new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
|
599 |
-
new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
|
600 |
-
new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
|
601 |
-
|
602 |
-
# Retrieves the keys for the encoder down blocks only
|
603 |
-
num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
|
604 |
-
down_blocks = {
|
605 |
-
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
|
606 |
-
}
|
607 |
-
|
608 |
-
# Retrieves the keys for the decoder up blocks only
|
609 |
-
num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
|
610 |
-
up_blocks = {
|
611 |
-
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
|
612 |
-
}
|
613 |
-
|
614 |
-
for i in range(num_down_blocks):
|
615 |
-
resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
|
616 |
-
|
617 |
-
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
|
618 |
-
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
|
619 |
-
f"encoder.down.{i}.downsample.conv.weight"
|
620 |
-
)
|
621 |
-
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
|
622 |
-
f"encoder.down.{i}.downsample.conv.bias"
|
623 |
-
)
|
624 |
-
|
625 |
-
paths = renew_vae_resnet_paths(resnets)
|
626 |
-
meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
|
627 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
628 |
-
|
629 |
-
mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
|
630 |
-
num_mid_res_blocks = 2
|
631 |
-
for i in range(1, num_mid_res_blocks + 1):
|
632 |
-
resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
|
633 |
-
|
634 |
-
paths = renew_vae_resnet_paths(resnets)
|
635 |
-
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
636 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
637 |
-
|
638 |
-
mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
|
639 |
-
paths = renew_vae_attention_paths(mid_attentions)
|
640 |
-
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
641 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
642 |
-
conv_attn_to_linear(new_checkpoint)
|
643 |
-
|
644 |
-
for i in range(num_up_blocks):
|
645 |
-
block_id = num_up_blocks - 1 - i
|
646 |
-
resnets = [
|
647 |
-
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
|
648 |
-
]
|
649 |
-
|
650 |
-
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
|
651 |
-
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
|
652 |
-
f"decoder.up.{block_id}.upsample.conv.weight"
|
653 |
-
]
|
654 |
-
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
|
655 |
-
f"decoder.up.{block_id}.upsample.conv.bias"
|
656 |
-
]
|
657 |
-
|
658 |
-
paths = renew_vae_resnet_paths(resnets)
|
659 |
-
meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
|
660 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
661 |
-
|
662 |
-
mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
|
663 |
-
num_mid_res_blocks = 2
|
664 |
-
for i in range(1, num_mid_res_blocks + 1):
|
665 |
-
resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
|
666 |
-
|
667 |
-
paths = renew_vae_resnet_paths(resnets)
|
668 |
-
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
669 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
670 |
-
|
671 |
-
mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
|
672 |
-
paths = renew_vae_attention_paths(mid_attentions)
|
673 |
-
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
674 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
675 |
-
conv_attn_to_linear(new_checkpoint)
|
676 |
-
return new_checkpoint
|
677 |
-
|
678 |
-
|
679 |
-
if __name__ == "__main__":
|
680 |
-
parser = argparse.ArgumentParser()
|
681 |
-
|
682 |
-
parser.add_argument(
|
683 |
-
"--unet_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert."
|
684 |
-
)
|
685 |
-
parser.add_argument(
|
686 |
-
"--vae_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert."
|
687 |
-
)
|
688 |
-
parser.add_argument(
|
689 |
-
"--optimus_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert."
|
690 |
-
)
|
691 |
-
parser.add_argument(
|
692 |
-
"--scheduler_type",
|
693 |
-
default="pndm",
|
694 |
-
type=str,
|
695 |
-
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
|
696 |
-
)
|
697 |
-
parser.add_argument(
|
698 |
-
"--extract_ema",
|
699 |
-
action="store_true",
|
700 |
-
help=(
|
701 |
-
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
|
702 |
-
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
|
703 |
-
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
|
704 |
-
),
|
705 |
-
)
|
706 |
-
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
|
707 |
-
|
708 |
-
args = parser.parse_args()
|
709 |
-
|
710 |
-
scheduler_config = SCHEDULER_CONFIG
|
711 |
-
|
712 |
-
num_train_timesteps = scheduler_config.timesteps
|
713 |
-
beta_start = scheduler_config.beta_linear_start
|
714 |
-
beta_end = scheduler_config.beta_linear_end
|
715 |
-
if args.scheduler_type == "pndm":
|
716 |
-
scheduler = PNDMScheduler(
|
717 |
-
beta_end=beta_end,
|
718 |
-
beta_schedule="scaled_linear",
|
719 |
-
beta_start=beta_start,
|
720 |
-
num_train_timesteps=num_train_timesteps,
|
721 |
-
skip_prk_steps=True,
|
722 |
-
steps_offset=1,
|
723 |
-
)
|
724 |
-
elif args.scheduler_type == "lms":
|
725 |
-
scheduler = LMSDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear")
|
726 |
-
elif args.scheduler_type == "euler":
|
727 |
-
scheduler = EulerDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear")
|
728 |
-
elif args.scheduler_type == "euler-ancestral":
|
729 |
-
scheduler = EulerAncestralDiscreteScheduler(
|
730 |
-
beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear"
|
731 |
-
)
|
732 |
-
elif args.scheduler_type == "dpm":
|
733 |
-
scheduler = DPMSolverMultistepScheduler(
|
734 |
-
beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear"
|
735 |
-
)
|
736 |
-
elif args.scheduler_type == "ddim":
|
737 |
-
scheduler = DDIMScheduler(
|
738 |
-
beta_start=beta_start,
|
739 |
-
beta_end=beta_end,
|
740 |
-
beta_schedule="scaled_linear",
|
741 |
-
clip_sample=False,
|
742 |
-
set_alpha_to_one=False,
|
743 |
-
steps_offset=1,
|
744 |
-
)
|
745 |
-
else:
|
746 |
-
raise ValueError(f"Scheduler of type {args.scheduler_type} doesn't exist!")
|
747 |
-
|
748 |
-
# Convert the UNet2DConditionModel models.
|
749 |
-
if args.unet_checkpoint_path is not None:
|
750 |
-
# image UNet
|
751 |
-
image_unet_config = create_image_unet_diffusers_config(IMAGE_UNET_CONFIG)
|
752 |
-
checkpoint = torch.load(args.unet_checkpoint_path)
|
753 |
-
converted_image_unet_checkpoint = convert_vd_unet_checkpoint(
|
754 |
-
checkpoint, image_unet_config, unet_key="model.diffusion_model.unet_image.", extract_ema=args.extract_ema
|
755 |
-
)
|
756 |
-
image_unet = UNet2DConditionModel(**image_unet_config)
|
757 |
-
image_unet.load_state_dict(converted_image_unet_checkpoint)
|
758 |
-
|
759 |
-
# text UNet
|
760 |
-
text_unet_config = create_text_unet_diffusers_config(TEXT_UNET_CONFIG)
|
761 |
-
converted_text_unet_checkpoint = convert_vd_unet_checkpoint(
|
762 |
-
checkpoint, text_unet_config, unet_key="model.diffusion_model.unet_text.", extract_ema=args.extract_ema
|
763 |
-
)
|
764 |
-
text_unet = UNetFlatConditionModel(**text_unet_config)
|
765 |
-
text_unet.load_state_dict(converted_text_unet_checkpoint)
|
766 |
-
|
767 |
-
# Convert the VAE model.
|
768 |
-
if args.vae_checkpoint_path is not None:
|
769 |
-
vae_config = create_vae_diffusers_config(AUTOENCODER_CONFIG)
|
770 |
-
checkpoint = torch.load(args.vae_checkpoint_path)
|
771 |
-
converted_vae_checkpoint = convert_vd_vae_checkpoint(checkpoint, vae_config)
|
772 |
-
|
773 |
-
vae = AutoencoderKL(**vae_config)
|
774 |
-
vae.load_state_dict(converted_vae_checkpoint)
|
775 |
-
|
776 |
-
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
777 |
-
image_feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
|
778 |
-
text_encoder = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
|
779 |
-
image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
|
780 |
-
|
781 |
-
pipe = VersatileDiffusionPipeline(
|
782 |
-
scheduler=scheduler,
|
783 |
-
tokenizer=tokenizer,
|
784 |
-
image_feature_extractor=image_feature_extractor,
|
785 |
-
text_encoder=text_encoder,
|
786 |
-
image_encoder=image_encoder,
|
787 |
-
image_unet=image_unet,
|
788 |
-
text_unet=text_unet,
|
789 |
-
vae=vae,
|
790 |
-
)
|
791 |
-
pipe.save_pretrained(args.dump_path)
|
|
|
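
The __main__ block above wires the converted image/text UNets, the VAE, the CLIP encoders and the chosen scheduler into a VersatileDiffusionPipeline and writes it to --dump_path. A rough usage sketch follows; the checkpoint filenames and output directory are placeholders, not paths from this repository.

# Illustrative invocation of the conversion script; all paths are placeholders.
#   python convert_versatile_diffusion_to_diffusers.py \
#       --unet_checkpoint_path vd-four-flow.pth \
#       --vae_checkpoint_path kl-f8.pth \
#       --scheduler_type dpm \
#       --extract_ema \
#       --dump_path ./versatile-diffusion-diffusers

# The dumped directory can then be reloaded as a regular diffusers pipeline.
from diffusers import VersatileDiffusionPipeline

pipe = VersatileDiffusionPipeline.from_pretrained("./versatile-diffusion-diffusers")
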
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
DELETED
@@ -1,749 +0,0 @@
|
|
1 |
-
# Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
|
16 |
-
|
17 |
-
import math
|
18 |
-
from typing import List, Optional, Tuple, Union
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import torch
|
22 |
-
|
23 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
24 |
-
from ..utils import randn_tensor
|
25 |
-
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
|
26 |
-
|
27 |
-
|
28 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
29 |
-
def betas_for_alpha_bar(
|
30 |
-
num_diffusion_timesteps,
|
31 |
-
max_beta=0.999,
|
32 |
-
alpha_transform_type="cosine",
|
33 |
-
):
|
34 |
-
"""
|
35 |
-
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
36 |
-
(1-beta) over time from t = [0,1].
|
37 |
-
|
38 |
-
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
39 |
-
to that part of the diffusion process.
|
40 |
-
|
41 |
-
|
42 |
-
Args:
|
43 |
-
num_diffusion_timesteps (`int`): the number of betas to produce.
|
44 |
-
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
45 |
-
prevent singularities.
|
46 |
-
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
47 |
-
Choose from `cosine` or `exp`
|
48 |
-
|
49 |
-
Returns:
|
50 |
-
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
51 |
-
"""
|
52 |
-
if alpha_transform_type == "cosine":
|
53 |
-
|
54 |
-
def alpha_bar_fn(t):
|
55 |
-
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
56 |
-
|
57 |
-
elif alpha_transform_type == "exp":
|
58 |
-
|
59 |
-
def alpha_bar_fn(t):
|
60 |
-
return math.exp(t * -12.0)
|
61 |
-
|
62 |
-
else:
|
63 |
-
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
|
64 |
-
|
65 |
-
betas = []
|
66 |
-
for i in range(num_diffusion_timesteps):
|
67 |
-
t1 = i / num_diffusion_timesteps
|
68 |
-
t2 = (i + 1) / num_diffusion_timesteps
|
69 |
-
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
70 |
-
return torch.tensor(betas, dtype=torch.float32)
|
71 |
-
|
72 |
-
|
73 |
-
class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
|
74 |
-
"""
|
75 |
-
DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with
|
76 |
-
the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
|
77 |
-
samples, and it can generate quite good samples even in only 10 steps.
|
78 |
-
|
79 |
-
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
|
80 |
-
|
81 |
-
Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We
|
82 |
-
recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
|
83 |
-
|
84 |
-
We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
|
85 |
-
diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
|
86 |
-
thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
|
87 |
-
stable-diffusion).
|
88 |
-
|
89 |
-
We also support the SDE variant of DPM-Solver and DPM-Solver++, which is a fast SDE solver for the reverse
|
90 |
-
diffusion SDE. Currently we only support the first-order and second-order solvers. We recommend using the
|
91 |
-
second-order `sde-dpmsolver++`.
|
92 |
-
|
93 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
94 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
95 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
96 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
97 |
-
|
98 |
-
Args:
|
99 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
100 |
-
beta_start (`float`): the starting `beta` value of inference.
|
101 |
-
beta_end (`float`): the final `beta` value.
|
102 |
-
beta_schedule (`str`):
|
103 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
104 |
-
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
105 |
-
trained_betas (`np.ndarray`, optional):
|
106 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
107 |
-
solver_order (`int`, default `2`):
|
108 |
-
the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided
|
109 |
-
sampling, and `solver_order=3` for unconditional sampling.
|
110 |
-
prediction_type (`str`, default `epsilon`, optional):
|
111 |
-
prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
|
112 |
-
process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
|
113 |
-
https://imagen.research.google/video/paper.pdf)
|
114 |
-
thresholding (`bool`, default `False`):
|
115 |
-
whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
|
116 |
-
For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
|
117 |
-
use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
|
118 |
-
models (such as stable-diffusion).
|
119 |
-
dynamic_thresholding_ratio (`float`, default `0.995`):
|
120 |
-
the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
|
121 |
-
(https://arxiv.org/abs/2205.11487).
|
122 |
-
sample_max_value (`float`, default `1.0`):
|
123 |
-
the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
|
124 |
-
`algorithm_type="dpmsolver++`.
|
125 |
-
algorithm_type (`str`, default `dpmsolver++`):
|
126 |
-
the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++` or `sde-dpmsolver` or
|
127 |
-
`sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in https://arxiv.org/abs/2206.00927, and
|
128 |
-
the `dpmsolver++` type implements the algorithms in https://arxiv.org/abs/2211.01095. We recommend to use
|
129 |
-
`dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling (e.g. stable-diffusion).
|
130 |
-
solver_type (`str`, default `midpoint`):
|
131 |
-
the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
|
132 |
-
the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are
|
133 |
-
slightly better, so we recommend to use the `midpoint` type.
|
134 |
-
lower_order_final (`bool`, default `True`):
|
135 |
-
whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
|
136 |
-
find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10.
|
137 |
-
use_karras_sigmas (`bool`, *optional*, defaults to `False`):
|
138 |
-
This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
|
139 |
-
noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
|
140 |
-
of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
|
141 |
-
lambda_min_clipped (`float`, default `-inf`):
|
142 |
-
the clipping threshold for the minimum value of lambda(t) for numerical stability. This is critical for
|
143 |
-
cosine (squaredcos_cap_v2) noise schedule.
|
144 |
-
variance_type (`str`, *optional*):
|
145 |
-
Set to "learned" or "learned_range" for diffusion models that predict variance. For example, OpenAI's
|
146 |
-
guided-diffusion (https://github.com/openai/guided-diffusion) predicts both mean and variance of the
|
147 |
-
Gaussian distribution in the model's output. DPM-Solver only needs the "mean" output because it is based on
|
148 |
-
diffusion ODEs. whether the model's output contains the predicted Gaussian variance. For example, OpenAI's
|
149 |
-
guided-diffusion (https://github.com/openai/guided-diffusion) predicts both mean and variance of the
|
150 |
-
Gaussian distribution in the model's output. DPM-Solver only needs the "mean" output because it is based on
|
151 |
-
diffusion ODEs.
|
152 |
-
timestep_spacing (`str`, default `"linspace"`):
|
153 |
-
The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
|
154 |
-
Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
|
155 |
-
steps_offset (`int`, default `0`):
|
156 |
-
an offset added to the inference steps. You can use a combination of `offset=1` and
|
157 |
-
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
|
158 |
-
stable diffusion.
|
159 |
-
"""
|
160 |
-
|
161 |
-
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
|
162 |
-
order = 1
|
163 |
-
|
164 |
-
@register_to_config
|
165 |
-
def __init__(
|
166 |
-
self,
|
167 |
-
num_train_timesteps: int = 1000,
|
168 |
-
beta_start: float = 0.0001,
|
169 |
-
beta_end: float = 0.02,
|
170 |
-
beta_schedule: str = "linear",
|
171 |
-
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
172 |
-
solver_order: int = 2,
|
173 |
-
prediction_type: str = "epsilon",
|
174 |
-
thresholding: bool = False,
|
175 |
-
dynamic_thresholding_ratio: float = 0.995,
|
176 |
-
sample_max_value: float = 1.0,
|
177 |
-
algorithm_type: str = "dpmsolver++",
|
178 |
-
solver_type: str = "midpoint",
|
179 |
-
lower_order_final: bool = True,
|
180 |
-
use_karras_sigmas: Optional[bool] = False,
|
181 |
-
lambda_min_clipped: float = -float("inf"),
|
182 |
-
variance_type: Optional[str] = None,
|
183 |
-
timestep_spacing: str = "linspace",
|
184 |
-
steps_offset: int = 0,
|
185 |
-
):
|
186 |
-
if trained_betas is not None:
|
187 |
-
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
188 |
-
elif beta_schedule == "linear":
|
189 |
-
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
190 |
-
elif beta_schedule == "scaled_linear":
|
191 |
-
# this schedule is very specific to the latent diffusion model.
|
192 |
-
self.betas = (
|
193 |
-
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
194 |
-
)
|
195 |
-
elif beta_schedule == "squaredcos_cap_v2":
|
196 |
-
# Glide cosine schedule
|
197 |
-
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
198 |
-
else:
|
199 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
200 |
-
|
201 |
-
self.alphas = 1.0 - self.betas
|
202 |
-
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
203 |
-
# Currently we only support VP-type noise schedule
|
204 |
-
self.alpha_t = torch.sqrt(self.alphas_cumprod)
|
205 |
-
self.sigma_t = torch.sqrt(1 - self.alphas_cumprod)
|
206 |
-
self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t)
|
207 |
-
|
208 |
-
# standard deviation of the initial noise distribution
|
209 |
-
self.init_noise_sigma = 1.0
|
210 |
-
|
211 |
-
# settings for DPM-Solver
|
212 |
-
if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]:
|
213 |
-
if algorithm_type == "deis":
|
214 |
-
self.register_to_config(algorithm_type="dpmsolver++")
|
215 |
-
else:
|
216 |
-
raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}")
|
217 |
-
|
218 |
-
if solver_type not in ["midpoint", "heun"]:
|
219 |
-
if solver_type in ["logrho", "bh1", "bh2"]:
|
220 |
-
self.register_to_config(solver_type="midpoint")
|
221 |
-
else:
|
222 |
-
raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}")
|
223 |
-
|
224 |
-
# setable values
|
225 |
-
self.num_inference_steps = None
|
226 |
-
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
|
227 |
-
self.timesteps = torch.from_numpy(timesteps)
|
228 |
-
self.model_outputs = [None] * solver_order
|
229 |
-
self.lower_order_nums = 0
|
230 |
-
|
231 |
-
def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None):
|
232 |
-
"""
|
233 |
-
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
|
234 |
-
|
235 |
-
Args:
|
236 |
-
num_inference_steps (`int`):
|
237 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
238 |
-
device (`str` or `torch.device`, optional):
|
239 |
-
the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
240 |
-
"""
|
241 |
-
# Clipping the minimum of all lambda(t) for numerical stability.
|
242 |
-
# This is critical for cosine (squaredcos_cap_v2) noise schedule.
|
243 |
-
clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped)
|
244 |
-
last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item()
|
245 |
-
|
246 |
-
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
|
247 |
-
if self.config.timestep_spacing == "linspace":
|
248 |
-
timesteps = (
|
249 |
-
np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64)
|
250 |
-
)
|
251 |
-
elif self.config.timestep_spacing == "leading":
|
252 |
-
step_ratio = last_timestep // (num_inference_steps + 1)
|
253 |
-
# creates integer timesteps by multiplying by ratio
|
254 |
-
# casting to int to avoid issues when num_inference_step is power of 3
|
255 |
-
timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64)
|
256 |
-
timesteps += self.config.steps_offset
|
257 |
-
elif self.config.timestep_spacing == "trailing":
|
258 |
-
step_ratio = self.config.num_train_timesteps / num_inference_steps
|
259 |
-
# creates integer timesteps by multiplying by ratio
|
260 |
-
# casting to int to avoid issues when num_inference_step is power of 3
|
261 |
-
timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64)
|
262 |
-
timesteps -= 1
|
263 |
-
else:
|
264 |
-
raise ValueError(
|
265 |
-
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
|
266 |
-
)
|
267 |
-
|
268 |
-
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
|
269 |
-
if self.config.use_karras_sigmas:
|
270 |
-
log_sigmas = np.log(sigmas)
|
271 |
-
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
|
272 |
-
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
|
273 |
-
timesteps = np.flip(timesteps).copy().astype(np.int64)
|
274 |
-
|
275 |
-
self.sigmas = torch.from_numpy(sigmas)
|
276 |
-
|
277 |
-
# when num_inference_steps == num_train_timesteps, we can end up with
|
278 |
-
# duplicates in timesteps.
|
279 |
-
_, unique_indices = np.unique(timesteps, return_index=True)
|
280 |
-
timesteps = timesteps[np.sort(unique_indices)]
|
281 |
-
|
282 |
-
self.timesteps = torch.from_numpy(timesteps).to(device)
|
283 |
-
|
284 |
-
self.num_inference_steps = len(timesteps)
|
285 |
-
|
286 |
-
self.model_outputs = [
|
287 |
-
None,
|
288 |
-
] * self.config.solver_order
|
289 |
-
self.lower_order_nums = 0
|
290 |
-
|
291 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
|
292 |
-
def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
|
293 |
-
"""
|
294 |
-
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
|
295 |
-
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
|
296 |
-
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
|
297 |
-
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
|
298 |
-
photorealism as well as better image-text alignment, especially when using very large guidance weights."
|
299 |
-
|
300 |
-
https://arxiv.org/abs/2205.11487
|
301 |
-
"""
|
302 |
-
dtype = sample.dtype
|
303 |
-
batch_size, channels, height, width = sample.shape
|
304 |
-
|
305 |
-
if dtype not in (torch.float32, torch.float64):
|
306 |
-
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
|
307 |
-
|
308 |
-
# Flatten sample for doing quantile calculation along each image
|
309 |
-
sample = sample.reshape(batch_size, channels * height * width)
|
310 |
-
|
311 |
-
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
|
312 |
-
|
313 |
-
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
|
314 |
-
s = torch.clamp(
|
315 |
-
s, min=1, max=self.config.sample_max_value
|
316 |
-
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
|
317 |
-
|
318 |
-
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
|
319 |
-
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
|
320 |
-
|
321 |
-
sample = sample.reshape(batch_size, channels, height, width)
|
322 |
-
sample = sample.to(dtype)
|
323 |
-
|
324 |
-
return sample
|
325 |
-
|
326 |
-
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
|
327 |
-
def _sigma_to_t(self, sigma, log_sigmas):
|
328 |
-
# get log sigma
|
329 |
-
log_sigma = np.log(sigma)
|
330 |
-
|
331 |
-
# get distribution
|
332 |
-
dists = log_sigma - log_sigmas[:, np.newaxis]
|
333 |
-
|
334 |
-
# get sigmas range
|
335 |
-
low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
|
336 |
-
high_idx = low_idx + 1
|
337 |
-
|
338 |
-
low = log_sigmas[low_idx]
|
339 |
-
high = log_sigmas[high_idx]
|
340 |
-
|
341 |
-
# interpolate sigmas
|
342 |
-
w = (low - log_sigma) / (low - high)
|
343 |
-
w = np.clip(w, 0, 1)
|
344 |
-
|
345 |
-
# transform interpolation to time range
|
346 |
-
t = (1 - w) * low_idx + w * high_idx
|
347 |
-
t = t.reshape(sigma.shape)
|
348 |
-
return t
|
349 |
-
|
350 |
-
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
|
351 |
-
def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
|
352 |
-
"""Constructs the noise schedule of Karras et al. (2022)."""
|
353 |
-
|
354 |
-
sigma_min: float = in_sigmas[-1].item()
|
355 |
-
sigma_max: float = in_sigmas[0].item()
|
356 |
-
|
357 |
-
rho = 7.0 # 7.0 is the value used in the paper
|
358 |
-
ramp = np.linspace(0, 1, num_inference_steps)
|
359 |
-
min_inv_rho = sigma_min ** (1 / rho)
|
360 |
-
max_inv_rho = sigma_max ** (1 / rho)
|
361 |
-
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
|
362 |
-
return sigmas
|
363 |
-
|
364 |
-
def convert_model_output(
|
365 |
-
self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor
|
366 |
-
) -> torch.FloatTensor:
|
367 |
-
"""
|
368 |
-
Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.
|
369 |
-
|
370 |
-
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
|
371 |
-
discretize an integral of the data prediction model. So we need to first convert the model output to the
|
372 |
-
corresponding type to match the algorithm.
|
373 |
-
|
374 |
-
Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or
|
375 |
-
DPM-Solver++ for both noise prediction model and data prediction model.
|
376 |
-
|
377 |
-
Args:
|
378 |
-
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
|
379 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
380 |
-
sample (`torch.FloatTensor`):
|
381 |
-
current instance of sample being created by diffusion process.
|
382 |
-
|
383 |
-
Returns:
|
384 |
-
`torch.FloatTensor`: the converted model output.
|
385 |
-
"""
|
386 |
-
|
387 |
-
# DPM-Solver++ needs to solve an integral of the data prediction model.
|
388 |
-
if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]:
|
389 |
-
if self.config.prediction_type == "epsilon":
|
390 |
-
# DPM-Solver and DPM-Solver++ only need the "mean" output.
|
391 |
-
if self.config.variance_type in ["learned", "learned_range"]:
|
392 |
-
model_output = model_output[:, :3]
|
393 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
394 |
-
x0_pred = (sample - sigma_t * model_output) / alpha_t
|
395 |
-
elif self.config.prediction_type == "sample":
|
396 |
-
x0_pred = model_output
|
397 |
-
elif self.config.prediction_type == "v_prediction":
|
398 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
399 |
-
x0_pred = alpha_t * sample - sigma_t * model_output
|
400 |
-
else:
|
401 |
-
raise ValueError(
|
402 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
403 |
-
" `v_prediction` for the DPMSolverMultistepScheduler."
|
404 |
-
)
|
405 |
-
|
406 |
-
if self.config.thresholding:
|
407 |
-
x0_pred = self._threshold_sample(x0_pred)
|
408 |
-
|
409 |
-
return x0_pred
|
410 |
-
|
411 |
-
# DPM-Solver needs to solve an integral of the noise prediction model.
|
412 |
-
elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]:
|
413 |
-
if self.config.prediction_type == "epsilon":
|
414 |
-
# DPM-Solver and DPM-Solver++ only need the "mean" output.
|
415 |
-
if self.config.variance_type in ["learned", "learned_range"]:
|
416 |
-
epsilon = model_output[:, :3]
|
417 |
-
else:
|
418 |
-
epsilon = model_output
|
419 |
-
elif self.config.prediction_type == "sample":
|
420 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
421 |
-
epsilon = (sample - alpha_t * model_output) / sigma_t
|
422 |
-
elif self.config.prediction_type == "v_prediction":
|
423 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
424 |
-
epsilon = alpha_t * model_output + sigma_t * sample
|
425 |
-
else:
|
426 |
-
raise ValueError(
|
427 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
428 |
-
" `v_prediction` for the DPMSolverMultistepScheduler."
|
429 |
-
)
|
430 |
-
|
431 |
-
if self.config.thresholding:
|
432 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
433 |
-
x0_pred = (sample - sigma_t * epsilon) / alpha_t
|
434 |
-
x0_pred = self._threshold_sample(x0_pred)
|
435 |
-
epsilon = (sample - alpha_t * x0_pred) / sigma_t
|
436 |
-
|
437 |
-
return epsilon
|
438 |
-
|
439 |
-
def dpm_solver_first_order_update(
|
440 |
-
self,
|
441 |
-
model_output: torch.FloatTensor,
|
442 |
-
timestep: int,
|
443 |
-
prev_timestep: int,
|
444 |
-
sample: torch.FloatTensor,
|
445 |
-
noise: Optional[torch.FloatTensor] = None,
|
446 |
-
) -> torch.FloatTensor:
|
447 |
-
"""
|
448 |
-
One step for the first-order DPM-Solver (equivalent to DDIM).
|
449 |
-
|
450 |
-
See https://arxiv.org/abs/2206.00927 for the detailed derivation.
|
451 |
-
|
452 |
-
Args:
|
453 |
-
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
|
454 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
455 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
456 |
-
sample (`torch.FloatTensor`):
|
457 |
-
current instance of sample being created by diffusion process.
|
458 |
-
|
459 |
-
Returns:
|
460 |
-
`torch.FloatTensor`: the sample tensor at the previous timestep.
|
461 |
-
"""
|
462 |
-
lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
|
463 |
-
alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
|
464 |
-
sigma_t, sigma_s = self.sigma_t[prev_timestep], self.sigma_t[timestep]
|
465 |
-
h = lambda_t - lambda_s
|
466 |
-
if self.config.algorithm_type == "dpmsolver++":
|
467 |
-
x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output
|
468 |
-
elif self.config.algorithm_type == "dpmsolver":
|
469 |
-
x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output
|
470 |
-
elif self.config.algorithm_type == "sde-dpmsolver++":
|
471 |
-
assert noise is not None
|
472 |
-
x_t = (
|
473 |
-
(sigma_t / sigma_s * torch.exp(-h)) * sample
|
474 |
-
+ (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output
|
475 |
-
+ sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
|
476 |
-
)
|
477 |
-
elif self.config.algorithm_type == "sde-dpmsolver":
|
478 |
-
assert noise is not None
|
479 |
-
x_t = (
|
480 |
-
(alpha_t / alpha_s) * sample
|
481 |
-
- 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output
|
482 |
-
+ sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
|
483 |
-
)
|
484 |
-
return x_t
|
485 |
-
|
486 |
-
def multistep_dpm_solver_second_order_update(
|
487 |
-
self,
|
488 |
-
model_output_list: List[torch.FloatTensor],
|
489 |
-
timestep_list: List[int],
|
490 |
-
prev_timestep: int,
|
491 |
-
sample: torch.FloatTensor,
|
492 |
-
noise: Optional[torch.FloatTensor] = None,
|
493 |
-
) -> torch.FloatTensor:
|
494 |
-
"""
|
495 |
-
One step for the second-order multistep DPM-Solver.
|
496 |
-
|
497 |
-
Args:
|
498 |
-
model_output_list (`List[torch.FloatTensor]`):
|
499 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
500 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
501 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
502 |
-
sample (`torch.FloatTensor`):
|
503 |
-
current instance of sample being created by diffusion process.
|
504 |
-
|
505 |
-
Returns:
|
506 |
-
`torch.FloatTensor`: the sample tensor at the previous timestep.
|
507 |
-
"""
|
508 |
-
t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
|
509 |
-
m0, m1 = model_output_list[-1], model_output_list[-2]
|
510 |
-
lambda_t, lambda_s0, lambda_s1 = self.lambda_t[t], self.lambda_t[s0], self.lambda_t[s1]
|
511 |
-
alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
|
512 |
-
sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
|
513 |
-
h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
|
514 |
-
r0 = h_0 / h
|
515 |
-
D0, D1 = m0, (1.0 / r0) * (m0 - m1)
|
516 |
-
if self.config.algorithm_type == "dpmsolver++":
|
517 |
-
# See https://arxiv.org/abs/2211.01095 for detailed derivations
|
518 |
-
if self.config.solver_type == "midpoint":
|
519 |
-
x_t = (
|
520 |
-
(sigma_t / sigma_s0) * sample
|
521 |
-
- (alpha_t * (torch.exp(-h) - 1.0)) * D0
|
522 |
-
- 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1
|
523 |
-
)
|
524 |
-
elif self.config.solver_type == "heun":
|
525 |
-
x_t = (
|
526 |
-
(sigma_t / sigma_s0) * sample
|
527 |
-
- (alpha_t * (torch.exp(-h) - 1.0)) * D0
|
528 |
-
+ (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1
|
529 |
-
)
|
530 |
-
elif self.config.algorithm_type == "dpmsolver":
|
531 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
532 |
-
if self.config.solver_type == "midpoint":
|
533 |
-
x_t = (
|
534 |
-
(alpha_t / alpha_s0) * sample
|
535 |
-
- (sigma_t * (torch.exp(h) - 1.0)) * D0
|
536 |
-
- 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1
|
537 |
-
)
|
538 |
-
elif self.config.solver_type == "heun":
|
539 |
-
x_t = (
|
540 |
-
(alpha_t / alpha_s0) * sample
|
541 |
-
- (sigma_t * (torch.exp(h) - 1.0)) * D0
|
542 |
-
- (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
|
543 |
-
)
|
544 |
-
elif self.config.algorithm_type == "sde-dpmsolver++":
|
545 |
-
assert noise is not None
|
546 |
-
if self.config.solver_type == "midpoint":
|
547 |
-
x_t = (
|
548 |
-
(sigma_t / sigma_s0 * torch.exp(-h)) * sample
|
549 |
-
+ (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
|
550 |
-
+ 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1
|
551 |
-
+ sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
|
552 |
-
)
|
553 |
-
elif self.config.solver_type == "heun":
|
554 |
-
x_t = (
|
555 |
-
(sigma_t / sigma_s0 * torch.exp(-h)) * sample
|
556 |
-
+ (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
|
557 |
-
+ (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
|
558 |
-
+ sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
|
559 |
-
)
|
560 |
-
elif self.config.algorithm_type == "sde-dpmsolver":
|
561 |
-
assert noise is not None
|
562 |
-
if self.config.solver_type == "midpoint":
|
563 |
-
x_t = (
|
564 |
-
(alpha_t / alpha_s0) * sample
|
565 |
-
- 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0
|
566 |
-
- (sigma_t * (torch.exp(h) - 1.0)) * D1
|
567 |
-
+ sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
|
568 |
-
)
|
569 |
-
elif self.config.solver_type == "heun":
|
570 |
-
x_t = (
|
571 |
-
(alpha_t / alpha_s0) * sample
|
572 |
-
- 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0
|
573 |
-
- 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
|
574 |
-
+ sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
|
575 |
-
)
|
576 |
-
return x_t
|
577 |
-
|
578 |
-
def multistep_dpm_solver_third_order_update(
|
579 |
-
self,
|
580 |
-
model_output_list: List[torch.FloatTensor],
|
581 |
-
timestep_list: List[int],
|
582 |
-
prev_timestep: int,
|
583 |
-
sample: torch.FloatTensor,
|
584 |
-
) -> torch.FloatTensor:
|
585 |
-
"""
|
586 |
-
One step for the third-order multistep DPM-Solver.
|
587 |
-
|
588 |
-
Args:
|
589 |
-
model_output_list (`List[torch.FloatTensor]`):
|
590 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
591 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
592 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
593 |
-
sample (`torch.FloatTensor`):
|
594 |
-
current instance of sample being created by diffusion process.
|
595 |
-
|
596 |
-
Returns:
|
597 |
-
`torch.FloatTensor`: the sample tensor at the previous timestep.
|
598 |
-
"""
|
599 |
-
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
|
600 |
-
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
|
601 |
-
lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
|
602 |
-
self.lambda_t[t],
|
603 |
-
self.lambda_t[s0],
|
604 |
-
self.lambda_t[s1],
|
605 |
-
self.lambda_t[s2],
|
606 |
-
)
|
607 |
-
alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
|
608 |
-
sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
|
609 |
-
h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
|
610 |
-
r0, r1 = h_0 / h, h_1 / h
|
611 |
-
D0 = m0
|
612 |
-
D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
|
613 |
-
D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
|
614 |
-
D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
|
615 |
-
if self.config.algorithm_type == "dpmsolver++":
|
616 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
617 |
-
x_t = (
|
618 |
-
(sigma_t / sigma_s0) * sample
|
619 |
-
- (alpha_t * (torch.exp(-h) - 1.0)) * D0
|
620 |
-
+ (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1
|
621 |
-
- (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
|
622 |
-
)
|
623 |
-
elif self.config.algorithm_type == "dpmsolver":
|
624 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
625 |
-
x_t = (
|
626 |
-
(alpha_t / alpha_s0) * sample
|
627 |
-
- (sigma_t * (torch.exp(h) - 1.0)) * D0
|
628 |
-
- (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
|
629 |
-
- (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
|
630 |
-
)
|
631 |
-
return x_t
|
632 |
-
|
633 |
-
def step(
|
634 |
-
self,
|
635 |
-
model_output: torch.FloatTensor,
|
636 |
-
timestep: int,
|
637 |
-
sample: torch.FloatTensor,
|
638 |
-
generator=None,
|
639 |
-
return_dict: bool = True,
|
640 |
-
) -> Union[SchedulerOutput, Tuple]:
|
641 |
-
"""
|
642 |
-
Step function propagating the sample with the multistep DPM-Solver.
|
643 |
-
|
644 |
-
Args:
|
645 |
-
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
|
646 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
647 |
-
sample (`torch.FloatTensor`):
|
648 |
-
current instance of sample being created by diffusion process.
|
649 |
-
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
|
650 |
-
|
651 |
-
Returns:
|
652 |
-
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
|
653 |
-
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
|
654 |
-
|
655 |
-
"""
|
656 |
-
if self.num_inference_steps is None:
|
657 |
-
raise ValueError(
|
658 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
659 |
-
)
|
660 |
-
|
661 |
-
if isinstance(timestep, torch.Tensor):
|
662 |
-
timestep = timestep.to(self.timesteps.device)
|
663 |
-
step_index = (self.timesteps == timestep).nonzero()
|
664 |
-
if len(step_index) == 0:
|
665 |
-
step_index = len(self.timesteps) - 1
|
666 |
-
else:
|
667 |
-
step_index = step_index.item()
|
668 |
-
prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
|
669 |
-
lower_order_final = (
|
670 |
-
(step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15
|
671 |
-
)
|
672 |
-
lower_order_second = (
|
673 |
-
(step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
|
674 |
-
)
|
675 |
-
|
676 |
-
model_output = self.convert_model_output(model_output, timestep, sample)
|
677 |
-
for i in range(self.config.solver_order - 1):
|
678 |
-
self.model_outputs[i] = self.model_outputs[i + 1]
|
679 |
-
self.model_outputs[-1] = model_output
|
680 |
-
|
681 |
-
if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]:
|
682 |
-
noise = randn_tensor(
|
683 |
-
model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype
|
684 |
-
)
|
685 |
-
else:
|
686 |
-
noise = None
|
687 |
-
|
688 |
-
if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
|
689 |
-
prev_sample = self.dpm_solver_first_order_update(
|
690 |
-
model_output, timestep, prev_timestep, sample, noise=noise
|
691 |
-
)
|
692 |
-
elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
|
693 |
-
timestep_list = [self.timesteps[step_index - 1], timestep]
|
694 |
-
prev_sample = self.multistep_dpm_solver_second_order_update(
|
695 |
-
self.model_outputs, timestep_list, prev_timestep, sample, noise=noise
|
696 |
-
)
|
697 |
-
else:
|
698 |
-
timestep_list = [self.timesteps[step_index - 2], self.timesteps[step_index - 1], timestep]
|
699 |
-
prev_sample = self.multistep_dpm_solver_third_order_update(
|
700 |
-
self.model_outputs, timestep_list, prev_timestep, sample
|
701 |
-
)
|
702 |
-
|
703 |
-
if self.lower_order_nums < self.config.solver_order:
|
704 |
-
self.lower_order_nums += 1
|
705 |
-
|
706 |
-
if not return_dict:
|
707 |
-
return (prev_sample,)
|
708 |
-
|
709 |
-
return SchedulerOutput(prev_sample=prev_sample)
|
710 |
-
|
711 |
-
def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
|
712 |
-
"""
|
713 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
714 |
-
current timestep.
|
715 |
-
|
716 |
-
Args:
|
717 |
-
sample (`torch.FloatTensor`): input sample
|
718 |
-
|
719 |
-
Returns:
|
720 |
-
`torch.FloatTensor`: scaled input sample
|
721 |
-
"""
|
722 |
-
return sample
|
723 |
-
|
724 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
|
725 |
-
def add_noise(
|
726 |
-
self,
|
727 |
-
original_samples: torch.FloatTensor,
|
728 |
-
noise: torch.FloatTensor,
|
729 |
-
timesteps: torch.IntTensor,
|
730 |
-
) -> torch.FloatTensor:
|
731 |
-
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
732 |
-
alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
|
733 |
-
timesteps = timesteps.to(original_samples.device)
|
734 |
-
|
735 |
-
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
736 |
-
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
737 |
-
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
738 |
-
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
739 |
-
|
740 |
-
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
741 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
742 |
-
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
743 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
744 |
-
|
745 |
-
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
746 |
-
return noisy_samples
|
747 |
-
|
748 |
-
def __len__(self):
|
749 |
-
return self.config.num_train_timesteps
|
|
|
|
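
The scheduler deleted above exposes the usual diffusers interface (set_timesteps, scale_model_input, step). Below is a minimal, self-contained sketch of the multistep solver loop; the zero-noise "model" is a stand-in purely to show the control flow, where a real pipeline would call its UNet instead.

import torch
from diffusers import DPMSolverMultistepScheduler

# Stable-Diffusion-style betas and second-order solver, as recommended in the docstring above.
scheduler = DPMSolverMultistepScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", solver_order=2
)
scheduler.set_timesteps(num_inference_steps=20)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)   # identity for this scheduler
    noise_pred = torch.zeros_like(model_input)             # stand-in for unet(model_input, t, ...)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
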
spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py
DELETED
@@ -1,5 +0,0 @@
-_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
-
-# learning policy
-lr_config = dict(step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Andy1621/uniformer_image_detection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
-model = dict(bbox_head=dict(transform_method='minmax'))
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,11 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://resnet18_v1c',
-    backbone=dict(depth=18),
-    decode_head=dict(
-        c1_in_channels=64,
-        c1_channels=12,
-        in_channels=512,
-        channels=128,
-    ),
-    auxiliary_head=dict(in_channels=256, channels=64))
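Note (not part of the commit): the config files deleted above all rely on the MMDetection/MMSegmentation `_base_` inheritance pattern, where a child config lists only the keys it overrides. The sketch below illustrates that override semantics with a plain recursive dict merge; it is not MMCV's actual config loader, and the dictionaries are invented for the example.

# Illustrative sketch of `_base_`-style overrides (not MMCV's implementation): nested
# dicts are merged recursively, scalar values in the child replace the base values.
def merge_config(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_config(merged[key], value)  # recurse into nested dicts
        else:
            merged[key] = value                             # scalars and new keys replace
    return merged

base = {"model": {"backbone": {"depth": 50}, "decode_head": {"in_channels": 2048}}}
child = {"model": {"backbone": {"depth": 18}, "decode_head": {"in_channels": 512, "channels": 128}}}
print(merge_config(base, child))
# {'model': {'backbone': {'depth': 18}, 'decode_head': {'in_channels': 512, 'channels': 128}}}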
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_model.py
DELETED
@@ -1,174 +0,0 @@
-import re
-from functools import partial
-
-import numpy as np
-import torch
-
-from modules import RoPE, shared
-from modules.callbacks import Iteratorize
-from modules.logging_colors import logger
-from modules.text_generation import get_max_prompt_length
-
-try:
-    import llama_cpp
-except:
-    llama_cpp = None
-
-try:
-    import llama_cpp_cuda
-except:
-    llama_cpp_cuda = None
-
-
-def llama_cpp_lib():
-    if (shared.args.cpu and llama_cpp is not None) or llama_cpp_cuda is None:
-        return llama_cpp
-    else:
-        return llama_cpp_cuda
-
-
-def ban_eos_logits_processor(eos_token, input_ids, logits):
-    logits[eos_token] = -float('inf')
-    return logits
-
-
-def custom_token_ban_logits_processor(token_ids, input_ids, logits):
-    for token_id in token_ids:
-        logits[token_id] = -float('inf')
-
-    return logits
-
-
-class LlamaCppModel:
-    def __init__(self):
-        self.initialized = False
-        self.grammar_string = ''
-        self.grammar = None
-
-    def __del__(self):
-        self.model.__del__()
-
-    @classmethod
-    def from_pretrained(self, path):
-
-        Llama = llama_cpp_lib().Llama
-        LlamaCache = llama_cpp_lib().LlamaCache
-
-        result = self()
-        cache_capacity = 0
-        if shared.args.cache_capacity is not None:
-            if 'GiB' in shared.args.cache_capacity:
-                cache_capacity = int(re.sub('[a-zA-Z]', '', shared.args.cache_capacity)) * 1000 * 1000 * 1000
-            elif 'MiB' in shared.args.cache_capacity:
-                cache_capacity = int(re.sub('[a-zA-Z]', '', shared.args.cache_capacity)) * 1000 * 1000
-            else:
-                cache_capacity = int(shared.args.cache_capacity)
-
-        logger.info("Cache capacity is " + str(cache_capacity) + " bytes")
-
-        if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
-            tensor_split_list = None
-        else:
-            tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")]
-
-        params = {
-            'model_path': str(path),
-            'n_ctx': shared.args.n_ctx,
-            'seed': int(shared.args.llama_cpp_seed),
-            'n_threads': shared.args.threads or None,
-            'n_threads_batch': shared.args.threads_batch or None,
-            'n_batch': shared.args.n_batch,
-            'use_mmap': not shared.args.no_mmap,
-            'use_mlock': shared.args.mlock,
-            'mul_mat_q': shared.args.mul_mat_q,
-            'numa': shared.args.numa,
-            'n_gpu_layers': shared.args.n_gpu_layers,
-            'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
-            'tensor_split': tensor_split_list,
-            'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
-        }
-
-        result.model = Llama(**params)
-        if cache_capacity > 0:
-            result.model.set_cache(LlamaCache(capacity_bytes=cache_capacity))
-
-        # This is ugly, but the model and the tokenizer are the same object in this library.
-        return result, result
-
-    def encode(self, string):
-        if type(string) is str:
-            string = string.encode()
-
-        return self.model.tokenize(string)
-
-    def decode(self, ids):
-        return self.model.detokenize(ids).decode('utf-8')
-
-    def get_logits(self, tokens):
-        self.model.eval(tokens)
-        logits = self.model._scores
-        logits = np.expand_dims(logits, 0)  # batch dim is expected
-        return torch.tensor(logits, dtype=torch.float32)
-
-    def load_grammar(self, string):
-        if string != self.grammar_string:
-            self.grammar_string = string
-            if string.strip() != '':
-                self.grammar = llama_cpp_lib().LlamaGrammar.from_string(string)
-            else:
-                self.grammar = None
-
-    def generate(self, prompt, state, callback=None):
-
-        LogitsProcessorList = llama_cpp_lib().LogitsProcessorList
-
-        prompt = prompt if type(prompt) is str else prompt.decode()
-
-        # Handle truncation
-        prompt = self.encode(prompt)
-        prompt = prompt[-get_max_prompt_length(state):]
-        prompt = self.decode(prompt)
-
-        self.load_grammar(state['grammar_string'])
-        logit_processors = LogitsProcessorList()
-        if state['ban_eos_token']:
-            logit_processors.append(partial(ban_eos_logits_processor, self.model.token_eos()))
-
-        if state['custom_token_bans']:
-            to_ban = [int(x) for x in state['custom_token_bans'].split(',')]
-            if len(to_ban) > 0:
-                logit_processors.append(partial(custom_token_ban_logits_processor, to_ban))
-
-        completion_chunks = self.model.create_completion(
-            prompt=prompt,
-            max_tokens=state['max_new_tokens'],
-            temperature=state['temperature'],
-            top_p=state['top_p'],
-            top_k=state['top_k'],
-            repeat_penalty=state['repetition_penalty'],
-            tfs_z=state['tfs'],
-            mirostat_mode=int(state['mirostat_mode']),
-            mirostat_tau=state['mirostat_tau'],
-            mirostat_eta=state['mirostat_eta'],
-            stream=True,
-            logits_processor=logit_processors,
-            grammar=self.grammar
-        )
-
-        output = ""
-        for completion_chunk in completion_chunks:
-            if shared.stop_everything:
-                break
-            text = completion_chunk['choices'][0]['text']
-            output += text
-            if callback:
-                callback(text)
-
-        return output
-
-    def generate_with_streaming(self, *args, **kwargs):
-        with Iteratorize(self.generate, args, kwargs, callback=None) as generator:
-            reply = ''
-            for token in generator:
-                reply += token
-                yield reply
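Note (not part of the commit): the deleted `ban_eos_logits_processor` works by forcing the EOS token's logit to negative infinity before sampling, which gives that token zero probability. The toy sketch below shows the effect in isolation with NumPy; the vocabulary size, token ids, and logit values are invented.

# Toy illustration of banning a token by setting its logit to -inf (invented values).
import numpy as np

def ban_token(token_id, logits):
    logits = logits.copy()
    logits[token_id] = -float('inf')   # this token's probability becomes exactly 0
    return logits

logits = np.array([1.2, 0.3, 2.5, -0.7])   # pretend vocabulary of 4 tokens, id 2 = EOS
banned = ban_token(2, logits)
probs = np.exp(banned - banned.max())
probs /= probs.sum()
print(probs)   # index 2 is now 0, so generation can never stop on EOS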
spaces/AnishKumbhar/DogDiseasePredictor/README.md
DELETED
@@ -1,11 +0,0 @@
----
-title: DogDiseasePredictor
-emoji: ⚡
-colorFrom: green
-colorTo: green
-sdk: docker
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Arnx/MusicGenXvAKN/app.py
DELETED
@@ -1,407 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
|
8 |
-
# also released under the MIT license.
|
9 |
-
|
10 |
-
import argparse
|
11 |
-
from concurrent.futures import ProcessPoolExecutor
|
12 |
-
import os
|
13 |
-
from pathlib import Path
|
14 |
-
import subprocess as sp
|
15 |
-
from tempfile import NamedTemporaryFile
|
16 |
-
import time
|
17 |
-
import typing as tp
|
18 |
-
import warnings
|
19 |
-
|
20 |
-
import torch
|
21 |
-
import gradio as gr
|
22 |
-
|
23 |
-
from audiocraft.data.audio_utils import convert_audio
|
24 |
-
from audiocraft.data.audio import audio_write
|
25 |
-
from audiocraft.models import MusicGen
|
26 |
-
|
27 |
-
|
28 |
-
MODEL = None # Last used model
|
29 |
-
IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
|
30 |
-
MAX_BATCH_SIZE = 6
|
31 |
-
BATCHED_DURATION = 15
|
32 |
-
INTERRUPTING = False
|
33 |
-
# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
|
34 |
-
_old_call = sp.call
|
35 |
-
|
36 |
-
|
37 |
-
def _call_nostderr(*args, **kwargs):
|
38 |
-
# Avoid ffmpeg vomitting on the logs.
|
39 |
-
kwargs['stderr'] = sp.DEVNULL
|
40 |
-
kwargs['stdout'] = sp.DEVNULL
|
41 |
-
_old_call(*args, **kwargs)
|
42 |
-
|
43 |
-
|
44 |
-
sp.call = _call_nostderr
|
45 |
-
# Preallocating the pool of processes.
|
46 |
-
pool = ProcessPoolExecutor(3)
|
47 |
-
pool.__enter__()
|
48 |
-
|
49 |
-
|
50 |
-
def interrupt():
|
51 |
-
global INTERRUPTING
|
52 |
-
INTERRUPTING = True
|
53 |
-
|
54 |
-
|
55 |
-
class FileCleaner:
|
56 |
-
def __init__(self, file_lifetime: float = 3600):
|
57 |
-
self.file_lifetime = file_lifetime
|
58 |
-
self.files = []
|
59 |
-
|
60 |
-
def add(self, path: tp.Union[str, Path]):
|
61 |
-
self._cleanup()
|
62 |
-
self.files.append((time.time(), Path(path)))
|
63 |
-
|
64 |
-
def _cleanup(self):
|
65 |
-
now = time.time()
|
66 |
-
for time_added, path in list(self.files):
|
67 |
-
if now - time_added > self.file_lifetime:
|
68 |
-
if path.exists():
|
69 |
-
path.unlink()
|
70 |
-
self.files.pop(0)
|
71 |
-
else:
|
72 |
-
break
|
73 |
-
|
74 |
-
|
75 |
-
file_cleaner = FileCleaner()
|
76 |
-
|
77 |
-
|
78 |
-
def make_waveform(*args, **kwargs):
|
79 |
-
# Further remove some warnings.
|
80 |
-
be = time.time()
|
81 |
-
with warnings.catch_warnings():
|
82 |
-
warnings.simplefilter('ignore')
|
83 |
-
out = gr.make_waveform(*args, **kwargs)
|
84 |
-
print("Make a video took", time.time() - be)
|
85 |
-
return out
|
86 |
-
|
87 |
-
|
88 |
-
def load_model(version='melody'):
|
89 |
-
global MODEL
|
90 |
-
print("Loading model", version)
|
91 |
-
if MODEL is None or MODEL.name != version:
|
92 |
-
MODEL = MusicGen.get_pretrained(version)
|
93 |
-
|
94 |
-
|
95 |
-
def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
|
96 |
-
MODEL.set_generation_params(duration=duration, **gen_kwargs)
|
97 |
-
print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
|
98 |
-
be = time.time()
|
99 |
-
processed_melodies = []
|
100 |
-
target_sr = 32000
|
101 |
-
target_ac = 1
|
102 |
-
for melody in melodies:
|
103 |
-
if melody is None:
|
104 |
-
processed_melodies.append(None)
|
105 |
-
else:
|
106 |
-
sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
|
107 |
-
if melody.dim() == 1:
|
108 |
-
melody = melody[None]
|
109 |
-
melody = melody[..., :int(sr * duration)]
|
110 |
-
melody = convert_audio(melody, sr, target_sr, target_ac)
|
111 |
-
processed_melodies.append(melody)
|
112 |
-
|
113 |
-
if any(m is not None for m in processed_melodies):
|
114 |
-
outputs = MODEL.generate_with_chroma(
|
115 |
-
descriptions=texts,
|
116 |
-
melody_wavs=processed_melodies,
|
117 |
-
melody_sample_rate=target_sr,
|
118 |
-
progress=progress,
|
119 |
-
)
|
120 |
-
else:
|
121 |
-
outputs = MODEL.generate(texts, progress=progress)
|
122 |
-
|
123 |
-
outputs = outputs.detach().cpu().float()
|
124 |
-
out_files = []
|
125 |
-
for output in outputs:
|
126 |
-
with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
|
127 |
-
audio_write(
|
128 |
-
file.name, output, MODEL.sample_rate, strategy="loudness",
|
129 |
-
loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
|
130 |
-
out_files.append(pool.submit(make_waveform, file.name))
|
131 |
-
file_cleaner.add(file.name)
|
132 |
-
res = [out_file.result() for out_file in out_files]
|
133 |
-
for file in res:
|
134 |
-
file_cleaner.add(file)
|
135 |
-
print("batch finished", len(texts), time.time() - be)
|
136 |
-
print("Tempfiles currently stored: ", len(file_cleaner.files))
|
137 |
-
return res
|
138 |
-
|
139 |
-
|
140 |
-
def predict_batched(texts, melodies):
|
141 |
-
max_text_length = 512
|
142 |
-
texts = [text[:max_text_length] for text in texts]
|
143 |
-
load_model('melody')
|
144 |
-
res = _do_predictions(texts, melodies, BATCHED_DURATION)
|
145 |
-
return [res]
|
146 |
-
|
147 |
-
|
148 |
-
def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
|
149 |
-
global INTERRUPTING
|
150 |
-
INTERRUPTING = False
|
151 |
-
if temperature < 0:
|
152 |
-
raise gr.Error("Temperature must be >= 0.")
|
153 |
-
if topk < 0:
|
154 |
-
raise gr.Error("Topk must be non-negative.")
|
155 |
-
if topp < 0:
|
156 |
-
raise gr.Error("Topp must be non-negative.")
|
157 |
-
|
158 |
-
topk = int(topk)
|
159 |
-
load_model(model)
|
160 |
-
|
161 |
-
def _progress(generated, to_generate):
|
162 |
-
progress((generated, to_generate))
|
163 |
-
if INTERRUPTING:
|
164 |
-
raise gr.Error("Interrupted.")
|
165 |
-
MODEL.set_custom_progress_callback(_progress)
|
166 |
-
|
167 |
-
outs = _do_predictions(
|
168 |
-
[text], [melody], duration, progress=True,
|
169 |
-
top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
|
170 |
-
return outs[0]
|
171 |
-
|
172 |
-
|
173 |
-
def toggle_audio_src(choice):
|
174 |
-
if choice == "mic":
|
175 |
-
return gr.update(source="microphone", value=None, label="Microphone")
|
176 |
-
else:
|
177 |
-
return gr.update(source="upload", value=None, label="File")
|
178 |
-
|
179 |
-
|
180 |
-
def ui_full(launch_kwargs):
|
181 |
-
with gr.Blocks() as interface:
|
182 |
-
gr.Markdown(
|
183 |
-
"""
|
184 |
-
# MusicGen
|
185 |
-
This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
|
186 |
-
a simple and controllable model for music generation
|
187 |
-
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
|
188 |
-
"""
|
189 |
-
)
|
190 |
-
with gr.Row():
|
191 |
-
with gr.Column():
|
192 |
-
with gr.Row():
|
193 |
-
text = gr.Text(label="Input Text", interactive=True)
|
194 |
-
with gr.Column():
|
195 |
-
radio = gr.Radio(["file", "mic"], value="file",
|
196 |
-
label="Condition on a melody (optional) File or Mic")
|
197 |
-
melody = gr.Audio(source="upload", type="numpy", label="File",
|
198 |
-
interactive=True, elem_id="melody-input")
|
199 |
-
with gr.Row():
|
200 |
-
submit = gr.Button("Submit")
|
201 |
-
# Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
|
202 |
-
_ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
|
203 |
-
with gr.Row():
|
204 |
-
model = gr.Radio(["melody", "medium", "small", "large"],
|
205 |
-
label="Model", value="melody", interactive=True)
|
206 |
-
with gr.Row():
|
207 |
-
duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
|
208 |
-
with gr.Row():
|
209 |
-
topk = gr.Number(label="Top-k", value=250, interactive=True)
|
210 |
-
topp = gr.Number(label="Top-p", value=0, interactive=True)
|
211 |
-
temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
|
212 |
-
cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
|
213 |
-
with gr.Column():
|
214 |
-
output = gr.Video(label="Generated Music")
|
215 |
-
submit.click(predict_full,
|
216 |
-
inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef],
|
217 |
-
outputs=[output])
|
218 |
-
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
|
219 |
-
gr.Examples(
|
220 |
-
fn=predict_full,
|
221 |
-
examples=[
|
222 |
-
[
|
223 |
-
"An 80s driving pop song with heavy drums and synth pads in the background",
|
224 |
-
"./assets/bach.mp3",
|
225 |
-
"melody"
|
226 |
-
],
|
227 |
-
[
|
228 |
-
"A cheerful country song with acoustic guitars",
|
229 |
-
"./assets/bolero_ravel.mp3",
|
230 |
-
"melody"
|
231 |
-
],
|
232 |
-
[
|
233 |
-
"90s rock song with electric guitar and heavy drums",
|
234 |
-
None,
|
235 |
-
"medium"
|
236 |
-
],
|
237 |
-
[
|
238 |
-
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
|
239 |
-
"./assets/bach.mp3",
|
240 |
-
"melody"
|
241 |
-
],
|
242 |
-
[
|
243 |
-
"lofi slow bpm electro chill with organic samples",
|
244 |
-
None,
|
245 |
-
"medium",
|
246 |
-
],
|
247 |
-
],
|
248 |
-
inputs=[text, melody, model],
|
249 |
-
outputs=[output]
|
250 |
-
)
|
251 |
-
gr.Markdown(
|
252 |
-
"""
|
253 |
-
### More details
|
254 |
-
|
255 |
-
The model will generate a short music extract based on the description you provided.
|
256 |
-
The model can generate up to 30 seconds of audio in one pass. It is now possible
|
257 |
-
to extend the generation by feeding back the end of the previous chunk of audio.
|
258 |
-
This can take a long time, and the model might lose consistency. The model might also
|
259 |
-
decide at arbitrary positions that the song ends.
|
260 |
-
|
261 |
-
**WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
|
262 |
-
An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
|
263 |
-
are generated each time.
|
264 |
-
|
265 |
-
We present 4 model variations:
|
266 |
-
1. Melody -- a music generation model capable of generating music condition
|
267 |
-
on text and melody inputs. **Note**, you can also use text only.
|
268 |
-
2. Small -- a 300M transformer decoder conditioned on text only.
|
269 |
-
3. Medium -- a 1.5B transformer decoder conditioned on text only.
|
270 |
-
4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.)
|
271 |
-
|
272 |
-
When using `melody`, ou can optionaly provide a reference audio from
|
273 |
-
which a broad melody will be extracted. The model will then try to follow both
|
274 |
-
the description and melody provided.
|
275 |
-
|
276 |
-
You can also use your own GPU or a Google Colab by following the instructions on our repo.
|
277 |
-
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
|
278 |
-
for more details.
|
279 |
-
"""
|
280 |
-
)
|
281 |
-
|
282 |
-
interface.queue().launch(**launch_kwargs)
|
283 |
-
|
284 |
-
|
285 |
-
def ui_batched(launch_kwargs):
|
286 |
-
with gr.Blocks() as demo:
|
287 |
-
gr.Markdown(
|
288 |
-
"""
|
289 |
-
# MusicGen
|
290 |
-
|
291 |
-
This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
|
292 |
-
a simple and controllable model for music generation
|
293 |
-
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
|
294 |
-
<br/>
|
295 |
-
<a href="https://huggingface.co/spaces/facebook/MusicGen?duplicate=true"
|
296 |
-
style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
|
297 |
-
<img style="margin-bottom: 0em;display: inline;margin-top: -.25em;"
|
298 |
-
src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
|
299 |
-
for longer sequences, more control and no queue.</p>
|
300 |
-
"""
|
301 |
-
)
|
302 |
-
with gr.Row():
|
303 |
-
with gr.Column():
|
304 |
-
with gr.Row():
|
305 |
-
text = gr.Text(label="Describe your music", lines=2, interactive=True)
|
306 |
-
with gr.Column():
|
307 |
-
radio = gr.Radio(["file", "mic"], value="file",
|
308 |
-
label="Condition on a melody (optional) File or Mic")
|
309 |
-
melody = gr.Audio(source="upload", type="numpy", label="File",
|
310 |
-
interactive=True, elem_id="melody-input")
|
311 |
-
with gr.Row():
|
312 |
-
submit = gr.Button("Generate")
|
313 |
-
with gr.Column():
|
314 |
-
output = gr.Video(label="Generated Music")
|
315 |
-
submit.click(predict_batched, inputs=[text, melody],
|
316 |
-
outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE)
|
317 |
-
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
|
318 |
-
gr.Examples(
|
319 |
-
fn=predict_batched,
|
320 |
-
examples=[
|
321 |
-
[
|
322 |
-
"An 80s driving pop song with heavy drums and synth pads in the background",
|
323 |
-
"./assets/bach.mp3",
|
324 |
-
],
|
325 |
-
[
|
326 |
-
"A cheerful country song with acoustic guitars",
|
327 |
-
"./assets/bolero_ravel.mp3",
|
328 |
-
],
|
329 |
-
[
|
330 |
-
"90s rock song with electric guitar and heavy drums",
|
331 |
-
None,
|
332 |
-
],
|
333 |
-
[
|
334 |
-
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
|
335 |
-
"./assets/bach.mp3",
|
336 |
-
],
|
337 |
-
[
|
338 |
-
"lofi slow bpm electro chill with organic samples",
|
339 |
-
None,
|
340 |
-
],
|
341 |
-
],
|
342 |
-
inputs=[text, melody],
|
343 |
-
outputs=[output]
|
344 |
-
)
|
345 |
-
gr.Markdown("""
|
346 |
-
### More details
|
347 |
-
|
348 |
-
The model will generate 12 seconds of audio based on the description you provided.
|
349 |
-
You can optionaly provide a reference audio from which a broad melody will be extracted.
|
350 |
-
The model will then try to follow both the description and melody provided.
|
351 |
-
All samples are generated with the `melody` model.
|
352 |
-
|
353 |
-
You can also use your own GPU or a Google Colab by following the instructions on our repo.
|
354 |
-
|
355 |
-
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
|
356 |
-
for more details.
|
357 |
-
""")
|
358 |
-
|
359 |
-
demo.queue(max_size=8 * 4).launch(**launch_kwargs)
|
360 |
-
|
361 |
-
|
362 |
-
if __name__ == "__main__":
|
363 |
-
parser = argparse.ArgumentParser()
|
364 |
-
parser.add_argument(
|
365 |
-
'--listen',
|
366 |
-
type=str,
|
367 |
-
default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
|
368 |
-
help='IP to listen on for connections to Gradio',
|
369 |
-
)
|
370 |
-
parser.add_argument(
|
371 |
-
'--username', type=str, default='', help='Username for authentication'
|
372 |
-
)
|
373 |
-
parser.add_argument(
|
374 |
-
'--password', type=str, default='', help='Password for authentication'
|
375 |
-
)
|
376 |
-
parser.add_argument(
|
377 |
-
'--server_port',
|
378 |
-
type=int,
|
379 |
-
default=0,
|
380 |
-
help='Port to run the server listener on',
|
381 |
-
)
|
382 |
-
parser.add_argument(
|
383 |
-
'--inbrowser', action='store_true', help='Open in browser'
|
384 |
-
)
|
385 |
-
parser.add_argument(
|
386 |
-
'--share', action='store_true', help='Share the gradio UI'
|
387 |
-
)
|
388 |
-
|
389 |
-
args = parser.parse_args()
|
390 |
-
|
391 |
-
launch_kwargs = {}
|
392 |
-
launch_kwargs['server_name'] = args.listen
|
393 |
-
|
394 |
-
if args.username and args.password:
|
395 |
-
launch_kwargs['auth'] = (args.username, args.password)
|
396 |
-
if args.server_port:
|
397 |
-
launch_kwargs['server_port'] = args.server_port
|
398 |
-
if args.inbrowser:
|
399 |
-
launch_kwargs['inbrowser'] = args.inbrowser
|
400 |
-
if args.share:
|
401 |
-
launch_kwargs['share'] = args.share
|
402 |
-
|
403 |
-
# Show the interface
|
404 |
-
if IS_BATCHED:
|
405 |
-
ui_batched(launch_kwargs)
|
406 |
-
else:
|
407 |
-
ui_full(launch_kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/__main__.py
DELETED
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-from pip._vendor.platformdirs import PlatformDirs, __version__
-
-PROPS = (
-    "user_data_dir",
-    "user_config_dir",
-    "user_cache_dir",
-    "user_state_dir",
-    "user_log_dir",
-    "user_documents_dir",
-    "user_runtime_dir",
-    "site_data_dir",
-    "site_config_dir",
-    "site_cache_dir",
-)
-
-
-def main() -> None:
-    app_name = "MyApp"
-    app_author = "MyCompany"
-
-    print(f"-- platformdirs {__version__} --")
-
-    print("-- app dirs (with optional 'version')")
-    dirs = PlatformDirs(app_name, app_author, version="1.0")
-    for prop in PROPS:
-        print(f"{prop}: {getattr(dirs, prop)}")
-
-    print("\n-- app dirs (without optional 'version')")
-    dirs = PlatformDirs(app_name, app_author)
-    for prop in PROPS:
-        print(f"{prop}: {getattr(dirs, prop)}")
-
-    print("\n-- app dirs (without optional 'appauthor')")
-    dirs = PlatformDirs(app_name)
-    for prop in PROPS:
-        print(f"{prop}: {getattr(dirs, prop)}")
-
-    print("\n-- app dirs (with disabled 'appauthor')")
-    dirs = PlatformDirs(app_name, appauthor=False)
-    for prop in PROPS:
-        print(f"{prop}: {getattr(dirs, prop)}")
-
-
-if __name__ == "__main__":
-    main()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py
DELETED
@@ -1,97 +0,0 @@
-"""
-    pygments.styles
-    ~~~~~~~~~~~~~~~
-
-    Contains built-in styles.
-
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.plugin import find_plugin_styles
-from pip._vendor.pygments.util import ClassNotFound
-
-
-#: Maps style names to 'submodule::classname'.
-STYLE_MAP = {
-    'default': 'default::DefaultStyle',
-    'emacs': 'emacs::EmacsStyle',
-    'friendly': 'friendly::FriendlyStyle',
-    'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
-    'colorful': 'colorful::ColorfulStyle',
-    'autumn': 'autumn::AutumnStyle',
-    'murphy': 'murphy::MurphyStyle',
-    'manni': 'manni::ManniStyle',
-    'material': 'material::MaterialStyle',
-    'monokai': 'monokai::MonokaiStyle',
-    'perldoc': 'perldoc::PerldocStyle',
-    'pastie': 'pastie::PastieStyle',
-    'borland': 'borland::BorlandStyle',
-    'trac': 'trac::TracStyle',
-    'native': 'native::NativeStyle',
-    'fruity': 'fruity::FruityStyle',
-    'bw': 'bw::BlackWhiteStyle',
-    'vim': 'vim::VimStyle',
-    'vs': 'vs::VisualStudioStyle',
-    'tango': 'tango::TangoStyle',
-    'rrt': 'rrt::RrtStyle',
-    'xcode': 'xcode::XcodeStyle',
-    'igor': 'igor::IgorStyle',
-    'paraiso-light': 'paraiso_light::ParaisoLightStyle',
-    'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
-    'lovelace': 'lovelace::LovelaceStyle',
-    'algol': 'algol::AlgolStyle',
-    'algol_nu': 'algol_nu::Algol_NuStyle',
-    'arduino': 'arduino::ArduinoStyle',
-    'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
-    'abap': 'abap::AbapStyle',
-    'solarized-dark': 'solarized::SolarizedDarkStyle',
-    'solarized-light': 'solarized::SolarizedLightStyle',
-    'sas': 'sas::SasStyle',
-    'staroffice' : 'staroffice::StarofficeStyle',
-    'stata': 'stata_light::StataLightStyle',
-    'stata-light': 'stata_light::StataLightStyle',
-    'stata-dark': 'stata_dark::StataDarkStyle',
-    'inkpot': 'inkpot::InkPotStyle',
-    'zenburn': 'zenburn::ZenburnStyle',
-    'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
-    'gruvbox-light': 'gruvbox::GruvboxLightStyle',
-    'dracula': 'dracula::DraculaStyle',
-    'one-dark': 'onedark::OneDarkStyle',
-    'lilypond' : 'lilypond::LilyPondStyle',
-    'nord': 'nord::NordStyle',
-    'nord-darker': 'nord::NordDarkerStyle',
-    'github-dark': 'gh_dark::GhDarkStyle'
-}
-
-
-def get_style_by_name(name):
-    if name in STYLE_MAP:
-        mod, cls = STYLE_MAP[name].split('::')
-        builtin = "yes"
-    else:
-        for found_name, style in find_plugin_styles():
-            if name == found_name:
-                return style
-        # perhaps it got dropped into our styles package
-        builtin = ""
-        mod = name
-        cls = name.title() + "Style"
-
-    try:
-        mod = __import__('pygments.styles.' + mod, None, None, [cls])
-    except ImportError:
-        raise ClassNotFound("Could not find style module %r" % mod +
-                            (builtin and ", though it should be builtin") + ".")
-    try:
-        return getattr(mod, cls)
-    except AttributeError:
-        raise ClassNotFound("Could not find style class %r in style module." % cls)
-
-
-def get_all_styles():
-    """Return a generator for all styles by name,
-    both builtin and plugin."""
-    yield from STYLE_MAP
-    for name, _ in find_plugin_styles():
-        yield name
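Note (not part of the commit): the deleted module is pip's vendored copy of `pygments.styles`. A short sketch of how the same lookup table is typically consumed through the public pygments package, assuming pygments is installed:

# Resolving a style name and using it for highlighting via the public pygments API.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.styles import get_style_by_name, get_all_styles

style = get_style_by_name('monokai')      # resolves 'monokai' -> MonokaiStyle class
print(sorted(get_all_styles())[:5])       # a few of the registered style names
html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style=style))
print(html)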
spaces/AtomdffAI/wechatgpt4atom/scripts/start.sh
DELETED
@@ -1,16 +0,0 @@
-#!/bin/bash
-#后台运行Chat_on_webchat执行脚本
-
-cd `dirname $0`/..
-export BASE_DIR=`pwd`
-echo $BASE_DIR
-
-# check the nohup.out log output file
-if [ ! -f "${BASE_DIR}/nohup.out" ]; then
-  touch "${BASE_DIR}/nohup.out"
-  echo "create file ${BASE_DIR}/nohup.out"
-fi
-
-nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out"
-
-echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out"
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/backbone/vit.py
DELETED
@@ -1,538 +0,0 @@
|
|
1 |
-
# Modified by Jialian Wu from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py
|
2 |
-
import logging
|
3 |
-
import math
|
4 |
-
import fvcore.nn.weight_init as weight_init
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
from functools import partial
|
8 |
-
|
9 |
-
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
|
10 |
-
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
|
11 |
-
from detectron2.layers import ShapeSpec
|
12 |
-
from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
|
13 |
-
|
14 |
-
import torch.utils.checkpoint as checkpoint
|
15 |
-
from timm.models.layers import DropPath, Mlp, trunc_normal_
|
16 |
-
|
17 |
-
from detectron2.modeling.backbone.backbone import Backbone
|
18 |
-
from .utils import (
|
19 |
-
PatchEmbed,
|
20 |
-
add_decomposed_rel_pos,
|
21 |
-
get_abs_pos,
|
22 |
-
window_partition,
|
23 |
-
window_unpartition,
|
24 |
-
)
|
25 |
-
|
26 |
-
logger = logging.getLogger(__name__)
|
27 |
-
|
28 |
-
|
29 |
-
__all__ = ["ViT"]
|
30 |
-
|
31 |
-
|
32 |
-
class Attention(nn.Module):
|
33 |
-
"""Multi-head Attention block with relative position embeddings."""
|
34 |
-
|
35 |
-
def __init__(
|
36 |
-
self,
|
37 |
-
dim,
|
38 |
-
num_heads=8,
|
39 |
-
qkv_bias=True,
|
40 |
-
use_rel_pos=False,
|
41 |
-
rel_pos_zero_init=True,
|
42 |
-
input_size=None,
|
43 |
-
):
|
44 |
-
"""
|
45 |
-
Args:
|
46 |
-
dim (int): Number of input channels.
|
47 |
-
num_heads (int): Number of attention heads.
|
48 |
-
qkv_bias (bool: If True, add a learnable bias to query, key, value.
|
49 |
-
rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
50 |
-
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
51 |
-
input_size (int or None): Input resolution for calculating the relative positional
|
52 |
-
parameter size.
|
53 |
-
"""
|
54 |
-
super().__init__()
|
55 |
-
self.num_heads = num_heads
|
56 |
-
head_dim = dim // num_heads
|
57 |
-
self.scale = head_dim**-0.5
|
58 |
-
|
59 |
-
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
60 |
-
self.proj = nn.Linear(dim, dim)
|
61 |
-
|
62 |
-
self.use_rel_pos = use_rel_pos
|
63 |
-
if self.use_rel_pos:
|
64 |
-
# initialize relative positional embeddings
|
65 |
-
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
|
66 |
-
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
|
67 |
-
|
68 |
-
if not rel_pos_zero_init:
|
69 |
-
trunc_normal_(self.rel_pos_h, std=0.02)
|
70 |
-
trunc_normal_(self.rel_pos_w, std=0.02)
|
71 |
-
|
72 |
-
def forward(self, x):
|
73 |
-
B, H, W, _ = x.shape
|
74 |
-
# qkv with shape (3, B, nHead, H * W, C)
|
75 |
-
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
76 |
-
# q, k, v with shape (B * nHead, H * W, C)
|
77 |
-
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
|
78 |
-
|
79 |
-
attn = (q * self.scale) @ k.transpose(-2, -1)
|
80 |
-
|
81 |
-
if self.use_rel_pos:
|
82 |
-
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
|
83 |
-
|
84 |
-
attn = attn.softmax(dim=-1)
|
85 |
-
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
|
86 |
-
x = self.proj(x)
|
87 |
-
|
88 |
-
return x
|
89 |
-
|
90 |
-
|
91 |
-
class ResBottleneckBlock(CNNBlockBase):
|
92 |
-
"""
|
93 |
-
The standard bottleneck residual block without the last activation layer.
|
94 |
-
It contains 3 conv layers with kernels 1x1, 3x3, 1x1.
|
95 |
-
"""
|
96 |
-
|
97 |
-
def __init__(
|
98 |
-
self,
|
99 |
-
in_channels,
|
100 |
-
out_channels,
|
101 |
-
bottleneck_channels,
|
102 |
-
norm="LN",
|
103 |
-
act_layer=nn.GELU,
|
104 |
-
):
|
105 |
-
"""
|
106 |
-
Args:
|
107 |
-
in_channels (int): Number of input channels.
|
108 |
-
out_channels (int): Number of output channels.
|
109 |
-
bottleneck_channels (int): number of output channels for the 3x3
|
110 |
-
"bottleneck" conv layers.
|
111 |
-
norm (str or callable): normalization for all conv layers.
|
112 |
-
See :func:`layers.get_norm` for supported format.
|
113 |
-
act_layer (callable): activation for all conv layers.
|
114 |
-
"""
|
115 |
-
super().__init__(in_channels, out_channels, 1)
|
116 |
-
|
117 |
-
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
|
118 |
-
self.norm1 = get_norm(norm, bottleneck_channels)
|
119 |
-
self.act1 = act_layer()
|
120 |
-
|
121 |
-
self.conv2 = Conv2d(
|
122 |
-
bottleneck_channels,
|
123 |
-
bottleneck_channels,
|
124 |
-
3,
|
125 |
-
padding=1,
|
126 |
-
bias=False,
|
127 |
-
)
|
128 |
-
self.norm2 = get_norm(norm, bottleneck_channels)
|
129 |
-
self.act2 = act_layer()
|
130 |
-
|
131 |
-
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
|
132 |
-
self.norm3 = get_norm(norm, out_channels)
|
133 |
-
|
134 |
-
for layer in [self.conv1, self.conv2, self.conv3]:
|
135 |
-
weight_init.c2_msra_fill(layer)
|
136 |
-
for layer in [self.norm1, self.norm2]:
|
137 |
-
layer.weight.data.fill_(1.0)
|
138 |
-
layer.bias.data.zero_()
|
139 |
-
# zero init last norm layer.
|
140 |
-
self.norm3.weight.data.zero_()
|
141 |
-
self.norm3.bias.data.zero_()
|
142 |
-
|
143 |
-
def forward(self, x):
|
144 |
-
out = x
|
145 |
-
for layer in self.children():
|
146 |
-
out = layer(out)
|
147 |
-
|
148 |
-
out = x + out
|
149 |
-
return out
|
150 |
-
|
151 |
-
|
152 |
-
class Block(nn.Module):
|
153 |
-
"""Transformer blocks with support of window attention and residual propagation blocks"""
|
154 |
-
|
155 |
-
def __init__(
|
156 |
-
self,
|
157 |
-
dim,
|
158 |
-
num_heads,
|
159 |
-
mlp_ratio=4.0,
|
160 |
-
qkv_bias=True,
|
161 |
-
drop_path=0.0,
|
162 |
-
norm_layer=nn.LayerNorm,
|
163 |
-
act_layer=nn.GELU,
|
164 |
-
use_rel_pos=False,
|
165 |
-
rel_pos_zero_init=True,
|
166 |
-
window_size=0,
|
167 |
-
use_residual_block=False,
|
168 |
-
input_size=None,
|
169 |
-
):
|
170 |
-
"""
|
171 |
-
Args:
|
172 |
-
dim (int): Number of input channels.
|
173 |
-
num_heads (int): Number of attention heads in each ViT block.
|
174 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
175 |
-
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
176 |
-
drop_path (float): Stochastic depth rate.
|
177 |
-
norm_layer (nn.Module): Normalization layer.
|
178 |
-
act_layer (nn.Module): Activation layer.
|
179 |
-
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
180 |
-
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
181 |
-
window_size (int): Window size for window attention blocks. If it equals 0, then not
|
182 |
-
use window attention.
|
183 |
-
use_residual_block (bool): If True, use a residual block after the MLP block.
|
184 |
-
input_size (int or None): Input resolution for calculating the relative positional
|
185 |
-
parameter size.
|
186 |
-
"""
|
187 |
-
super().__init__()
|
188 |
-
self.norm1 = norm_layer(dim)
|
189 |
-
self.attn = Attention(
|
190 |
-
dim,
|
191 |
-
num_heads=num_heads,
|
192 |
-
qkv_bias=qkv_bias,
|
193 |
-
use_rel_pos=use_rel_pos,
|
194 |
-
rel_pos_zero_init=rel_pos_zero_init,
|
195 |
-
input_size=input_size if window_size == 0 else (window_size, window_size),
|
196 |
-
)
|
197 |
-
|
198 |
-
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
|
199 |
-
self.norm2 = norm_layer(dim)
|
200 |
-
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
|
201 |
-
|
202 |
-
self.window_size = window_size
|
203 |
-
|
204 |
-
self.use_residual_block = use_residual_block
|
205 |
-
if use_residual_block:
|
206 |
-
# Use a residual block with bottleneck channel as dim // 2
|
207 |
-
self.residual = ResBottleneckBlock(
|
208 |
-
in_channels=dim,
|
209 |
-
out_channels=dim,
|
210 |
-
bottleneck_channels=dim // 2,
|
211 |
-
norm="LN",
|
212 |
-
act_layer=act_layer,
|
213 |
-
)
|
214 |
-
|
215 |
-
def forward(self, x):
|
216 |
-
shortcut = x
|
217 |
-
x = self.norm1(x)
|
218 |
-
# Window partition
|
219 |
-
if self.window_size > 0:
|
220 |
-
H, W = x.shape[1], x.shape[2]
|
221 |
-
x, pad_hw = window_partition(x, self.window_size)
|
222 |
-
|
223 |
-
x = self.attn(x)
|
224 |
-
# Reverse window partition
|
225 |
-
if self.window_size > 0:
|
226 |
-
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
|
227 |
-
|
228 |
-
x = shortcut + self.drop_path(x)
|
229 |
-
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
230 |
-
|
231 |
-
if self.use_residual_block:
|
232 |
-
x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
|
233 |
-
|
234 |
-
return x
|
235 |
-
|
236 |
-
|
237 |
-
class ViT(Backbone):
|
238 |
-
"""
|
239 |
-
This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
|
240 |
-
"Exploring Plain Vision Transformer Backbones for Object Detection",
|
241 |
-
https://arxiv.org/abs/2203.16527
|
242 |
-
"""
|
243 |
-
|
244 |
-
def __init__(
|
245 |
-
self,
|
246 |
-
img_size=1024,
|
247 |
-
patch_size=16,
|
248 |
-
in_chans=3,
|
249 |
-
embed_dim=768,
|
250 |
-
depth=12,
|
251 |
-
num_heads=12,
|
252 |
-
mlp_ratio=4.0,
|
253 |
-
qkv_bias=True,
|
254 |
-
drop_path_rate=0.0,
|
255 |
-
norm_layer=nn.LayerNorm,
|
256 |
-
act_layer=nn.GELU,
|
257 |
-
use_abs_pos=True,
|
258 |
-
use_rel_pos=False,
|
259 |
-
rel_pos_zero_init=True,
|
260 |
-
window_size=0,
|
261 |
-
window_block_indexes=(),
|
262 |
-
residual_block_indexes=(),
|
263 |
-
use_act_checkpoint=True,
|
264 |
-
pretrain_img_size=224,
|
265 |
-
pretrain_use_cls_token=True,
|
266 |
-
out_feature="last_feat",
|
267 |
-
):
|
268 |
-
"""
|
269 |
-
Args:
|
270 |
-
img_size (int): Input image size.
|
271 |
-
patch_size (int): Patch size.
|
272 |
-
in_chans (int): Number of input image channels.
|
273 |
-
embed_dim (int): Patch embedding dimension.
|
274 |
-
depth (int): Depth of ViT.
|
275 |
-
num_heads (int): Number of attention heads in each ViT block.
|
276 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
277 |
-
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
278 |
-
drop_path_rate (float): Stochastic depth rate.
|
279 |
-
norm_layer (nn.Module): Normalization layer.
|
280 |
-
act_layer (nn.Module): Activation layer.
|
281 |
-
use_abs_pos (bool): If True, use absolute positional embeddings.
|
282 |
-
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
283 |
-
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
284 |
-
window_size (int): Window size for window attention blocks.
|
285 |
-
window_block_indexes (list): Indexes for blocks using window attention.
|
286 |
-
residual_block_indexes (list): Indexes for blocks using conv propagation.
|
287 |
-
use_act_checkpoint (bool): If True, use activation checkpointing.
|
288 |
-
pretrain_img_size (int): input image size for pretraining models.
|
289 |
-
pretrain_use_cls_token (bool): If True, pretrainig models use class token.
|
290 |
-
out_feature (str): name of the feature from the last block.
|
291 |
-
"""
|
292 |
-
super().__init__()
|
293 |
-
self.pretrain_use_cls_token = pretrain_use_cls_token
|
294 |
-
self.use_act_checkpoint = use_act_checkpoint
|
295 |
-
|
296 |
-
self.patch_embed = PatchEmbed(
|
297 |
-
kernel_size=(patch_size, patch_size),
|
298 |
-
stride=(patch_size, patch_size),
|
299 |
-
in_chans=in_chans,
|
300 |
-
embed_dim=embed_dim,
|
301 |
-
)
|
302 |
-
|
303 |
-
if use_abs_pos:
|
304 |
-
# Initialize absolute positional embedding with pretrain image size.
|
305 |
-
num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
|
306 |
-
num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
|
307 |
-
self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
|
308 |
-
else:
|
309 |
-
self.pos_embed = None
|
310 |
-
|
311 |
-
# stochastic depth decay rule
|
312 |
-
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
|
313 |
-
|
314 |
-
self.blocks = nn.ModuleList()
|
315 |
-
for i in range(depth):
|
316 |
-
block = Block(
|
317 |
-
dim=embed_dim,
|
318 |
-
num_heads=num_heads,
|
319 |
-
mlp_ratio=mlp_ratio,
|
320 |
-
qkv_bias=qkv_bias,
|
321 |
-
drop_path=dpr[i],
|
322 |
-
norm_layer=norm_layer,
|
323 |
-
act_layer=act_layer,
|
324 |
-
use_rel_pos=use_rel_pos,
|
325 |
-
rel_pos_zero_init=rel_pos_zero_init,
|
326 |
-
window_size=window_size if i in window_block_indexes else 0,
|
327 |
-
use_residual_block=i in residual_block_indexes,
|
328 |
-
input_size=(img_size // patch_size, img_size // patch_size),
|
329 |
-
)
|
330 |
-
self.blocks.append(block)
|
331 |
-
|
332 |
-
self._out_feature_channels = {out_feature: embed_dim}
|
333 |
-
self._out_feature_strides = {out_feature: patch_size}
|
334 |
-
self._out_features = [out_feature]
|
335 |
-
|
336 |
-
if self.pos_embed is not None:
|
337 |
-
trunc_normal_(self.pos_embed, std=0.02)
|
338 |
-
|
339 |
-
self.apply(self._init_weights)
|
340 |
-
|
341 |
-
def _init_weights(self, m):
|
342 |
-
if isinstance(m, nn.Linear):
|
343 |
-
trunc_normal_(m.weight, std=0.02)
|
344 |
-
if isinstance(m, nn.Linear) and m.bias is not None:
|
345 |
-
nn.init.constant_(m.bias, 0)
|
346 |
-
elif isinstance(m, nn.LayerNorm):
|
347 |
-
nn.init.constant_(m.bias, 0)
|
348 |
-
nn.init.constant_(m.weight, 1.0)
|
349 |
-
|
350 |
-
def forward(self, x):
|
351 |
-
x = self.patch_embed(x)
|
352 |
-
if self.pos_embed is not None:
|
353 |
-
x = x + get_abs_pos(
|
354 |
-
self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])
|
355 |
-
)
|
356 |
-
|
357 |
-
for blk in self.blocks:
|
358 |
-
if self.use_act_checkpoint:
|
359 |
-
x = checkpoint.checkpoint(blk, x)
|
360 |
-
else:
|
361 |
-
x = blk(x)
|
362 |
-
|
363 |
-
return x.permute(0, 3, 1, 2)
|
364 |
-
|
365 |
-
|
366 |
-
class ViT_FPN(Backbone):
|
367 |
-
def __init__(self, bottom_up=None, top_block=None, out_channels=None, strides=None, vit_out_dim=None):
|
368 |
-
super(ViT_FPN, self).__init__()
|
369 |
-
assert isinstance(bottom_up, Backbone)
|
370 |
-
self.bottom_up = bottom_up
|
371 |
-
self.top_block = top_block
|
372 |
-
|
373 |
-
self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
|
374 |
-
self._out_features = list(self._out_feature_strides.keys())
|
375 |
-
self._out_feature_channels = {k: out_channels for k in self._out_features}
|
376 |
-
self._size_divisibility = strides[2]
|
377 |
-
|
378 |
-
self.maxpool = nn.MaxPool2d(2, stride=2)
|
379 |
-
self.fpn_stride_16_8 = nn.ConvTranspose2d(vit_out_dim, vit_out_dim, 2, stride=2, bias=False)
|
380 |
-
self.fpn_stride8_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
|
381 |
-
self.fpn_stride8_norm1 = nn.LayerNorm(out_channels)
|
382 |
-
self.fpn_stride8_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
|
383 |
-
self.fpn_stride8_norm2 = nn.LayerNorm(out_channels)
|
384 |
-
|
385 |
-
self.fpn_stride16_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
|
386 |
-
self.fpn_stride16_norm1 = nn.LayerNorm(out_channels)
|
387 |
-
self.fpn_stride16_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
|
388 |
-
self.fpn_stride16_norm2 = nn.LayerNorm(out_channels)
|
389 |
-
|
390 |
-
self.fpn_stride32_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
|
391 |
-
self.fpn_stride32_norm1 = nn.LayerNorm(out_channels)
|
392 |
-
self.fpn_stride32_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
|
393 |
-
self.fpn_stride32_norm2 = nn.LayerNorm(out_channels)
|
394 |
-
|
395 |
-
def forward(self, x):
|
396 |
-
vit_output_featuremap = self.bottom_up(x)
|
397 |
-
|
398 |
-
stride8_feature = self.fpn_stride_16_8(vit_output_featuremap)
|
399 |
-
stride8_feature = self.fpn_stride8_norm1(self.fpn_stride8_conv1(stride8_feature)
|
400 |
-
.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
401 |
-
stride8_feature = self.fpn_stride8_norm2(self.fpn_stride8_conv2(stride8_feature)
|
402 |
-
.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
403 |
-
|
404 |
-
stride32_feature = self.maxpool(vit_output_featuremap)
|
405 |
-
stride32_feature = self.fpn_stride32_norm1(self.fpn_stride32_conv1(stride32_feature)
|
406 |
-
.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
407 |
-
stride32_feature = self.fpn_stride32_norm2(self.fpn_stride32_conv2(stride32_feature)
|
408 |
-
.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
409 |
-
|
410 |
-
stride16_feature = self.fpn_stride16_norm1(self.fpn_stride16_conv1(vit_output_featuremap).
|
411 |
-
permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
412 |
-
stride16_feature = self.fpn_stride16_norm2(self.fpn_stride16_conv2(stride16_feature)
|
413 |
-
.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
|
414 |
-
|
415 |
-
results = [stride8_feature, stride16_feature, stride32_feature]
|
416 |
-
|
417 |
-
results.extend(self.top_block(stride32_feature))
|
418 |
-
|
419 |
-
assert len(self._out_features) == len(results)
|
420 |
-
fpn_out = {f: res for f, res in zip(self._out_features, results)}
|
421 |
-
|
422 |
-
return fpn_out
|
423 |
-
@property
|
424 |
-
def size_divisibility(self):
|
425 |
-
return self._size_divisibility
|
426 |
-
|
427 |
-
def output_shape(self):
|
428 |
-
return {
|
429 |
-
name: ShapeSpec(
|
430 |
-
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
|
431 |
-
)
|
432 |
-
for name in self._out_features
|
433 |
-
}
|
434 |
-
|
435 |
-
|
436 |
-
@BACKBONE_REGISTRY.register()
|
437 |
-
def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec):
|
438 |
-
embed_dim = 768
|
439 |
-
vit_out_dim = embed_dim
|
440 |
-
bottom_up = ViT( # Single-scale ViT backbone
|
441 |
-
img_size=1024,
|
442 |
-
patch_size=16,
|
443 |
-
embed_dim=embed_dim,
|
444 |
-
depth=12,
|
445 |
-
num_heads=12,
|
446 |
-
drop_path_rate=0.1,
|
447 |
-
window_size=14,
|
448 |
-
mlp_ratio=4,
|
449 |
-
qkv_bias=True,
|
450 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
451 |
-
window_block_indexes=[
|
452 |
-
# 2, 5, 8 11 for global attention
|
453 |
-
0,
|
454 |
-
1,
|
455 |
-
3,
|
456 |
-
4,
|
457 |
-
6,
|
458 |
-
7,
|
459 |
-
9,
|
460 |
-
10,
|
461 |
-
],
|
462 |
-
residual_block_indexes=[],
|
463 |
-
use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
|
464 |
-
use_rel_pos=True,
|
465 |
-
out_feature="last_feat",)
|
466 |
-
|
467 |
-
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
|
468 |
-
assert out_channels == 256 or out_channels == 768 or out_channels == 1024
|
469 |
-
backbone = ViT_FPN(bottom_up=bottom_up,
|
470 |
-
top_block=LastLevelP6P7_P5(out_channels, out_channels),
|
471 |
-
out_channels=out_channels,
|
472 |
-
strides=[8, 16, 32, 64, 128],
|
473 |
-
vit_out_dim=vit_out_dim)
|
474 |
-
return backbone
|
475 |
-
|
476 |
-
|
477 |
-
@BACKBONE_REGISTRY.register()
|
478 |
-
def build_vit_fpn_backbone_large(cfg, input_shape: ShapeSpec):
|
479 |
-
window_block_indexes = (list(range(0, 5)) + list(range(6, 11)) + list(range(12, 17)) + list(range(18, 23)))
|
480 |
-
embed_dim = 1024
|
481 |
-
vit_out_dim = embed_dim
|
482 |
-
bottom_up = ViT( # Single-scale ViT backbone
|
483 |
-
img_size=1024,
|
484 |
-
patch_size=16,
|
485 |
-
embed_dim=embed_dim,
|
486 |
-
depth=24,
|
487 |
-
num_heads=16,
|
488 |
-
drop_path_rate=0.4,
|
489 |
-
window_size=14,
|
490 |
-
mlp_ratio=4,
|
491 |
-
qkv_bias=True,
|
492 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
493 |
-
window_block_indexes=window_block_indexes,
|
494 |
-
residual_block_indexes=[],
|
495 |
-
use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
|
496 |
-
use_rel_pos=True,
|
497 |
-
out_feature="last_feat",)
|
498 |
-
|
499 |
-
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
|
500 |
-
assert out_channels == 256 or out_channels == 768 or out_channels == 1024
|
501 |
-
backbone = ViT_FPN(bottom_up=bottom_up,
|
502 |
-
top_block=LastLevelP6P7_P5(out_channels, out_channels),
|
503 |
-
out_channels=out_channels,
|
504 |
-
strides=[8, 16, 32, 64, 128],
|
505 |
-
vit_out_dim=vit_out_dim)
|
506 |
-
return backbone
|
507 |
-
|
508 |
-
|
509 |
-
@BACKBONE_REGISTRY.register()
|
510 |
-
def build_vit_fpn_backbone_huge(cfg, input_shape: ShapeSpec):
|
511 |
-
window_block_indexes = (list(range(0, 7)) + list(range(8, 15)) + list(range(16, 23)) + list(range(24, 31)))
|
512 |
-
embed_dim = 1280
|
513 |
-
vit_out_dim = embed_dim
|
514 |
-
bottom_up = ViT( # Single-scale ViT backbone
|
515 |
-
img_size=1024,
|
516 |
-
patch_size=16,
|
517 |
-
embed_dim=embed_dim,
|
518 |
-
depth=32,
|
519 |
-
num_heads=16,
|
520 |
-
drop_path_rate=0.5,
|
521 |
-
window_size=14,
|
522 |
-
mlp_ratio=4,
|
523 |
-
qkv_bias=True,
|
524 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
525 |
-
window_block_indexes=window_block_indexes,
|
526 |
-
residual_block_indexes=[],
|
527 |
-
use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
|
528 |
-
use_rel_pos=True,
|
529 |
-
out_feature="last_feat",)
|
530 |
-
|
531 |
-
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
|
532 |
-
assert out_channels == 256 or out_channels == 768 or out_channels == 1024
|
533 |
-
backbone = ViT_FPN(bottom_up=bottom_up,
|
534 |
-
top_block=LastLevelP6P7_P5(out_channels, out_channels),
|
535 |
-
out_channels=out_channels,
|
536 |
-
strides=[8, 16, 32, 64, 128],
|
537 |
-
vit_out_dim=vit_out_dim)
|
538 |
-
return backbone
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/_framework_compat.py
DELETED
@@ -1,55 +0,0 @@
-"""
-Backward compatibility for homebrew builds on macOS.
-"""
-
-
-import sys
-import os
-import functools
-import subprocess
-import sysconfig
-
-
-@functools.lru_cache()
-def enabled():
-    """
-    Only enabled for Python 3.9 framework homebrew builds
-    except ensurepip and venv.
-    """
-    PY39 = (3, 9) < sys.version_info < (3, 10)
-    framework = sys.platform == 'darwin' and sys._framework
-    homebrew = "Cellar" in sysconfig.get_config_var('projectbase')
-    venv = sys.prefix != sys.base_prefix
-    ensurepip = os.environ.get("ENSUREPIP_OPTIONS")
-    return PY39 and framework and homebrew and not venv and not ensurepip
-
-
-schemes = dict(
-    osx_framework_library=dict(
-        stdlib='{installed_base}/{platlibdir}/python{py_version_short}',
-        platstdlib='{platbase}/{platlibdir}/python{py_version_short}',
-        purelib='{homebrew_prefix}/lib/python{py_version_short}/site-packages',
-        platlib='{homebrew_prefix}/{platlibdir}/python{py_version_short}/site-packages',
-        include='{installed_base}/include/python{py_version_short}{abiflags}',
-        platinclude='{installed_platbase}/include/python{py_version_short}{abiflags}',
-        scripts='{homebrew_prefix}/bin',
-        data='{homebrew_prefix}',
-    )
-)
-
-
-@functools.lru_cache()
-def vars():
-    if not enabled():
-        return {}
-    homebrew_prefix = subprocess.check_output(['brew', '--prefix'], text=True).strip()
-    return locals()
-
-
-def scheme(name):
-    """
-    Override the selected scheme for posix_prefix.
-    """
-    if not enabled() or not name.endswith('_prefix'):
-        return name
-    return 'osx_framework_library'
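As a quick illustration of what the helpers above do (an editorial note, not part of the diff): `enabled()` only returns True on a Homebrew framework build of Python 3.9 on macOS, so everywhere else `vars()` is empty and `scheme()` returns its argument unchanged. A minimal probe, assuming the module is importable from its vendored location:

```python
# Hypothetical probe of the deleted module; on most interpreters enabled() is False.
from setuptools._distutils.command import _framework_compat as fc

print(fc.enabled())
print(fc.vars())                  # {} unless the narrow Homebrew/framework case applies
print(fc.scheme("posix_prefix"))  # 'osx_framework_library' only when enabled()
```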
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/extern/__init__.py
DELETED
@@ -1,76 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
-    """
-    A PEP 302 meta path importer for finding optionally-vendored
-    or otherwise naturally-installed packages from root_name.
-    """
-
-    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
-        self.root_name = root_name
-        self.vendored_names = set(vendored_names)
-        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
-    @property
-    def search_path(self):
-        """
-        Search first the vendor package then as a natural package.
-        """
-        yield self.vendor_pkg + '.'
-        yield ''
-
-    def _module_matches_namespace(self, fullname):
-        """Figure out if the target module is vendored."""
-        root, base, target = fullname.partition(self.root_name + '.')
-        return not root and any(map(target.startswith, self.vendored_names))
-
-    def load_module(self, fullname):
-        """
-        Iterate over the search path to locate and load fullname.
-        """
-        root, base, target = fullname.partition(self.root_name + '.')
-        for prefix in self.search_path:
-            try:
-                extant = prefix + target
-                __import__(extant)
-                mod = sys.modules[extant]
-                sys.modules[fullname] = mod
-                return mod
-            except ImportError:
-                pass
-        else:
-            raise ImportError(
-                "The '{target}' package is required; "
-                "normally this is bundled with this package so if you get "
-                "this warning, consult the packager of your "
-                "distribution.".format(**locals())
-            )
-
-    def create_module(self, spec):
-        return self.load_module(spec.name)
-
-    def exec_module(self, module):
-        pass
-
-    def find_spec(self, fullname, path=None, target=None):
-        """Return a module spec for vendored names."""
-        return (
-            importlib.util.spec_from_loader(fullname, self)
-            if self._module_matches_namespace(fullname) else None
-        )
-
-    def install(self):
-        """
-        Install this importer into sys.meta_path if not already present.
-        """
-        if self not in sys.meta_path:
-            sys.meta_path.append(self)
-
-
-names = (
-    'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
-    'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'tomli',
-)
-VendorImporter(__name__, names, 'setuptools._vendor').install()
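The effect of the installer call above (again, an illustration rather than part of the diff) is that imports under `setuptools.extern.*` first try the vendored copies under `setuptools._vendor` and then fall back to the normally installed packages:

```python
# Hypothetical usage of the meta path importer installed above.
from setuptools.extern.packaging.version import Version

print(Version("1.2.3") < Version("1.10"))  # True; resolved from the vendored or system 'packaging'
```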
spaces/BigSalmon/GPT2_Most_Probable/README.md
DELETED
@@ -1,38 +0,0 @@
----
-title: Try
-emoji: 🏃
-colorFrom: gray
-colorTo: red
-sdk: streamlit
-sdk_version: 0.89.0
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
spaces/Bumpeet/faceTracking/README.md
DELETED
@@ -1,42 +0,0 @@
----
-title: FaceTracking
-emoji: 🏃
-colorFrom: yellow
-colorTo: red
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Instructions on how to run on the local setup
-- run these commands initially in the git bash
-- `git lfs install`
-- `git clone https://huggingface.co/spaces/Bumpeet/faceTracking`
-- `cd faceTracking`
-
-- Now install the libraries mentioned in the requirements.txt, follow this command
-- `pip install -r requirements.txt`
-
-- If you face any issue related to the installation of face-recognition library, make sure you install the c++ development tools from visual studio in windows. [Visual Studio Installer](https://visualstudio.microsoft.com/downloads/)
-
-- If you facing the same issue in the linux servers then install the list of packages mentioned in the packages.txt
-
-- Once everything is properly setup, run the following command in the terminal to explore the UI. `streamlit run app.py`
-
-# Usage of UI
-
-- Upload a video that contains faces of people which you want to track, here is the [link for the sample](https://drive.google.com/file/d/1fl4I2EE_07sNSm0v29VIQ4tJ61qAkdAf/view?usp=sharing)
-
-- After uploading you'll be able to see a slider to skip the frames for better performance. For instance if you choose 3, only 0,3,6,9,.... frames are processed. If it is 4 only 0,4,8,12,.... frames are processed and rest of them will be skipped. This is for better performance.
-
-- After that wait for some time to process the video for the clustering process and you'll be able to watch the video with the tracked faces.
-
-- You can also download the zip file which contains the sub folder with detected
-faces based on their identity.
-
-- Remember if your video length is high then increase the slider to max value.
-
42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/GETTING_STARTED.md
DELETED
@@ -1,80 +0,0 @@
-
-## Getting Started with Detectron2
-
-This document provides a brief intro of the usage of builtin command-line tools in detectron2.
-
-For a tutorial that involves actual coding with the API,
-see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-which covers how to run inference with an
-existing model, and how to train a builtin model on a custom dataset.
-
-For more advanced tutorials, refer to our [documentation](https://detectron2.readthedocs.io/tutorials/extend.html).
-
-
-### Inference Demo with Pre-trained Models
-
-1. Pick a model and its config file from
-  [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md),
-  for example, `mask_rcnn_R_50_FPN_3x.yaml`.
-2. We provide `demo.py` that is able to run builtin standard models. Run it with:
-```
-cd demo/
-python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
-  --input input1.jpg input2.jpg \
-  [--other-options]
-  --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
-```
-The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation.
-This command will run the inference and show visualizations in an OpenCV window.
-
-For details of the command line arguments, see `demo.py -h` or look at its source code
-to understand its behavior. Some common arguments are:
-* To run __on your webcam__, replace `--input files` with `--webcam`.
-* To run __on a video__, replace `--input files` with `--video-input video.mp4`.
-* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`.
-* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
-
-
-### Training & Evaluation in Command Line
-
-We provide a script in "tools/{,plain_}train_net.py", that is made to train
-all the configs provided in detectron2.
-You may want to use it as a reference to write your own training script for a new research.
-
-To train a model with "train_net.py", first
-setup the corresponding datasets following
-[datasets/README.md](https://github.com/facebookresearch/detectron2/blob/master/datasets/README.md),
-then run:
-```
-cd tools/
-./train_net.py --num-gpus 8 \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
-```
-
-The configs are made for 8-GPU training.
-To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.:
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
-```
-
-For most models, CPU training is not supported.
-
-To evaluate a model's performance, use
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  --eval-only MODEL.WEIGHTS /path/to/checkpoint_file
-```
-For more options, see `./train_net.py -h`.
-
-### Use Detectron2 APIs in Your Code
-
-See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-to learn how to use detectron2 APIs to:
-1. run inference with an existing model
-2. train a builtin model on a custom dataset
-
-See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/master/projects)
-for more ways to build your project on detectron2.
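For the API route the deleted document points at, a minimal sketch (assuming detectron2 and OpenCV are installed, and reusing the same model-zoo config as the demo command above) might look like this:

```python
# Hypothetical inference sketch; mirrors the demo.py command shown above.
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = "cpu"  # inference works on CPU even though training generally does not

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input1.jpg"))
print(outputs["instances"].pred_classes)
```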
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/replace.h
DELETED
@@ -1,23 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits this algorithm
-#include <thrust/system/cpp/detail/scatter.h>
-
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/walt/datasets/__init__.py
DELETED
@@ -1,29 +0,0 @@
-from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
-from mmdet.datasets.cityscapes import CityscapesDataset
-from mmdet.datasets.coco import CocoDataset
-from .custom import CustomDatasetLocal
-from mmdet.datasets.custom import CustomDataset
-from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
-                                             RepeatDataset)
-from mmdet.datasets.deepfashion import DeepFashionDataset
-from mmdet.datasets.lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
-from mmdet.datasets.samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
-from mmdet.datasets.utils import (NumClassCheckHook, get_loading_pipeline,
-                                  replace_ImageToTensor)
-from mmdet.datasets.voc import VOCDataset
-from mmdet.datasets.wider_face import WIDERFaceDataset
-from mmdet.datasets.xml_style import XMLDataset
-from .walt_synthetic import WaltSynthDataset
-from .walt_3d import Walt3DDataset
-from .walt import WaltDataset
-__all__ = [
-    'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
-    'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
-    'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
-    'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
-    'ClassBalancedDataset', 'Walt3DDataset','WIDERFaceDataset', 'DATASETS', 'PIPELINES',
-    'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
-    'WaltSynthDataset', 'WaltDataset', 'NumClassCheckHook'
-]
-
-
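The re-exported `build_dataset` follows the usual mmdetection registry pattern: a dataset is instantiated from a config dict whose `type` names one of the registered classes. A hedged sketch with placeholder paths (the Space's real config is not part of this diff):

```python
# Hypothetical config; annotation and image paths are placeholders.
from walt.datasets import build_dataset

dataset = build_dataset(dict(
    type="WaltDataset",
    ann_file="data/walt/annotations/train.json",
    img_prefix="data/walt/images/",
    pipeline=[],
))
print(len(dataset))
```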
spaces/ChrisPreston/meaqua/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Meaqua
-emoji: 👀
-colorFrom: yellow
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CikeyQI/meme-api/meme_generator/memes/fanatic/__init__.py
DELETED
@@ -1,36 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.exception import TextOverLength
-
-img_dir = Path(__file__).parent / "images"
-
-
-def fanatic(images, texts: List[str], args):
-    text = texts[0]
-    frame = BuildImage.open(img_dir / "0.jpg")
-    try:
-        frame.draw_text(
-            (145, 40, 343, 160),
-            text,
-            allow_wrap=True,
-            lines_align="center",
-            max_fontsize=70,
-            min_fontsize=30,
-        )
-    except ValueError:
-        raise TextOverLength(text)
-    return frame.save_jpg()
-
-
-add_meme(
-    "fanatic",
-    fanatic,
-    min_texts=1,
-    max_texts=1,
-    default_texts=["洛天依"],
-    keywords=["狂爱", "狂粉"],
-)
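For reference, the function registered above can also be exercised directly; a small hedged sketch (normally the meme is rendered through the meme_generator registry rather than by calling the module by hand):

```python
# Hypothetical direct call; fanatic() ignores input images and takes one caption.
from meme_generator.memes.fanatic import fanatic

result = fanatic([], ["洛天依"], None)  # returns a BytesIO holding the rendered JPEG
with open("fanatic.jpg", "wb") as f:
    f.write(result.getvalue())
```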
spaces/Codecooker/rvcapi/src/infer_pack/attentions.py
DELETED
@@ -1,417 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from torch.nn import functional as F
|
7 |
-
|
8 |
-
from infer_pack import commons
|
9 |
-
from infer_pack import modules
|
10 |
-
from infer_pack.modules import LayerNorm
|
11 |
-
|
12 |
-
|
13 |
-
class Encoder(nn.Module):
|
14 |
-
def __init__(
|
15 |
-
self,
|
16 |
-
hidden_channels,
|
17 |
-
filter_channels,
|
18 |
-
n_heads,
|
19 |
-
n_layers,
|
20 |
-
kernel_size=1,
|
21 |
-
p_dropout=0.0,
|
22 |
-
window_size=10,
|
23 |
-
**kwargs
|
24 |
-
):
|
25 |
-
super().__init__()
|
26 |
-
self.hidden_channels = hidden_channels
|
27 |
-
self.filter_channels = filter_channels
|
28 |
-
self.n_heads = n_heads
|
29 |
-
self.n_layers = n_layers
|
30 |
-
self.kernel_size = kernel_size
|
31 |
-
self.p_dropout = p_dropout
|
32 |
-
self.window_size = window_size
|
33 |
-
|
34 |
-
self.drop = nn.Dropout(p_dropout)
|
35 |
-
self.attn_layers = nn.ModuleList()
|
36 |
-
self.norm_layers_1 = nn.ModuleList()
|
37 |
-
self.ffn_layers = nn.ModuleList()
|
38 |
-
self.norm_layers_2 = nn.ModuleList()
|
39 |
-
for i in range(self.n_layers):
|
40 |
-
self.attn_layers.append(
|
41 |
-
MultiHeadAttention(
|
42 |
-
hidden_channels,
|
43 |
-
hidden_channels,
|
44 |
-
n_heads,
|
45 |
-
p_dropout=p_dropout,
|
46 |
-
window_size=window_size,
|
47 |
-
)
|
48 |
-
)
|
49 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
50 |
-
self.ffn_layers.append(
|
51 |
-
FFN(
|
52 |
-
hidden_channels,
|
53 |
-
hidden_channels,
|
54 |
-
filter_channels,
|
55 |
-
kernel_size,
|
56 |
-
p_dropout=p_dropout,
|
57 |
-
)
|
58 |
-
)
|
59 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
60 |
-
|
61 |
-
def forward(self, x, x_mask):
|
62 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
63 |
-
x = x * x_mask
|
64 |
-
for i in range(self.n_layers):
|
65 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
66 |
-
y = self.drop(y)
|
67 |
-
x = self.norm_layers_1[i](x + y)
|
68 |
-
|
69 |
-
y = self.ffn_layers[i](x, x_mask)
|
70 |
-
y = self.drop(y)
|
71 |
-
x = self.norm_layers_2[i](x + y)
|
72 |
-
x = x * x_mask
|
73 |
-
return x
|
74 |
-
|
75 |
-
|
76 |
-
class Decoder(nn.Module):
|
77 |
-
def __init__(
|
78 |
-
self,
|
79 |
-
hidden_channels,
|
80 |
-
filter_channels,
|
81 |
-
n_heads,
|
82 |
-
n_layers,
|
83 |
-
kernel_size=1,
|
84 |
-
p_dropout=0.0,
|
85 |
-
proximal_bias=False,
|
86 |
-
proximal_init=True,
|
87 |
-
**kwargs
|
88 |
-
):
|
89 |
-
super().__init__()
|
90 |
-
self.hidden_channels = hidden_channels
|
91 |
-
self.filter_channels = filter_channels
|
92 |
-
self.n_heads = n_heads
|
93 |
-
self.n_layers = n_layers
|
94 |
-
self.kernel_size = kernel_size
|
95 |
-
self.p_dropout = p_dropout
|
96 |
-
self.proximal_bias = proximal_bias
|
97 |
-
self.proximal_init = proximal_init
|
98 |
-
|
99 |
-
self.drop = nn.Dropout(p_dropout)
|
100 |
-
self.self_attn_layers = nn.ModuleList()
|
101 |
-
self.norm_layers_0 = nn.ModuleList()
|
102 |
-
self.encdec_attn_layers = nn.ModuleList()
|
103 |
-
self.norm_layers_1 = nn.ModuleList()
|
104 |
-
self.ffn_layers = nn.ModuleList()
|
105 |
-
self.norm_layers_2 = nn.ModuleList()
|
106 |
-
for i in range(self.n_layers):
|
107 |
-
self.self_attn_layers.append(
|
108 |
-
MultiHeadAttention(
|
109 |
-
hidden_channels,
|
110 |
-
hidden_channels,
|
111 |
-
n_heads,
|
112 |
-
p_dropout=p_dropout,
|
113 |
-
proximal_bias=proximal_bias,
|
114 |
-
proximal_init=proximal_init,
|
115 |
-
)
|
116 |
-
)
|
117 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
118 |
-
self.encdec_attn_layers.append(
|
119 |
-
MultiHeadAttention(
|
120 |
-
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
|
121 |
-
)
|
122 |
-
)
|
123 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
124 |
-
self.ffn_layers.append(
|
125 |
-
FFN(
|
126 |
-
hidden_channels,
|
127 |
-
hidden_channels,
|
128 |
-
filter_channels,
|
129 |
-
kernel_size,
|
130 |
-
p_dropout=p_dropout,
|
131 |
-
causal=True,
|
132 |
-
)
|
133 |
-
)
|
134 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
135 |
-
|
136 |
-
def forward(self, x, x_mask, h, h_mask):
|
137 |
-
"""
|
138 |
-
x: decoder input
|
139 |
-
h: encoder output
|
140 |
-
"""
|
141 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
|
142 |
-
device=x.device, dtype=x.dtype
|
143 |
-
)
|
144 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
145 |
-
x = x * x_mask
|
146 |
-
for i in range(self.n_layers):
|
147 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
148 |
-
y = self.drop(y)
|
149 |
-
x = self.norm_layers_0[i](x + y)
|
150 |
-
|
151 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
152 |
-
y = self.drop(y)
|
153 |
-
x = self.norm_layers_1[i](x + y)
|
154 |
-
|
155 |
-
y = self.ffn_layers[i](x, x_mask)
|
156 |
-
y = self.drop(y)
|
157 |
-
x = self.norm_layers_2[i](x + y)
|
158 |
-
x = x * x_mask
|
159 |
-
return x
|
160 |
-
|
161 |
-
|
162 |
-
class MultiHeadAttention(nn.Module):
|
163 |
-
def __init__(
|
164 |
-
self,
|
165 |
-
channels,
|
166 |
-
out_channels,
|
167 |
-
n_heads,
|
168 |
-
p_dropout=0.0,
|
169 |
-
window_size=None,
|
170 |
-
heads_share=True,
|
171 |
-
block_length=None,
|
172 |
-
proximal_bias=False,
|
173 |
-
proximal_init=False,
|
174 |
-
):
|
175 |
-
super().__init__()
|
176 |
-
assert channels % n_heads == 0
|
177 |
-
|
178 |
-
self.channels = channels
|
179 |
-
self.out_channels = out_channels
|
180 |
-
self.n_heads = n_heads
|
181 |
-
self.p_dropout = p_dropout
|
182 |
-
self.window_size = window_size
|
183 |
-
self.heads_share = heads_share
|
184 |
-
self.block_length = block_length
|
185 |
-
self.proximal_bias = proximal_bias
|
186 |
-
self.proximal_init = proximal_init
|
187 |
-
self.attn = None
|
188 |
-
|
189 |
-
self.k_channels = channels // n_heads
|
190 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
191 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
192 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
193 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
194 |
-
self.drop = nn.Dropout(p_dropout)
|
195 |
-
|
196 |
-
if window_size is not None:
|
197 |
-
n_heads_rel = 1 if heads_share else n_heads
|
198 |
-
rel_stddev = self.k_channels**-0.5
|
199 |
-
self.emb_rel_k = nn.Parameter(
|
200 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
201 |
-
* rel_stddev
|
202 |
-
)
|
203 |
-
self.emb_rel_v = nn.Parameter(
|
204 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
205 |
-
* rel_stddev
|
206 |
-
)
|
207 |
-
|
208 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
209 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
210 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
211 |
-
if proximal_init:
|
212 |
-
with torch.no_grad():
|
213 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
214 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
215 |
-
|
216 |
-
def forward(self, x, c, attn_mask=None):
|
217 |
-
q = self.conv_q(x)
|
218 |
-
k = self.conv_k(c)
|
219 |
-
v = self.conv_v(c)
|
220 |
-
|
221 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
222 |
-
|
223 |
-
x = self.conv_o(x)
|
224 |
-
return x
|
225 |
-
|
226 |
-
def attention(self, query, key, value, mask=None):
|
227 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
228 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
229 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
230 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
231 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
232 |
-
|
233 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
234 |
-
if self.window_size is not None:
|
235 |
-
assert (
|
236 |
-
t_s == t_t
|
237 |
-
), "Relative attention is only available for self-attention."
|
238 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
239 |
-
rel_logits = self._matmul_with_relative_keys(
|
240 |
-
query / math.sqrt(self.k_channels), key_relative_embeddings
|
241 |
-
)
|
242 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
243 |
-
scores = scores + scores_local
|
244 |
-
if self.proximal_bias:
|
245 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
246 |
-
scores = scores + self._attention_bias_proximal(t_s).to(
|
247 |
-
device=scores.device, dtype=scores.dtype
|
248 |
-
)
|
249 |
-
if mask is not None:
|
250 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
251 |
-
if self.block_length is not None:
|
252 |
-
assert (
|
253 |
-
t_s == t_t
|
254 |
-
), "Local attention is only available for self-attention."
|
255 |
-
block_mask = (
|
256 |
-
torch.ones_like(scores)
|
257 |
-
.triu(-self.block_length)
|
258 |
-
.tril(self.block_length)
|
259 |
-
)
|
260 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
261 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
262 |
-
p_attn = self.drop(p_attn)
|
263 |
-
output = torch.matmul(p_attn, value)
|
264 |
-
if self.window_size is not None:
|
265 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
266 |
-
value_relative_embeddings = self._get_relative_embeddings(
|
267 |
-
self.emb_rel_v, t_s
|
268 |
-
)
|
269 |
-
output = output + self._matmul_with_relative_values(
|
270 |
-
relative_weights, value_relative_embeddings
|
271 |
-
)
|
272 |
-
output = (
|
273 |
-
output.transpose(2, 3).contiguous().view(b, d, t_t)
|
274 |
-
) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
275 |
-
return output, p_attn
|
276 |
-
|
277 |
-
def _matmul_with_relative_values(self, x, y):
|
278 |
-
"""
|
279 |
-
x: [b, h, l, m]
|
280 |
-
y: [h or 1, m, d]
|
281 |
-
ret: [b, h, l, d]
|
282 |
-
"""
|
283 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
284 |
-
return ret
|
285 |
-
|
286 |
-
def _matmul_with_relative_keys(self, x, y):
|
287 |
-
"""
|
288 |
-
x: [b, h, l, d]
|
289 |
-
y: [h or 1, m, d]
|
290 |
-
ret: [b, h, l, m]
|
291 |
-
"""
|
292 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
293 |
-
return ret
|
294 |
-
|
295 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
296 |
-
max_relative_position = 2 * self.window_size + 1
|
297 |
-
# Pad first before slice to avoid using cond ops.
|
298 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
299 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
300 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
301 |
-
if pad_length > 0:
|
302 |
-
padded_relative_embeddings = F.pad(
|
303 |
-
relative_embeddings,
|
304 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
|
305 |
-
)
|
306 |
-
else:
|
307 |
-
padded_relative_embeddings = relative_embeddings
|
308 |
-
used_relative_embeddings = padded_relative_embeddings[
|
309 |
-
:, slice_start_position:slice_end_position
|
310 |
-
]
|
311 |
-
return used_relative_embeddings
|
312 |
-
|
313 |
-
def _relative_position_to_absolute_position(self, x):
|
314 |
-
"""
|
315 |
-
x: [b, h, l, 2*l-1]
|
316 |
-
ret: [b, h, l, l]
|
317 |
-
"""
|
318 |
-
batch, heads, length, _ = x.size()
|
319 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
320 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
321 |
-
|
322 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
323 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
324 |
-
x_flat = F.pad(
|
325 |
-
x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
|
326 |
-
)
|
327 |
-
|
328 |
-
# Reshape and slice out the padded elements.
|
329 |
-
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
|
330 |
-
:, :, :length, length - 1 :
|
331 |
-
]
|
332 |
-
return x_final
|
333 |
-
|
334 |
-
def _absolute_position_to_relative_position(self, x):
|
335 |
-
"""
|
336 |
-
x: [b, h, l, l]
|
337 |
-
ret: [b, h, l, 2*l-1]
|
338 |
-
"""
|
339 |
-
batch, heads, length, _ = x.size()
|
340 |
-
# padd along column
|
341 |
-
x = F.pad(
|
342 |
-
x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
|
343 |
-
)
|
344 |
-
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
|
345 |
-
# add 0's in the beginning that will skew the elements after reshape
|
346 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
347 |
-
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
348 |
-
return x_final
|
349 |
-
|
350 |
-
def _attention_bias_proximal(self, length):
|
351 |
-
"""Bias for self-attention to encourage attention to close positions.
|
352 |
-
Args:
|
353 |
-
length: an integer scalar.
|
354 |
-
Returns:
|
355 |
-
a Tensor with shape [1, 1, length, length]
|
356 |
-
"""
|
357 |
-
r = torch.arange(length, dtype=torch.float32)
|
358 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
359 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
360 |
-
|
361 |
-
|
362 |
-
class FFN(nn.Module):
|
363 |
-
def __init__(
|
364 |
-
self,
|
365 |
-
in_channels,
|
366 |
-
out_channels,
|
367 |
-
filter_channels,
|
368 |
-
kernel_size,
|
369 |
-
p_dropout=0.0,
|
370 |
-
activation=None,
|
371 |
-
causal=False,
|
372 |
-
):
|
373 |
-
super().__init__()
|
374 |
-
self.in_channels = in_channels
|
375 |
-
self.out_channels = out_channels
|
376 |
-
self.filter_channels = filter_channels
|
377 |
-
self.kernel_size = kernel_size
|
378 |
-
self.p_dropout = p_dropout
|
379 |
-
self.activation = activation
|
380 |
-
self.causal = causal
|
381 |
-
|
382 |
-
if causal:
|
383 |
-
self.padding = self._causal_padding
|
384 |
-
else:
|
385 |
-
self.padding = self._same_padding
|
386 |
-
|
387 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
388 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
389 |
-
self.drop = nn.Dropout(p_dropout)
|
390 |
-
|
391 |
-
def forward(self, x, x_mask):
|
392 |
-
x = self.conv_1(self.padding(x * x_mask))
|
393 |
-
if self.activation == "gelu":
|
394 |
-
x = x * torch.sigmoid(1.702 * x)
|
395 |
-
else:
|
396 |
-
x = torch.relu(x)
|
397 |
-
x = self.drop(x)
|
398 |
-
x = self.conv_2(self.padding(x * x_mask))
|
399 |
-
return x * x_mask
|
400 |
-
|
401 |
-
def _causal_padding(self, x):
|
402 |
-
if self.kernel_size == 1:
|
403 |
-
return x
|
404 |
-
pad_l = self.kernel_size - 1
|
405 |
-
pad_r = 0
|
406 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
407 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
408 |
-
return x
|
409 |
-
|
410 |
-
def _same_padding(self, x):
|
411 |
-
if self.kernel_size == 1:
|
412 |
-
return x
|
413 |
-
pad_l = (self.kernel_size - 1) // 2
|
414 |
-
pad_r = self.kernel_size // 2
|
415 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
416 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
417 |
-
return x
|
spaces/CofAI/chat.v1/config.py
DELETED
@@ -1,9 +0,0 @@
-from dotenv import load_dotenv
-import os
-
-load_dotenv(dotenv_path=".env")  # Load environment variables from .env file
-
-# DATABASE_URL = os.getenv("DATABASE_URL")
-# OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-# OCR_API_KEY = os.getenv("OCR_API_KEY")
-NGROK_AUTH_TOKEN = os.getenv("NGROK_AUTH_TOKEN")
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/__init__.py
DELETED
@@ -1,31 +0,0 @@
-"""
-Copyright (c) 2022, salesforce.com, inc.
-All rights reserved.
-SPDX-License-Identifier: BSD-3-Clause
-For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import os
-import sys
-
-from omegaconf import OmegaConf
-
-from video_llama.common.registry import registry
-
-from video_llama.datasets.builders import *
-from video_llama.models import *
-from video_llama.processors import *
-from video_llama.tasks import *
-
-
-root_dir = os.path.dirname(os.path.abspath(__file__))
-default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml"))
-
-registry.register_path("library_root", root_dir)
-repo_root = os.path.join(root_dir, "..")
-registry.register_path("repo_root", repo_root)
-cache_root = os.path.join(repo_root, default_cfg.env.cache_root)
-registry.register_path("cache_root", cache_root)
-
-registry.register("MAX_INT", sys.maxsize)
-registry.register("SPLIT_NAMES", ["train", "val", "test"])
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/termui.py
DELETED
@@ -1,784 +0,0 @@
|
|
1 |
-
import inspect
|
2 |
-
import io
|
3 |
-
import itertools
|
4 |
-
import sys
|
5 |
-
import typing as t
|
6 |
-
from gettext import gettext as _
|
7 |
-
|
8 |
-
from ._compat import isatty
|
9 |
-
from ._compat import strip_ansi
|
10 |
-
from .exceptions import Abort
|
11 |
-
from .exceptions import UsageError
|
12 |
-
from .globals import resolve_color_default
|
13 |
-
from .types import Choice
|
14 |
-
from .types import convert_type
|
15 |
-
from .types import ParamType
|
16 |
-
from .utils import echo
|
17 |
-
from .utils import LazyFile
|
18 |
-
|
19 |
-
if t.TYPE_CHECKING:
|
20 |
-
from ._termui_impl import ProgressBar
|
21 |
-
|
22 |
-
V = t.TypeVar("V")
|
23 |
-
|
24 |
-
# The prompt functions to use. The doc tools currently override these
|
25 |
-
# functions to customize how they work.
|
26 |
-
visible_prompt_func: t.Callable[[str], str] = input
|
27 |
-
|
28 |
-
_ansi_colors = {
|
29 |
-
"black": 30,
|
30 |
-
"red": 31,
|
31 |
-
"green": 32,
|
32 |
-
"yellow": 33,
|
33 |
-
"blue": 34,
|
34 |
-
"magenta": 35,
|
35 |
-
"cyan": 36,
|
36 |
-
"white": 37,
|
37 |
-
"reset": 39,
|
38 |
-
"bright_black": 90,
|
39 |
-
"bright_red": 91,
|
40 |
-
"bright_green": 92,
|
41 |
-
"bright_yellow": 93,
|
42 |
-
"bright_blue": 94,
|
43 |
-
"bright_magenta": 95,
|
44 |
-
"bright_cyan": 96,
|
45 |
-
"bright_white": 97,
|
46 |
-
}
|
47 |
-
_ansi_reset_all = "\033[0m"
|
48 |
-
|
49 |
-
|
50 |
-
def hidden_prompt_func(prompt: str) -> str:
|
51 |
-
import getpass
|
52 |
-
|
53 |
-
return getpass.getpass(prompt)
|
54 |
-
|
55 |
-
|
56 |
-
def _build_prompt(
|
57 |
-
text: str,
|
58 |
-
suffix: str,
|
59 |
-
show_default: bool = False,
|
60 |
-
default: t.Optional[t.Any] = None,
|
61 |
-
show_choices: bool = True,
|
62 |
-
type: t.Optional[ParamType] = None,
|
63 |
-
) -> str:
|
64 |
-
prompt = text
|
65 |
-
if type is not None and show_choices and isinstance(type, Choice):
|
66 |
-
prompt += f" ({', '.join(map(str, type.choices))})"
|
67 |
-
if default is not None and show_default:
|
68 |
-
prompt = f"{prompt} [{_format_default(default)}]"
|
69 |
-
return f"{prompt}{suffix}"
|
70 |
-
|
71 |
-
|
72 |
-
def _format_default(default: t.Any) -> t.Any:
|
73 |
-
if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
|
74 |
-
return default.name
|
75 |
-
|
76 |
-
return default
|
77 |
-
|
78 |
-
|
79 |
-
def prompt(
|
80 |
-
text: str,
|
81 |
-
default: t.Optional[t.Any] = None,
|
82 |
-
hide_input: bool = False,
|
83 |
-
confirmation_prompt: t.Union[bool, str] = False,
|
84 |
-
type: t.Optional[t.Union[ParamType, t.Any]] = None,
|
85 |
-
value_proc: t.Optional[t.Callable[[str], t.Any]] = None,
|
86 |
-
prompt_suffix: str = ": ",
|
87 |
-
show_default: bool = True,
|
88 |
-
err: bool = False,
|
89 |
-
show_choices: bool = True,
|
90 |
-
) -> t.Any:
|
91 |
-
"""Prompts a user for input. This is a convenience function that can
|
92 |
-
be used to prompt a user for input later.
|
93 |
-
|
94 |
-
If the user aborts the input by sending an interrupt signal, this
|
95 |
-
function will catch it and raise a :exc:`Abort` exception.
|
96 |
-
|
97 |
-
:param text: the text to show for the prompt.
|
98 |
-
:param default: the default value to use if no input happens. If this
|
99 |
-
is not given it will prompt until it's aborted.
|
100 |
-
:param hide_input: if this is set to true then the input value will
|
101 |
-
be hidden.
|
102 |
-
:param confirmation_prompt: Prompt a second time to confirm the
|
103 |
-
value. Can be set to a string instead of ``True`` to customize
|
104 |
-
the message.
|
105 |
-
:param type: the type to use to check the value against.
|
106 |
-
:param value_proc: if this parameter is provided it's a function that
|
107 |
-
is invoked instead of the type conversion to
|
108 |
-
convert a value.
|
109 |
-
:param prompt_suffix: a suffix that should be added to the prompt.
|
110 |
-
:param show_default: shows or hides the default value in the prompt.
|
111 |
-
:param err: if set to true the file defaults to ``stderr`` instead of
|
112 |
-
``stdout``, the same as with echo.
|
113 |
-
:param show_choices: Show or hide choices if the passed type is a Choice.
|
114 |
-
For example if type is a Choice of either day or week,
|
115 |
-
show_choices is true and text is "Group by" then the
|
116 |
-
prompt will be "Group by (day, week): ".
|
117 |
-
|
118 |
-
.. versionadded:: 8.0
|
119 |
-
``confirmation_prompt`` can be a custom string.
|
120 |
-
|
121 |
-
.. versionadded:: 7.0
|
122 |
-
Added the ``show_choices`` parameter.
|
123 |
-
|
124 |
-
.. versionadded:: 6.0
|
125 |
-
Added unicode support for cmd.exe on Windows.
|
126 |
-
|
127 |
-
.. versionadded:: 4.0
|
128 |
-
Added the `err` parameter.
|
129 |
-
|
130 |
-
"""
|
131 |
-
|
132 |
-
def prompt_func(text: str) -> str:
|
133 |
-
f = hidden_prompt_func if hide_input else visible_prompt_func
|
134 |
-
try:
|
135 |
-
# Write the prompt separately so that we get nice
|
136 |
-
# coloring through colorama on Windows
|
137 |
-
echo(text.rstrip(" "), nl=False, err=err)
|
138 |
-
# Echo a space to stdout to work around an issue where
|
139 |
-
# readline causes backspace to clear the whole line.
|
140 |
-
return f(" ")
|
141 |
-
except (KeyboardInterrupt, EOFError):
|
142 |
-
# getpass doesn't print a newline if the user aborts input with ^C.
|
143 |
-
# Allegedly this behavior is inherited from getpass(3).
|
144 |
-
# A doc bug has been filed at https://bugs.python.org/issue24711
|
145 |
-
if hide_input:
|
146 |
-
echo(None, err=err)
|
147 |
-
raise Abort() from None
|
148 |
-
|
149 |
-
if value_proc is None:
|
150 |
-
value_proc = convert_type(type, default)
|
151 |
-
|
152 |
-
prompt = _build_prompt(
|
153 |
-
text, prompt_suffix, show_default, default, show_choices, type
|
154 |
-
)
|
155 |
-
|
156 |
-
if confirmation_prompt:
|
157 |
-
if confirmation_prompt is True:
|
158 |
-
confirmation_prompt = _("Repeat for confirmation")
|
159 |
-
|
160 |
-
confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)
|
161 |
-
|
162 |
-
while True:
|
163 |
-
while True:
|
164 |
-
value = prompt_func(prompt)
|
165 |
-
if value:
|
166 |
-
break
|
167 |
-
elif default is not None:
|
168 |
-
value = default
|
169 |
-
break
|
170 |
-
try:
|
171 |
-
result = value_proc(value)
|
172 |
-
except UsageError as e:
|
173 |
-
if hide_input:
|
174 |
-
echo(_("Error: The value you entered was invalid."), err=err)
|
175 |
-
else:
|
176 |
-
echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306
|
177 |
-
continue
|
178 |
-
if not confirmation_prompt:
|
179 |
-
return result
|
180 |
-
while True:
|
181 |
-
value2 = prompt_func(confirmation_prompt)
|
182 |
-
is_empty = not value and not value2
|
183 |
-
if value2 or is_empty:
|
184 |
-
break
|
185 |
-
if value == value2:
|
186 |
-
return result
|
187 |
-
echo(_("Error: The two entered values do not match."), err=err)
|
188 |
-
|
189 |
-
|
190 |
-
def confirm(
|
191 |
-
text: str,
|
192 |
-
default: t.Optional[bool] = False,
|
193 |
-
abort: bool = False,
|
194 |
-
prompt_suffix: str = ": ",
|
195 |
-
show_default: bool = True,
|
196 |
-
err: bool = False,
|
197 |
-
) -> bool:
|
198 |
-
"""Prompts for confirmation (yes/no question).
|
199 |
-
|
200 |
-
If the user aborts the input by sending a interrupt signal this
|
201 |
-
function will catch it and raise a :exc:`Abort` exception.
|
202 |
-
|
203 |
-
:param text: the question to ask.
|
204 |
-
:param default: The default value to use when no input is given. If
|
205 |
-
``None``, repeat until input is given.
|
206 |
-
:param abort: if this is set to `True` a negative answer aborts the
|
207 |
-
exception by raising :exc:`Abort`.
|
208 |
-
:param prompt_suffix: a suffix that should be added to the prompt.
|
209 |
-
:param show_default: shows or hides the default value in the prompt.
|
210 |
-
:param err: if set to true the file defaults to ``stderr`` instead of
|
211 |
-
``stdout``, the same as with echo.
|
212 |
-
|
213 |
-
.. versionchanged:: 8.0
|
214 |
-
Repeat until input is given if ``default`` is ``None``.
|
215 |
-
|
216 |
-
.. versionadded:: 4.0
|
217 |
-
Added the ``err`` parameter.
|
218 |
-
"""
|
219 |
-
prompt = _build_prompt(
|
220 |
-
text,
|
221 |
-
prompt_suffix,
|
222 |
-
show_default,
|
223 |
-
"y/n" if default is None else ("Y/n" if default else "y/N"),
|
224 |
-
)
|
225 |
-
|
226 |
-
while True:
|
227 |
-
try:
|
228 |
-
# Write the prompt separately so that we get nice
|
229 |
-
# coloring through colorama on Windows
|
230 |
-
echo(prompt.rstrip(" "), nl=False, err=err)
|
231 |
-
# Echo a space to stdout to work around an issue where
|
232 |
-
# readline causes backspace to clear the whole line.
|
233 |
-
value = visible_prompt_func(" ").lower().strip()
|
234 |
-
except (KeyboardInterrupt, EOFError):
|
235 |
-
raise Abort() from None
|
236 |
-
if value in ("y", "yes"):
|
237 |
-
rv = True
|
238 |
-
elif value in ("n", "no"):
|
239 |
-
rv = False
|
240 |
-
elif default is not None and value == "":
|
241 |
-
rv = default
|
242 |
-
else:
|
243 |
-
echo(_("Error: invalid input"), err=err)
|
244 |
-
continue
|
245 |
-
break
|
246 |
-
if abort and not rv:
|
247 |
-
raise Abort()
|
248 |
-
return rv
|
249 |
-
|
250 |
-
|
251 |
-
def echo_via_pager(
|
252 |
-
text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
|
253 |
-
color: t.Optional[bool] = None,
|
254 |
-
) -> None:
|
255 |
-
"""This function takes a text and shows it via an environment specific
|
256 |
-
pager on stdout.
|
257 |
-
|
258 |
-
.. versionchanged:: 3.0
|
259 |
-
Added the `color` flag.
|
260 |
-
|
261 |
-
:param text_or_generator: the text to page, or alternatively, a
|
262 |
-
generator emitting the text to page.
|
263 |
-
:param color: controls if the pager supports ANSI colors or not. The
|
264 |
-
default is autodetection.
|
265 |
-
"""
|
266 |
-
color = resolve_color_default(color)
|
267 |
-
|
268 |
-
if inspect.isgeneratorfunction(text_or_generator):
|
269 |
-
i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
|
270 |
-
elif isinstance(text_or_generator, str):
|
271 |
-
i = [text_or_generator]
|
272 |
-
else:
|
273 |
-
i = iter(t.cast(t.Iterable[str], text_or_generator))
|
274 |
-
|
275 |
-
# convert every element of i to a text type if necessary
|
276 |
-
text_generator = (el if isinstance(el, str) else str(el) for el in i)
|
277 |
-
|
278 |
-
from ._termui_impl import pager
|
279 |
-
|
280 |
-
return pager(itertools.chain(text_generator, "\n"), color)
|
281 |
-
|
282 |
-
|
283 |
-
def progressbar(
|
284 |
-
iterable: t.Optional[t.Iterable[V]] = None,
|
285 |
-
length: t.Optional[int] = None,
|
286 |
-
label: t.Optional[str] = None,
|
287 |
-
show_eta: bool = True,
|
288 |
-
show_percent: t.Optional[bool] = None,
|
289 |
-
show_pos: bool = False,
|
290 |
-
item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
|
291 |
-
fill_char: str = "#",
|
292 |
-
empty_char: str = "-",
|
293 |
-
bar_template: str = "%(label)s [%(bar)s] %(info)s",
|
294 |
-
info_sep: str = " ",
|
295 |
-
width: int = 36,
|
296 |
-
file: t.Optional[t.TextIO] = None,
|
297 |
-
color: t.Optional[bool] = None,
|
298 |
-
update_min_steps: int = 1,
|
299 |
-
) -> "ProgressBar[V]":
|
300 |
-
"""This function creates an iterable context manager that can be used
|
301 |
-
to iterate over something while showing a progress bar. It will
|
302 |
-
either iterate over the `iterable` or `length` items (that are counted
|
303 |
-
up). While iteration happens, this function will print a rendered
|
304 |
-
progress bar to the given `file` (defaults to stdout) and will attempt
|
305 |
-
to calculate remaining time and more. By default, this progress bar
|
306 |
-
will not be rendered if the file is not a terminal.
|
307 |
-
|
308 |
-
The context manager creates the progress bar. When the context
|
309 |
-
manager is entered the progress bar is already created. With every
|
310 |
-
iteration over the progress bar, the iterable passed to the bar is
|
311 |
-
advanced and the bar is updated. When the context manager exits,
|
312 |
-
a newline is printed and the progress bar is finalized on screen.
|
313 |
-
|
314 |
-
Note: The progress bar is currently designed for use cases where the
|
315 |
-
total progress can be expected to take at least several seconds.
|
316 |
-
Because of this, the ProgressBar class object won't display
|
317 |
-
progress that is considered too fast, and progress where the time
|
318 |
-
between steps is less than a second.
|
319 |
-
|
320 |
-
No printing must happen or the progress bar will be unintentionally
|
321 |
-
destroyed.
|
322 |
-
|
323 |
-
Example usage::
|
324 |
-
|
325 |
-
with progressbar(items) as bar:
|
326 |
-
for item in bar:
|
327 |
-
do_something_with(item)
|
328 |
-
|
329 |
-
Alternatively, if no iterable is specified, one can manually update the
|
330 |
-
progress bar through the `update()` method instead of directly
|
331 |
-
iterating over the progress bar. The update method accepts the number
|
332 |
-
of steps to increment the bar with::
|
333 |
-
|
334 |
-
with progressbar(length=chunks.total_bytes) as bar:
|
335 |
-
for chunk in chunks:
|
336 |
-
process_chunk(chunk)
|
337 |
-
bar.update(chunks.bytes)
|
338 |
-
|
339 |
-
The ``update()`` method also takes an optional value specifying the
|
340 |
-
``current_item`` at the new position. This is useful when used
|
341 |
-
together with ``item_show_func`` to customize the output for each
|
342 |
-
manual step::
|
343 |
-
|
344 |
-
with click.progressbar(
|
345 |
-
length=total_size,
|
346 |
-
label='Unzipping archive',
|
347 |
-
item_show_func=lambda a: a.filename
|
348 |
-
) as bar:
|
349 |
-
for archive in zip_file:
|
350 |
-
archive.extract()
|
351 |
-
bar.update(archive.size, archive)
|
352 |
-
|
353 |
-
:param iterable: an iterable to iterate over. If not provided the length
|
354 |
-
is required.
|
355 |
-
:param length: the number of items to iterate over. By default the
|
356 |
-
progressbar will attempt to ask the iterator about its
|
357 |
-
length, which might or might not work. If an iterable is
|
358 |
-
also provided this parameter can be used to override the
|
359 |
-
length. If an iterable is not provided the progress bar
|
360 |
-
will iterate over a range of that length.
|
361 |
-
:param label: the label to show next to the progress bar.
|
362 |
-
:param show_eta: enables or disables the estimated time display. This is
|
363 |
-
automatically disabled if the length cannot be
|
364 |
-
determined.
|
365 |
-
:param show_percent: enables or disables the percentage display. The
|
366 |
-
default is `True` if the iterable has a length or
|
367 |
-
`False` if not.
|
368 |
-
:param show_pos: enables or disables the absolute position display. The
|
369 |
-
default is `False`.
|
370 |
-
:param item_show_func: A function called with the current item which
|
371 |
-
can return a string to show next to the progress bar. If the
|
372 |
-
function returns ``None`` nothing is shown. The current item can
|
373 |
-
be ``None``, such as when entering and exiting the bar.
|
374 |
-
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: The file to write to. If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not. The
                  default is autodetection. This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    :param update_min_steps: Render only when this many updates have
        completed. This allows tuning for very fast iterators.

    .. versionchanged:: 8.0
        Output is shown even if execution time is less than 0.5 seconds.

    .. versionchanged:: 8.0
        ``item_show_func`` shows the current item, not the previous one.

    .. versionchanged:: 8.0
        Labels are echoed if the output is not a TTY. Reverts a change
        in 7.0 that removed all output.

    .. versionadded:: 8.0
        Added the ``update_min_steps`` parameter.

    .. versionchanged:: 4.0
        Added the ``color`` parameter. Added the ``update`` method to
        the object.

    .. versionadded:: 2.0
    """
    from ._termui_impl import ProgressBar

    color = resolve_color_default(color)
    return ProgressBar(
        iterable=iterable,
        length=length,
        show_eta=show_eta,
        show_percent=show_percent,
        show_pos=show_pos,
        item_show_func=item_show_func,
        fill_char=fill_char,
        empty_char=empty_char,
        bar_template=bar_template,
        info_sep=info_sep,
        file=file,
        label=label,
        width=width,
        color=color,
        update_min_steps=update_min_steps,
    )


def clear() -> None:
    """Clears the terminal screen. This will have the effect of clearing
    the whole visible space of the terminal and moving the cursor to the
    top left. This does not do anything if not connected to a terminal.

    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return

    # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor
    echo("\033[2J\033[1;1H", nl=False)


def _interpret_color(
    color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0
) -> str:
    if isinstance(color, int):
        return f"{38 + offset};5;{color:d}"

    if isinstance(color, (tuple, list)):
        r, g, b = color
        return f"{38 + offset};2;{r:d};{g:d};{b:d}"

    return str(_ansi_colors[color] + offset)


def style(
    text: t.Any,
    fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
    bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
    bold: t.Optional[bool] = None,
    dim: t.Optional[bool] = None,
    underline: t.Optional[bool] = None,
    overline: t.Optional[bool] = None,
    italic: t.Optional[bool] = None,
    blink: t.Optional[bool] = None,
    reverse: t.Optional[bool] = None,
    strikethrough: t.Optional[bool] = None,
    reset: bool = True,
) -> str:
    """Styles a text with ANSI styles and returns the new string. By
    default the styling is self contained which means that at the end
    of the string a reset code is issued. This can be prevented by
    passing ``reset=False``.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))
        click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))

    Supported color names:

    * ``black`` (might be a gray)
    * ``red``
    * ``green``
    * ``yellow`` (might be an orange)
    * ``blue``
    * ``magenta``
    * ``cyan``
    * ``white`` (might be light gray)
    * ``bright_black``
    * ``bright_red``
    * ``bright_green``
    * ``bright_yellow``
    * ``bright_blue``
    * ``bright_magenta``
    * ``bright_cyan``
    * ``bright_white``
    * ``reset`` (reset the color code only)

    If the terminal supports it, color may also be specified as:

    - An integer in the interval [0, 255]. The terminal must support
      8-bit/256-color mode.
    - An RGB tuple of three integers in [0, 255]. The terminal must
      support 24-bit/true-color mode.

    See https://en.wikipedia.org/wiki/ANSI_color and
    https://gist.github.com/XVilka/8346728 for more information.

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: if provided this will enable or disable bold mode.
    :param dim: if provided this will enable or disable dim mode. This is
                badly supported.
    :param underline: if provided this will enable or disable underline.
    :param overline: if provided this will enable or disable overline.
    :param italic: if provided this will enable or disable italic.
    :param blink: if provided this will enable or disable blinking.
    :param reverse: if provided this will enable or disable inverse
                    rendering (foreground becomes background and the
                    other way round).
    :param strikethrough: if provided this will enable or disable
        striking through text.
    :param reset: by default a reset-all code is added at the end of the
                  string which means that styles do not carry over. This
                  can be disabled to compose styles.

    .. versionchanged:: 8.0
        A non-string ``message`` is converted to a string.

    .. versionchanged:: 8.0
        Added support for 256 and RGB color codes.

    .. versionchanged:: 8.0
        Added the ``strikethrough``, ``italic``, and ``overline``
        parameters.

    .. versionchanged:: 7.0
        Added support for bright colors.

    .. versionadded:: 2.0
    """
    if not isinstance(text, str):
        text = str(text)

    bits = []

    if fg:
        try:
            bits.append(f"\033[{_interpret_color(fg)}m")
        except KeyError:
            raise TypeError(f"Unknown color {fg!r}") from None

    if bg:
        try:
            bits.append(f"\033[{_interpret_color(bg, 10)}m")
        except KeyError:
            raise TypeError(f"Unknown color {bg!r}") from None

    if bold is not None:
        bits.append(f"\033[{1 if bold else 22}m")
    if dim is not None:
        bits.append(f"\033[{2 if dim else 22}m")
    if underline is not None:
        bits.append(f"\033[{4 if underline else 24}m")
    if overline is not None:
        bits.append(f"\033[{53 if overline else 55}m")
    if italic is not None:
        bits.append(f"\033[{3 if italic else 23}m")
    if blink is not None:
        bits.append(f"\033[{5 if blink else 25}m")
    if reverse is not None:
        bits.append(f"\033[{7 if reverse else 27}m")
    if strikethrough is not None:
        bits.append(f"\033[{9 if strikethrough else 29}m")
    bits.append(text)
    if reset:
        bits.append(_ansi_reset_all)
    return "".join(bits)


def unstyle(text: str) -> str:
    """Removes ANSI styling information from a string. Usually it's not
    necessary to use this function as Click's echo function will
    automatically remove styling if necessary.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    return strip_ansi(text)


def secho(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.AnyStr]] = None,
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
    **styles: t.Any,
) -> None:
    """This function combines :func:`echo` and :func:`style` into one
    call. As such the following two calls are the same::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    All keyword arguments are forwarded to the underlying functions
    depending on which one they go with.

    Non-string types will be converted to :class:`str`. However,
    :class:`bytes` are passed directly to :meth:`echo` without applying
    style. If you want to style bytes that represent text, call
    :meth:`bytes.decode` first.

    .. versionchanged:: 8.0
        A non-string ``message`` is converted to a string. Bytes are
        passed through without style applied.

    .. versionadded:: 2.0
    """
    if message is not None and not isinstance(message, (bytes, bytearray)):
        message = style(message, **styles)

    return echo(message, file=file, nl=nl, err=err, color=color)


def edit(
    text: t.Optional[t.AnyStr] = None,
    editor: t.Optional[str] = None,
    env: t.Optional[t.Mapping[str, str]] = None,
    require_save: bool = True,
    extension: str = ".txt",
    filename: t.Optional[str] = None,
) -> t.Optional[t.AnyStr]:
    r"""Edits the given text in the defined editor. If an editor is given
    (should be the full path to the executable but the regular operating
    system search path is used for finding the executable) it overrides
    the detected editor. Optionally, some environment variables can be
    used. If the editor is closed without changes, `None` is returned. In
    case a file is edited directly the return value is always `None` and
    `require_save` and `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa. As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use. Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about. This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents. It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor

    ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)

    if filename is None:
        return ed.edit(text)

    ed.edit_file(filename)
    return None


def launch(url: str, wait: bool = False, locate: bool = False) -> int:
    """This function launches the given URL (or filename) in the default
    viewer application for this file type. If this is an executable, it
    might launch the executable in a new session. The return value is
    the exit code of the launched application. Usually, ``0`` indicates
    success.

    Examples::

        click.launch('https://click.palletsprojects.com/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: Wait for the program to exit before returning. This
        only works if the launched program blocks. In particular,
        ``xdg-open`` on Linux does not block.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located. This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    from ._termui_impl import open_url

    return open_url(url, wait=wait, locate=locate)


# If this is provided, getchar() calls into this instead. This is used
# for unittesting purposes.
_getchar: t.Optional[t.Callable[[bool], str]] = None


def getchar(echo: bool = False) -> str:
    """Fetches a single character from the terminal and returns it. This
    will always return a unicode character and under certain rare
    circumstances this might return more than one character. The
    situations which more than one character is returned is when for
    whatever reason multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.

    Note that this will always read from the terminal, even if something
    is piped into the standard input.

    Note for Windows: in rare cases when typing non-ASCII characters, this
    function might wait for a second character and then return both at once.
    This is because certain Unicode characters look like special-key markers.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal. The default is to not show it.
    """
    global _getchar

    if _getchar is None:
        from ._termui_impl import getchar as f

        _getchar = f

    return _getchar(echo)


def raw_terminal() -> t.ContextManager[int]:
    from ._termui_impl import raw_terminal as f

    return f()


def pause(info: t.Optional[str] = None, err: bool = False) -> None:
    """This command stops execution and waits for the user to press any
    key to continue. This is similar to the Windows batch "pause"
    command. If the program is not run through a terminal, this command
    will instead do nothing.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
        Added the `err` parameter.

    :param info: The message to print before pausing. Defaults to
        ``"Press any key to continue..."``.
    :param err: if set to message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    if not isatty(sys.stdin) or not isatty(sys.stdout):
        return

    if info is None:
        info = _("Press any key to continue...")

    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            pass
    finally:
        if info:
            echo(err=err)
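
The file above is a vendored copy of Click's standard terminal UI layer. For context, a minimal usage sketch of the helpers it defines (assumes a stock click 8.x install; the snippet is illustrative and not part of the deleted file):

```python
# Minimal usage sketch for the click.termui helpers shown above (assumes click 8.x).
import click

# Named, 256-color, and RGB foregrounds all route through _interpret_color().
click.echo(click.style("ok", fg="green"))
click.echo(click.style("indexed", fg=208))                      # 8-bit palette color
click.echo(click.style("truecolor", fg=(255, 12, 128), bg=117))

# secho() is style() + echo() in one call.
click.secho("Warning!", fg="yellow", bold=True, err=True)

# progressbar() wraps an iterable; only the label is printed when output is not a TTY.
with click.progressbar(range(1000), label="Processing") as bar:
    for _ in bar:
        pass
```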
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/ModifyUpload-c89cfce3.js
DELETED
@@ -1,2 +0,0 @@
import{S as g,e as w,s as _,J as p,K as o,L as i,p as k,M as m,n as u,A as b,N as z,O as B,k as $,U as v,o as C,z as d,u as I,v as h,y as E,x as M,B as j}from"./index-1d65707a.js";import"./Button-f155035a.js";import{I as L}from"./IconButton-d42f3661.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";function S(a){let e,s,t,l;return{c(){e=p("svg"),s=p("g"),t=p("path"),l=p("path"),o(t,"d","M18,6L6.087,17.913"),i(t,"fill","none"),i(t,"fill-rule","nonzero"),i(t,"stroke-width","2px"),o(s,"transform","matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"),o(l,"d","M4.364,4.364L19.636,19.636"),i(l,"fill","none"),i(l,"fill-rule","nonzero"),i(l,"stroke-width","2px"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 24"),o(e,"version","1.1"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),o(e,"xml:space","preserve"),o(e,"stroke","currentColor"),i(e,"fill-rule","evenodd"),i(e,"clip-rule","evenodd"),i(e,"stroke-linecap","round"),i(e,"stroke-linejoin","round")},m(n,r){k(n,e,r),m(e,s),m(s,t),m(e,l)},p:u,i:u,o:u,d(n){n&&b(e)}}}class U extends g{constructor(e){super(),w(this,e,null,S,_,{})}}function q(a){let e,s;return{c(){e=p("svg"),s=p("path"),o(s,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),o(e,"xmlns","http://www.w3.org/2000/svg"),o(e,"width","100%"),o(e,"height","100%"),o(e,"viewBox","0 0 24 24"),o(e,"fill","none"),o(e,"stroke","currentColor"),o(e,"stroke-width","1.5"),o(e,"stroke-linecap","round"),o(e,"stroke-linejoin","round"),o(e,"class","feather feather-edit-2")},m(t,l){k(t,e,l),m(e,s)},p:u,i:u,o:u,d(t){t&&b(e)}}}class y extends g{constructor(e){super(),w(this,e,null,q,_,{})}}function x(a){let e,s;return e=new L({props:{Icon:y,label:"Edit"}}),e.$on("click",a[3]),{c(){$(e.$$.fragment)},m(t,l){C(e,t,l),s=!0},p:u,i(t){s||(d(e.$$.fragment,t),s=!0)},o(t){h(e.$$.fragment,t),s=!1},d(t){M(e,t)}}}function A(a){let e,s,t,l,n=a[0]&&x(a);return t=new L({props:{Icon:U,label:"Clear"}}),t.$on("click",a[4]),{c(){e=z("div"),n&&n.c(),s=B(),$(t.$$.fragment),o(e,"class","svelte-19sk1im"),v(e,"not-absolute",!a[1]),i(e,"position",a[1]?"absolute":"static")},m(r,c){k(r,e,c),n&&n.m(e,null),m(e,s),C(t,e,null),l=!0},p(r,[c]){r[0]?n?(n.p(r,c),c&1&&d(n,1)):(n=x(r),n.c(),d(n,1),n.m(e,s)):n&&(I(),h(n,1,1,()=>{n=null}),E()),(!l||c&2)&&v(e,"not-absolute",!r[1]),c&2&&i(e,"position",r[1]?"absolute":"static")},i(r){l||(d(n),d(t.$$.fragment,r),l=!0)},o(r){h(n),h(t.$$.fragment,r),l=!1},d(r){r&&b(e),n&&n.d(),M(t)}}}function D(a,e,s){let{editable:t=!1}=e,{absolute:l=!0}=e;const n=j(),r=()=>n("edit"),c=f=>{n("clear"),f.stopPropagation()};return a.$$set=f=>{"editable"in f&&s(0,t=f.editable),"absolute"in f&&s(1,l=f.absolute)},[t,l,n,r,c]}class P extends g{constructor(e){super(),w(this,e,D,A,_,{editable:0,absolute:1})}}export{U as C,P as M};
//# sourceMappingURL=ModifyUpload-c89cfce3.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_cache_assets.py
DELETED
@@ -1,135 +0,0 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Union

from ..constants import HUGGINGFACE_ASSETS_CACHE


def cached_assets_path(
    library_name: str,
    namespace: str = "default",
    subfolder: str = "default",
    *,
    assets_dir: Union[str, Path, None] = None,
):
    """Return a folder path to cache arbitrary files.

    `huggingface_hub` provides a canonical folder path to store assets. This is the
    recommended way to integrate cache in a downstream library as it will benefit from
    the builtins tools to scan and delete the cache properly.

    The distinction is made between files cached from the Hub and assets. Files from the
    Hub are cached in a git-aware manner and entirely managed by `huggingface_hub`. See
    [related documentation](https://huggingface.co/docs/huggingface_hub/how-to-cache).
    All other files that a downstream library caches are considered to be "assets"
    (files downloaded from external sources, extracted from a .tar archive, preprocessed
    for training,...).

    Once the folder path is generated, it is guaranteed to exist and to be a directory.
    The path is based on 3 levels of depth: the library name, a namespace and a
    subfolder. Those 3 levels grants flexibility while allowing `huggingface_hub` to
    expect folders when scanning/deleting parts of the assets cache. Within a library,
    it is expected that all namespaces share the same subset of subfolder names but this
    is not a mandatory rule. The downstream library has then full control on which file
    structure to adopt within its cache. Namespace and subfolder are optional (would
    default to a `"default/"` subfolder) but library name is mandatory as we want every
    downstream library to manage its own cache.

    Expected tree:
    ```text
        assets/
        └── datasets/
        │   ├── SQuAD/
        │   │   ├── downloaded/
        │   │   ├── extracted/
        │   │   └── processed/
        │   ├── Helsinki-NLP--tatoeba_mt/
        │       ├── downloaded/
        │       ├── extracted/
        │       └── processed/
        └── transformers/
            ├── default/
            │   ├── something/
            ├── bert-base-cased/
            │   ├── default/
            │   └── training/
        hub/
        └── models--julien-c--EsperBERTo-small/
            ├── blobs/
            │   ├── (...)
            │   ├── (...)
            ├── refs/
            │   └── (...)
            └── [ 128]  snapshots/
                ├── 2439f60ef33a0d46d85da5001d52aeda5b00ce9f/
                │   ├── (...)
                └── bbc77c8132af1cc5cf678da3f1ddf2de43606d48/
                    └── (...)
    ```


    Args:
        library_name (`str`):
            Name of the library that will manage the cache folder. Example: `"dataset"`.
        namespace (`str`, *optional*, defaults to "default"):
            Namespace to which the data belongs. Example: `"SQuAD"`.
        subfolder (`str`, *optional*, defaults to "default"):
            Subfolder in which the data will be stored. Example: `extracted`.
        assets_dir (`str`, `Path`, *optional*):
            Path to the folder where assets are cached. This must not be the same folder
            where Hub files are cached. Defaults to `HF_HOME / "assets"` if not provided.
            Can also be set with `HUGGINGFACE_ASSETS_CACHE` environment variable.

    Returns:
        Path to the cache folder (`Path`).

    Example:
    ```py
    >>> from huggingface_hub import cached_assets_path

    >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="download")
    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/download')

    >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="extracted")
    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/extracted')

    >>> cached_assets_path(library_name="datasets", namespace="Helsinki-NLP/tatoeba_mt")
    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/Helsinki-NLP--tatoeba_mt/default')

    >>> cached_assets_path(library_name="datasets", assets_dir="/tmp/tmp123456")
    PosixPath('/tmp/tmp123456/datasets/default/default')
    ```
    """
    # Resolve assets_dir
    if assets_dir is None:
        assets_dir = HUGGINGFACE_ASSETS_CACHE
    assets_dir = Path(assets_dir).expanduser().resolve()

    # Avoid names that could create path issues
    for part in (" ", "/", "\\"):
        library_name = library_name.replace(part, "--")
        namespace = namespace.replace(part, "--")
        subfolder = subfolder.replace(part, "--")

    # Path to subfolder is created
    path = assets_dir / library_name / namespace / subfolder
    try:
        path.mkdir(exist_ok=True, parents=True)
    except (FileExistsError, NotADirectoryError):
        raise ValueError(f"Corrupted assets folder: cannot create directory because of an existing file ({path}).")

    # Return
    return path
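
The docstring above already shows the returned paths; the sketch below only illustrates the name sanitization and auto-creation behaviour (the exact cache root depends on `HF_HOME`, so paths here are assumptions):

```python
# Sketch: "/" in a namespace is rewritten to "--" and the folder is created on first use.
from huggingface_hub import cached_assets_path

path = cached_assets_path(
    library_name="datasets",
    namespace="Helsinki-NLP/tatoeba_mt",  # stored as "Helsinki-NLP--tatoeba_mt"
    subfolder="extracted",
)
path.joinpath("data.txt").write_text("hello")  # the directory is guaranteed to exist
```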
spaces/Datasculptor/DescriptionGPT/detic/data/custom_build_augmentation.py
DELETED
@@ -1,51 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from fvcore.common.file_io import PathManager
from PIL import Image


from detectron2.data import transforms as T
from .transforms.custom_augmentation_impl import EfficientDetResizeCrop

def build_custom_augmentation(cfg, is_train, scale=None, size=None, \
    min_size=None, max_size=None):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.

    Returns:
        list[Augmentation]
    """
    if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge':
        if is_train:
            min_size = cfg.INPUT.MIN_SIZE_TRAIN if min_size is None else min_size
            max_size = cfg.INPUT.MAX_SIZE_TRAIN if max_size is None else max_size
            sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            max_size = cfg.INPUT.MAX_SIZE_TEST
            sample_style = "choice"
        augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
    elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
        if is_train:
            scale = cfg.INPUT.SCALE_RANGE if scale is None else scale
            size = cfg.INPUT.TRAIN_SIZE if size is None else size
        else:
            scale = (1, 1)
            size = cfg.INPUT.TEST_SIZE
        augmentation = [EfficientDetResizeCrop(size, scale)]
    else:
        assert 0, cfg.INPUT.CUSTOM_AUG

    if is_train:
        augmentation.append(T.RandomFlip())
    return augmentation


build_custom_transform_gen = build_custom_augmentation
"""
Alias for backward-compatibility.
"""
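
A rough sketch of how such an augmentation list is typically applied with detectron2's transform API (the `cfg.INPUT.*` keys above are Detic-specific; the sizes below are placeholders, not values from the diff):

```python
# Sketch: applying a detectron2 augmentation list to one image (illustrative only).
import numpy as np
from detectron2.data import transforms as T

# Roughly what the ResizeShortestEdge branch returns at test time.
augmentations = [T.ResizeShortestEdge(800, 1333, "choice")]

image = np.zeros((480, 640, 3), dtype=np.uint8)             # HWC image
aug_input = T.AugInput(image)
transforms = T.AugmentationList(augmentations)(aug_input)    # mutates aug_input in place
resized = aug_input.image                                    # the same transforms can be applied to boxes/masks
```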
spaces/Datasculptor/StyleGAN-NADA/e4e/models/encoders/helpers.py
DELETED
@@ -1,140 +0,0 @@
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module

"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""


class Flatten(Module):
    def forward(self, input):
        return input.view(input.size(0), -1)


def l2_norm(input, axis=1):
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output


class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """


def get_block(in_channel, depth, num_units, stride=2):
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]


def get_blocks(num_layers):
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    return blocks


class SEModule(Module):
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


class bottleneck_IR(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


class bottleneck_IR_SE(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


def _upsample_add(x, y):
    """Upsample and add two feature maps.
    Args:
      x: (Variable) top feature map to be upsampled.
      y: (Variable) lateral feature map.
    Returns:
      (Variable) added feature map.
    Note in PyTorch, when input size is odd, the upsampled feature map
    with `F.upsample(..., scale_factor=2, mode='nearest')`
    maybe not equal to the lateral feature map size.
    e.g.
    original input size: [N,_,15,15] ->
    conv2d feature map size: [N,_,8,8] ->
    upsampled feature map size: [N,_,16,16]
    So we choose bilinear upsample which supports arbitrary output sizes.
    """
    _, _, H, W = y.size()
    return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
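
For reference, a small sketch of how these building blocks are usually assembled into an IR-SE trunk (shapes are illustrative; it assumes `get_blocks` and `bottleneck_IR_SE` from the module above are in scope):

```python
# Sketch: turning get_blocks() output into a stack of bottleneck_IR_SE modules (illustrative).
import torch
from torch.nn import Sequential

blocks = get_blocks(num_layers=50)            # 4 stages of Bottleneck(in_channel, depth, stride)
body = Sequential(*[
    bottleneck_IR_SE(b.in_channel, b.depth, b.stride)
    for stage in blocks
    for b in stage
])

x = torch.randn(1, 64, 112, 112)              # output of a 64-channel stem
features = body(x)                            # four stride-2 stages downsample 112 -> 7
print(features.shape)                         # torch.Size([1, 512, 7, 7])
```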
spaces/DhruvShek/chatlm/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Chatlm
emoji: 👀
colorFrom: gray
colorTo: indigo
sdk: streamlit
sdk_version: 1.19.0
app_file: app.py
pinned: false
license: cc-by-nc-4.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Docfile/open_llm_leaderboard/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Open LLM Leaderboard
emoji: 🏆
colorFrom: green
colorTo: indigo
sdk: gradio
sdk_version: 3.43.2
app_file: app.py
pinned: false
license: apache-2.0
duplicated_from: HuggingFaceH4/open_llm_leaderboard
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DragGan/DragGan-Inversion/stylegan_human/openpose/src/util.py
DELETED
@@ -1,106 +0,0 @@
import numpy as np
import math
import cv2
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
import cv2


def padRightDownCorner(img, stride, padValue):
    h = img.shape[0]
    w = img.shape[1]

    pad = 4 * [None]
    pad[0] = 0  # up
    pad[1] = 0  # left
    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right

    img_padded = img
    pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((pad_up, img_padded), axis=0)
    pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((pad_left, img_padded), axis=1)
    pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, pad_down), axis=0)
    pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, pad_right), axis=1)

    return img_padded, pad

# transfer caffe model to pytorch which will match the layer name


def transfer(model, model_weights):
    transfered_model_weights = {}
    for weights_name in model.state_dict().keys():
        transfered_model_weights[weights_name] = model_weights['.'.join(
            weights_name.split('.')[1:])]
    return transfered_model_weights

# draw the body keypoint and lims


def draw_bodypose(canvas, candidate, subset, show_number=False):
    stickwidth = 4
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
               [1, 16], [16, 18], [3, 17], [6, 18]]

    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [
                  0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
            if show_number:
                cv2.putText(canvas, f'{index}', (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                            (255, 255, 0), 1, cv2.LINE_AA)
    # calc and print average
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(
                length / 2), stickwidth), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

    return canvas

# get max index of 2d array


def npmax(array):
    arrayindex = array.argmax(1)
    arrayvalue = array.max(1)
    i = arrayvalue.argmax()
    j = arrayindex[i]
    return i, j

# get max index of 2d array


def npmax_with_score(array):
    arrayindex = array.argmax(1)
    arrayvalue = array.max(1)
    i = arrayvalue.argmax()
    j = arrayindex[i]
    score = array[i][j]
    return i, j, score
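
To illustrate the padding contract of `padRightDownCorner`, a short sketch (stride 8 matches the usual OpenPose downsampling; the image size here is made up):

```python
# Sketch: pad an image so both sides are multiples of the network stride, then undo it.
import numpy as np

img = np.zeros((367, 493, 3), dtype=np.uint8)
img_padded, pad = padRightDownCorner(img, stride=8, padValue=128)

print(img_padded.shape)   # (368, 496, 3) -- height and width rounded up to multiples of 8
print(pad)                # [0, 0, 1, 3]  -- [up, left, down, right] padding that was added

# After inference, crop the network output back to the original size:
original = img_padded[:img_padded.shape[0] - pad[2], :img_padded.shape[1] - pad[3], :]
assert original.shape == img.shape
```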
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|