Commit
·
de5fa00
1
Parent(s):
983febd
Update parquet files (step 77 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Pagemaker 7.0 Free Download Full Version for Windows XP - Step by Step Guide.md +0 -38
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Callofdutyfinesthourpcgamefullhighlycompressedtorrent The Ultimate Guide to Playing this Legendary Call of Duty Game on PC.md +0 -130
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2015 Crack Torrent.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Ajab Gazabb Love Full !!HOT!! Song Hd 720p.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Din5482splinestandardfiletypepdf19.md +0 -11
- spaces/1gistliPinn/ChatGPT4/Examples/Download Take A Walk Passion Pit.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Driver Carte Satellite Twinhan.md +0 -70
- spaces/1phancelerku/anime-remove-background/DRAGON BALL FighterZ NSP XCI DLC How to Download and Play on Egg NS Emulator.md +0 -89
- spaces/1phancelerku/anime-remove-background/Download Data One Piece Bounty Rush 2022 and Join the Pirate World of Luffy and His Crew.md +0 -177
- spaces/1phancelerku/anime-remove-background/Download Hot Lava Game APK and Customize Your Character.md +0 -120
- spaces/232labs/VToonify/vtoonify_model.py +0 -287
- spaces/7eu7d7/anime-ai-detect-fucker/attacker/PGD.py +0 -84
- spaces/AI-Dashboards/README/README.md +0 -8
- spaces/AIConsultant/MusicGen/audiocraft/solvers/musicgen.py +0 -699
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/dataset_utils.py +0 -311
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs_adv.py +0 -260
- spaces/Abhaykoul/HelpingAI-T3/index.html +0 -2
- spaces/Adapter/T2I-Adapter/ldm/models/diffusion/dpm_solver/dpm_solver.py +0 -1217
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/BaseSizer.d.ts +0 -739
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Factory.js +0 -13
- spaces/Akshay-Vs/GPT-Based-Generator/app.py +0 -47
- spaces/Alpaca233/SadTalker/src/utils/audio.py +0 -136
- spaces/Anandhju-jayan/image-captioning-cloned/README.md +0 -14
- spaces/Andres99/Tune-A-Video-Training-UI/Dockerfile +0 -57
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/dit.md +0 -35
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_ldm3d.py +0 -310
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddpm.py +0 -187
- spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py +0 -2
- spaces/Apex-X/nono/roop/processors/frame/face_swapper.py +0 -100
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/__init__.py +0 -0
- spaces/Ashrafb/codellama-34b/model.py +0 -57
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/parser.py +0 -294
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/pyproject.py +0 -179
- spaces/Beasto/Photo2Monet_Cyclegan/README.md +0 -13
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/requests/exceptions.py +0 -99
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/_json.py +0 -84
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py +0 -188
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/tomli/_types.py +0 -10
- spaces/CVPR/LIVE/pcg.h +0 -40
- spaces/CVPR/LIVE/thrust/thrust/detail/complex/arithmetic.h +0 -300
- spaces/CVPR/WALT/app.py +0 -82
- spaces/CVPR/WALT/mmdet/core/bbox/samplers/random_sampler.py +0 -78
- spaces/CVPR/lama-example/saicinpainting/training/data/datasets.py +0 -304
- spaces/CaliforniaHealthCollaborative/Mermaid.Md/index.html +0 -19
- spaces/ChandraMohanNayal/AutoGPT/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py +0 -105
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/README.md +0 -264
- spaces/CobaltZvc/HyperBot/README.md +0 -12
- spaces/CofAI/chat/client/js/highlightjs-copy.min.js +0 -1
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/transforms/build.py +0 -54
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Pagemaker 7.0 Free Download Full Version for Windows XP - Step by Step Guide.md
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Adobe Pagemaker 7.0 for Free on Windows XP</h1>
|
3 |
-
<p>Adobe Pagemaker 7.0 is a popular desktop publishing software that allows you to create professional-looking documents, such as newsletters, brochures, flyers, and reports. It is compatible with Windows XP and other older versions of Windows.</p>
|
4 |
-
<h2>adobe pagemaker 7.0 free download full version for windows xp</h2><br /><p><b><b>Download</b> → <a href="https://byltly.com/2uKvUy">https://byltly.com/2uKvUy</a></b></p><br /><br />
|
5 |
-
<p>If you want to download Adobe Pagemaker 7.0 for free on Windows XP, you can follow these steps:</p>
|
6 |
-
<ol>
|
7 |
-
<li>Go to <a href="https://archive.org/details/AdobePageMaker7.0">https://archive.org/details/AdobePageMaker7.0</a>, which is an online archive of old software and media.</li>
|
8 |
-
<li>Click on the "Download Options" button and choose the "ZIP" file format.</li>
|
9 |
-
<li>Save the ZIP file to your computer and extract it using a program like WinRAR or 7-Zip.</li>
|
10 |
-
<li>Open the extracted folder and double-click on the "Setup.exe" file to start the installation process.</li>
|
11 |
-
<li>Follow the instructions on the screen and enter the serial number provided in the "Serial.txt" file when prompted.</li>
|
12 |
-
<li>After the installation is complete, you can launch Adobe Pagemaker 7.0 from your Start menu or desktop shortcut.</li>
|
13 |
-
</ol>
|
14 |
-
<p>Congratulations! You have successfully downloaded and installed Adobe Pagemaker 7.0 for free on Windows XP. Enjoy creating stunning documents with this powerful software.</p>
|
15 |
-
|
16 |
-
<h2>Why Use Adobe Pagemaker 7.0?</h2>
|
17 |
-
<p>Adobe Pagemaker 7.0 is a versatile and easy-to-use software that offers many features and benefits for desktop publishing. Some of the reasons why you might want to use Adobe Pagemaker 7.0 are:</p>
|
18 |
-
<ul>
|
19 |
-
<li>It supports a wide range of file formats, such as PDF, EPS, TIFF, JPEG, and more.</li>
|
20 |
-
<li>It allows you to import and edit text and graphics from other applications, such as Microsoft Word, Excel, and Photoshop.</li>
|
21 |
-
<li>It provides templates and wizards to help you create various types of documents quickly and easily.</li>
|
22 |
-
<li>It enables you to customize your documents with fonts, colors, styles, borders, backgrounds, and more.</li>
|
23 |
-
<li>It lets you print your documents with high quality and accuracy, or export them to the web or email.</li>
|
24 |
-
</ul>
|
25 |
-
<p>With Adobe Pagemaker 7.0, you can unleash your creativity and produce professional-looking documents that suit your needs and preferences.</p>
|
26 |
-
<p></p>
|
27 |
-
|
28 |
-
<h2>How to Learn Adobe Pagemaker 7.0?</h2>
|
29 |
-
<p>If you are new to Adobe Pagemaker 7.0 or want to improve your skills, there are many resources available online that can help you learn how to use this software effectively. Some of the resources are:</p>
|
30 |
-
<ul>
|
31 |
-
<li>The official Adobe website <a href="https://www.adobe.com/products/pagemaker.html">https://www.adobe.com/products/pagemaker.html</a>, which provides information, tutorials, tips, and support for Adobe Pagemaker 7.0.</li>
|
32 |
-
<li>The online courses offered by Udemy <a href="https://www.udemy.com/topic/adobe-pagemaker/">https://www.udemy.com/topic/adobe-pagemaker/</a>, which teach you the basics and advanced features of Adobe Pagemaker 7.0 through video lectures and exercises.</li>
|
33 |
-
<li>The YouTube videos created by experts and users <a href="https://www.youtube.com/results?search_query=adobe+pagemaker+7.0">https://www.youtube.com/results?search_query=adobe+pagemaker+7.0</a>, which demonstrate how to use Adobe Pagemaker 7.0 for various purposes and projects.</li>
|
34 |
-
<li>The books and ebooks available on Amazon <a href="https://www.amazon.com/s?k=adobe+pagemaker+7.0">https://www.amazon.com/s?k=adobe+pagemaker+7.0</a>, which provide comprehensive guides and examples on how to use Adobe Pagemaker 7.0.</li>
|
35 |
-
</ul>
|
36 |
-
<p>By using these resources, you can learn Adobe Pagemaker 7.0 at your own pace and convenience, and become a proficient desktop publisher in no time.</p> ddb901b051<br />
|
37 |
-
<br />
|
38 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Callofdutyfinesthourpcgamefullhighlycompressedtorrent The Ultimate Guide to Playing this Legendary Call of Duty Game on PC.md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Call of Duty: Finest Hour - A Classic Shooter Game for PC</h1>
|
3 |
-
<p>Are you a fan of shooter games? Do you love to relive the epic battles of World War II? If yes, then you should definitely check out Call of Duty: Finest Hour, one of the best games in the Call of Duty franchise. In this article, I will tell you everything you need to know about this amazing game, how to download it for PC using a torrent file, and why you should go for a highly compressed version of it.</p>
|
4 |
-
<h2>callofdutyfinesthourpcgamefullhighlycompressedtorrent</h2><br /><p><b><b>Download File</b> ===> <a href="https://byltly.com/2uKxto">https://byltly.com/2uKxto</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<p>Call of Duty: Finest Hour is a first-person shooter game that was released in 2004 for PlayStation 2, Xbox, and GameCube. It is a spin-off of the original Call of Duty game that was released in 2003 for PC. The game lets you experience three different campaigns from the perspectives of American, British, and Soviet soldiers during World War II. You will fight in various locations such as North Africa, Russia, and Germany, using authentic weapons and vehicles from that era.</p>
|
7 |
-
<p>But how can you play this game on your PC? Well, there is a simple way to do that. You just need to download a torrent file that contains the game data and use a torrent client such as uTorrent or BitTorrent to download it. A torrent file is a small file that contains information about the files and folders that are shared by other users over a peer-to-peer network. By using a torrent file, you can download large files faster and more efficiently.</p>
|
8 |
-
<p>However, there is one problem. The original size of Call of Duty: Finest Hour is about 4 GB, which means it will take a lot of time and space to download and install it on your PC. That's why I recommend you to download a highly compressed version of it, which reduces the size of the game without compromising its quality. A highly compressed version of Call of Duty: Finest Hour is only about 1 GB, which means you can download it in minutes and save a lot of disk space.</p>
|
9 |
-
<h2>Gameplay and Features</h2>
|
10 |
-
<p>Now that you have downloaded the game, let's see what it has to offer. Call of Duty: Finest Hour has three main modes: Campaign, Multiplayer, and Bonus. In Campaign mode, you can play through 19 missions that span across three campaigns: Eastern Front (Soviet), Western Front (American), and North African Campaign (British). You can choose from four difficulty levels: Greenhorn, Regular, Hardened, and Extreme. Each mission has different objectives such as destroying enemy tanks, rescuing prisoners, or defending a position.</p>
|
11 |
-
<p>In Multiplayer mode, you can play with up to 16 players online or offline using split-screen or system link. You can choose from six modes: Deathmatch, Team Deathmatch, Capture the Flag, Search and Destroy, Headquarters, and Behind Enemy Lines. You can also customize your character's appearance, weapons, perks, and skills.</p>
|
12 |
-
<p>In Bonus mode, you can unlock extra content such as concept art, interviews, cheats, and historical footage by collecting medals in Campaign mode. You can also play two bonus missions: Operation Saturn (Soviet) and The Flag Must Fall (British).</p>
|
13 |
-
<p>As for the features, Call of Duty: Finest Hour boasts impressive graphics and sound effects that immerse you in the war atmosphere. You can see realistic explosions, smoke effects, shadows, lighting effects, and weather effects. You can also hear authentic sounds such as gunshots, explosions, voices, and music. The game also features a dynamic soundtrack that changes according to your actions and situations.</p>
|
14 |
-
<p>Another feature that makes Call of Duty: Finest Hour stand out is its variety of weapons and vehicles. You can use over 30 different weapons such as rifles, machine guns, shotguns, snipers, pistols, grenades, and rocket launchers. You can also drive or ride over 10 different vehicles such as tanks, jeeps, trucks, motorcycles, and planes.</p>
|
15 |
-
<p>download call of duty finest hour pc game highly compressed<br />
|
16 |
-
call of duty finest hour pc game full version torrent link<br />
|
17 |
-
how to install call of duty finest hour pc game in low size<br />
|
18 |
-
call of duty finest hour pc game free download full cracked<br />
|
19 |
-
call of duty finest hour pc game system requirements and features<br />
|
20 |
-
call of duty finest hour pc game gameplay and review<br />
|
21 |
-
call of duty finest hour pc game cheats and trainer<br />
|
22 |
-
call of duty finest hour pc game mods and patches<br />
|
23 |
-
call of duty finest hour pc game online multiplayer mode<br />
|
24 |
-
call of duty finest hour pc game best settings and tips<br />
|
25 |
-
call of duty finest hour pc game iso file download<br />
|
26 |
-
call of duty finest hour pc game rar password unlocker<br />
|
27 |
-
call of duty finest hour pc game direct download link<br />
|
28 |
-
call of duty finest hour pc game highly compressed 100mb<br />
|
29 |
-
call of duty finest hour pc game no survey no password<br />
|
30 |
-
call of duty finest hour pc game single link download<br />
|
31 |
-
call of duty finest hour pc game repack by fitgirl<br />
|
32 |
-
call of duty finest hour pc game skidrow crack only<br />
|
33 |
-
call of duty finest hour pc game error fix and solution<br />
|
34 |
-
call of duty finest hour pc game comparison with ps2 version<br />
|
35 |
-
call of duty finest hour pc game controller support and configuration<br />
|
36 |
-
call of duty finest hour pc game save file location and backup<br />
|
37 |
-
call of duty finest hour pc game soundtracks and wallpapers<br />
|
38 |
-
call of duty finest hour pc game bonus missions and secrets<br />
|
39 |
-
call of duty finest hour pc game all weapons and vehicles<br />
|
40 |
-
call of duty finest hour pc game walkthrough and guide<br />
|
41 |
-
call of duty finest hour pc game speedrun and record<br />
|
42 |
-
call of duty finest hour pc game graphics mod and enhancement<br />
|
43 |
-
call of duty finest hour pc game windows 10 compatibility fix<br />
|
44 |
-
call of duty finest hour pc game keyboard and mouse controls<br />
|
45 |
-
call of duty finest hour pc game screenshots and videos<br />
|
46 |
-
call of duty finest hour pc game download for mac and linux<br />
|
47 |
-
call of duty finest hour pc game alternative download sites<br />
|
48 |
-
call of duty finest hour pc game history and development<br />
|
49 |
-
call of duty finest hour pc game awards and ratings<br />
|
50 |
-
call of duty finest hour pc game trivia and facts<br />
|
51 |
-
call of duty finest hour pc game fan art and cosplay<br />
|
52 |
-
call of duty finest hour pc game merchandise and collectibles<br />
|
53 |
-
call of duty finest hour pc game forum and community<br />
|
54 |
-
call of duty finest hour pc game news and updates<br />
|
55 |
-
buy call of duty finest hour pc game original cd key cheap<br />
|
56 |
-
sell call of duty finest hour pc game used copy online<br />
|
57 |
-
trade call of duty finest hour pc game with other games<br />
|
58 |
-
rent call of duty finest hour pc game for a limited time<br />
|
59 |
-
stream call of duty finest hour pc game on twitch or youtube<br />
|
60 |
-
watch call of duty finest hour pc game movie adaptation or documentary<br />
|
61 |
-
read call of duty finest hour pc game novelization or comic book<br />
|
62 |
-
play call of duty finest hour pc game with friends or strangers<br />
|
63 |
-
enjoy call of duty finest hour pc game as a classic shooter</p>
|
64 |
-
<h2>Tips and Tricks</h2>
|
65 |
-
<p>Now that you know what Call of Duty: Finest Hour is all about, let me give you some tips and tricks on how to install and run it on your PC, how to optimize it for better performance, and how to use cheats and hacks in it.</p>
|
66 |
-
<h3>How to install and run Call of Duty: Finest Hour on PC?</h3>
|
67 |
-
<p>To install and run Call of Duty: Finest Hour on your PC, you need to follow these steps:</p>
|
68 |
-
<ol>
|
69 |
-
<li>Download a torrent file that contains Call of Duty: Finest Hour from a reliable source such as <a href="https://compressedlab.com/">CompressedLab</a>.</li>
|
70 |
-
<li>Download and install a torrent client such as uTorrent or BitTorrent on your PC.</li>
|
71 |
-
<li>Open the torrent file with your torrent client and select where you want to save the game data.</li>
|
72 |
-
<li>Wait for the download to finish.</li>
|
73 |
-
<li>Extract the game data using WinRAR or 7-Zip.</li>
|
74 |
-
<li>Open the extracted folder and run Setup.exe.</li>
|
75 |
-
<li>Follow the instructions on screen to install the game on your PC.</li>
|
76 |
-
<li>Run CODFH.exe from your desktop or start menu to launch the game.</li>
|
77 |
-
</ol>
|
78 |
-
<h3>How to optimize Call of Duty: Finest Hour for better performance?</h3>
|
79 |
-
<p>To optimize Call of Duty: Finest Hour for better performance on your PC, you need to adjust some settings in the game options menu. Here are some suggestions:</p>
|
80 |
-
<ul>
|
81 |
-
<li>Set your screen resolution according to your monitor size.</li>
|
82 |
-
<li>Set your graphics quality according to your PC specifications.</li>
|
83 |
-
<li>Turn off anti-aliasing, anisotropic filtering, and dynamic shadows if they cause lag or stuttering.</li>
|
84 |
-
<li>Turn on subtitles if you have trouble hearing or understanding dialogues.</li>
|
85 |
-
<li>Adjust your mouse sensitivity according to your preference.</li>
|
86 |
-
<li>Adjust your audio volume according to your environment.</li>
|
87 |
-
</ul>
|
88 |
-
<h3>How to use cheats and hacks in Call of Duty: Finest Hour?</h3>
|
89 |
-
<p>To use cheats and hacks in Call of Duty: Finest Hour, you need to enter some codes in the cheat menu or use some third-party tools. Here are some examples:</p>
|
90 |
-
<table border="1">
|
91 |
-
<tr><th>Cheat Code</th><th>Effect</th></tr>
|
92 |
-
<tr><td>BULLETZAP</td><td>Bullets ricochet off walls</td></tr>
|
93 |
-
<tr><td>DAYNIGHT</td><td>Cycle through day/night settings</td></tr>
|
94 |
-
<tr><td>GODMODE</td><td>Invincibility</td></tr>
|
95 |
-
<tr><td>MENUSCREEN</td><td>Show menu screen during gameplay</td></tr>
|
96 |
-
<tr><td>NOWEAPONS</td><td>No weapons except knife</td></tr>
|
97 |
-
<tr><td>SUPERHEAR</td><td>Hear enemies from far away</td></tr>
|
98 |
-
<tr><td>TIMELIMIT</td><td>No time limit in missions</td></tr>
|
99 |
-
<tr><td>ZOOM</td><td>Better zoom with sniper rifle</td></tr>
|
100 |
-
</table>
|
101 |
-
<p>To enter these codes, you need to go to Options > Game Options > Cheat Codes > Enter Cheat Code. ```html <p>enter these codes, you need to go to Options > Game Options > Cheat Codes > Enter Cheat Code. You can also unlock some cheats by collecting medals in Campaign mode.</p>
|
102 |
-
<p>If you want to use some hacks such as aimbot, wallhack, or speedhack, you need to download and install some third-party tools such as <a href="https://www.iwantcheats.net/call-of-duty-hacks-aimbot-cheats/">IWantCheats</a> or <a href="https://www.hackprovider.com/call-of-duty-hacks-cheats-aimbot/">HackProvider</a>. These tools can give you an unfair advantage over other players or enemies, but they can also get you banned from online servers or damage your PC. Use them at your own risk.</p>
|
103 |
-
<h2>Conclusion</h2>
|
104 |
-
<p>Call of Duty: Finest Hour is a classic shooter game that lets you experience the thrill and horror of World War II from different perspectives. You can download it for PC using a torrent file and enjoy its amazing gameplay and features. You can also optimize it for better performance and use cheats and hacks to spice up your experience.</p>
|
105 |
-
<p>So what are you waiting for? Download Call of Duty: Finest Hour today and join the fight for freedom and glory! You won't regret it!</p>
|
106 |
-
<p>Thank you for reading this article. I hope you found it helpful and informative. If you have any questions or feedback, please leave a comment below. I would love to hear from you.</p>
|
107 |
-
<h2>FAQs</h2>
|
108 |
-
<p>Here are some frequently asked questions about Call of Duty: Finest Hour:</p>
|
109 |
-
<ol>
|
110 |
-
<li><b>What are some of the best alternatives to Call of Duty: Finest Hour for PC?</b></li>
|
111 |
-
<p>Some of the best alternatives to Call of Duty: Finest Hour for PC are:</p>
|
112 |
-
<ul>
|
113 |
-
<li>Call of Duty 2 - The sequel to the original Call of Duty that features improved graphics, physics, and AI.</li>
|
114 |
-
<li>Medal of Honor: Allied Assault - A game that inspired Call of Duty that focuses on the Allied invasion of Europe.</li>
|
115 |
-
<li>Battlefield 1942 - A game that allows you to fight in large-scale battles with vehicles and aircraft.</li>
|
116 |
-
<li>Brothers in Arms: Road to Hill 30 - A game that emphasizes squad tactics and realism.</li>
|
117 |
-
<li>Wolfenstein: Enemy Territory - A game that offers a free online multiplayer mode with classes and objectives.</li>
|
118 |
-
</ul>
|
119 |
-
<li><b>Is Call of Duty: Finest Hour compatible with Windows 10?</b></li>
|
120 |
-
<p>Yes, Call of Duty: Finest Hour is compatible with Windows 10. However, you may need to run it in compatibility mode or use some patches or fixes to make it work properly. You can find some solutions online or on forums such as <a href="https://steamcommunity.com/app/2620/discussions/">Steam Community</a>.</p>
|
121 |
-
<li><b>How long is the gameplay of Call of Duty: Finest Hour?</b></li>
|
122 |
-
<p>The gameplay of Call of Duty: Finest Hour depends on your skill level, difficulty level, and mode. On average, it takes about 8 hours to complete the Campaign mode, and about 10 hours to unlock all the Bonus content. The Multiplayer mode can offer unlimited hours of gameplay depending on your preference.</p>
|
123 |
-
<li><b>How to play Call of Duty: Finest Hour online with other players?</b></li>
|
124 |
-
<p>To play Call of Duty: Finest Hour online with other players, you need to have a valid CD key and an internet connection. You can either join an existing server or host your own server using the game options menu. You can also use some third-party tools such as <a href="https://www.gameranger.com/">GameRanger</a> or <a href="https://www.xfire.com/">Xfire</a> to find and join online games.</p>
|
125 |
-
<li><b>Is Call of Duty: Finest Hour safe to download from torrent sites?</b></li>
|
126 |
-
<p>Downloading Call of Duty: Finest Hour from torrent sites is not recommended, as it may contain viruses, malware, or spyware that can harm your PC or steal your personal information. It may also violate the copyright laws and get you in trouble with the authorities. It is better to buy the game from a legitimate source such as <a href="https://store.steampowered.com/app/2620/Call_of_Duty/">Steam</a>.</p>
|
127 |
-
</ol>
|
128 |
-
```</p> 0a6ba089eb<br />
|
129 |
-
<br />
|
130 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2015 Crack Torrent.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Adobe After Effects Cc 2015 Crack Torrent</h2><br /><p><b><b>Download</b> ---> <a href="https://imgfil.com/2uxXDC">https://imgfil.com/2uxXDC</a></b></p><br /><br />
|
2 |
-
|
3 |
-
4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Ajab Gazabb Love Full !!HOT!! Song Hd 720p.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Ajab Gazabb Love full song hd 720p</h2><br /><p><b><b>Download File</b> — <a href="https://imgfil.com/2uy1SJ">https://imgfil.com/2uy1SJ</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Pop, Hindi, Hindi, Bollywood, Bollywood Hot Top Popular Bollywood Movie Songs Of All Time. Check The Best Songs Of All Time. So, I was also going to the same place! Rajini delivered a gangster-like performance that made her one of the most popular villains in Indian cinema. Why are you going to the same place everyday?. Start Your Free Trial. You could also find it at the networked computing store. Here are the 25 Best Bollywood Songs (with English Lyrics) of All Time!. The song is composed by the duo Afzal Brothers with lyrics written by Neeraj Sridhar. The song is sung by Arijit Singh, singer for Airtel. The story is about a love triangle between an innocent, handsome boy and a married woman. List of popular romantic Hindi movie songs Bollywood songs of all time. That's when I saw him standing there with two people. As it is not yet on any platform, I am doing a hard copy compilation of the Hindi film songs with English lyrics in the best possible way. The song will be sung by Arijit Singh and it will be a duet with Neha Kakkar. Music by Arijit Singh/ Akshay Kumar/ D. Atif Aslam, Lyrics by Kausar Munir, Sameer. 11. Today I'm going to compose about the biggest aaj sutrabhoomi thay me, jo bhi aaj sutrabhoomi thay me. The songs and the music video. rages/House of Lords - Jashanmer Jaisaa or Puriyaan Makaan (Saath - Yeh Jawaani) Bollywood Song. 45 Naseeb Mein Phirse. When I turned 18 years old, I was busy singing love songs. My songs have not only always been about love and romance but my music is also timeless. Watch now or download MP3. Welcome to the dream world where you are dating a prince with magical ways and getting married to a fairy who flies in the sky and is a princess. Quick Look: Bollywood Music Box Bollywood Radio Hindi Songs Ter Bijl Shamaakriyo Bollywood Hori Ki Music Koyi Kangal Bollywood C. So it is not only about buying a property in your name but also you have to invest in it with your heart and soul. You could find it at the market place. Songs from the same genre, see below. Many 4fefd39f24<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Din5482splinestandardfiletypepdf19.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
<h2>din5482splinestandardfiletypepdf19</h2><br /><p><b><b>Download File</b> ……… <a href="https://imgfil.com/2uxXWo">https://imgfil.com/2uxXWo</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
. Level code B22[/url]Just Cause 2 Download patch [url= flissinneple . Ru_Mods_Just_Cause_2_Redux_v_1.2.0.00_M_Pack_v1_5.
|
4 |
-
RuTracker.org » Other simulators » Download torrent Just Cause 2 v.1.4.0.0 [RUS].
|
5 |
-
Just Cause 2 is the sequel to the popular game about how .
|
6 |
-
RuTracker.org » Other simulators » Download torrent Just Cause 2 v 1.4.0.0 [RUS] + DLC [by .
|
7 |
-
RuTracker.org » Simulations » Download torrent Just Cause 2 v.1.4.0.0 [RUS] + DLC [by .
|
8 |
-
Just Cause 2 is the sequel to the popular game about how . 8a78ff9644<br />
|
9 |
-
<br />
|
10 |
-
<br />
|
11 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Take A Walk Passion Pit.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Download Take A Walk Passion Pit</h2><br /><p><b><b>Download File</b> ☆ <a href="https://imgfil.com/2uy0qx">https://imgfil.com/2uy0qx</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Driver Carte Satellite Twinhan.md
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Driver carte satellite twinhan: A Guide for Satellite TV Lovers</h1>
|
3 |
-
<p>If you love watching satellite TV on your computer, you may need a driver carte satellite twinhan to make it possible. A driver carte satellite twinhan is a software that allows your computer to communicate with your satellite TV tuner card and receive satellite signals. Without a driver carte satellite twinhan, your satellite TV tuner card may not work properly or at all.</p>
|
4 |
-
<p>In this article, we will tell you what a driver carte satellite twinhan is, where to find it, how to install it, and how to use it. We will also give you some tips and tricks on how to troubleshoot some common issues that may occur with your driver carte satellite twinhan. Read on to learn more.</p>
|
5 |
-
<h2>Driver carte satellite twinhan</h2><br /><p><b><b>Download</b> ✯✯✯ <a href="https://imgfil.com/2uy19b">https://imgfil.com/2uy19b</a></b></p><br /><br />
|
6 |
-
<h2>What Is Driver carte satellite twinhan</h2>
|
7 |
-
<p>A driver carte satellite twinhan is a software that enables your computer to recognize and use your satellite TV tuner card. A satellite TV tuner card is a device that you can insert into your computer's PCI or USB slot and connect to an antenna, cable, or dish to receive TV signals from satellites orbiting the Earth. A satellite TV tuner card can provide you with hundreds of channels from different countries and regions.</p>
|
8 |
-
<p>A driver carte satellite twinhan is essential for your satellite TV tuner card to work properly on your computer. It acts as a bridge between your hardware and your operating system, allowing them to exchange data and commands. It also provides you with some features and functions that you can use to control and customize your TV viewing experience.</p>
|
9 |
-
<p>There are different types of driver carte satellite twinhan for different models and brands of satellite TV tuner cards. You need to find the one that matches your specific device and operating system. Otherwise, you may encounter some compatibility or performance issues.</p>
|
10 |
-
<h2>Where to Find Driver carte satellite twinhan</h2>
|
11 |
-
<p>The best place to find a driver carte satellite twinhan is from the official website of your satellite TV tuner card manufacturer or vendor. There, you can find the latest and most suitable driver carte satellite twinhan for your specific model and operating system. You can also get some support and guidance from the official website if you have any questions or problems.</p>
|
12 |
-
<p>Alternatively, you can also find a driver carte satellite twinhan from some other sources online, such as software download sites or torrent sites. However, you need to be careful and choose wisely. Some of these sources may not be trustworthy or reliable. You may end up with a corrupted or infected file that does not work or even harm your computer.</p>
|
13 |
-
<h2>How to Install Driver carte satellite twinhan</h2>
|
14 |
-
<p>Once you have found the driver carte satellite twinhan file, you need to install it on your computer. The installation process may vary depending on the source and format of the file, but generally, you can follow these steps:</p>
|
15 |
-
<ol>
|
16 |
-
<li>Locate the driver carte satellite twinhan file on your computer and double-click on it.</li>
|
17 |
-
<li>Follow the instructions on the screen to complete the installation process.</li>
|
18 |
-
<li>Restart your computer if prompted.</li>
|
19 |
-
<li>Connect your satellite TV tuner card to your computer and check if it works properly.</li>
|
20 |
-
</ol>
|
21 |
-
<p>Note: You may need to uninstall any previous or incompatible drivers before installing the new driver carte satellite twinhan.</p>
|
22 |
-
<h2>How to Use Driver carte satellite twinhan</h2>
|
23 |
-
<p>After installing the driver carte satellite twinhan, you can use it to watch and record satellite TV on your computer. You will need a software that can access and control your satellite TV tuner card, such as Media Portal , which is a free and open-source media center software that supports various TV tuners and formats.</p>
|
24 |
-
<p></p>
|
25 |
-
<p>To use Media Portal with your driver carte satellite twinhan, you need to follow these steps:</p>
|
26 |
-
<ol>
|
27 |
-
<li>Download and install Media Portal from its official website.</li>
|
28 |
-
<li>Launch Media Portal and go to Settings > Television > TV Servers.</li>
|
29 |
-
<li>Select your satellite TV tuner card from the list and click on Scan for Channels.</li>
|
30 |
-
<li>Wait for Media Portal to scan and find all the available channels from your satellite signal.</li>
|
31 |
-
<li>Go back to the main menu and select Television > My TV > Watch TV.</li>
|
32 |
-
<li>Choose a channel from the list and enjoy watching satellite TV on your computer.</li>
|
33 |
-
</ol>
|
34 |
-
<p>Note: You may need to adjust some settings and preferences according to your needs and preferences, such as language, subtitles, aspect ratio, etc.</p>
|
35 |
-
<h2>How to Troubleshoot Driver carte satellite twinhan Problems</h2>
|
36 |
-
<p>Sometimes, you may encounter some problems or errors with your driver carte satellite twinhan that prevent you from using it properly or at all. Here are some common problems and solutions that may help you fix them:</p>
|
37 |
-
<ul>
|
38 |
-
<li>If you get an error message that says "Driver not found" or "Device not detected", you may need to check your connection, installation, or compatibility of your driver carte satellite twinhan. Make sure your satellite TV tuner card is properly connected to your computer, your driver carte satellite twinhan is correctly installed and updated, and your operating system is compatible with your driver carte satellite twinhan.</li>
|
39 |
-
<li>If you get an error message that says "No signal" or "Weak signal", you may need to check your antenna, cable, or dish settings. Make sure they are properly aligned, connected, and configured to receive the best possible signal from your satellite provider.</li>
|
40 |
-
<li>If you get an error message that says "No channels found" or "Channel not available", you may need to check</p>
|
41 |
-
<h2>What Are the Benefits of Driver carte satellite twinhan</h2>
|
42 |
-
<p>Using a driver carte satellite twinhan can bring you many benefits, such as:</p>
|
43 |
-
<ul>
|
44 |
-
<li>You can enjoy watching satellite TV on your computer with high-quality video and audio.</li>
|
45 |
-
<li>You can access hundreds of channels from different countries and regions, including news, sports, movies, music, documentaries, etc.</li>
|
46 |
-
<li>You can record your favorite programs and watch them later or share them with others.</li>
|
47 |
-
<li>You can customize your TV viewing experience with various settings and options, such as subtitles, aspect ratio, parental control, etc.</li>
|
48 |
-
<li>You can save money and space by using your computer as a TV instead of buying a separate TV set and receiver.</li>
|
49 |
-
</ul>
|
50 |
-
<h2>What Are the Drawbacks of Driver carte satellite twinhan</h2>
|
51 |
-
<p>Using a driver carte satellite twinhan can also have some drawbacks, such as:</p>
|
52 |
-
<ul>
|
53 |
-
<li>You may need to pay for a subscription or a license to use some satellite TV services or software.</li>
|
54 |
-
<li>You may need to buy a compatible satellite TV tuner card and an antenna, cable, or dish to receive satellite signals.</li>
|
55 |
-
<li>You may need to update your driver carte satellite twinhan regularly to keep up with the changes and improvements of your satellite TV tuner card and software.</li>
|
56 |
-
<li>You may experience some technical issues or errors with your driver carte satellite twinhan that may affect your TV viewing experience.</li>
|
57 |
-
</ul>
|
58 |
-
<h2>How to Choose the Best Driver carte satellite twinhan</h2>
|
59 |
-
<p>There are many factors that you need to consider when choosing the best driver carte satellite twinhan for your needs and preferences, such as:</p>
|
60 |
-
<ul>
|
61 |
-
<li>The compatibility of your driver carte satellite twinhan with your satellite TV tuner card and operating system.</li>
|
62 |
-
<li>The features and functions of your driver carte satellite twinhan that suit your TV viewing needs and preferences.</li>
|
63 |
-
<li>The reliability and security of your driver carte satellite twinhan that protect your computer and data from viruses and malware.</li>
|
64 |
-
<li>The availability and accessibility of your driver carte satellite twinhan that provide you with easy download, installation, update, and support.</li>
|
65 |
-
<li>The reputation and reviews of your driver carte satellite twinhan that reflect its quality and performance.</li>
|
66 |
-
</ul>
|
67 |
-
<h2>Conclusion</h2>
|
68 |
-
<p>A driver carte satellite twinhan is a software that enables your computer to use your satellite TV tuner card and watch satellite TV on your computer. It is important to find, install, use, and update the right driver carte satellite twinhan for your specific device and operating system. A driver carte satellite twinhan can provide you with many benefits, such as access to hundreds of channels, high-quality video and audio, recording and customization features, and more. However, a driver carte satellite twinhan can also have some drawbacks, such as compatibility or performance issues, technical errors, or security risks. Therefore, you need to be careful and choose wisely when using a driver carte satellite twinhan. You can also consider some alternatives to driver carte satellite twinhan, such as standalone receivers, online streaming services, or mobile apps. However, these alternatives may have their own advantages and disadvantages as well. In conclusion, a driver carte satellite twinhan is a useful software for satellite TV lovers who want to enjoy watching satellite TV on their computer. However, it also requires some knowledge and skills to use it properly and safely.</p> 3cee63e6c2<br />
|
69 |
-
<br />
|
70 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/DRAGON BALL FighterZ NSP XCI DLC How to Download and Play on Egg NS Emulator.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Dragon Ball FighterZ NSP Download: How to Play the Best Dragon Ball Game on Nintendo Switch</h1>
|
3 |
-
<p>If you are a fan of Dragon Ball and fighting games, you probably have heard of <strong>Dragon Ball FighterZ</strong>, the latest and greatest game based on the iconic anime series. But did you know that you can play it on your Nintendo Switch, even if it is not available on the official eShop or in your region? In this article, we will show you how to download and install Dragon Ball FighterZ NSP file on your Switch using a custom firmware, so you can enjoy this amazing game on the go.</p>
|
4 |
-
<h2>dragon ball fighterz nsp download</h2><br /><p><b><b>Download</b> ➡ <a href="https://jinyurl.com/2uNTQD">https://jinyurl.com/2uNTQD</a></b></p><br /><br />
|
5 |
-
<h2>What is Dragon Ball FighterZ?</h2>
|
6 |
-
<p>Dragon Ball FighterZ is a 3v3 fighting game developed by Arc System Works, the makers of Blazblue and Guilty Gear. It features a roster of 24 characters from the Dragon Ball universe, each with their own unique moves and abilities. You can choose your favorite fighters and form your own team, or let the game pick them for you randomly. You can also switch between them during battle, or call them for assist attacks.</p>
|
7 |
-
<p>One of the most impressive aspects of Dragon Ball FighterZ is its <strong>visual style</strong>, which replicates the anime perfectly. The game uses 3D models that look like 2D sprites, with cel-shaded graphics and dynamic camera angles. The animations are fluid and faithful to the source material, and the special effects are spectacular. The game also features original voice acting from both Japanese and English cast members, as well as an epic soundtrack.</p>
|
8 |
-
<p>Another highlight of Dragon Ball FighterZ is its <strong>combat system</strong>, which is simple but deep. The game uses four buttons for light, medium, heavy, and special attacks, as well as universal commands for super moves, homing dashes, vanishes, and more. The game is easy to learn, but hard to master, as it requires timing, strategy, and teamwork. The game also has various modes for different types of players, such as story mode, arcade mode, online mode, training mode, and more.</p>
|
9 |
-
<h2>What is NSP and why do you need it?</h2>
|
10 |
-
<p>NSP stands for <strong>Nintendo Switch Package</strong> and it is a file format for digital games that can be installed on your Switch using a custom firmware. A custom firmware is a modified version of the official Switch software that <p>allows you to run homebrew apps, emulators, backups, and more. NSP files can be downloaded from various sources on the internet and installed on your Switch using a NSP installer app.</p>
|
11 |
-
<p>dragon ball fighterz nsp xci rom download<br />
|
12 |
-
dragon ball fighterz nsp update download<br />
|
13 |
-
dragon ball fighterz nsp dlc download<br />
|
14 |
-
dragon ball fighterz nsp switch download<br />
|
15 |
-
dragon ball fighterz nsp free download<br />
|
16 |
-
dragon ball fighterz nsp torrent download<br />
|
17 |
-
dragon ball fighterz nsp mega download<br />
|
18 |
-
dragon ball fighterz nsp google drive download<br />
|
19 |
-
dragon ball fighterz nsp 1fichier download<br />
|
20 |
-
dragon ball fighterz nsp reddit download<br />
|
21 |
-
dragon ball fighterz nsp full game download<br />
|
22 |
-
dragon ball fighterz nsp latest version download<br />
|
23 |
-
dragon ball fighterz nsp english patch download<br />
|
24 |
-
dragon ball fighterz nsp online play download<br />
|
25 |
-
dragon ball fighterz nsp emulator download<br />
|
26 |
-
dragon ball fighterz xci to nsp converter download<br />
|
27 |
-
dragon ball fighterz xci vs nsp download<br />
|
28 |
-
dragon ball fighterz xci file size download<br />
|
29 |
-
dragon ball fighterz xci romsmania download<br />
|
30 |
-
dragon ball fighterz xci switch-xci.com download<br />
|
31 |
-
dragon ball fighterz xci base game download<br />
|
32 |
-
dragon ball fighterz xci update 1.27 download<br />
|
33 |
-
dragon ball fighterz xci all dlc download<br />
|
34 |
-
dragon ball fighterz xci torrent magnet download<br />
|
35 |
-
dragon ball fighterz xci mega.nz download<br />
|
36 |
-
dragon ball fighterz xci google drive link download<br />
|
37 |
-
dragon ball fighterz xci 1fichier premium download<br />
|
38 |
-
dragon ball fighterz xci reddit request download<br />
|
39 |
-
dragon ball fighterz xci full game cracked download<br />
|
40 |
-
dragon ball fighterz xci latest version patched download<br />
|
41 |
-
dragon ball fighterz xci english language download<br />
|
42 |
-
dragon ball fighterz xci online multiplayer download<br />
|
43 |
-
dragon ball fighterz xci emulator pc download<br />
|
44 |
-
how to install dragon ball fighterz nsp on switch<br />
|
45 |
-
how to update dragon ball fighterz nsp on switch<br />
|
46 |
-
how to install dlc for dragon ball fighterz nsp on switch<br />
|
47 |
-
how to play online with dragon ball fighterz nsp on switch<br />
|
48 |
-
how to fix error code 2002 4518 on dragon ball fighterz nsp on switch<br />
|
49 |
-
how to convert xci to nsp for dragon ball fighterz on switch<br />
|
50 |
-
how to install dragon ball fighterz xci on switch sx os<br />
|
51 |
-
how to update dragon ball fighterz xci on switch sx os<br />
|
52 |
-
how to install dlc for dragon ball fighterz xci on switch sx os<br />
|
53 |
-
how to play online with dragon ball fighterz xci on switch sx os<br />
|
54 |
-
how to fix error code 2002 4518 on dragon ball fighterz xci on switch sx os<br />
|
55 |
-
how to convert nsp to xci for dragon ball fighterz on switch sx os</p>
|
56 |
-
<p>NSP files are useful because they allow you to play games that are not available on the official eShop or that are region-locked. For example, Dragon Ball FighterZ is not available on the eShop in some countries, such as Japan, China, and Korea. By downloading and installing the NSP file, you can bypass this restriction and play the game on your Switch. NSP files also let you play games that are not yet released in your region, or that are cheaper in other regions.</p>
|
57 |
-
<h2>How to download and install Dragon Ball FighterZ NSP on your Switch?</h2>
|
58 |
-
<p>Before you can download and install Dragon Ball FighterZ NSP on your Switch, you need to prepare your Switch for custom firmware installation. This involves backing up your NAND, creating an emuMMC partition, and installing a custom firmware of your choice. You also need to enable sigpatches to bypass Nintendo's security checks. This process is not very difficult, but it requires some technical knowledge and caution. If you are not familiar with it, we recommend you to follow a detailed guide from a reputable source, such as <a href="">this one</a>.</p>
|
59 |
-
<p>Once you have prepared your Switch for custom firmware installation, you can proceed to download and install Dragon Ball FighterZ NSP on your Switch. Here are the steps you need to follow:</p>
|
60 |
-
<h3>Step 1: Download Dragon Ball FighterZ NSP file from a reliable source</h3>
|
61 |
-
<p>The first step is to download the Dragon Ball FighterZ NSP file from a reliable source. There are many websites that offer NSP files for download, but not all of them are safe and trustworthy. Some of them may contain malware, viruses, or corrupted files that can harm your Switch or your PC. Therefore, you need to be careful and choose a reputable site that has positive reviews and feedback from other users.</p>
|
62 |
-
<p>One of the best sites to download Dragon Ball FighterZ NSP file is <a href="">nsw2u.com</a>, which is a popular and trusted site for Switch games and updates. You can find the link to the game's page <a href="">here</a>. To download the NSP file from this site, you need to use a VPN and a torrent client, such as qBittorrent or uTorrent. A VPN is a service that encrypts your internet traffic and changes your IP address, so you can access blocked or restricted sites and protect your privacy. A torrent client is a software that allows you to download files from peer-to-peer networks.</p>
|
63 |
-
<p>To download the NSP file from nsw2u.com, follow these steps:</p>
|
64 |
-
<ul>
|
65 |
-
<li>Download and install a VPN of your choice on your PC. We recommend using NordVPN or ExpressVPN, as they are fast, secure, and easy to use.</li>
|
66 |
-
<li>Connect to a VPN server in a country where nsw2u.com is not blocked, such as Canada or the Netherlands.</li>
|
67 |
-
<li>Download and install a torrent client of your choice on your PC. We recommend using qBittorrent or uTorrent, as they are lightweight, user-friendly, and free.</li>
|
68 |
-
<li>Go to the game's page on nsw2u.com and click on the magnet link icon next to the NSP file name. This will open the torrent client and start downloading the file.</li>
|
69 |
-
</ul>
|
70 |
-
<p>Before downloading the NSP file, make sure to check the file size and the required firmware version for the game. The file size for Dragon Ball FighterZ NSP is about 6.5 GB, and the required firmware version is 11.0.1 or higher. If your Switch's firmware version is lower than that, you need to update it using ChoiDujourNX or another homebrew app.</p>
|
71 |
-
<p>Also, make sure to verify the file integrity using a checksum tool or a NSP verifier app after downloading it. A checksum tool is a software that calculates a unique code for a file based on its content, which can be used to check if the file is authentic and unmodified. A NSP verifier app is a software that checks if a NSP file is valid and compatible with your Switch. You can use tools like MD5 & SHA Checksum Utility or NSC Builder for this purpose.</p>
|
72 |
-
<h3>Step 2: Transfer and install Dragon Ball FighterZ NSP file on your Switch</h3>
|
73 |
-
<p>The second step is to transfer and install Dragon Ball FighterZ NSP file on your Switch. There are two ways to do this: using a USB cable or a microSD card adapter. A USB cable is a wire that connects your Switch to your PC, while a microSD card adapter is a device that allows you to insert your Switch's microSD card into your PC's card reader. Both methods require a file manager or a NSP installer app on your Switch, such as Goldleaf or Tinfoil. A file manager or a NSP installer app is a software that allows you to browse, copy, delete, and install files on your Switch. To transfer and install Dragon Ball FighterZ NSP file on your Switch using a USB cable, follow these steps: - Connect your Switch to your PC using a USB-C to USB-A cable. Make sure your Switch is in RCM mode and has the custom firmware running. - Launch the file manager or the NSP installer app on your Switch. We recommend using Goldleaf, as it is simple and compatible with most NSP files. - On your PC, download and run Quark, which is a companion app for Goldleaf that enables USB communication. You can find the link to Quark here. - On your Switch, select the USB option in Goldleaf and browse to the folder where you downloaded the Dragon Ball FighterZ NSP file. - Select the NSP file and choose to install it on your Switch's SD card or internal memory. Wait for the installation to finish. To transfer and install Dragon Ball FighterZ NSP file on your Switch using a microSD card adapter, follow these steps: - Turn off your Switch and remove the microSD card from it. Insert the microSD card into the microSD card adapter and plug it into your PC's card reader. - On your PC, open the microSD card folder and copy the Dragon Ball FighterZ NSP file to it. You can create a subfolder for it if you want. - Safely eject the microSD card adapter from your PC and remove the microSD card from it. Insert the microSD card back into your Switch and turn it on. - Launch the file manager or the NSP installer app on your Switch. We recommend using Tinfoil, as it is fast and supports multiple formats. - On your Switch, select the SD card option in Tinfoil and browse to the folder where you copied the Dragon Ball FighterZ NSP file. - Select the NSP file and choose to install it on your Switch's SD card or internal memory. Wait for the installation to finish. <h2>Conclusion</h2>
|
74 |
-
<p>Dragon Ball FighterZ is one of the best fighting games ever made, and you can play it on your Nintendo Switch using a custom firmware and a NSP file. In this article, we showed you how to download and install Dragon Ball FighterZ NSP file on your Switch using two methods: USB cable or microSD card adapter. We also explained what is Dragon Ball FighterZ, what is NSP, and why you need it. We hope you found this article helpful and informative, and that you enjoy playing Dragon Ball FighterZ on your Switch.</p>
|
75 |
-
<h2>FAQs</h2>
|
76 |
-
<p>Here are some frequently asked questions about Dragon Ball FighterZ NSP download:</p>
|
77 |
-
<ul>
|
78 |
-
<li><strong>Q: Is downloading and installing Dragon Ball FighterZ NSP legal?</strong></li>
|
79 |
-
<li>A: Downloading and installing Dragon Ball FighterZ NSP is not legal, as it violates Nintendo's terms of service and intellectual property rights. You should only download and install Dragon Ball FighterZ NSP if you own a legitimate copy of the game or if you live in a region where the game is not available.</li>
|
80 |
-
<li><strong>Q: Is downloading and installing Dragon Ball FighterZ NSP safe?</strong></li>
|
81 |
-
<li>A: Downloading and installing Dragon Ball FighterZ NSP is not safe, as it exposes you to various risks, such as malware, viruses, corrupted files, bans, bricks, and more. You should only download and install Dragon Ball FighterZ NSP from reliable sources, verify the file integrity, use a VPN, backup your NAND, create an emuMMC partition, enable sigpatches, and avoid going online.</li>
|
82 |
-
<li><strong>Q: How can I update Dragon Ball FighterZ after installing it from NSP?</strong></li>
|
83 |
-
<li>A: You can update Dragon Ball FighterZ after installing it from NSP by downloading and installing the update NSP file from the same source as the game NSP file. You can also use homebrew apps like DBI or Awoo Installer to download and install updates directly from Nintendo's servers.</li>
|
84 |
-
<li><strong>Q: How can I play online with Dragon Ball FighterZ after installing it from NSP?</strong></li>
|
85 |
-
<li>A: You can play online with Dragon Ball FighterZ after installing it from NSP by using homebrew apps like 90DNS or Incognito to block Nintendo's servers and avoid bans. You can also use homebrew apps like Lan Play or XLink Kai to play online with other custom firmware users.</li <li><strong>Q: How can I add DLC characters to Dragon Ball FighterZ after installing it from NSP?</strong></li>
|
86 |
-
<li>A: You can add DLC characters to Dragon Ball FighterZ after installing it from NSP by downloading and installing the DLC NSP files from the same source as the game NSP file. You can also use homebrew apps like NUT or NS-USBloader to download and install DLC directly from Nintendo's servers.</li>
|
87 |
-
</ul></p> 401be4b1e0<br />
|
88 |
-
<br />
|
89 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Data One Piece Bounty Rush 2022 and Join the Pirate World of Luffy and His Crew.md
DELETED
@@ -1,177 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Data One Piece Bounty Rush 2022</h1>
|
3 |
-
<p>If you are a fan of the popular manga and anime series One Piece, you might want to try out Data One Piece Bounty Rush, a 3D anime battle arena treasure looting game set in the pirate world of One Piece. In this game, you can join Luffy, Zoro, Nami, Sanji, and other famous characters from the series in 4 vs 4 real-time PvP battles to rush and loot treasures of berry coins for victory. You can also customize your own pirate crew by mixing and matching characters from different classes, elements, skills, and traits. You can also experience the One Piece universe in beautiful 3D graphics and battle at iconic locations from the anime.</p>
|
4 |
-
<h2>download data one piece bounty rush 2022</h2><br /><p><b><b>DOWNLOAD</b> ✺ <a href="https://jinyurl.com/2uNRSV">https://jinyurl.com/2uNRSV</a></b></p><br /><br />
|
5 |
-
<p>However, before you can enjoy all these features, you need to download the game data first. The game data is a large file that contains all the necessary information and resources for the game to run smoothly on your device. By downloading the game data, you can reduce loading times, improve performance, and save storage space on your device. In this article, we will show you how to download Data One Piece Bounty Rush 2022 on Android and iOS devices. We will also show you how to update the game data when new versions are released. Finally, we will give you some tips and tricks for playing Data One Piece Bounty Rush and becoming a pirate king.</p>
|
6 |
-
<h2>What is Data One Piece Bounty Rush?</h2>
|
7 |
-
<p>Data One Piece Bounty Rush is a mobile game based on the One Piece franchise, developed and published by Bandai Namco Entertainment. The game is played in real-time with four-player teams in battle mode, in which the team that has the most treasure at the end wins. There are five random treasure locations on a map, and you and your teammates will have to quickly move to them and capture them by tapping the flag icon. You will also have to fight your enemies and push them <p>away from the treasure. You can use your character's skills and traits to gain an advantage in combat, such as stunning, freezing, or knocking back your opponents. You can also use items and boosts to enhance your character's stats and abilities. The game features over 100 characters from the One Piece series, each with their own class, element, skills, and traits. You can choose from four classes: Fighter, Warrior, Supporter, and Shooter. Each class has its own strengths and weaknesses, and you can mix and match them to create a balanced team. You can also choose from five elements: Red, Green, Blue, Black, and Yellow. Each element has an advantage over another element, except for Black and Yellow, which are neutral. You can use the element wheel to see which element is stronger or weaker against another element. You can also upgrade your characters by leveling them up, enhancing their skills, and equipping them with medals and boosts.</p>
|
8 |
-
<h2>Why Download Data One Piece Bounty Rush?</h2>
|
9 |
-
<p>Downloading Data One Piece Bounty Rush is highly recommended for anyone who wants to play the game without any issues or interruptions. The game data is a large file that contains all the necessary information and resources for the game to run smoothly on your device. By downloading the game data, you can enjoy the following benefits:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Faster loading times: The game data will reduce the amount of time it takes to load the game and its features. You will not have to wait for long periods of time to start playing or switch between modes.</li>
|
12 |
-
<li>Smoother performance: The game data will improve the performance of the game on your device. You will not experience any lagging, crashing, or freezing while playing. You will also be able to play at higher graphics settings without compromising the quality of the game.</li>
|
13 |
-
<li>Saving storage space: The game data will save storage space on your device by compressing the file size of the game. You will not have to worry about running out of space or deleting other apps or files to make room for the game.</li>
|
14 |
-
</ul>
|
15 |
-
<p>Downloading Data One Piece Bounty Rush is easy and simple. All you need is a stable internet connection and enough storage space on your device. In the next sections, we will show you how to download Data One Piece Bounty Rush on Android and iOS devices.</p> <h2>How to Download Data One Piece Bounty Rush on Android</h2>
|
16 |
-
<p>If you have an Android device, you can download Data One Piece Bounty Rush from the Google Play Store. However, before you do that, you need to make sure that your device meets the minimum and recommended specifications for the game. Here are the requirements for downloading Data One Piece Bounty Rush on Android:</p>
|
17 |
-
<p>How to download data one piece bounty rush 2022 on android<br />
|
18 |
-
Download data one piece bounty rush 2022 apk<br />
|
19 |
-
Best characters and medals for one piece bounty rush 2022<br />
|
20 |
-
One piece bounty rush 2022 tips and tricks<br />
|
21 |
-
Download data one piece bounty rush 2022 mod<br />
|
22 |
-
One piece bounty rush 2022 tier list<br />
|
23 |
-
Download data one piece bounty rush 2022 for pc<br />
|
24 |
-
One piece bounty rush 2022 update<br />
|
25 |
-
Download data one piece bounty rush 2022 ios<br />
|
26 |
-
One piece bounty rush 2022 reddit<br />
|
27 |
-
Download data one piece bounty rush 2022 hack<br />
|
28 |
-
One piece bounty rush 2022 discord<br />
|
29 |
-
Download data one piece bounty rush 2022 latest version<br />
|
30 |
-
One piece bounty rush 2022 review<br />
|
31 |
-
Download data one piece bounty rush 2022 offline<br />
|
32 |
-
One piece bounty rush 2022 gameplay<br />
|
33 |
-
Download data one piece bounty rush 2022 cheats<br />
|
34 |
-
One piece bounty rush 2022 wiki<br />
|
35 |
-
Download data one piece bounty rush 2022 obb<br />
|
36 |
-
One piece bounty rush 2022 codes<br />
|
37 |
-
Download data one piece bounty rush 2022 bandai namco<br />
|
38 |
-
One piece bounty rush 2022 events<br />
|
39 |
-
Download data one piece bounty rush 2022 google play<br />
|
40 |
-
One piece bounty rush 2022 guide<br />
|
41 |
-
Download data one piece bounty rush 2022 error<br />
|
42 |
-
One piece bounty rush 2022 characters<br />
|
43 |
-
Download data one piece bounty rush 2022 free<br />
|
44 |
-
One piece bounty rush 2022 medals<br />
|
45 |
-
Download data one piece bounty rush 2022 new characters<br />
|
46 |
-
One piece bounty rush 2022 release date<br />
|
47 |
-
Download data one piece bounty rush 2022 size<br />
|
48 |
-
One piece bounty rush 2022 support tags<br />
|
49 |
-
Download data one piece bounty rush 2022 online<br />
|
50 |
-
One piece bounty rush 2022 database<br />
|
51 |
-
Download data one piece bounty rush 2022 patch notes<br />
|
52 |
-
One piece bounty rush 2022 news<br />
|
53 |
-
Download data one piece bounty rush 2022 system requirements<br />
|
54 |
-
One piece bounty rush 2022 forum<br />
|
55 |
-
Download data one piece bounty rush 2022 lag fix<br />
|
56 |
-
One piece bounty rush 2022 trailer</p>
|
57 |
-
<h3>Requirements for Downloading Data One Piece Bounty Rush on Android</h3>
|
58 |
-
<table>
|
59 |
-
<tr>
|
60 |
-
<th>Minimum Specifications</th>
|
61 |
-
<th>Recommended Specifications</th>
|
62 |
-
</tr>
|
63 |
-
<tr>
|
64 |
-
<td>OS: Android 6.0 or higher</td>
|
65 |
-
<td>OS: Android 8.0 or higher</td>
|
66 |
-
</tr>
|
67 |
-
<tr>
|
68 |
-
<td>RAM: 2 GB or more</td>
|
69 |
-
<td>RAM: 4 GB or more</td>
|
70 |
-
</tr>
|
71 |
-
<tr>
|
72 |
-
<td>Storage: 3 GB or more</td>
|
73 |
-
<td>Storage: 5 GB or more</td>
|
74 |
-
</tr>
|
75 |
-
<tr>
|
76 |
-
<td>CPU: Snapdragon 625 or equivalent</td>
|
77 |
-
<td>CPU: Snapdragon 845 or equivalent</td>
|
78 |
-
</tr>
|
79 |
-
<tr>
|
80 |
-
<td>GPU: Adreno 506 or equivalent</td>
|
81 |
-
<td>GPU: Adreno 630 or equivalent</td>
|
82 |
-
</tr>
|
83 |
-
<tr>
|
84 |
-
<td>Internet: Wi-Fi or 4G LTE</td>
|
85 |
-
<td>Internet: Wi-Fi or 5G NR</td>
|
86 |
-
</tr>
|
87 |
-
</table>
|
88 |
-
<p>If your device meets these requirements, you can proceed to download Data One Piece Bounty Rush on Android. Here are the steps to follow:</p>
|
89 |
-
<h3>Steps for Downloading Data One Piece Bounty Rush on Android</h3>
|
90 |
-
<ol>
|
91 |
-
<li>Open the Google Play Store app on your device and search for "Data One Piece Bounty Rush". Alternatively, you can use this link to go directly to the game page.</li>
|
92 |
-
<li>Tap on the "Install" button and wait for the game to download and install on your device. The game size is about 1.5 GB, so make sure you have enough storage space and a stable internet connection.</li>
|
93 |
-
<li>Once the game is installed, tap on the "Open" button to launch the game. You will see a splash screen with the game logo and a loading bar.</li>
|
94 |
-
<li>When the loading bar is full, you will see a pop-up window asking you to download the game data. Tap on the "Download" button to start downloading the game data. The game data size is about 1.5 GB, so make sure you have enough storage space and a stable internet connection.</li>
|
95 |
-
<li>You will see a progress bar showing the percentage of the game data downloaded. You can also see the estimated time remaining and the download speed. You can pause and resume the download at any time by tapping on the "Pause" and "Resume" buttons.</li>
|
96 |
-
<li>When the download is complete, you will see a pop-up window saying "Download Complete". Tap on the "OK" button to finish downloading the game data.</li>
|
97 |
-
<li>You will then see a pop-up window asking you to agree to the terms of service and privacy policy of the game. Read them carefully and tap on the "Agree" button if you accept them.</li>
|
98 |
-
<li>You will then see a pop-up window asking you to choose your region and language. Select your preferred options and tap on the "OK" button.</li>
|
99 |
-
<li>You will then see a pop-up window asking you to create a user name. Enter a unique and appropriate user name and tap on the "OK" button.</li>
|
100 |
-
<li>You will then see a pop-up window asking you to select your favorite character from the One Piece series. Choose one of the four options and tap on the "OK" button.</li>
|
101 |
-
<li>You will then see a tutorial video explaining the basics of the game. Watch it carefully and tap on the "Skip" button if you want to skip it.</li>
|
102 |
-
<li>You will then enter the main menu of the game, where you can access various modes and features of Data One Piece Bounty Rush. Congratulations, you have successfully downloaded Data One Piece Bounty Rush on Android!</li> </ol> <h2>How to Download Data One Piece Bounty Rush on iOS</h2> <p>If you have an iOS device, you can download Data One Piece Bounty Rush from the App Store. The process is similar to Android: install the game, launch it, and download the game data when prompted.</p> <ol> <li>When the download is complete, you will see a pop-up window saying "Download Complete". Tap on the "OK" button to finish downloading the game data.</li>
|
103 |
-
<li>You will then see a pop-up window asking you to agree to the terms of service and privacy policy of the game. Read them carefully and tap on the "Agree" button if you accept them.</li>
|
104 |
-
<li>You will then see a pop-up window asking you to choose your region and language. Select your preferred options and tap on the "OK" button.</li>
|
105 |
-
<li>You will then see a pop-up window asking you to create a user name. Enter a unique and appropriate user name and tap on the "OK" button.</li>
|
106 |
-
<li>You will then see a pop-up window asking you to select your favorite character from the One Piece series. Choose one of the four options and tap on the "OK" button.</li>
|
107 |
-
<li>You will then see a tutorial video explaining the basics of the game. Watch it carefully and tap on the "Skip" button if you want to skip it.</li>
|
108 |
-
<li>You will then enter the main menu of the game, where you can access various modes and features of Data One Piece Bounty Rush. Congratulations, you have successfully downloaded Data One Piece Bounty Rush on iOS!</li>
|
109 |
-
</ol>
|
110 |
-
<h2>How to Update Data One Piece Bounty Rush</h2>
|
111 |
-
<p>Data One Piece Bounty Rush is constantly updated with new features, characters, events, and bug fixes. To enjoy the latest version of the game, you need to update the game data regularly. Updating the game data is easy and simple. All you need is a stable internet connection and enough storage space on your device. In this section, we will show you how to update Data One Piece Bounty Rush on Android and iOS devices.</p>
|
112 |
-
<h3>How to Check for Updates for Data One Piece Bounty Rush</h3>
|
113 |
-
<p>The first step to update Data One Piece Bounty Rush is to check if there are any updates available for the game data. You can do this by following these steps:</p>
|
114 |
-
<ol>
|
115 |
-
<li>Launch Data One Piece Bounty Rush on your device and enter the main menu.</li>
|
116 |
-
<li>Tap on the "Settings" icon at the top right corner of the screen.</li>
|
117 |
-
<li>Tap on the "Update" tab at the bottom of the screen.</li>
|
118 |
-
<li>You will see a message saying "Checking for updates..." and a loading bar.</li>
|
119 |
-
<li>If there are any updates available, you will see a message saying "Update available" and a download size.</li>
|
120 |
-
<li>If there are no updates available, you will see a message saying "No updates available" and a current version number.</li>
|
121 |
-
</ol>
|
122 |
-
<p>You can also enable automatic updates for Data One Piece Bounty Rush by tapping on the "Auto Update" switch at the top of the screen. This will allow the game to download and install any updates automatically when they are released. However, this may consume more data and battery power, so make sure you have a stable internet connection and enough storage space on your device.</p>
|
123 |
-
<h3>How to Update Data One Piece Bounty Rush Manually</h3>
|
124 |
-
<p>If you have disabled automatic updates or if they are not working properly, you can update Data One Piece Bounty Rush manually by following these steps:</p>
|
125 |
-
<ol>
|
126 |
-
<li>Launch Data One Piece Bounty Rush on your device and enter the main menu.</li>
|
127 |
-
<li>Tap on the "Settings" icon at the top right corner of the screen.</li>
|
128 |
-
<li>Tap on the "Update" tab at the bottom of the screen.</li>
|
129 |
-
<li>If there are any updates available, tap on the "Download" button to start downloading the update data. The update data size may vary depending on the version and content of the update.</li>
|
130 |
-
<li>You will see a progress bar showing the percentage of the update data downloaded. You can also see the estimated time remaining and the download speed. You can pause and resume the download at any time by tapping on the "Pause" and "Resume" buttons.</li>
|
131 |
-
<li>When the download is complete, you will see a pop-up window saying "Download Complete". Tap on the "OK" button to finish downloading the update data.</li>
|
132 |
-
<li>You will then see a pop-up window saying "Update Complete". Tap on the "OK" button to finish updating the game data.</li>
|
133 |
-
<li>You will then enter the main menu of the game, where you can access the latest features and content of Data One Piece Bounty Rush. Congratulations, you have successfully updated Data One Piece Bounty Rush!</li>
|
134 |
-
</ol>
|
135 |
-
<h2>Tips and Tricks for Playing Data One Piece Bounty Rush</h2>
|
136 |
-
<p>Now that you have downloaded and updated Data One Piece Bounty Rush, you are ready to play the game and have fun. However, if you want to improve your skills and win more battles, you might want to learn some tips and tricks for playing Data One Piece Bounty Rush. In this section, we will share with you some useful tips and tricks for playing Data One Piece Bounty Rush, such as how to choose the best characters for your team, how to use medals and boosts effectively, and how to win league battles and loot treasures.</p>
|
137 |
-
<h3>How to Choose the Best Characters for Your Team</h3>
|
138 |
-
<p>One of the most important aspects of Data One Piece Bounty Rush is choosing the right characters for your team. You can have up to four characters in your team, and you can switch between them during battle. You can also customize your team by mixing and matching characters from different classes, elements, skills, and traits. Here are some tips on how to choose the best characters for your team:</p>
|
139 |
-
<ul>
|
140 |
-
<li>Consider the class of your characters: There are four classes in Data One Piece Bounty Rush: Fighter, Warrior, Supporter, and Shooter. Each class has its own strengths and weaknesses, and you should balance them in your team. Fighters are good at close-range combat and have high attack power. Warriors are good at mid-range combat and have high defense power. Supporters are good at healing and buffing their allies and debuffing their enemies. Shooters are good at long-range combat and have high speed and mobility.</li>
|
141 |
-
<li>Consider the element of your characters: There are five elements in Data One Piece Bounty Rush: Red, Green, Blue, Black, and Yellow. Each element has an advantage over another element, except for Black and Yellow, which are neutral. You can use the element wheel to see which element is stronger or weaker against another element. You should choose characters that have an element advantage over your enemies, or at least avoid having an element disadvantage.</li>
|
142 |
-
<li>Consider the skills and traits of your characters: Each character has two skills and two traits that can be activated during battle. Skills are special abilities that can deal damage, heal, buff, debuff, or stun your enemies or allies. Traits are passive abilities that can enhance your character's stats or grant them certain effects. You should choose characters that have skills and traits that suit your playstyle and strategy. For example, if you like to be aggressive and deal a lot of damage, you might want to choose characters that have skills that can stun or knock back your enemies, or traits that can increase your attack power or critical rate.</li>
|
143 |
-
</ul>
|
144 |
-
<h3>How to Use Medals and Boosts Effectively</h3>
|
145 |
-
<p>Another important aspect of Data One Piece Bounty Rush is using medals and boosts effectively. Medals are items that can be equipped to your characters to enhance their stats and abilities. Boosts are items that can be used before or during battle to give you an edge over your enemies. Here are some tips on how to use medals and boosts effectively:</p>
|
146 |
-
<ul>
|
147 |
-
<li>Choose medals that match your character's class and element: There are different types of medals in Data One Piece Bounty Rush, such as Fighter medals, Warrior medals, Supporter medals, Shooter medals, Red medals, Green medals, Blue medals, Black medals, and Yellow medals. Each type of medal has different effects and bonuses for your character. You should choose medals that match your character's class and element to maximize their potential. For example, if you have a Red Fighter character, you might want to equip them with Red Fighter medals that can increase their attack power and critical rate.</li>
|
148 |
-
<li>Combine medals that have synergy effects: Some medals have synergy effects that can activate when you equip them together. These effects can give you additional bonuses or special abilities for your character. You can check the synergy effects of your medals by tapping on the "Medal Set" button at the bottom of the screen. You should combine medals that have synergy effects that suit your playstyle and strategy. For example, if you want to be more durable and tanky, you might want to combine medals that have synergy effects that can increase your defense power and HP recovery.</li>
|
149 |
-
<li>Use boosts wisely: Boosts are items that can be used before or during battle to give you an edge over your enemies. There are different types of boosts in Data One Piece Bounty Rush, such as Attack Boosts, Defense Boosts, Speed Boosts, Skill Boosts, and Berry Boosts. Each type of boost has a different effect and duration for your character. You can use up to three boosts per battle, and you can buy more boosts with berries or real money. You should use boosts wisely and strategically, depending on the situation and your goals. For example, if you want to capture treasures faster, you might want to use Speed Boosts to increase your movement speed. If you want to deal more damage, you might want to use Attack Boosts or Skill Boosts to increase your attack power or skill damage.</li>
|
150 |
-
</ul>
|
151 |
-
<h3>How to Win League Battles and Loot Treasures</h3>
|
152 |
-
<p>The main mode of Data One Piece Bounty Rush is League Battle, where you can compete with other players in 4 vs 4 real-time PvP battles to rush and loot treasures of berry coins for victory. League Battle is a fun and exciting mode that tests your skills and strategy as a pirate. Here are some tips on how to win League Battles and loot treasures:</p>
|
153 |
-
<ul>
|
154 |
-
<li>Strategize your team formation: Before you enter a League Battle, you can choose your team formation by tapping on the "Team" button at the bottom of the screen. You can select up to four characters for your team, and you can switch between them during battle. You can also see the class and element of each character, as well as their skills and traits. You should strategize your team formation based on the map, the enemy team, and your own preferences. You should balance your team with different classes and elements, and choose characters that complement each other's skills and traits.</li>
|
155 |
-
<li>Capture and defend treasures: The objective of League Battle is to capture and defend treasures on the map. There are five random treasure locations on each map, and you and your teammates will have to quickly move to them and capture them by tapping the flag icon. You will also have to fight your enemies and push them away from the treasure. The team that has the most treasure at the end of the battle wins. You should capture and defend treasures strategically, depending on the situation and your goals. You should prioritize capturing treasures that are closer to your spawn point or have less enemies around them. You should also defend treasures that are more valuable or have more enemies around them.</li>
|
156 |
-
<li>Earn more berries: Berries are the currency of Data One Piece Bounty Rush, which you can use to buy boosts, upgrade characters, or summon new characters. You can earn berries by playing League Battles, completing missions, or logging in daily. The amount of berries you earn depends on various factors, such as your rank, your score, your win rate, and your MVP rate. You should earn more berries by playing League Battles regularly, improving your skills and strategy, winning more battles, and becoming MVP more often.</li>
|
157 |
-
</ul>
|
158 |
-
<h1>Conclusion</h1>
|
159 |
-
<p>Data One Piece Bounty Rush is a 3D anime battle arena treasure looting game set in the pirate world of One Piece. In this game, you can join Luffy, Zoro, Nami, Sanji, and other famous characters from the series in 4 vs 4 real-time PvP battles to rush and loot treasures of berry coins for victory. You can also customize your own pirate crew by mixing and matching characters from different classes, elements, skills, and traits. You can also experience the One Piece universe in beautiful 3D graphics and battle at iconic locations from the anime.</p>
|
160 |
-
<p>However, before you can enjoy all these features, you need to download the game data first. The game data is a large file that contains all the necessary information and resources for the game to run smoothly on your device. By downloading the game data, you can reduce loading times, improve performance, and save storage space on your device. In this article, we showed you how to download Data One Piece Bounty Rush 2022 on Android and iOS devices. We also showed you how to update the game data when new versions are released. Finally, we gave you some tips and tricks for playing Data One Piece Bounty Rush and becoming a pirate king.</p>
|
161 |
-
<p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. If you liked this article, please share it with your friends and fellow pirates. And if you are ready to play Data One Piece Bounty Rush, download it now from the Google Play Store or the App Store and join the fun!</p>
|
162 |
-
<h2>FAQs</h2>
|
163 |
-
<p>Here are some frequently asked questions about Data One Piece Bounty Rush:</p>
|
164 |
-
<ol>
|
165 |
-
<li>Q: How can I get new characters in Data One Piece Bounty Rush?</li>
|
166 |
-
<li>A: You can get new characters in Data One Piece Bounty Rush by summoning them with scout tickets or rainbow diamonds. Scout tickets are items that can be used to summon characters from specific banners or events. Rainbow diamonds are the premium currency of Data One Piece Bounty Rush, which can be used to summon characters from any banner or event. You can get scout tickets and rainbow diamonds by playing League Battles, completing missions, logging in daily, or buying them with real money.</li>
|
167 |
-
<li>Q: How can I level up my characters in Data One Piece Bounty Rush?</li>
|
168 |
-
<li>A: You can level up your characters in Data One Piece Bounty Rush by using character fragments or EXP orbs. Character fragments are items that can be used to level up specific characters. EXP orbs are items that can be used to level up any character. You can get character fragments and EXP orbs by playing League Battles, completing missions, logging in daily, or buying them with berries or real money.</li>
|
169 |
-
<li>Q: How can I enhance my character's skills in Data One Piece Bounty Rush?</li>
|
170 |
-
<li>A: You can enhance your character's skills in Data One Piece Bounty Rush by using skill orbs or skill scrolls. Skill orbs are items that can be used to enhance any skill of any character. Skill scrolls are items that can be used to enhance specific skills of specific characters. You can get skill orbs and skill scrolls by playing League Battles, completing missions, logging in daily, or buying them with berries or real money.</li>
|
171 |
-
<li>Q: How can I join a crew in Data One Piece Bounty Rush?</li>
|
172 |
-
<li>A: You can join a crew in Data One Piece Bounty Rush by tapping on the "Crew" button at the bottom of the screen. You can then search for a crew by name, ID, rank, or language. You can also create your own crew by tapping on the "Create" button at the top of the screen. You will need 100 rainbow diamonds to create a crew. By joining a crew, you can chat with other members, participate in crew battles, and earn crew points and rewards.</li>
|
173 |
-
<li>Q: How can I contact the customer support of Data One Piece Bounty Rush?</li>
|
174 |
-
<li>A: You can contact the customer support of Data One Piece Bounty Rush by tapping on the "Settings" icon at the top right corner of the screen. Then tap on the "Support" tab at the bottom of the screen. Then tap on the "Contact Us" button at the top of the screen. You will then see a form where you can enter your name, email address, inquiry type, inquiry details, and attachments. Fill out the form and tap on the "Send" button to submit your inquiry.</li>
|
175 |
-
</ol></p> 401be4b1e0<br />
|
176 |
-
<br />
|
177 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Hot Lava Game APK and Customize Your Character.md
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Hot Lava Game Download APK: How to Play the Ultimate Floor is Lava Challenge on Your Android Device</h1>
|
3 |
-
<p>Do you remember playing the floor is lava game as a kid? You know, the one where you had to jump from one furniture to another without touching the ground, pretending that it was hot lava that would burn you if you did. Well, now you can relive that childhood fun on your Android device with Hot Lava Game, a thrilling and addictive platformer game that will test your skills and reflexes. In this article, we will tell you everything you need to know about Hot Lava Game, how to download and install it on your Android device, and how to play and enjoy it.</p>
|
4 |
-
<h2>What is Hot Lava Game?</h2>
|
5 |
-
<p>Hot Lava Game is a 3D platformer game developed by Jbro Studios, inspired by the popular floor is lava challenge. In this game, you have to navigate through various environments, such as a school, a park, a mall, and more, by jumping from one platform to another, avoiding the hot lava that covers the floor. You can also collect coins, gems, and power-ups along the way, as well as unlock new outfits and accessories for your character.</p>
|
6 |
-
<h2>hot lava game download apk</h2><br /><p><b><b>Download Zip</b> ✑ ✑ ✑ <a href="https://jinyurl.com/2uNTBM">https://jinyurl.com/2uNTBM</a></b></p><br /><br />
|
7 |
-
<h3>The concept and gameplay of Hot Lava Game</h3>
|
8 |
-
<p>The concept of Hot Lava Game is simple: don't touch the floor. The gameplay is fast-paced and challenging, as you have to time your jumps carefully and avoid obstacles and enemies that can knock you off your platform. You also have to balance your speed and accuracy, as some platforms are moving or disappearing, and some levels have a time limit. You can also perform tricks and stunts in mid-air, such as flips, spins, and slides, to earn extra points and coins.</p>
|
9 |
-
<h3>The features and benefits of Hot Lava Game</h3>
|
10 |
-
<p>Hot Lava Game has many features and benefits that make it an enjoyable and rewarding game to play. Some of them are:</p>
|
11 |
-
<ul>
|
12 |
-
<li>It has stunning graphics and sound effects that create an immersive and realistic experience.</li>
|
13 |
-
<li>It has a variety of environments and themes that keep the game fresh and exciting.</li>
|
14 |
-
<li>It has a simple and intuitive control system that makes it easy to play.</li>
|
15 |
-
<li>It has a multiplayer mode that allows you to compete with other players from around the world.</li>
|
16 |
-
<li>It has a leaderboard and achievements system that tracks your progress and rewards your performance.</li>
|
17 |
-
<li>It has a customization option that lets you personalize your character with different outfits and accessories.</li>
|
18 |
-
</ul>
|
19 |
-
<h2>How to Download and Install Hot Lava Game APK on Your Android Device</h2>
|
20 |
-
<p>If you want to play Hot Lava Game on your Android device, you will need to download and install its APK file. An APK file is an application package file that contains all the data and files needed to run an app on an Android device. However, before you download and install Hot Lava Game APK, there are some requirements and precautions that you need to follow.</p>
|
21 |
-
<h3>The requirements and precautions for downloading Hot Lava Game APK</h3>
|
22 |
-
<p>The requirements for downloading Hot Lava Game APK are:</p>
|
23 |
-
<ul>
|
24 |
-
<li>You need an Android device that runs on Android 4.4 or higher.</li>
|
25 |
-
<li>You need at least 100 MB of free storage space on your device.</li>
|
26 |
-
<li>You need a stable internet connection to download the APK file.</li>
|
27 |
-
</ul> <p>The precautions for downloading Hot Lava Game APK are:</p>
|
28 |
-
<ul>
|
29 |
-
<li>You need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
30 |
-
<li>You need to download the APK file from a trusted and reliable source, such as the official website of Hot Lava Game or a reputable APK download site. Avoid downloading the APK file from unknown or suspicious links, as they may contain malware or viruses that can harm your device.</li>
|
31 |
-
<li>You need to scan the APK file with an antivirus or anti-malware app before installing it, to ensure that it is safe and clean.</li>
|
32 |
-
</ul>
|
33 |
-
<h3>The steps for downloading and installing Hot Lava Game APK</h3>
|
34 |
-
<p>The steps for downloading and installing Hot Lava Game APK are:</p>
|
35 |
-
<p>hot lava floor game free download apk<br />
|
36 |
-
hot lava game android apk download<br />
|
37 |
-
hot lava game download apk mod<br />
|
38 |
-
hot lava game download apk for pc<br />
|
39 |
-
hot lava game download apk latest version<br />
|
40 |
-
hot lava game download apk offline<br />
|
41 |
-
hot lava game download apk obb<br />
|
42 |
-
hot lava game download apk pure<br />
|
43 |
-
hot lava game download apk uptodown<br />
|
44 |
-
hot lava game download apk hack<br />
|
45 |
-
hot lava game multiplayer apk download<br />
|
46 |
-
hot lava game online apk download<br />
|
47 |
-
hot lava game 3d apk download<br />
|
48 |
-
hot lava game simulator apk download<br />
|
49 |
-
hot lava game survival apk download<br />
|
50 |
-
hot lava game adventure apk download<br />
|
51 |
-
hot lava game parkour apk download<br />
|
52 |
-
hot lava game action apk download<br />
|
53 |
-
hot lava game arcade apk download<br />
|
54 |
-
hot lava game runner apk download<br />
|
55 |
-
hot lava floor 2 game download apk<br />
|
56 |
-
hot lava floor 3d game download apk<br />
|
57 |
-
hot lava floor challenge game download apk<br />
|
58 |
-
hot lava floor escape game download apk<br />
|
59 |
-
hot lava floor impossible game download apk<br />
|
60 |
-
how to download hot lava game apk<br />
|
61 |
-
where to download hot lava game apk<br />
|
62 |
-
best site to download hot lava game apk<br />
|
63 |
-
safe site to download hot lava game apk<br />
|
64 |
-
trusted site to download hot lava game apk<br />
|
65 |
-
free site to download hot lava game apk<br />
|
66 |
-
easy way to download hot lava game apk<br />
|
67 |
-
fast way to download hot lava game apk<br />
|
68 |
-
simple way to download hot lava game apk<br />
|
69 |
-
quick way to download hot lava game apk<br />
|
70 |
-
tips for downloading hot lava game apk<br />
|
71 |
-
guide for downloading hot lava game apk<br />
|
72 |
-
tutorial for downloading hot lava game apk<br />
|
73 |
-
steps for downloading hot lava game apk<br />
|
74 |
-
instructions for downloading hot lava game apk</p>
|
75 |
-
<ol>
|
76 |
-
<li>Go to the official website of Hot Lava Game or a reputable APK download site and find the download link for Hot Lava Game APK.</li>
|
77 |
-
<li>Click on the download link and wait for the APK file to be downloaded to your device.</li>
|
78 |
-
<li>Once the download is complete, locate the APK file in your device's file manager and tap on it to open it.</li>
|
79 |
-
<li>Follow the on-screen instructions and grant the necessary permissions to install the app on your device.</li>
|
80 |
-
<li>After the installation is done, you will see the Hot Lava Game icon on your device's home screen or app drawer. Tap on it to launch the game and enjoy.</li>
|
81 |
-
</ol>
|
82 |
-
<h2>How to Play and Enjoy Hot Lava Game on Your Android Device</h2>
|
83 |
-
<p>Now that you have downloaded and installed Hot Lava Game on your Android device, you are ready to play and enjoy it. Here are some tips and tricks on how to play and enjoy Hot Lava Game on your Android device.</p>
|
84 |
-
<h3>The controls and tips for playing Hot Lava Game</h3>
|
85 |
-
<p>The controls for playing Hot Lava Game are simple and intuitive. You can use the virtual joystick on the left side of the screen to move your character, and the buttons on the right side of the screen to jump, slide, and perform tricks. You can also swipe the screen to change the camera angle and view your surroundings. Some tips for playing Hot Lava Game are:</p>
|
86 |
-
<ul>
|
87 |
-
<li>Try to maintain a steady speed and momentum, as slowing down or stopping can make you lose balance and fall into the lava.</li>
|
88 |
-
<li>Use the power-ups wisely, as they can give you an edge over the obstacles and enemies. For example, the jetpack can help you fly over gaps, the magnet can help you collect coins easily, and the shield can protect you from damage.</li>
|
89 |
-
<li>Watch out for signs and hints that indicate where to go next, such as arrows, platforms, ropes, ladders, etc.</li>
|
90 |
-
<li>Explore different paths and routes, as they may lead you to hidden secrets and bonuses.</li>
|
91 |
-
</ul>
|
92 |
-
<h3>The modes and levels of Hot Lava Game</h3>
|
93 |
-
<p>Hot Lava Game has two modes: single-player and multiplayer. In single-player mode, you can play through various levels that have different themes, such as school, park, mall, etc. Each level has its own challenges and objectives that you need to complete in order to unlock the next level. You can also earn stars based on your performance in each level. In multiplayer mode, you can compete with other players from around the world in real-time. You can join or create a room with up to four players and race against each other in different maps. You can also chat with other players and make friends.</p>
|
94 |
-
<h3>The customization and social options of Hot Lava Game</h3>
|
95 |
-
<p>Hot Lava Game also has a customization option that lets you personalize your character with different outfits and accessories. You can unlock new items by collecting coins and gems in the game or by purchasing them with real money. You can also mix and match different items to create your own unique style. Hot Lava Game also has a social option that lets you connect with other players and share your achievements. You can link your Facebook account to invite your friends to play with you or to see their scores and rankings. You can also follow other players and send them messages.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<p>Hot Lava Game is a fun and exciting platformer game that will bring back your childhood memories of playing the floor is lava game. It has stunning graphics, addictive gameplay, various environments, multiplayer mode, customization option, social option, and more. It is easy to download and install on your Android device with its APK file. If you are looking for a game that will challenge your skills and reflexes, then you should definitely try Hot Lava Game. Download it now and enjoy!</p>
|
98 |
-
<h3>A summary of the main points of the article</h3> <p>In this article, we have covered the following main points:</p>
|
99 |
-
<ul>
|
100 |
-
<li>Hot Lava Game is a 3D platformer game inspired by the floor is lava challenge, where you have to jump from one platform to another without touching the hot lava that covers the floor.</li>
|
101 |
-
<li>Hot Lava Game has many features and benefits, such as stunning graphics, various environments, multiplayer mode, customization option, social option, and more.</li>
|
102 |
-
<li>Hot Lava Game can be downloaded and installed on your Android device with its APK file, which is an application package file that contains all the data and files needed to run an app on an Android device.</li>
|
103 |
-
<li>Hot Lava Game can be played and enjoyed on your Android device with simple and intuitive controls, as well as tips and tricks that will help you improve your performance and score.</li>
|
104 |
-
</ul>
|
105 |
-
<h3>A call to action for the readers to download and play Hot Lava Game</h3>
|
106 |
-
<p>If you are interested in playing Hot Lava Game on your Android device, don't hesitate to download it now and join the ultimate floor is lava challenge. You will have a blast jumping, sliding, and performing tricks in different environments, as well as competing with other players from around the world. Hot Lava Game is a game that will keep you entertained and engaged for hours. Download it now and enjoy!</p>
|
107 |
-
<h2>FAQs</h2>
|
108 |
-
<p>Here are some frequently asked questions about Hot Lava Game:</p>
|
109 |
-
<h4>Q: Is Hot Lava Game free to play?</h4>
|
110 |
-
<p>A: Yes, Hot Lava Game is free to play. However, it contains in-app purchases that allow you to buy coins, gems, and items with real money.</p>
|
111 |
-
<h4>Q: Is Hot Lava Game safe to download and install?</h4>
|
112 |
-
<p>A: Yes, Hot Lava Game is safe to download and install, as long as you follow the requirements and precautions mentioned in this article. Make sure you download the APK file from a trusted and reliable source, enable the installation of apps from unknown sources on your device, and scan the APK file with an antivirus or anti-malware app before installing it.</p>
|
113 |
-
<h4>Q: How can I update Hot Lava Game on my Android device?</h4>
|
114 |
-
<p>A: You can update Hot Lava Game on your Android device by downloading and installing the latest version of its APK file from the official website of Hot Lava Game or a reputable APK download site. Alternatively, you can check for updates in the game settings or in the Google Play Store.</p>
|
115 |
-
<h4>Q: How can I contact the developer of Hot Lava Game?</h4>
|
116 |
-
<p>A: You can contact the developer of Hot Lava Game by sending an email to [email protected] or by visiting their Facebook page at https://www.facebook.com/jbrostudios/.</p>
|
117 |
-
<h4>Q: How can I share my feedback and suggestions about Hot Lava Game?</h4>
|
118 |
-
<p>A: You can share your feedback and suggestions about Hot Lava Game by leaving a review or rating on the Google Play Store or by sending a message to the developer via email or Facebook.</p> 197e85843d<br />
|
119 |
-
<br />
|
120 |
-
<br />
|
spaces/232labs/VToonify/vtoonify_model.py
DELETED
@@ -1,287 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
import sys
|
3 |
-
sys.path.insert(0, 'vtoonify')
|
4 |
-
|
5 |
-
from util import load_psp_standalone, get_video_crop_parameter, tensor2cv2
|
6 |
-
import torch
|
7 |
-
import torch.nn as nn
|
8 |
-
import numpy as np
|
9 |
-
import dlib
|
10 |
-
import cv2
|
11 |
-
from model.vtoonify import VToonify
|
12 |
-
from model.bisenet.model import BiSeNet
|
13 |
-
import torch.nn.functional as F
|
14 |
-
from torchvision import transforms
|
15 |
-
from model.encoder.align_all_parallel import align_face
|
16 |
-
import gc
|
17 |
-
import huggingface_hub
|
18 |
-
import os
|
19 |
-
|
20 |
-
MODEL_REPO = 'saimemrekanat/vmodels'
|
21 |
-
|
22 |
-
class Model():
|
23 |
-
def __init__(self, device):
|
24 |
-
super().__init__()
|
25 |
-
|
26 |
-
self.device = device
|
27 |
-
self.style_types = {
|
28 |
-
'cartoon1': ['vtoonify_d_cartoon/vtoonify_s026_d0.5.pt', 26],
|
29 |
-
'cartoon1-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 26],
|
30 |
-
'cartoon2-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 64],
|
31 |
-
'cartoon3-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 153],
|
32 |
-
'cartoon4': ['vtoonify_d_cartoon/vtoonify_s299_d0.5.pt', 299],
|
33 |
-
'cartoon4-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 299],
|
34 |
-
'cartoon5-d': ['vtoonify_d_cartoon/vtoonify_s_d.pt', 8],
|
35 |
-
'comic1-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 28],
|
36 |
-
'comic2-d': ['vtoonify_d_comic/vtoonify_s_d.pt', 18],
|
37 |
-
'arcane1': ['vtoonify_d_arcane/vtoonify_s000_d0.5.pt', 0],
|
38 |
-
'arcane1-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 0],
|
39 |
-
'arcane2': ['vtoonify_d_arcane/vtoonify_s077_d0.5.pt', 77],
|
40 |
-
'arcane2-d': ['vtoonify_d_arcane/vtoonify_s_d.pt', 77],
|
41 |
-
'caricature1': ['vtoonify_d_caricature/vtoonify_s039_d0.5.pt', 39],
|
42 |
-
'caricature2': ['vtoonify_d_caricature/vtoonify_s068_d0.5.pt', 68],
|
43 |
-
'pixar': ['vtoonify_d_pixar/vtoonify_s052_d0.5.pt', 52],
|
44 |
-
'pixar-d': ['vtoonify_d_pixar/vtoonify_s_d.pt', 52],
|
45 |
-
'illustration1-d': ['vtoonify_d_illustration/vtoonify_s054_d_c.pt', 54],
|
46 |
-
'illustration2-d': ['vtoonify_d_illustration/vtoonify_s004_d_c.pt', 4],
|
47 |
-
'illustration3-d': ['vtoonify_d_illustration/vtoonify_s009_d_c.pt', 9],
|
48 |
-
'illustration4-d': ['vtoonify_d_illustration/vtoonify_s043_d_c.pt', 43],
|
49 |
-
'illustration5-d': ['vtoonify_d_illustration/vtoonify_s086_d_c.pt', 86],
|
50 |
-
}
|
51 |
-
|
52 |
-
self.landmarkpredictor = self._create_dlib_landmark_model()
|
53 |
-
self.cnn_model = self._create_dlib_landmark_cnn_model()
|
54 |
-
self.parsingpredictor = self._create_parsing_model()
|
55 |
-
self.pspencoder = self._load_encoder()
|
56 |
-
self.transform = transforms.Compose([
|
57 |
-
transforms.ToTensor(),
|
58 |
-
transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]),
|
59 |
-
])
|
60 |
-
|
61 |
-
self.vtoonify, self.exstyle = self._load_default_model()
|
62 |
-
self.color_transfer = False
|
63 |
-
self.style_name = 'cartoon1'
|
64 |
-
self.video_limit_cpu = 100
|
65 |
-
self.video_limit_gpu = 300
|
66 |
-
|
67 |
-
@staticmethod
|
68 |
-
def _create_dlib_landmark_model():
|
69 |
-
return dlib.shape_predictor(huggingface_hub.hf_hub_download(MODEL_REPO,
|
70 |
-
'models/shape_predictor_68_face_landmarks.dat'))
|
71 |
-
|
72 |
-
@staticmethod
|
73 |
-
def _create_dlib_landmark_cnn_model():
|
74 |
-
return dlib.cnn_face_detection_model_v1('localmodel/mmod_human_face_detector.dat')
|
75 |
-
|
76 |
-
def _create_parsing_model(self):
|
77 |
-
parsingpredictor = BiSeNet(n_classes=19)
|
78 |
-
parsingpredictor.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO, 'models/faceparsing.pth'),
|
79 |
-
map_location=lambda storage, loc: storage))
|
80 |
-
parsingpredictor.to(self.device).eval()
|
81 |
-
return parsingpredictor
|
82 |
-
|
83 |
-
def _load_encoder(self) -> nn.Module:
|
84 |
-
style_encoder_path = huggingface_hub.hf_hub_download(MODEL_REPO,'models/encoder.pt')
|
85 |
-
return load_psp_standalone(style_encoder_path, self.device)
|
86 |
-
|
87 |
-
def _load_default_model(self) -> tuple[torch.Tensor, str]:
|
88 |
-
vtoonify = VToonify(backbone = 'dualstylegan')
|
89 |
-
vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO,
|
90 |
-
'models/vtoonify_d_cartoon/vtoonify_s026_d0.5.pt'),
|
91 |
-
map_location=lambda storage, loc: storage)['g_ema'])
|
92 |
-
vtoonify.to(self.device)
|
93 |
-
tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/vtoonify_d_cartoon/exstyle_code.npy'), allow_pickle=True).item()
|
94 |
-
exstyle = torch.tensor(tmp[list(tmp.keys())[26]]).to(self.device)
|
95 |
-
with torch.no_grad():
|
96 |
-
exstyle = vtoonify.zplus2wplus(exstyle)
|
97 |
-
return vtoonify, exstyle
|
98 |
-
|
99 |
-
def load_model(self, style_type: str) -> tuple[torch.Tensor, str]:
|
100 |
-
if 'illustration' in style_type:
|
101 |
-
self.color_transfer = True
|
102 |
-
else:
|
103 |
-
self.color_transfer = False
|
104 |
-
if style_type not in self.style_types.keys():
|
105 |
-
return None, 'Oops, wrong Style Type. Please select a valid model.'
|
106 |
-
self.style_name = style_type
|
107 |
-
model_path, ind = self.style_types[style_type]
|
108 |
-
style_path = os.path.join('models',os.path.dirname(model_path),'exstyle_code.npy')
|
109 |
-
self.vtoonify.load_state_dict(torch.load(huggingface_hub.hf_hub_download(MODEL_REPO,'models/'+model_path),
|
110 |
-
map_location=lambda storage, loc: storage)['g_ema'])
|
111 |
-
tmp = np.load(huggingface_hub.hf_hub_download(MODEL_REPO, style_path), allow_pickle=True).item()
|
112 |
-
exstyle = torch.tensor(tmp[list(tmp.keys())[ind]]).to(self.device)
|
113 |
-
with torch.no_grad():
|
114 |
-
exstyle = self.vtoonify.zplus2wplus(exstyle)
|
115 |
-
return exstyle, 'Model of %s loaded.'%(style_type)
|
116 |
-
|
117 |
-
def detect_and_align(self, frame, top, bottom, left, right, return_para=False):
|
118 |
-
message = 'Error: no face detected! Please retry or change the photo.'
|
119 |
-
paras = get_video_crop_parameter(frame, self.landmarkpredictor, [left, right, top, bottom])
|
120 |
-
instyle = None
|
121 |
-
h, w, scale = 0, 0, 0
|
122 |
-
if paras is not None:
|
123 |
-
h,w,top,bottom,left,right,scale = paras
|
124 |
-
H, W = int(bottom-top), int(right-left)
|
125 |
-
# for HR image, we apply gaussian blur to it to avoid over-sharp stylization results
|
126 |
-
kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]])
|
127 |
-
if scale <= 0.75:
|
128 |
-
frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
|
129 |
-
if scale <= 0.375:
|
130 |
-
frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
|
131 |
-
frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
|
132 |
-
with torch.no_grad():
|
133 |
-
I = align_face(frame, self.landmarkpredictor)
|
134 |
-
if I is not None:
|
135 |
-
I = self.transform(I).unsqueeze(dim=0).to(self.device)
|
136 |
-
instyle = self.pspencoder(I)
|
137 |
-
instyle = self.vtoonify.zplus2wplus(instyle)
|
138 |
-
message = 'Successfully rescale the frame to (%d, %d)'%(bottom-top, right-left)
|
139 |
-
else:
|
140 |
-
frame = np.zeros((256,256,3), np.uint8)
|
141 |
-
else:
|
142 |
-
frame = np.zeros((256,256,3), np.uint8)
|
143 |
-
if return_para:
|
144 |
-
return frame, instyle, message, w, h, top, bottom, left, right, scale
|
145 |
-
return frame, instyle, message
|
146 |
-
|
147 |
-
#@torch.inference_mode()
|
148 |
-
def detect_and_align_image(self, image: str, top: int, bottom: int, left: int, right: int
|
149 |
-
) -> tuple[np.ndarray, torch.Tensor, str]:
|
150 |
-
if image is None:
|
151 |
-
return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load empty file.'
|
152 |
-
frame = cv2.imread(image)
|
153 |
-
if frame is None:
|
154 |
-
return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load the image.'
|
155 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
|
156 |
-
return self.detect_and_align(frame, top, bottom, left, right)
|
157 |
-
|
158 |
-
def detect_and_align_video(self, video: str, top: int, bottom: int, left: int, right: int
|
159 |
-
) -> tuple[np.ndarray, torch.Tensor, str]:
|
160 |
-
if video is None:
|
161 |
-
return np.zeros((256,256,3), np.uint8), None, 'Error: fail to load empty file.'
|
162 |
-
video_cap = cv2.VideoCapture(video)
|
163 |
-
if video_cap.get(7) == 0:
|
164 |
-
video_cap.release()
|
165 |
-
return np.zeros((256,256,3), np.uint8), torch.zeros(1,18,512).to(self.device), 'Error: fail to load the video.'
|
166 |
-
success, frame = video_cap.read()
|
167 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
168 |
-
video_cap.release()
|
169 |
-
return self.detect_and_align(frame, top, bottom, left, right)
|
170 |
-
|
171 |
-
def detect_and_align_full_video(self, video: str, top: int, bottom: int, left: int, right: int) -> tuple[str, torch.Tensor, str]:
|
172 |
-
message = 'Error: no face detected! Please retry or change the video.'
|
173 |
-
instyle = None
|
174 |
-
if video is None:
|
175 |
-
return 'default.mp4', instyle, 'Error: fail to load empty file.'
|
176 |
-
video_cap = cv2.VideoCapture(video)
|
177 |
-
if video_cap.get(7) == 0:
|
178 |
-
video_cap.release()
|
179 |
-
return 'default.mp4', instyle, 'Error: fail to load the video.'
|
180 |
-
num = min(self.video_limit_gpu, int(video_cap.get(7)))
|
181 |
-
if self.device == 'cpu':
|
182 |
-
num = min(self.video_limit_cpu, num)
|
183 |
-
success, frame = video_cap.read()
|
184 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
185 |
-
frame, instyle, message, w, h, top, bottom, left, right, scale = self.detect_and_align(frame, top, bottom, left, right, True)
|
186 |
-
if instyle is None:
|
187 |
-
return 'default.mp4', instyle, message
|
188 |
-
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
189 |
-
videoWriter = cv2.VideoWriter('input.mp4', fourcc, video_cap.get(5), (int(right-left), int(bottom-top)))
|
190 |
-
videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
|
191 |
-
kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]])
|
192 |
-
for i in range(num-1):
|
193 |
-
success, frame = video_cap.read()
|
194 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
195 |
-
if scale <= 0.75:
|
196 |
-
frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
|
197 |
-
if scale <= 0.375:
|
198 |
-
frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
|
199 |
-
frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
|
200 |
-
videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
|
201 |
-
|
202 |
-
videoWriter.release()
|
203 |
-
video_cap.release()
|
204 |
-
|
205 |
-
return 'input.mp4', instyle, 'Successfully rescale the video to (%d, %d)'%(bottom-top, right-left)
|
206 |
-
|
207 |
-
def image_toonify(self, aligned_face: np.ndarray, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[np.ndarray, str]:
|
208 |
-
#print(style_type + ' ' + self.style_name)
|
209 |
-
if instyle is None or aligned_face is None:
|
210 |
-
return np.zeros((256,256,3), np.uint8), 'Opps, something wrong with the input. Please go to Step 2 and Rescale Image/First Frame again.'
|
211 |
-
if self.style_name != style_type:
|
212 |
-
exstyle, _ = self.load_model(style_type)
|
213 |
-
if exstyle is None:
|
214 |
-
exstyle, _ = self.load_model(style_type)
|
215 |
-
return np.zeros((256,256,3), np.uint8), 'Opps, something wrong with the style type. Please go to Step 1 and load model again.'
|
216 |
-
with torch.no_grad():
|
217 |
-
if self.color_transfer:
|
218 |
-
s_w = exstyle
|
219 |
-
else:
|
220 |
-
s_w = instyle.clone()
|
221 |
-
s_w[:,:7] = exstyle[:,:7]
|
222 |
-
|
223 |
-
x = self.transform(aligned_face).unsqueeze(dim=0).to(self.device)
|
224 |
-
x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0],
|
225 |
-
scale_factor=0.5, recompute_scale_factor=False).detach()
|
226 |
-
inputs = torch.cat((x, x_p/16.), dim=1)
|
227 |
-
y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = style_degree)
|
228 |
-
y_tilde = torch.clamp(y_tilde, -1, 1)
|
229 |
-
print('*** Toonify %dx%d image with style of %s'%(y_tilde.shape[2], y_tilde.shape[3], style_type))
|
230 |
-
return ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8), 'Successfully toonify the image with style of %s'%(self.style_name)
|
231 |
-
|
232 |
-
def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[str, str]:
|
233 |
-
print(style_type + ' ' + self.style_name)
|
234 |
-
exstyle, _ = self.load_model(style_type)
|
235 |
-
if aligned_video is None:
|
236 |
-
return 'default.mp4', 'Opps, something wrong with the input. Please go to Step 2 and Rescale Video again. 1'
|
237 |
-
video_cap = cv2.VideoCapture(aligned_video)
|
238 |
-
if instyle is None or aligned_video is None or video_cap.get(7) == 0:
|
239 |
-
video_cap.release()
|
240 |
-
return 'default.mp4', 'Opps, something wrong with the input. Please go to Step 2 and Rescale Video again. 2'
|
241 |
-
if self.style_name != style_type:
|
242 |
-
exstyle, _ = self.load_model(style_type)
|
243 |
-
num = min(self.video_limit_gpu, int(video_cap.get(7)))
|
244 |
-
if self.device == 'cpu':
|
245 |
-
num = min(self.video_limit_cpu, num)
|
246 |
-
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
247 |
-
videoWriter = cv2.VideoWriter('output.mp4', fourcc,
|
248 |
-
video_cap.get(5), (int(video_cap.get(3)*4),
|
249 |
-
int(video_cap.get(4)*4)))
|
250 |
-
|
251 |
-
batch_frames = []
|
252 |
-
if video_cap.get(3) != 0:
|
253 |
-
if self.device == 'cpu':
|
254 |
-
batch_size = max(1, int(4 * 256* 256/ video_cap.get(3) / video_cap.get(4)))
|
255 |
-
else:
|
256 |
-
batch_size = min(max(1, int(4 * 400 * 360/ video_cap.get(3) / video_cap.get(4))), 4)
|
257 |
-
else:
|
258 |
-
batch_size = 1
|
259 |
-
print('*** Toonify using batch size of %d on %dx%d video of %d frames with style of %s'%(batch_size, int(video_cap.get(3)*4), int(video_cap.get(4)*4), num, style_type))
|
260 |
-
with torch.no_grad():
|
261 |
-
if self.color_transfer:
|
262 |
-
s_w = exstyle
|
263 |
-
else:
|
264 |
-
s_w = instyle.clone()
|
265 |
-
s_w[:,:7] = exstyle[:,:7]
|
266 |
-
for i in range(num):
|
267 |
-
success, frame = video_cap.read()
|
268 |
-
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
269 |
-
batch_frames += [self.transform(frame).unsqueeze(dim=0).to(self.device)]
|
270 |
-
if len(batch_frames) == batch_size or (i+1) == num:
|
271 |
-
x = torch.cat(batch_frames, dim=0)
|
272 |
-
batch_frames = []
|
273 |
-
with torch.no_grad():
|
274 |
-
x_p = F.interpolate(self.parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0],
|
275 |
-
scale_factor=0.5, recompute_scale_factor=False).detach()
|
276 |
-
inputs = torch.cat((x, x_p/16.), dim=1)
|
277 |
-
y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), style_degree)
|
278 |
-
y_tilde = torch.clamp(y_tilde, -1, 1)
|
279 |
-
for k in range(y_tilde.size(0)):
|
280 |
-
videoWriter.write(tensor2cv2(y_tilde[k].cpu()))
|
281 |
-
gc.collect()
|
282 |
-
|
283 |
-
videoWriter.release()
|
284 |
-
video_cap.release()
|
285 |
-
return 'output.mp4', 'Successfully toonify video of %d frames with style of %s'%(num, self.style_name)
|
286 |
-
|
287 |
-
|
spaces/7eu7d7/anime-ai-detect-fucker/attacker/PGD.py
DELETED
@@ -1,84 +0,0 @@
import torch
from torch import nn
from copy import deepcopy
from .base import Attacker, Empty
from torch.cuda import amp
from tqdm import tqdm

class PGD(Attacker):
    def __init__(self, model, img_transform=(lambda x: x, lambda x: x), use_amp=False):
        super().__init__(model, img_transform)
        self.use_amp = use_amp
        self.call_back = None
        self.img_loader = None
        self.img_hook = None

        self.scaler = amp.GradScaler(enabled=use_amp)

    def set_para(self, eps=8, alpha=lambda: 8, iters=20, **kwargs):
        super().set_para(eps=eps, alpha=alpha, iters=iters, **kwargs)

    def set_call_back(self, call_back):
        self.call_back = call_back

    def set_img_loader(self, img_loader):
        self.img_loader = img_loader

    def step(self, images, labels, loss):
        with amp.autocast(enabled=self.use_amp):
            images.requires_grad = True
            outputs = self.model(images).logits

            self.model.zero_grad()
            cost = loss(outputs, labels)  # +outputs[2].view(-1)[0]*0+outputs[1].view(-1)[0]*0+outputs[0].view(-1)[0]*0  # support DDP

        self.scaler.scale(cost).backward()

        adv_images = (images + self.alpha() * images.grad.sign()).detach_()
        eta = torch.clamp(adv_images - self.ori_images, min=-self.eps, max=self.eps)
        images = self.img_transform[0](torch.clamp(self.img_transform[1](self.ori_images + eta), min=0, max=1).detach_())

        return images

    def set_data(self, images, labels):
        self.ori_images = deepcopy(images)
        self.images = images
        self.labels = labels

    def __iter__(self):
        self.atk_step = 0
        return self

    def __next__(self):
        self.atk_step += 1
        if self.atk_step > self.iters:
            raise StopIteration

        with self.model.no_sync() if isinstance(self.model, nn.parallel.DistributedDataParallel) else Empty():
            self.model.eval()

            self.images = self.forward(self, self.images, self.labels)

            self.model.zero_grad()
            self.model.train()

        return self.ori_images, self.images.detach(), self.labels

    def attack(self, images, labels):
        # images = deepcopy(images)
        self.ori_images = deepcopy(images)

        for i in tqdm(range(self.iters)):
            self.model.eval()

            images = self.forward(self, images, labels)

            self.model.zero_grad()
            self.model.train()
            if self.call_back:
                self.call_back(self.ori_images, images.detach(), labels)

            if self.img_hook is not None:
                images = self.img_hook(self.ori_images, images.detach())

        return images
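Note: the update in PGD.step above is the standard projected gradient descent step for adversarial examples: move the input along the sign of the loss gradient, then project the perturbation back into an eps-ball around the original image and into the valid pixel range. A self-contained sketch of one such step, assuming a plain classifier that returns logits directly (no AMP, DDP or the Attacker base class; the loss choice, eps and alpha are illustrative):

    import torch
    import torch.nn.functional as F

    def pgd_step(model, images, ori_images, labels, eps=8 / 255, alpha=2 / 255):
        # Ascend the loss via the gradient sign, then clamp into the
        # L-infinity ball of radius eps around ori_images and into [0, 1].
        images = images.clone().detach().requires_grad_(True)
        loss = F.cross_entropy(model(images), labels)
        loss.backward()
        adv = images + alpha * images.grad.sign()
        eta = torch.clamp(adv - ori_images, min=-eps, max=eps)
        return torch.clamp(ori_images + eta, min=0, max=1).detach()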
spaces/AI-Dashboards/README/README.md
DELETED
@@ -1,8 +0,0 @@
---
title: README
emoji: 👁
colorFrom: indigo
colorTo: purple
sdk: static
pinned: false
---
spaces/AIConsultant/MusicGen/audiocraft/solvers/musicgen.py
DELETED
@@ -1,699 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from pathlib import Path
import time
import typing as tp

import flashy
import math
import omegaconf
import torch
from torch.nn import functional as F

from . import base, builders
from .compression import CompressionSolver
from .. import metrics as eval_metrics
from .. import models
from ..data.audio_dataset import AudioDataset
from ..data.music_dataset import MusicDataset, MusicInfo, AudioInfo
from ..data.audio_utils import normalize_audio
from ..modules.conditioners import JointEmbedCondition, SegmentWithAttributes, WavCondition
from ..utils.cache import CachedBatchWriter, CachedBatchLoader
from ..utils.samples.manager import SampleManager
from ..utils.utils import get_dataset_from_loader, is_jsonable, warn_once


class MusicGenSolver(base.StandardSolver):
    """Solver for MusicGen training task.

    Used in: https://arxiv.org/abs/2306.05284
    """
    DATASET_TYPE: builders.DatasetType = builders.DatasetType.MUSIC

    def __init__(self, cfg: omegaconf.DictConfig):
        super().__init__(cfg)
        # easier access to sampling parameters
        self.generation_params = {
            'use_sampling': self.cfg.generate.lm.use_sampling,
            'temp': self.cfg.generate.lm.temp,
            'top_k': self.cfg.generate.lm.top_k,
            'top_p': self.cfg.generate.lm.top_p,
        }
        self._best_metric_name: tp.Optional[str] = 'ce'

        self._cached_batch_writer = None
        self._cached_batch_loader = None
        if cfg.cache.path:
            if cfg.cache.write:
                self._cached_batch_writer = CachedBatchWriter(Path(cfg.cache.path))
                if self.cfg.cache.write_num_shards:
                    self.logger.warning("Multiple shard cache, best_metric_name will be set to None.")
                    self._best_metric_name = None
            else:
                self._cached_batch_loader = CachedBatchLoader(
                    Path(cfg.cache.path), cfg.dataset.batch_size, cfg.dataset.num_workers,
                    min_length=self.cfg.optim.updates_per_epoch or 1)
                self.dataloaders['original_train'] = self.dataloaders['train']
                self.dataloaders['train'] = self._cached_batch_loader  # type: ignore

    @staticmethod
    def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None,
                                 device: tp.Optional[str] = None, autocast: bool = True,
                                 batch_size: tp.Optional[int] = None,
                                 override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
                                 **kwargs):
        """Mostly a convenience function around magma.train.get_solver_from_sig,
        populating all the proper param, deactivating EMA, FSDP, loading the best state,
        basically all you need to get a solver ready to "play" with in single GPU mode
        and with minimal memory overhead.

        Args:
            sig (str): signature to load.
            dtype (str or None): potential dtype, as a string, i.e. 'float16'.
            device (str or None): potential device, as a string, i.e. 'cuda'.
            override_cfg (dict or omegaconf.DictConfig or None): potential device, as a string, i.e. 'cuda'.
        """
        from audiocraft import train
        our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}}
        our_override_cfg['autocast'] = autocast
        if dtype is not None:
            our_override_cfg['dtype'] = dtype
        if device is not None:
            our_override_cfg['device'] = device
        if batch_size is not None:
            our_override_cfg['dataset'] = {'batch_size': batch_size}
        if override_cfg is None:
            override_cfg = {}
        override_cfg = omegaconf.OmegaConf.merge(
            omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg))  # type: ignore
        solver = train.get_solver_from_sig(
            sig, override_cfg=override_cfg,
            load_best=True, disable_fsdp=True,
            ignore_state_keys=['optimizer', 'ema'], **kwargs)
        solver.model.eval()
        return solver

    def get_formatter(self, stage_name: str) -> flashy.Formatter:
        return flashy.Formatter({
            'lr': '.2E',
            'ce': '.3f',
            'ppl': '.3f',
            'grad_norm': '.3E',
        }, exclude_keys=['ce_q*', 'ppl_q*'])

    @property
    def best_metric_name(self) -> tp.Optional[str]:
        return self._best_metric_name

    def build_model(self) -> None:
        """Instantiate models and optimizer."""
        # we can potentially not use all quantizers with which the EnCodec model was trained
        # (e.g. we trained the model with quantizers dropout)
        self.compression_model = CompressionSolver.wrapped_model_from_checkpoint(
            self.cfg, self.cfg.compression_model_checkpoint, device=self.device)
        assert self.compression_model.sample_rate == self.cfg.sample_rate, (
            f"Compression model sample rate is {self.compression_model.sample_rate} but "
            f"Solver sample rate is {self.cfg.sample_rate}."
        )
        # ensure we have matching configuration between LM and compression model
        assert self.cfg.transformer_lm.card == self.compression_model.cardinality, (
            "Cardinalities of the LM and compression model don't match: ",
            f"LM cardinality is {self.cfg.transformer_lm.card} vs ",
            f"compression model cardinality is {self.compression_model.cardinality}"
        )
        assert self.cfg.transformer_lm.n_q == self.compression_model.num_codebooks, (
            "Numbers of codebooks of the LM and compression models don't match: ",
            f"LM number of codebooks is {self.cfg.transformer_lm.n_q} vs ",
            f"compression model numer of codebooks is {self.compression_model.num_codebooks}"
        )
        self.logger.info("Compression model has %d codebooks with %d cardinality, and a framerate of %d",
                         self.compression_model.num_codebooks, self.compression_model.cardinality,
                         self.compression_model.frame_rate)
        # instantiate LM model
        self.model: models.LMModel = models.builders.get_lm_model(self.cfg).to(self.device)
        if self.cfg.fsdp.use:
            assert not self.cfg.autocast, "Cannot use autocast with fsdp"
            self.model = self.wrap_with_fsdp(self.model)
        self.register_ema('model')
        # initialize optimization
        self.optimizer = builders.get_optimizer(builders.get_optim_parameter_groups(self.model), self.cfg.optim)
        self.lr_scheduler = builders.get_lr_scheduler(self.optimizer, self.cfg.schedule, self.total_updates)
        self.register_stateful('compression_model', 'model', 'optimizer', 'lr_scheduler')
        self.register_best_state('model')
        self.autocast_dtype = {
            'float16': torch.float16, 'bfloat16': torch.bfloat16
        }[self.cfg.autocast_dtype]
        self.scaler: tp.Optional[torch.cuda.amp.GradScaler] = None
        if self.cfg.fsdp.use:
            need_scaler = self.cfg.fsdp.param_dtype == 'float16'
        else:
            need_scaler = self.cfg.autocast and self.autocast_dtype is torch.float16
        if need_scaler:
            if self.cfg.fsdp.use:
                from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
                self.scaler = ShardedGradScaler()  # type: ignore
            else:
                self.scaler = torch.cuda.amp.GradScaler()
            self.register_stateful('scaler')

    def build_dataloaders(self) -> None:
        """Instantiate audio dataloaders for each stage."""
        self.dataloaders = builders.get_audio_datasets(self.cfg, dataset_type=self.DATASET_TYPE)

    def show(self) -> None:
        """Show the compression model and LM model."""
        self.logger.info("Compression model:")
        self.log_model_summary(self.compression_model)
        self.logger.info("LM model:")
        self.log_model_summary(self.model)

    def load_state_dict(self, state: dict) -> None:
        if 'condition_provider' in state:
            model_state = state['model']
            condition_provider_state = state.pop('condition_provider')
            prefix = 'condition_provider.'
            for key, value in condition_provider_state.items():
                key = prefix + key
                assert key not in model_state
                model_state[key] = value
        super().load_state_dict(state)

    def load_from_pretrained(self, name: str):
        # TODO: support native HF versions of MusicGen.
        lm_pkg = models.loaders.load_lm_model_ckpt(name)
        state: dict = {
            'best_state': {
                'model': lm_pkg['best_state'],
            },
        }
        return state

    def _compute_cross_entropy(
            self, logits: torch.Tensor, targets: torch.Tensor, mask: torch.Tensor
    ) -> tp.Tuple[torch.Tensor, tp.List[torch.Tensor]]:
        """Compute cross entropy between multi-codebook targets and model's logits.
        The cross entropy is computed per codebook to provide codebook-level cross entropy.
        Valid timesteps for each of the codebook are pulled from the mask, where invalid
        timesteps are set to 0.

        Args:
            logits (torch.Tensor): Model's logits of shape [B, K, T, card].
            targets (torch.Tensor): Target codes, of shape [B, K, T].
            mask (torch.Tensor): Mask for valid target codes, of shape [B, K, T].
        Returns:
            ce (torch.Tensor): Cross entropy averaged over the codebooks
            ce_per_codebook (list of torch.Tensor): Cross entropy per codebook (detached).
        """
        B, K, T = targets.shape
        assert logits.shape[:-1] == targets.shape
        assert mask.shape == targets.shape
        ce = torch.zeros([], device=targets.device)
        ce_per_codebook: tp.List[torch.Tensor] = []
        for k in range(K):
            logits_k = logits[:, k, ...].contiguous().view(-1, logits.size(-1))  # [B x T, card]
            targets_k = targets[:, k, ...].contiguous().view(-1)  # [B x T]
            mask_k = mask[:, k, ...].contiguous().view(-1)  # [B x T]
            ce_targets = targets_k[mask_k]
            ce_logits = logits_k[mask_k]
            q_ce = F.cross_entropy(ce_logits, ce_targets)
            ce += q_ce
            ce_per_codebook.append(q_ce.detach())
        # average cross entropy across codebooks
        ce = ce / K
        return ce, ce_per_codebook

    @torch.no_grad()
    def _prepare_tokens_and_attributes(
            self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]],
            check_synchronization_points: bool = False
    ) -> tp.Tuple[dict, torch.Tensor, torch.Tensor]:
        """Prepare input batchs for language model training.

        Args:
            batch (tuple[torch.Tensor, list[SegmentWithAttributes]]): Input batch with audio tensor of shape [B, C, T]
                and corresponding metadata as SegmentWithAttributes (with B items).
            check_synchronization_points (bool): Whether to check for synchronization points slowing down training.
        Returns:
            Condition tensors (dict[str, any]): Preprocessed condition attributes.
            Tokens (torch.Tensor): Audio tokens from compression model, of shape [B, K, T_s],
                with B the batch size, K the number of codebooks, T_s the token timesteps.
            Padding mask (torch.Tensor): Mask with valid positions in the tokens tensor, of shape [B, K, T_s].
        """
        if self._cached_batch_loader is None or self.current_stage != "train":
            audio, infos = batch
            audio = audio.to(self.device)
            audio_tokens = None
            assert audio.size(0) == len(infos), (
                f"Mismatch between number of items in audio batch ({audio.size(0)})",
                f" and in metadata ({len(infos)})"
            )
        else:
            audio = None
            # In that case the batch will be a tuple coming from the _cached_batch_writer bit below.
            infos, = batch  # type: ignore
            assert all([isinstance(info, AudioInfo) for info in infos])
            assert all([info.audio_tokens is not None for info in infos])  # type: ignore
            audio_tokens = torch.stack([info.audio_tokens for info in infos]).to(self.device)  # type: ignore
            audio_tokens = audio_tokens.long()
            for info in infos:
                if isinstance(info, MusicInfo):
                    # Careful here, if you want to use this condition_wav (e.b. chroma conditioning),
                    # then you must be using the chroma cache! otherwise the code will try
                    # to use this segment and fail (by that I mean you will see NaN everywhere).
                    info.self_wav = WavCondition(
                        torch.full([1, info.channels, info.total_frames], float('NaN')),
                        length=torch.tensor([info.n_frames]),
                        sample_rate=[info.sample_rate],
                        path=[info.meta.path],
                        seek_time=[info.seek_time])
                    dataset = get_dataset_from_loader(self.dataloaders['original_train'])
                    assert isinstance(dataset, MusicDataset), type(dataset)
                    if dataset.paraphraser is not None and info.description is not None:
                        # Hackingly reapplying paraphraser when using cache.
                        info.description = dataset.paraphraser.sample_paraphrase(
                            info.meta.path, info.description)
        # prepare attributes
        attributes = [info.to_condition_attributes() for info in infos]
        attributes = self.model.cfg_dropout(attributes)
        attributes = self.model.att_dropout(attributes)
        tokenized = self.model.condition_provider.tokenize(attributes)

        # Now we should be synchronization free.
        if self.device == "cuda" and check_synchronization_points:
            torch.cuda.set_sync_debug_mode("warn")

        if audio_tokens is None:
            with torch.no_grad():
                audio_tokens, scale = self.compression_model.encode(audio)
                assert scale is None, "Scaled compression model not supported with LM."

        with self.autocast:
            condition_tensors = self.model.condition_provider(tokenized)

        # create a padding mask to hold valid vs invalid positions
        padding_mask = torch.ones_like(audio_tokens, dtype=torch.bool, device=audio_tokens.device)
        # replace encodec tokens from padded audio with special_token_id
        if self.cfg.tokens.padding_with_special_token:
            audio_tokens = audio_tokens.clone()
            padding_mask = padding_mask.clone()
            token_sample_rate = self.compression_model.frame_rate
            B, K, T_s = audio_tokens.shape
            for i in range(B):
                n_samples = infos[i].n_frames
                audio_sample_rate = infos[i].sample_rate
                # take the last token generated from actual audio frames (non-padded audio)
                valid_tokens = math.floor(float(n_samples) / audio_sample_rate * token_sample_rate)
                audio_tokens[i, :, valid_tokens:] = self.model.special_token_id
                padding_mask[i, :, valid_tokens:] = 0

        if self.device == "cuda" and check_synchronization_points:
            torch.cuda.set_sync_debug_mode("default")

        if self._cached_batch_writer is not None and self.current_stage == 'train':
            assert self._cached_batch_loader is None
            assert audio_tokens is not None
            for info, one_audio_tokens in zip(infos, audio_tokens):
                assert isinstance(info, AudioInfo)
                if isinstance(info, MusicInfo):
                    assert not info.joint_embed, "joint_embed and cache not supported yet."
                    info.self_wav = None
                assert one_audio_tokens.max() < 2**15, one_audio_tokens.max().item()
                info.audio_tokens = one_audio_tokens.short().cpu()
            self._cached_batch_writer.save(infos)

        return condition_tensors, audio_tokens, padding_mask

    def run_step(self, idx: int, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]], metrics: dict) -> dict:
        """Perform one training or valid step on a given batch."""
        check_synchronization_points = idx == 1 and self.device == 'cuda'

        condition_tensors, audio_tokens, padding_mask = self._prepare_tokens_and_attributes(
            batch, check_synchronization_points)

        self.deadlock_detect.update('tokens_and_conditions')

        if check_synchronization_points:
            torch.cuda.set_sync_debug_mode('warn')

        with self.autocast:
            model_output = self.model.compute_predictions(audio_tokens, [], condition_tensors)  # type: ignore
            logits = model_output.logits
            mask = padding_mask & model_output.mask
            ce, ce_per_codebook = self._compute_cross_entropy(logits, audio_tokens, mask)
            loss = ce
        self.deadlock_detect.update('loss')

        if check_synchronization_points:
            torch.cuda.set_sync_debug_mode('default')

        if self.is_training:
            metrics['lr'] = self.optimizer.param_groups[0]['lr']
            if self.scaler is not None:
                loss = self.scaler.scale(loss)
            self.deadlock_detect.update('scale')
            if self.cfg.fsdp.use:
                loss.backward()
                flashy.distrib.average_tensors(self.model.buffers())
            elif self.cfg.optim.eager_sync:
                with flashy.distrib.eager_sync_model(self.model):
                    loss.backward()
            else:
                # this should always be slower but can be useful
                # for weird use cases like multiple backwards.
                loss.backward()
                flashy.distrib.sync_model(self.model)
            self.deadlock_detect.update('backward')

            if self.scaler is not None:
                self.scaler.unscale_(self.optimizer)
            if self.cfg.optim.max_norm:
                if self.cfg.fsdp.use:
                    metrics['grad_norm'] = self.model.clip_grad_norm_(self.cfg.optim.max_norm)  # type: ignore
                else:
                    metrics['grad_norm'] = torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.cfg.optim.max_norm
                    )
            if self.scaler is None:
                self.optimizer.step()
            else:
                self.scaler.step(self.optimizer)
                self.scaler.update()
            if self.lr_scheduler:
                self.lr_scheduler.step()
            self.optimizer.zero_grad()
            self.deadlock_detect.update('optim')
            if self.scaler is not None:
                scale = self.scaler.get_scale()
                metrics['grad_scale'] = scale
            if not loss.isfinite().all():
                raise RuntimeError("Model probably diverged.")

        metrics['ce'] = ce
        metrics['ppl'] = torch.exp(ce)
        for k, ce_q in enumerate(ce_per_codebook):
            metrics[f'ce_q{k + 1}'] = ce_q
            metrics[f'ppl_q{k + 1}'] = torch.exp(ce_q)

        return metrics

    @torch.no_grad()
    def run_generate_step(self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]],
                          gen_duration: float, prompt_duration: tp.Optional[float] = None,
                          remove_prompt: bool = False,
                          **generation_params) -> dict:
        """Run generate step on a batch of optional audio tensor and corresponding attributes.

        Args:
            batch (tuple[torch.Tensor, list[SegmentWithAttributes]]):
            use_prompt (bool): Whether to do audio continuation generation with prompt from audio batch.
            gen_duration (float): Target audio duration for the generation.
            prompt_duration (float, optional): Duration for the audio prompt to use for continuation.
            remove_prompt (bool, optional): Whether to remove the prompt from the generated audio.
            generation_params: Additional generation parameters.
        Returns:
            gen_outputs (dict): Generation outputs, consisting in audio, audio tokens from both the generation
                and the prompt along with additional information.
        """
        bench_start = time.time()
        audio, meta = batch
        assert audio.size(0) == len(meta), (
            f"Mismatch between number of items in audio batch ({audio.size(0)})",
            f" and in metadata ({len(meta)})"
        )
        # prepare attributes
        attributes = [x.to_condition_attributes() for x in meta]
        # TODO: Add dropout for chroma?

        # prepare audio prompt
        if prompt_duration is None:
            prompt_audio = None
        else:
            assert prompt_duration < gen_duration, "Prompt duration must be lower than target generation duration"
            prompt_audio_frames = int(prompt_duration * self.compression_model.sample_rate)
            prompt_audio = audio[..., :prompt_audio_frames]

        # get audio tokens from compression model
        if prompt_audio is None or prompt_audio.nelement() == 0:
            num_samples = len(attributes)
            prompt_tokens = None
        else:
            num_samples = None
            prompt_audio = prompt_audio.to(self.device)
            prompt_tokens, scale = self.compression_model.encode(prompt_audio)
            assert scale is None, "Compression model in MusicGen should not require rescaling."

        # generate by sampling from the LM
        with self.autocast:
            total_gen_len = math.ceil(gen_duration * self.compression_model.frame_rate)
            gen_tokens = self.model.generate(
                prompt_tokens, attributes, max_gen_len=total_gen_len,
                num_samples=num_samples, **self.generation_params)

        # generate audio from tokens
        assert gen_tokens.dim() == 3
        gen_audio = self.compression_model.decode(gen_tokens, None)

        bench_end = time.time()
        gen_outputs = {
            'rtf': (bench_end - bench_start) / gen_duration,
            'ref_audio': audio,
            'gen_audio': gen_audio,
            'gen_tokens': gen_tokens,
            'prompt_audio': prompt_audio,
            'prompt_tokens': prompt_tokens,
        }
        return gen_outputs

    def generate_audio(self) -> dict:
        """Audio generation stage."""
        generate_stage_name = f'{self.current_stage}'
        sample_manager = SampleManager(self.xp)
        self.logger.info(f"Generating samples in {sample_manager.base_folder}")
        loader = self.dataloaders['generate']
        updates = len(loader)
        lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)

        dataset = get_dataset_from_loader(loader)
        dataset_duration = dataset.segment_duration
        assert dataset_duration is not None
        assert isinstance(dataset, AudioDataset)
        target_duration = self.cfg.generate.lm.gen_duration
        prompt_duration = self.cfg.generate.lm.prompt_duration
        if target_duration is None:
            target_duration = dataset_duration
        if prompt_duration is None:
            prompt_duration = dataset_duration / 4
        assert prompt_duration < dataset_duration, (
            f"Specified prompt duration ({prompt_duration}s) is longer",
            f" than reference audio duration ({dataset_duration}s)"
        )

        def get_hydrated_conditions(meta: tp.List[SegmentWithAttributes]):
            hydrated_conditions = []
            for sample in [x.to_condition_attributes() for x in meta]:
                cond_dict = {}
                for cond_type in sample.__annotations__.keys():
                    for cond_key, cond_val in getattr(sample, cond_type).items():
                        if cond_key not in self.model.condition_provider.conditioners.keys():
                            continue
                        if is_jsonable(cond_val):
                            cond_dict[cond_key] = cond_val
                        elif isinstance(cond_val, WavCondition):
                            cond_dict[cond_key] = cond_val.path
                        elif isinstance(cond_val, JointEmbedCondition):
                            cond_dict[cond_key] = cond_val.text  # only support text at inference for now
                        else:
                            # if we reached this point, it is not clear how to log the condition
                            # so we just log the type.
                            cond_dict[cond_key] = str(type(cond_val))
                            continue
                hydrated_conditions.append(cond_dict)
            return hydrated_conditions

        metrics: dict = {}
        average = flashy.averager()
        for batch in lp:
            audio, meta = batch
            # metadata for sample manager
            hydrated_conditions = get_hydrated_conditions(meta)
            sample_generation_params = {
                **{f'classifier_free_guidance_{k}': v for k, v in self.cfg.classifier_free_guidance.items()},
                **self.generation_params
            }
            if self.cfg.generate.lm.unprompted_samples:
                if self.cfg.generate.lm.gen_gt_samples:
                    # get the ground truth instead of generation
                    self.logger.warn(
                        "Use ground truth instead of audio generation as generate.lm.gen_gt_samples=true")
                    gen_unprompted_audio = audio
                    rtf = 1.
                else:
                    gen_unprompted_outputs = self.run_generate_step(
                        batch, gen_duration=target_duration, prompt_duration=prompt_duration,
                        **self.generation_params)
                    gen_unprompted_audio = gen_unprompted_outputs['gen_audio'].cpu()
                    rtf = gen_unprompted_outputs['rtf']
                sample_manager.add_samples(
                    gen_unprompted_audio, self.epoch, hydrated_conditions,
                    ground_truth_wavs=audio, generation_args=sample_generation_params)

            if self.cfg.generate.lm.prompted_samples:
                gen_outputs = self.run_generate_step(
                    batch, gen_duration=target_duration, prompt_duration=prompt_duration,
                    **self.generation_params)
                gen_audio = gen_outputs['gen_audio'].cpu()
                prompt_audio = gen_outputs['prompt_audio'].cpu()
                sample_manager.add_samples(
                    gen_audio, self.epoch, hydrated_conditions,
                    prompt_wavs=prompt_audio, ground_truth_wavs=audio,
                    generation_args=sample_generation_params)

            metrics['rtf'] = rtf
            metrics = average(metrics)

        flashy.distrib.barrier()
        return metrics

    def generate(self) -> dict:
        """Generate stage."""
        self.model.eval()
        with torch.no_grad():
            return self.generate_audio()

    def run_epoch(self):
        if self.cfg.cache.write:
            if ((self.epoch - 1) % self.cfg.cache.write_num_shards) != self.cfg.cache.write_shard:
                return
        super().run_epoch()

    def train(self):
        """Train stage.
        """
        if self._cached_batch_writer is not None:
            self._cached_batch_writer.start_epoch(self.epoch)
        if self._cached_batch_loader is None:
            dataset = get_dataset_from_loader(self.dataloaders['train'])
            assert isinstance(dataset, AudioDataset)
            dataset.current_epoch = self.epoch
        else:
            self._cached_batch_loader.start_epoch(self.epoch)
        return super().train()

    def evaluate_audio_generation(self) -> dict:
        """Evaluate audio generation with off-the-shelf metrics."""
        evaluate_stage_name = f'{self.current_stage}_generation'
        # instantiate evaluation metrics, if at least one metric is defined, run audio generation evaluation
        fad: tp.Optional[eval_metrics.FrechetAudioDistanceMetric] = None
        kldiv: tp.Optional[eval_metrics.KLDivergenceMetric] = None
        text_consistency: tp.Optional[eval_metrics.TextConsistencyMetric] = None
        chroma_cosine: tp.Optional[eval_metrics.ChromaCosineSimilarityMetric] = None
        should_run_eval = False
        eval_chroma_wavs: tp.Optional[torch.Tensor] = None
        if self.cfg.evaluate.metrics.fad:
            fad = builders.get_fad(self.cfg.metrics.fad).to(self.device)
            should_run_eval = True
        if self.cfg.evaluate.metrics.kld:
            kldiv = builders.get_kldiv(self.cfg.metrics.kld).to(self.device)
            should_run_eval = True
        if self.cfg.evaluate.metrics.text_consistency:
            text_consistency = builders.get_text_consistency(self.cfg.metrics.text_consistency).to(self.device)
            should_run_eval = True
        if self.cfg.evaluate.metrics.chroma_cosine:
            chroma_cosine = builders.get_chroma_cosine_similarity(self.cfg.metrics.chroma_cosine).to(self.device)
            # if we have predefind wavs for chroma we should purge them for computing the cosine metric
            has_predefined_eval_chromas = 'self_wav' in self.model.condition_provider.conditioners and \
                                          self.model.condition_provider.conditioners['self_wav'].has_eval_wavs()
            if has_predefined_eval_chromas:
                warn_once(self.logger, "Attempting to run cosine eval for config with pre-defined eval chromas! "
                                       'Resetting eval chromas to None for evaluation.')
                eval_chroma_wavs = self.model.condition_provider.conditioners.self_wav.eval_wavs  # type: ignore
                self.model.condition_provider.conditioners.self_wav.reset_eval_wavs(None)  # type: ignore
            should_run_eval = True

        def get_compressed_audio(audio: torch.Tensor) -> torch.Tensor:
            audio_tokens, scale = self.compression_model.encode(audio.to(self.device))
            compressed_audio = self.compression_model.decode(audio_tokens, scale)
            return compressed_audio[..., :audio.shape[-1]]

        metrics: dict = {}
        if should_run_eval:
            loader = self.dataloaders['evaluate']
            updates = len(loader)
            lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)
            average = flashy.averager()
            dataset = get_dataset_from_loader(loader)
            assert isinstance(dataset, AudioDataset)
            self.logger.info(f"Computing evaluation metrics on {len(dataset)} samples")

            for idx, batch in enumerate(lp):
                audio, meta = batch
                assert all([self.cfg.sample_rate == m.sample_rate for m in meta])

                target_duration = audio.shape[-1] / self.cfg.sample_rate
                if self.cfg.evaluate.fixed_generation_duration:
                    target_duration = self.cfg.evaluate.fixed_generation_duration

                gen_outputs = self.run_generate_step(
                    batch, gen_duration=target_duration,
                    **self.generation_params
                )
                y_pred = gen_outputs['gen_audio'].detach()
                y_pred = y_pred[..., :audio.shape[-1]]

                normalize_kwargs = dict(self.cfg.generate.audio)
                normalize_kwargs.pop('format', None)
                y_pred = torch.stack([normalize_audio(w, **normalize_kwargs) for w in y_pred], dim=0).cpu()
                y = audio.cpu()  # should already be on CPU but just in case
                sizes = torch.tensor([m.n_frames for m in meta])  # actual sizes without padding
                sample_rates = torch.tensor([m.sample_rate for m in meta])  # sample rates for audio samples
                audio_stems = [Path(m.meta.path).stem + f"_{m.seek_time}" for m in meta]

                if fad is not None:
                    if self.cfg.metrics.fad.use_gt:
                        y_pred = get_compressed_audio(y).cpu()
                    fad.update(y_pred, y, sizes, sample_rates, audio_stems)
                if kldiv is not None:
                    if self.cfg.metrics.kld.use_gt:
                        y_pred = get_compressed_audio(y).cpu()
                    kldiv.update(y_pred, y, sizes, sample_rates)
                if text_consistency is not None:
                    texts = [m.description for m in meta]
                    if self.cfg.metrics.text_consistency.use_gt:
                        y_pred = y
                    text_consistency.update(y_pred, texts, sizes, sample_rates)
                if chroma_cosine is not None:
                    if self.cfg.metrics.chroma_cosine.use_gt:
                        y_pred = get_compressed_audio(y).cpu()
                    chroma_cosine.update(y_pred, y, sizes, sample_rates)
                    # restore chroma conditioner's eval chroma wavs
                    if eval_chroma_wavs is not None:
                        self.model.condition_provider.conditioners['self_wav'].reset_eval_wavs(eval_chroma_wavs)

            flashy.distrib.barrier()
            if fad is not None:
                metrics['fad'] = fad.compute()
            if kldiv is not None:
                kld_metrics = kldiv.compute()
                metrics.update(kld_metrics)
            if text_consistency is not None:
                metrics['text_consistency'] = text_consistency.compute()
            if chroma_cosine is not None:
                metrics['chroma_cosine'] = chroma_cosine.compute()
            metrics = average(metrics)
            metrics = flashy.distrib.average_metrics(metrics, len(loader))

        return metrics

    def evaluate(self) -> dict:
        """Evaluate stage."""
        self.model.eval()
        with torch.no_grad():
            metrics: dict = {}
            if self.cfg.evaluate.metrics.base:
                metrics.update(self.common_train_valid('evaluate'))
            gen_metrics = self.evaluate_audio_generation()
            return {**metrics, **gen_metrics}
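Note: the _compute_cross_entropy helper in the solver above computes one masked cross-entropy per codebook of the token stream and averages them. A minimal sketch of the same idea on random data (shapes, the padding pattern and all values below are illustrative only):

    import torch
    import torch.nn.functional as F

    B, K, T, card = 2, 4, 10, 2048            # batch, codebooks, timesteps, vocabulary size
    logits = torch.randn(B, K, T, card)
    targets = torch.randint(card, (B, K, T))
    mask = torch.ones(B, K, T, dtype=torch.bool)
    mask[:, :, -2:] = False                    # pretend the last two steps are padding

    ce = torch.zeros([])
    for k in range(K):                         # one cross-entropy per codebook, valid steps only
        valid = mask[:, k].reshape(-1)
        ce += F.cross_entropy(logits[:, k].reshape(-1, card)[valid],
                              targets[:, k].reshape(-1)[valid])
    ce = ce / K                                # codebook-averaged cross-entropy, as in the solver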
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/dataset_utils.py
DELETED
@@ -1,311 +0,0 @@
import torch.optim
import torch.utils.data
import numpy as np
import torch
import torch.optim
import torch.utils.data
import torch.distributions
from text_to_speech.utils.audio.pitch.utils import norm_interp_f0, denorm_f0
from text_to_speech.utils.commons.dataset_utils import BaseDataset, collate_1d_or_2d
from text_to_speech.utils.commons.indexed_datasets import IndexedDataset
from text_to_speech.utils.commons.hparams import hparams
import random


class BaseSpeechDataset(BaseDataset):
    def __init__(self, prefix, shuffle=False, items=None, data_dir=None):
        super().__init__(shuffle)
        from text_to_speech.utils.commons.hparams import hparams
        self.data_dir = hparams['binary_data_dir'] if data_dir is None else data_dir
        self.prefix = prefix
        self.hparams = hparams
        self.indexed_ds = None
        if items is not None:
            self.indexed_ds = items
            self.sizes = [1] * len(items)
            self.avail_idxs = list(range(len(self.sizes)))
        else:
            self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
            if prefix == 'test' and len(hparams['test_ids']) > 0:
                self.avail_idxs = hparams['test_ids']
            else:
                self.avail_idxs = list(range(len(self.sizes)))
            if prefix == 'train' and hparams['min_frames'] > 0:
                self.avail_idxs = [x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']]
            try:
                self.sizes = [self.sizes[i] for i in self.avail_idxs]
            except:
                tmp_sizes = []
                for i in self.avail_idxs:
                    try:
                        tmp_sizes.append(self.sizes[i])
                    except:
                        continue
                self.sizes = tmp_sizes

    def _get_item(self, index):
        if hasattr(self, 'avail_idxs') and self.avail_idxs is not None:
            index = self.avail_idxs[index]
        if self.indexed_ds is None:
            self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
        return self.indexed_ds[index]

    def __getitem__(self, index):
        hparams = self.hparams
        item = self._get_item(index)
        assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index])
        max_frames = hparams['max_frames']
        spec = torch.Tensor(item['mel'])[:max_frames]
        max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple']
        spec = spec[:max_frames]
        ph_token = torch.LongTensor(item['ph_token'][:hparams['max_input_tokens']])
        sample = {
            "id": index,
            "item_name": item['item_name'],
            "text": item['txt'],
            "txt_token": ph_token,
            "mel": spec,
            "mel_nonpadding": spec.abs().sum(-1) > 0,
        }
        if hparams['use_spk_embed']:
            sample["spk_embed"] = torch.Tensor(item['spk_embed'])
        if hparams['use_spk_id']:
            sample["spk_id"] = int(item['spk_id'])
        return sample

    def collater(self, samples):
        if len(samples) == 0:
            return {}
        hparams = self.hparams
        ids = [s['id'] for s in samples]
        item_names = [s['item_name'] for s in samples]
        text = [s['text'] for s in samples]
        txt_tokens = collate_1d_or_2d([s['txt_token'] for s in samples], 0)
        mels = collate_1d_or_2d([s['mel'] for s in samples], 0.0)
        txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples])
        mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])

        batch = {
            'id': ids,
            'item_name': item_names,
            'nsamples': len(samples),
            'text': text,
            'txt_tokens': txt_tokens,
            'txt_lengths': txt_lengths,
            'mels': mels,
            'mel_lengths': mel_lengths,
        }

        if hparams['use_spk_embed']:
            spk_embed = torch.stack([s['spk_embed'] for s in samples])
            batch['spk_embed'] = spk_embed
        if hparams['use_spk_id']:
            spk_ids = torch.LongTensor([s['spk_id'] for s in samples])
            batch['spk_ids'] = spk_ids
        return batch


class FastSpeechDataset(BaseSpeechDataset):
    def __getitem__(self, index):
        sample = super(FastSpeechDataset, self).__getitem__(index)
        item = self._get_item(index)
        hparams = self.hparams
        mel = sample['mel']
        T = mel.shape[0]
        ph_token = sample['txt_token']
        sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T]
        if hparams['use_pitch_embed']:
            assert 'f0' in item
            pitch = torch.LongTensor(item.get(hparams.get('pitch_key', 'pitch')))[:T]
            f0, uv = norm_interp_f0(item["f0"][:T])
            uv = torch.FloatTensor(uv)
            f0 = torch.FloatTensor(f0)
            if hparams['pitch_type'] == 'ph':
                if "f0_ph" in item:
                    f0 = torch.FloatTensor(item['f0_ph'])
                else:
                    f0 = denorm_f0(f0, None)
                f0_phlevel_sum = torch.zeros_like(ph_token).float().scatter_add(0, mel2ph - 1, f0)
                f0_phlevel_num = torch.zeros_like(ph_token).float().scatter_add(
                    0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1)
                f0_ph = f0_phlevel_sum / f0_phlevel_num
                f0, uv = norm_interp_f0(f0_ph)
        else:
            f0, uv, pitch = None, None, None
        sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch
        return sample

    def collater(self, samples):
        if len(samples) == 0:
            return {}
        batch = super(FastSpeechDataset, self).collater(samples)
        hparams = self.hparams
        if hparams['use_pitch_embed']:
            f0 = collate_1d_or_2d([s['f0'] for s in samples], 0.0)
            pitch = collate_1d_or_2d([s['pitch'] for s in samples])
            uv = collate_1d_or_2d([s['uv'] for s in samples])
        else:
            f0, uv, pitch = None, None, None
        mel2ph = collate_1d_or_2d([s['mel2ph'] for s in samples], 0.0)
        batch.update({
            'mel2ph': mel2ph,
            'pitch': pitch,
            'f0': f0,
            'uv': uv,
        })
        return batch


class FastSpeechWordDataset(FastSpeechDataset):
    def __init__(self, prefix, shuffle=False, items=None, data_dir=None):
        super().__init__(prefix, shuffle, items, data_dir)
        # BERT contrastive loss & mlm loss
        # from transformers import AutoTokenizer
        # if hparams['ds_name'] in ['ljspeech', 'libritts']:
        #     self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        # elif hparams['ds_name'] == 'biaobei':
        #     self.tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese')
        # else:
        #     raise NotImplementedError()
        # self.mlm_probability = 0.15
        # if hparams.get("cl_ds_name") is None:
        #     pass
        # elif hparams['cl_ds_name'] == "wiki":
        #     from experimental_yerfor.simcse_datasets import WikiDataset
        #     self.cl_dataset = WikiDataset(prefix=prefix)
        #     shuffle = True if prefix == 'train' else False
        #     endless = True
        #     num_workers = None if prefix == 'train' else 0
        #     self.cl_dataloader = self.cl_dataset.build_dataloader(shuffle=shuffle, max_tokens=hparams.get("cl_max_tokens", 3200),
        #                                                           max_sentences=hparams.get("cl_max_sentences", 64), endless=endless, num_workers=num_workers)
        #     self.cl_dl_iter = iter(self.cl_dataloader)
        # elif hparams['cl_ds_name'] == "nli":
        #     from experimental_yerfor.simcse_datasets import NLIDataset
        #     self.cl_dataset = NLIDataset(prefix=prefix)
        #     shuffle = True if prefix == 'train' else False
        #     endless = True
        #     num_workers = None if prefix == 'train' else 0
        #     self.cl_dataloader = self.cl_dataset.build_dataloader(shuffle=shuffle, max_tokens=hparams.get("cl_max_tokens", 4800),
        #                                                           max_sentences=hparams.get("cl_max_sentences", 128), endless=endless, num_workers=num_workers)
        #     self.cl_dl_iter = iter(self.cl_dataloader)

    def __getitem__(self, index):
        sample = super().__getitem__(index)
        item = self._get_item(index)
        max_frames = sample['mel'].shape[0]
        if 'word' in item:
            sample['words'] = item['word']
            sample["ph_words"] = item["ph_gb_word"]
            sample["word_tokens"] = torch.LongTensor(item["word_token"])
        else:
            sample['words'] = item['words']
            sample["ph_words"] = " ".join(item["ph_words"])
            sample["word_tokens"] = torch.LongTensor(item["word_tokens"])
        sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames]
        sample["ph2word"] = torch.LongTensor(item['ph2word'][:self.hparams['max_input_tokens']])

        # SyntaSpeech related features
        # sample['dgl_graph'] = item['dgl_graph']
        # sample['edge_types'] = item['edge_types']

        # BERT related features
        # sample['bert_token'] = item['bert_token']
        # sample['bert_input_ids'] = torch.LongTensor(item['bert_input_ids'])
        # sample['bert_token2word'] = torch.LongTensor(item['bert_token2word'])
        # sample['bert_attention_mask'] = torch.LongTensor(item['bert_attention_mask'])
        # sample['bert_token_type_ids'] = torch.LongTensor(item['bert_token_type_ids'])

        return sample

    def collater(self, samples):
        samples = [s for s in samples if s is not None]
        batch = super().collater(samples)
        ph_words = [s['ph_words'] for s in samples]
        batch['ph_words'] = ph_words
        word_tokens = collate_1d_or_2d([s['word_tokens'] for s in samples], 0)
        batch['word_tokens'] = word_tokens
        mel2word = collate_1d_or_2d([s['mel2word'] for s in samples], 0)
        batch['mel2word'] = mel2word
        ph2word = collate_1d_or_2d([s['ph2word'] for s in samples], 0)
        batch['ph2word'] = ph2word
        batch['words'] = [s['words'] for s in samples]
        batch['word_lengths'] = torch.LongTensor([len(s['word_tokens']) for s in samples])
        if self.hparams['use_word_input']:  # always False
            batch['txt_tokens'] = batch['word_tokens']
            batch['txt_lengths'] = torch.LongTensor([s['word_tokens'].numel() for s in samples])
            batch['mel2ph'] = batch['mel2word']

        # SyntaSpeech
        # graph_lst, etypes_lst = [], []  # new features for Graph-based SDP
        # for s in samples:
        #     graph_lst.append(s['dgl_graph'])
        #     etypes_lst.append(s['edge_types'])
        # batch.update({
        #     'graph_lst': graph_lst,
        #     'etypes_lst': etypes_lst,
        # })

        # BERT
        # batch['bert_feats'] = {}
        # batch['bert_feats']['bert_tokens'] = [s['bert_token'] for s in samples]
        # bert_input_ids = collate_1d_or_2d([s['bert_input_ids'] for s in samples], 0)
        # batch['bert_feats']['bert_input_ids'] = bert_input_ids
        # bert_token2word = collate_1d_or_2d([s['bert_token2word'] for s in samples], 0)
        # batch['bert_feats']['bert_token2word'] = bert_token2word
        # bert_attention_mask = collate_1d_or_2d([s['bert_attention_mask'] for s in samples], 0)
        # batch['bert_feats']['bert_attention_mask'] = bert_attention_mask
        # bert_token_type_ids = collate_1d_or_2d([s['bert_token_type_ids'] for s in samples], 0)
        # batch['bert_feats']['bert_token_type_ids'] = bert_token_type_ids

        # BERT contrastive loss & mlm loss & electra loss
        # if hparams.get("cl_ds_name") is None:
        #     batch['cl_feats'] = {}
        #     batch['cl_feats']['cl_input_ids'] = batch['bert_feats']['bert_input_ids'].unsqueeze(1).repeat([1,2,1])
        #     batch['cl_feats']['cl_token2word'] = batch['bert_feats']['bert_token2word'].unsqueeze(1).repeat([1,2,1])
        #     batch['cl_feats']['cl_attention_mask'] = batch['bert_feats']['bert_attention_mask'].unsqueeze(1).repeat([1,2,1])
        #     batch['cl_feats']['cl_token_type_ids'] = batch['bert_feats']['bert_token_type_ids'].unsqueeze(1).repeat([1,2,1])
        #     bs, _, t = batch['cl_feats']['cl_input_ids'].shape
        #     mlm_input_ids, mlm_labels = self.mask_tokens(batch['bert_feats']['bert_input_ids'].reshape([bs, t]))
        #     batch['cl_feats']["mlm_input_ids"] = mlm_input_ids.reshape([bs, t])
        #     batch['cl_feats']["mlm_labels"] = mlm_labels.reshape([bs, t])
        #     batch['cl_feats']["mlm_attention_mask"] = batch['bert_feats']['bert_attention_mask']
        # elif hparams['cl_ds_name'] in ["wiki", "nli"]:
        #     try:
        #         cl_feats = self.cl_dl_iter.__next__()
        #     except:
        #         self.cl_dl_iter = iter(self.cl_dataloader)
        #         cl_feats = self.cl_dl_iter.__next__()
        #     batch['cl_feats'] = cl_feats
        return batch

    # def mask_tokens(self, inputs, special_tokens_mask=None):
    #     """
    #     Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
    #     """
    #     inputs = inputs.clone()
    #     labels = inputs.clone()
    #     # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
    #     probability_matrix = torch.full(labels.shape, self.mlm_probability)
    #     if special_tokens_mask is None:
    #         special_tokens_mask = [
    #             self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
    #         ]
    #         special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
    #     else:
    #         special_tokens_mask = special_tokens_mask.bool()

    #     probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
    #     masked_indices = torch.bernoulli(probability_matrix).bool()
    #     labels[~masked_indices] = -100  # We only compute loss on masked tokens

    #     # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    #     indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    #     inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

    #     # 10% of the time, we replace masked input tokens with random word
    #     indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    #     random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
    #     inputs[indices_random] = random_words[indices_random]

    #     # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    #     return inputs, labels
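Note: the phoneme-level pitch used by FastSpeechDataset above is obtained by pooling frame-level f0 onto phonemes with scatter_add, driven by the mel2ph alignment (a 1-based phoneme index for every mel frame). A tiny worked example of that pooling (the numbers are made up):

    import torch

    mel2ph = torch.LongTensor([1, 1, 2, 2, 2, 3])                  # frame -> phoneme (1-based)
    f0 = torch.FloatTensor([100., 110., 200., 210., 220., 300.])   # frame-level f0
    num_ph = 3

    f0_sum = torch.zeros(num_ph).scatter_add(0, mel2ph - 1, f0)
    f0_cnt = torch.zeros(num_ph).scatter_add(0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1)
    f0_ph = f0_sum / f0_cnt                                         # tensor([105., 210., 300.])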
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs_adv.py
DELETED
@@ -1,260 +0,0 @@

import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np

from text_to_speech.modules.tts.syntaspeech.multi_window_disc import Discriminator
from tasks.tts.fs import FastSpeechTask
from text_to_speech.modules.tts.fs import FastSpeech

from text_to_speech.utils.audio.align import mel2token_to_dur
from text_to_speech.utils.commons.hparams import hparams
from text_to_speech.utils.nn.model_utils import num_params
from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars
from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0
from text_to_speech.utils.audio.pitch_extractors import get_pitch
from text_to_speech.utils.metrics.dtw import dtw as DTW

from text_to_speech.utils.plot.plot import spec_to_figure
from text_to_speech.utils.text.text_encoder import build_token_encoder


class FastSpeechAdvTask(FastSpeechTask):
    def __init__(self):
        super().__init__()
        self.build_disc_model()
        self.mse_loss_fn = torch.nn.MSELoss()

    def build_tts_model(self):
        dict_size = len(self.token_encoder)
        self.model = FastSpeech(dict_size, hparams)
        self.gen_params = [p for p in self.model.parameters() if p.requires_grad]
        self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)]
        self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)]
        self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)]
        self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad]
        self.use_bert = True if len(self.bert_params) > 0 else False

    def build_disc_model(self):
        disc_win_num = hparams['disc_win_num']
        h = hparams['mel_disc_hidden_size']
        self.mel_disc = Discriminator(
            time_lengths=[32, 64, 128][:disc_win_num],
            freq_length=80, hidden_size=h, kernel=(3, 3)
        )
        self.disc_params = list(self.mel_disc.parameters())

    def _training_step(self, sample, batch_idx, optimizer_idx):
        loss_output = {}
        loss_weights = {}
        disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0
        if optimizer_idx == 0:
            #######################
            #      Generator      #
            #######################
            loss_output, model_out = self.run_model(sample, infer=False)
            self.model_out_gt = self.model_out = \
                {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)}
            if disc_start:
                mel_p = model_out['mel_out']
                if hasattr(self.model, 'out2mel'):
                    mel_p = self.model.out2mel(mel_p)
                o_ = self.mel_disc(mel_p)
                p_, pc_ = o_['y'], o_['y_c']
                if p_ is not None:
                    loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size()))
                    loss_weights['a'] = hparams['lambda_mel_adv']
                if pc_ is not None:
                    loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size()))
                    loss_weights['ac'] = hparams['lambda_mel_adv']
        else:
            #######################
            #    Discriminator    #
            #######################
            if disc_start and self.global_step % hparams['disc_interval'] == 0:
                model_out = self.model_out_gt
                mel_g = sample['mels']
                mel_p = model_out['mel_out']
                o = self.mel_disc(mel_g)
                p, pc = o['y'], o['y_c']
                o_ = self.mel_disc(mel_p)
                p_, pc_ = o_['y'], o_['y_c']
                if p_ is not None:
                    loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size()))
                    loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size()))
                if pc_ is not None:
                    loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size()))
                    loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size()))
            else:
                return None
        total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad])
        loss_output['batch_size'] = sample['txt_tokens'].size()[0]
        return total_loss, loss_output

    def validation_step(self, sample, batch_idx):
        outputs = {}
        outputs['losses'] = {}
        outputs['losses'], model_out = self.run_model(sample)
        outputs['total_loss'] = sum(outputs['losses'].values())
        outputs['nsamples'] = sample['nsamples']
        outputs = tensors_to_scalars(outputs)
        if self.global_step % hparams['valid_infer_interval'] == 0 \
                and batch_idx < hparams['num_valid_plots']:
            valid_results = self.save_valid_result(sample, batch_idx, model_out)
            wav_gt = valid_results['wav_gt']
            mel_gt = valid_results['mel_gt']
            wav_pred = valid_results['wav_pred']
            mel_pred = valid_results['mel_pred']
            f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
            f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
            manhattan_distance = lambda x, y: np.abs(x - y)
            dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance)
            outputs['losses']['f0_dtw'] = dist / len(f0_gt_)
        return outputs

    def save_valid_result(self, sample, batch_idx, model_out):
        sr = hparams['audio_sample_rate']
        f0_gt = None
        mel_out = model_out['mel_out']
        if sample.get('f0') is not None:
            f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
        self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt)

        # if self.global_step > 0:
        wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt)
        self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr)
        # with gt duration
        model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
        dur_info = self.get_plot_dur_info(sample, model_out)
        del dur_info['dur_pred']
        wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
        self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
        self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}',
                      dur_info=dur_info, f0s=f0_gt)

        # with pred duration
        if not hparams['use_gt_dur']:
            model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
            dur_info = self.get_plot_dur_info(sample, model_out)
            self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}',
                          dur_info=dur_info, f0s=f0_gt)
            wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
            self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
        # gt wav
        mel_gt = sample['mels'][0].cpu()
        wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
        if self.global_step <= hparams['valid_infer_interval']:
            self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)

        # add attn plot
        # if self.global_step > 0 and hparams['dur_level'] == 'word':
        #     self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)

        return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()}

    def get_plot_dur_info(self, sample, model_out):
        # if hparams['dur_level'] == 'word':
        #     T_txt = sample['word_lengths'].max()
        #     dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
        #     dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
        #     txt = sample['ph_words'][0].split(" ")
        # else:
        T_txt = sample['txt_tokens'].shape[1]
        dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
        dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
        txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
        txt = txt.split(" ")
        return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}

    def build_optimizer(self, model):
        optimizer_gen = torch.optim.AdamW(
            self.gen_params,
            lr=hparams['lr'],
            betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
            weight_decay=hparams['weight_decay'])

        optimizer_disc = torch.optim.AdamW(
            self.disc_params,
            lr=hparams['disc_lr'],
            betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
            **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None

        return [optimizer_gen, optimizer_disc]

    def build_scheduler(self, optimizer):
        return [
            FastSpeechTask.build_scheduler(self, optimizer[0]),  # Generator Scheduler
            torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1],  # Discriminator Scheduler
                                            **hparams["discriminator_scheduler_params"]),
        ]

    def on_before_optimization(self, opt_idx):
        if opt_idx == 0:
            nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm'])
            if self.use_bert:
                nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm'])
                nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm'])
            else:
                nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm'])
        else:
            nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"])

    def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx):
        if self.scheduler is not None:
            self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches'])
            self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches'])

    ############
    # infer
    ############
    def test_start(self):
        super().test_start()
        if hparams.get('save_attn', False):
            os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
        self.model.store_inverse_all()

    def test_step(self, sample, batch_idx):
        assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
        outputs = self.run_model(sample, infer=True)
        text = sample['text'][0]
        item_name = sample['item_name'][0]
        tokens = sample['txt_tokens'][0].cpu().numpy()
        mel_gt = sample['mels'][0].cpu().numpy()
        mel_pred = outputs['mel_out'][0].cpu().numpy()
        mel2ph = sample['mel2ph'][0].cpu().numpy()
        mel2ph_pred = None
        str_phs = self.token_encoder.decode(tokens, strip_padding=True)
        base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
        if text is not None:
            base_fn += text.replace(":", "$3A")[:80]
        base_fn = base_fn.replace(' ', '_')
        gen_dir = self.gen_dir
        wav_pred = self.vocoder.spec2wav(mel_pred)
        self.saving_result_pool.add_job(self.save_result, args=[
            wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
        if hparams['save_gt']:
            wav_gt = self.vocoder.spec2wav(mel_gt)
            self.saving_result_pool.add_job(self.save_result, args=[
                wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
        if hparams.get('save_attn', False):
            attn = outputs['attn'][0].cpu().numpy()
            np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
        # save f0 for pitch dtw
        f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
        f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
        np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_)
        np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_)

        print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
        return {
            'item_name': item_name,
            'text': text,
            'ph_tokens': self.token_encoder.decode(tokens.tolist()),
            'wav_fn_pred': base_fn % 'P',
            'wav_fn_gt': base_fn % 'G',
        }
spaces/Abhaykoul/HelpingAI-T3/index.html
DELETED
@@ -1,2 +0,0 @@

<script type='module' src='https://interfaces.zapier.com/assets/web-components/zapier-interfaces/zapier-interfaces.esm.js'></script>
<zapier-interfaces-page-embed page-id='clne7sanm07250pl8i23i1jed' no-background='false' style='max-width: 1500px; height: 600px;' name='HelpingAI-Chat'></zapier-interfaces-page-embed>
spaces/Adapter/T2I-Adapter/ldm/models/diffusion/dpm_solver/dpm_solver.py
DELETED
@@ -1,1217 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
import math
|
4 |
-
from tqdm import tqdm
|
5 |
-
|
6 |
-
|
7 |
-
class NoiseScheduleVP:
|
8 |
-
def __init__(
|
9 |
-
self,
|
10 |
-
schedule='discrete',
|
11 |
-
betas=None,
|
12 |
-
alphas_cumprod=None,
|
13 |
-
continuous_beta_0=0.1,
|
14 |
-
continuous_beta_1=20.,
|
15 |
-
):
|
16 |
-
"""Create a wrapper class for the forward SDE (VP type).
|
17 |
-
|
18 |
-
***
|
19 |
-
Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.
|
20 |
-
We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
|
21 |
-
***
|
22 |
-
|
23 |
-
The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
|
24 |
-
We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
|
25 |
-
Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
|
26 |
-
|
27 |
-
log_alpha_t = self.marginal_log_mean_coeff(t)
|
28 |
-
sigma_t = self.marginal_std(t)
|
29 |
-
lambda_t = self.marginal_lambda(t)
|
30 |
-
|
31 |
-
Moreover, as lambda(t) is an invertible function, we also support its inverse function:
|
32 |
-
|
33 |
-
t = self.inverse_lambda(lambda_t)
|
34 |
-
|
35 |
-
===============================================================
|
36 |
-
|
37 |
-
We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
|
38 |
-
|
39 |
-
1. For discrete-time DPMs:
|
40 |
-
|
41 |
-
For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
|
42 |
-
t_i = (i + 1) / N
|
43 |
-
e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
|
44 |
-
We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
|
45 |
-
|
46 |
-
Args:
|
47 |
-
betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
|
48 |
-
alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
|
49 |
-
|
50 |
-
Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
|
51 |
-
|
52 |
-
**Important**: Please pay special attention for the args for `alphas_cumprod`:
|
53 |
-
The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
|
54 |
-
q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
|
55 |
-
Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
|
56 |
-
alpha_{t_n} = \sqrt{\hat{alpha_n}},
|
57 |
-
and
|
58 |
-
log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
|
59 |
-
|
60 |
-
|
61 |
-
2. For continuous-time DPMs:
|
62 |
-
|
63 |
-
We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
|
64 |
-
schedule are the default settings in DDPM and improved-DDPM:
|
65 |
-
|
66 |
-
Args:
|
67 |
-
beta_min: A `float` number. The smallest beta for the linear schedule.
|
68 |
-
beta_max: A `float` number. The largest beta for the linear schedule.
|
69 |
-
cosine_s: A `float` number. The hyperparameter in the cosine schedule.
|
70 |
-
cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
|
71 |
-
T: A `float` number. The ending time of the forward process.
|
72 |
-
|
73 |
-
===============================================================
|
74 |
-
|
75 |
-
Args:
|
76 |
-
schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
|
77 |
-
'linear' or 'cosine' for continuous-time DPMs.
|
78 |
-
Returns:
|
79 |
-
A wrapper object of the forward SDE (VP type).
|
80 |
-
|
81 |
-
===============================================================
|
82 |
-
|
83 |
-
Example:
|
84 |
-
|
85 |
-
# For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
|
86 |
-
>>> ns = NoiseScheduleVP('discrete', betas=betas)
|
87 |
-
|
88 |
-
# For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
|
89 |
-
>>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
|
90 |
-
|
91 |
-
# For continuous-time DPMs (VPSDE), linear schedule:
|
92 |
-
>>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
|
93 |
-
|
94 |
-
"""
|
95 |
-
|
96 |
-
if schedule not in ['discrete', 'linear', 'cosine']:
|
97 |
-
raise ValueError(
|
98 |
-
"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
|
99 |
-
schedule))
|
100 |
-
|
101 |
-
self.schedule = schedule
|
102 |
-
if schedule == 'discrete':
|
103 |
-
if betas is not None:
|
104 |
-
log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
|
105 |
-
else:
|
106 |
-
assert alphas_cumprod is not None
|
107 |
-
log_alphas = 0.5 * torch.log(alphas_cumprod)
|
108 |
-
self.total_N = len(log_alphas)
|
109 |
-
self.T = 1.
|
110 |
-
self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
|
111 |
-
self.log_alpha_array = log_alphas.reshape((1, -1,))
|
112 |
-
else:
|
113 |
-
self.total_N = 1000
|
114 |
-
self.beta_0 = continuous_beta_0
|
115 |
-
self.beta_1 = continuous_beta_1
|
116 |
-
self.cosine_s = 0.008
|
117 |
-
self.cosine_beta_max = 999.
|
118 |
-
self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
|
119 |
-
1. + self.cosine_s) / math.pi - self.cosine_s
|
120 |
-
self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
|
121 |
-
self.schedule = schedule
|
122 |
-
if schedule == 'cosine':
|
123 |
-
# For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
|
124 |
-
# Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
|
125 |
-
self.T = 0.9946
|
126 |
-
else:
|
127 |
-
self.T = 1.
|
128 |
-
|
129 |
-
def marginal_log_mean_coeff(self, t):
|
130 |
-
"""
|
131 |
-
Compute log(alpha_t) of a given continuous-time label t in [0, T].
|
132 |
-
"""
|
133 |
-
if self.schedule == 'discrete':
|
134 |
-
return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
|
135 |
-
self.log_alpha_array.to(t.device)).reshape((-1))
|
136 |
-
elif self.schedule == 'linear':
|
137 |
-
return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
|
138 |
-
elif self.schedule == 'cosine':
|
139 |
-
log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
|
140 |
-
log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
|
141 |
-
return log_alpha_t
|
142 |
-
|
143 |
-
def marginal_alpha(self, t):
|
144 |
-
"""
|
145 |
-
Compute alpha_t of a given continuous-time label t in [0, T].
|
146 |
-
"""
|
147 |
-
return torch.exp(self.marginal_log_mean_coeff(t))
|
148 |
-
|
149 |
-
def marginal_std(self, t):
|
150 |
-
"""
|
151 |
-
Compute sigma_t of a given continuous-time label t in [0, T].
|
152 |
-
"""
|
153 |
-
return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
|
154 |
-
|
155 |
-
def marginal_lambda(self, t):
|
156 |
-
"""
|
157 |
-
Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
|
158 |
-
"""
|
159 |
-
log_mean_coeff = self.marginal_log_mean_coeff(t)
|
160 |
-
log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
|
161 |
-
return log_mean_coeff - log_std
|
162 |
-
|
163 |
-
def inverse_lambda(self, lamb):
|
164 |
-
"""
|
165 |
-
Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
|
166 |
-
"""
|
167 |
-
if self.schedule == 'linear':
|
168 |
-
tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
|
169 |
-
Delta = self.beta_0 ** 2 + tmp
|
170 |
-
return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
|
171 |
-
elif self.schedule == 'discrete':
|
172 |
-
log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
|
173 |
-
t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
|
174 |
-
torch.flip(self.t_array.to(lamb.device), [1]))
|
175 |
-
return t.reshape((-1,))
|
176 |
-
else:
|
177 |
-
log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
|
178 |
-
t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
|
179 |
-
1. + self.cosine_s) / math.pi - self.cosine_s
|
180 |
-
t = t_fn(log_alpha)
|
181 |
-
return t
|
182 |
-
|
183 |
-
|
184 |
-
def model_wrapper(
|
185 |
-
model,
|
186 |
-
noise_schedule,
|
187 |
-
model_type="noise",
|
188 |
-
model_kwargs={},
|
189 |
-
guidance_type="uncond",
|
190 |
-
condition=None,
|
191 |
-
unconditional_condition=None,
|
192 |
-
guidance_scale=1.,
|
193 |
-
classifier_fn=None,
|
194 |
-
classifier_kwargs={},
|
195 |
-
):
|
196 |
-
"""Create a wrapper function for the noise prediction model.
|
197 |
-
|
198 |
-
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
|
199 |
-
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
|
200 |
-
|
201 |
-
We support four types of the diffusion model by setting `model_type`:
|
202 |
-
|
203 |
-
1. "noise": noise prediction model. (Trained by predicting noise).
|
204 |
-
|
205 |
-
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
|
206 |
-
|
207 |
-
3. "v": velocity prediction model. (Trained by predicting the velocity).
|
208 |
-
The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
|
209 |
-
|
210 |
-
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
|
211 |
-
arXiv preprint arXiv:2202.00512 (2022).
|
212 |
-
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
|
213 |
-
arXiv preprint arXiv:2210.02303 (2022).
|
214 |
-
|
215 |
-
4. "score": marginal score function. (Trained by denoising score matching).
|
216 |
-
Note that the score function and the noise prediction model follows a simple relationship:
|
217 |
-
```
|
218 |
-
noise(x_t, t) = -sigma_t * score(x_t, t)
|
219 |
-
```
|
220 |
-
|
221 |
-
We support three types of guided sampling by DPMs by setting `guidance_type`:
|
222 |
-
1. "uncond": unconditional sampling by DPMs.
|
223 |
-
The input `model` has the following format:
|
224 |
-
``
|
225 |
-
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
|
226 |
-
``
|
227 |
-
|
228 |
-
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
|
229 |
-
The input `model` has the following format:
|
230 |
-
``
|
231 |
-
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
|
232 |
-
``
|
233 |
-
|
234 |
-
The input `classifier_fn` has the following format:
|
235 |
-
``
|
236 |
-
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
|
237 |
-
``
|
238 |
-
|
239 |
-
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
|
240 |
-
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
|
241 |
-
|
242 |
-
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
|
243 |
-
The input `model` has the following format:
|
244 |
-
``
|
245 |
-
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
|
246 |
-
``
|
247 |
-
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
|
248 |
-
|
249 |
-
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
|
250 |
-
arXiv preprint arXiv:2207.12598 (2022).
|
251 |
-
|
252 |
-
|
253 |
-
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
|
254 |
-
or continuous-time labels (i.e. epsilon to T).
|
255 |
-
|
256 |
-
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
|
257 |
-
``
|
258 |
-
def model_fn(x, t_continuous) -> noise:
|
259 |
-
t_input = get_model_input_time(t_continuous)
|
260 |
-
return noise_pred(model, x, t_input, **model_kwargs)
|
261 |
-
``
|
262 |
-
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
|
263 |
-
|
264 |
-
===============================================================
|
265 |
-
|
266 |
-
Args:
|
267 |
-
model: A diffusion model with the corresponding format described above.
|
268 |
-
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
|
269 |
-
model_type: A `str`. The parameterization type of the diffusion model.
|
270 |
-
"noise" or "x_start" or "v" or "score".
|
271 |
-
model_kwargs: A `dict`. A dict for the other inputs of the model function.
|
272 |
-
guidance_type: A `str`. The type of the guidance for sampling.
|
273 |
-
"uncond" or "classifier" or "classifier-free".
|
274 |
-
condition: A pytorch tensor. The condition for the guided sampling.
|
275 |
-
Only used for "classifier" or "classifier-free" guidance type.
|
276 |
-
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
|
277 |
-
Only used for "classifier-free" guidance type.
|
278 |
-
guidance_scale: A `float`. The scale for the guided sampling.
|
279 |
-
classifier_fn: A classifier function. Only used for the classifier guidance.
|
280 |
-
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
|
281 |
-
Returns:
|
282 |
-
A noise prediction model that accepts the noised data and the continuous time as the inputs.
|
283 |
-
"""
|
284 |
-
|
285 |
-
def get_model_input_time(t_continuous):
|
286 |
-
"""
|
287 |
-
Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
|
288 |
-
For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
|
289 |
-
For continuous-time DPMs, we just use `t_continuous`.
|
290 |
-
"""
|
291 |
-
if noise_schedule.schedule == 'discrete':
|
292 |
-
return (t_continuous - 1. / noise_schedule.total_N) * 1000.
|
293 |
-
else:
|
294 |
-
return t_continuous
|
295 |
-
|
296 |
-
def noise_pred_fn(x, t_continuous, cond=None):
|
297 |
-
if t_continuous.reshape((-1,)).shape[0] == 1:
|
298 |
-
t_continuous = t_continuous.expand((x.shape[0]))
|
299 |
-
t_input = get_model_input_time(t_continuous)
|
300 |
-
if cond is None:
|
301 |
-
output = model(x, t_input, **model_kwargs)
|
302 |
-
else:
|
303 |
-
output = model(x, t_input, cond, **model_kwargs)
|
304 |
-
if model_type == "noise":
|
305 |
-
return output
|
306 |
-
elif model_type == "x_start":
|
307 |
-
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
|
308 |
-
dims = x.dim()
|
309 |
-
return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
|
310 |
-
elif model_type == "v":
|
311 |
-
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
|
312 |
-
dims = x.dim()
|
313 |
-
return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
|
314 |
-
elif model_type == "score":
|
315 |
-
sigma_t = noise_schedule.marginal_std(t_continuous)
|
316 |
-
dims = x.dim()
|
317 |
-
return -expand_dims(sigma_t, dims) * output
|
318 |
-
|
319 |
-
def cond_grad_fn(x, t_input):
|
320 |
-
"""
|
321 |
-
Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
|
322 |
-
"""
|
323 |
-
with torch.enable_grad():
|
324 |
-
x_in = x.detach().requires_grad_(True)
|
325 |
-
log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
|
326 |
-
return torch.autograd.grad(log_prob.sum(), x_in)[0]
|
327 |
-
|
328 |
-
def model_fn(x, t_continuous):
|
329 |
-
"""
|
330 |
-
The noise predicition model function that is used for DPM-Solver.
|
331 |
-
"""
|
332 |
-
if t_continuous.reshape((-1,)).shape[0] == 1:
|
333 |
-
t_continuous = t_continuous.expand((x.shape[0]))
|
334 |
-
if guidance_type == "uncond":
|
335 |
-
return noise_pred_fn(x, t_continuous)
|
336 |
-
elif guidance_type == "classifier":
|
337 |
-
assert classifier_fn is not None
|
338 |
-
t_input = get_model_input_time(t_continuous)
|
339 |
-
cond_grad = cond_grad_fn(x, t_input)
|
340 |
-
sigma_t = noise_schedule.marginal_std(t_continuous)
|
341 |
-
noise = noise_pred_fn(x, t_continuous)
|
342 |
-
return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
|
343 |
-
elif guidance_type == "classifier-free":
|
344 |
-
if guidance_scale == 1. or unconditional_condition is None:
|
345 |
-
return noise_pred_fn(x, t_continuous, cond=condition)
|
346 |
-
else:
|
347 |
-
x_in = torch.cat([x] * 2)
|
348 |
-
t_in = torch.cat([t_continuous] * 2)
|
349 |
-
c_in = torch.cat([unconditional_condition, condition])
|
350 |
-
noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
|
351 |
-
return noise_uncond + guidance_scale * (noise - noise_uncond)
|
352 |
-
|
353 |
-
assert model_type in ["noise", "x_start", "v"]
|
354 |
-
assert guidance_type in ["uncond", "classifier", "classifier-free"]
|
355 |
-
return model_fn
|
356 |
-
|
357 |
-
|
358 |
-
class DPM_Solver:
|
359 |
-
def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
|
360 |
-
"""Construct a DPM-Solver.
|
361 |
-
|
362 |
-
We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
|
363 |
-
If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
|
364 |
-
If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
|
365 |
-
In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
|
366 |
-
The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
|
367 |
-
|
368 |
-
Args:
|
369 |
-
model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
|
370 |
-
``
|
371 |
-
def model_fn(x, t_continuous):
|
372 |
-
return noise
|
373 |
-
``
|
374 |
-
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
|
375 |
-
predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
|
376 |
-
thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
|
377 |
-
max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
|
378 |
-
|
379 |
-
[1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
|
380 |
-
"""
|
381 |
-
self.model = model_fn
|
382 |
-
self.noise_schedule = noise_schedule
|
383 |
-
self.predict_x0 = predict_x0
|
384 |
-
self.thresholding = thresholding
|
385 |
-
self.max_val = max_val
|
386 |
-
|
387 |
-
def noise_prediction_fn(self, x, t):
|
388 |
-
"""
|
389 |
-
Return the noise prediction model.
|
390 |
-
"""
|
391 |
-
return self.model(x, t)
|
392 |
-
|
393 |
-
def data_prediction_fn(self, x, t):
|
394 |
-
"""
|
395 |
-
Return the data prediction model (with thresholding).
|
396 |
-
"""
|
397 |
-
noise = self.noise_prediction_fn(x, t)
|
398 |
-
dims = x.dim()
|
399 |
-
alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
|
400 |
-
x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
|
401 |
-
if self.thresholding:
|
402 |
-
p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
|
403 |
-
s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
|
404 |
-
s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
|
405 |
-
x0 = torch.clamp(x0, -s, s) / s
|
406 |
-
return x0
|
407 |
-
|
408 |
-
def model_fn(self, x, t):
|
409 |
-
"""
|
410 |
-
Convert the model to the noise prediction model or the data prediction model.
|
411 |
-
"""
|
412 |
-
if self.predict_x0:
|
413 |
-
return self.data_prediction_fn(x, t)
|
414 |
-
else:
|
415 |
-
return self.noise_prediction_fn(x, t)
|
416 |
-
|
417 |
-
def get_time_steps(self, skip_type, t_T, t_0, N, device):
|
418 |
-
"""Compute the intermediate time steps for sampling.
|
419 |
-
|
420 |
-
Args:
|
421 |
-
skip_type: A `str`. The type for the spacing of the time steps. We support three types:
|
422 |
-
- 'logSNR': uniform logSNR for the time steps.
|
423 |
-
- 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
|
424 |
-
- 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
|
425 |
-
t_T: A `float`. The starting time of the sampling (default is T).
|
426 |
-
t_0: A `float`. The ending time of the sampling (default is epsilon).
|
427 |
-
N: A `int`. The total number of the spacing of the time steps.
|
428 |
-
device: A torch device.
|
429 |
-
Returns:
|
430 |
-
A pytorch tensor of the time steps, with the shape (N + 1,).
|
431 |
-
"""
|
432 |
-
if skip_type == 'logSNR':
|
433 |
-
lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
|
434 |
-
lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
|
435 |
-
logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
|
436 |
-
return self.noise_schedule.inverse_lambda(logSNR_steps)
|
437 |
-
elif skip_type == 'time_uniform':
|
438 |
-
return torch.linspace(t_T, t_0, N + 1).to(device)
|
439 |
-
elif skip_type == 'time_quadratic':
|
440 |
-
t_order = 2
|
441 |
-
t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
|
442 |
-
return t
|
443 |
-
else:
|
444 |
-
raise ValueError(
|
445 |
-
"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
|
446 |
-
|
447 |
-
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
|
448 |
-
"""
|
449 |
-
Get the order of each step for sampling by the singlestep DPM-Solver.
|
450 |
-
|
451 |
-
We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
|
452 |
-
Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
|
453 |
-
- If order == 1:
|
454 |
-
We take `steps` of DPM-Solver-1 (i.e. DDIM).
|
455 |
-
- If order == 2:
|
456 |
-
- Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
|
457 |
-
- If steps % 2 == 0, we use K steps of DPM-Solver-2.
|
458 |
-
- If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
|
459 |
-
- If order == 3:
|
460 |
-
- Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
|
461 |
-
- If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
|
462 |
-
- If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
|
463 |
-
- If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
|
464 |
-
|
465 |
-
============================================
|
466 |
-
Args:
|
467 |
-
order: A `int`. The max order for the solver (2 or 3).
|
468 |
-
steps: A `int`. The total number of function evaluations (NFE).
|
469 |
-
skip_type: A `str`. The type for the spacing of the time steps. We support three types:
|
470 |
-
- 'logSNR': uniform logSNR for the time steps.
|
471 |
-
- 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
|
472 |
-
- 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
|
473 |
-
t_T: A `float`. The starting time of the sampling (default is T).
|
474 |
-
t_0: A `float`. The ending time of the sampling (default is epsilon).
|
475 |
-
device: A torch device.
|
476 |
-
Returns:
|
477 |
-
orders: A list of the solver order of each step.
|
478 |
-
"""
|
479 |
-
if order == 3:
|
480 |
-
K = steps // 3 + 1
|
481 |
-
if steps % 3 == 0:
|
482 |
-
orders = [3, ] * (K - 2) + [2, 1]
|
483 |
-
elif steps % 3 == 1:
|
484 |
-
orders = [3, ] * (K - 1) + [1]
|
485 |
-
else:
|
486 |
-
orders = [3, ] * (K - 1) + [2]
|
487 |
-
elif order == 2:
|
488 |
-
if steps % 2 == 0:
|
489 |
-
K = steps // 2
|
490 |
-
orders = [2, ] * K
|
491 |
-
else:
|
492 |
-
K = steps // 2 + 1
|
493 |
-
orders = [2, ] * (K - 1) + [1]
|
494 |
-
elif order == 1:
|
495 |
-
K = 1
|
496 |
-
orders = [1, ] * steps
|
497 |
-
else:
|
498 |
-
raise ValueError("'order' must be '1' or '2' or '3'.")
|
499 |
-
if skip_type == 'logSNR':
|
500 |
-
# To reproduce the results in DPM-Solver paper
|
501 |
-
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
|
502 |
-
else:
|
503 |
-
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
|
504 |
-
torch.cumsum(torch.tensor([0, ] + orders)).to(device)]
|
505 |
-
return timesteps_outer, orders
|
506 |
-
|
507 |
-
def denoise_to_zero_fn(self, x, s):
|
508 |
-
"""
|
509 |
-
Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
|
510 |
-
"""
|
511 |
-
return self.data_prediction_fn(x, s)
|
512 |
-
|
513 |
-
def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
|
514 |
-
"""
|
515 |
-
DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
|
516 |
-
|
517 |
-
Args:
|
518 |
-
x: A pytorch tensor. The initial value at time `s`.
|
519 |
-
s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
|
520 |
-
t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
|
521 |
-
model_s: A pytorch tensor. The model function evaluated at time `s`.
|
522 |
-
If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
|
523 |
-
return_intermediate: A `bool`. If true, also return the model value at time `s`.
|
524 |
-
Returns:
|
525 |
-
x_t: A pytorch tensor. The approximated solution at time `t`.
|
526 |
-
"""
|
527 |
-
ns = self.noise_schedule
|
528 |
-
dims = x.dim()
|
529 |
-
lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
|
530 |
-
h = lambda_t - lambda_s
|
531 |
-
log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
|
532 |
-
sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
|
533 |
-
alpha_t = torch.exp(log_alpha_t)
|
534 |
-
|
535 |
-
if self.predict_x0:
|
536 |
-
phi_1 = torch.expm1(-h)
|
537 |
-
if model_s is None:
|
538 |
-
model_s = self.model_fn(x, s)
|
539 |
-
x_t = (
|
540 |
-
expand_dims(sigma_t / sigma_s, dims) * x
|
541 |
-
- expand_dims(alpha_t * phi_1, dims) * model_s
|
542 |
-
)
|
543 |
-
if return_intermediate:
|
544 |
-
return x_t, {'model_s': model_s}
|
545 |
-
else:
|
546 |
-
return x_t
|
547 |
-
else:
|
548 |
-
phi_1 = torch.expm1(h)
|
549 |
-
if model_s is None:
|
550 |
-
model_s = self.model_fn(x, s)
|
551 |
-
x_t = (
|
552 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
|
553 |
-
- expand_dims(sigma_t * phi_1, dims) * model_s
|
554 |
-
)
|
555 |
-
if return_intermediate:
|
556 |
-
return x_t, {'model_s': model_s}
|
557 |
-
else:
|
558 |
-
return x_t
|
559 |
-
|
560 |
-
def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
|
561 |
-
solver_type='dpm_solver'):
|
562 |
-
"""
|
563 |
-
Singlestep solver DPM-Solver-2 from time `s` to time `t`.
|
564 |
-
|
565 |
-
Args:
|
566 |
-
x: A pytorch tensor. The initial value at time `s`.
|
567 |
-
s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
|
568 |
-
t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
|
569 |
-
r1: A `float`. The hyperparameter of the second-order solver.
|
570 |
-
model_s: A pytorch tensor. The model function evaluated at time `s`.
|
571 |
-
If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
|
572 |
-
return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
|
573 |
-
solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
|
574 |
-
The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
|
575 |
-
Returns:
|
576 |
-
x_t: A pytorch tensor. The approximated solution at time `t`.
|
577 |
-
"""
|
578 |
-
if solver_type not in ['dpm_solver', 'taylor']:
|
579 |
-
raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
|
580 |
-
if r1 is None:
|
581 |
-
r1 = 0.5
|
582 |
-
ns = self.noise_schedule
|
583 |
-
dims = x.dim()
|
584 |
-
lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
|
585 |
-
h = lambda_t - lambda_s
|
586 |
-
lambda_s1 = lambda_s + r1 * h
|
587 |
-
s1 = ns.inverse_lambda(lambda_s1)
|
588 |
-
log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
|
589 |
-
s1), ns.marginal_log_mean_coeff(t)
|
590 |
-
sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
|
591 |
-
alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
|
592 |
-
|
593 |
-
if self.predict_x0:
|
594 |
-
phi_11 = torch.expm1(-r1 * h)
|
595 |
-
phi_1 = torch.expm1(-h)
|
596 |
-
|
597 |
-
if model_s is None:
|
598 |
-
model_s = self.model_fn(x, s)
|
599 |
-
x_s1 = (
|
600 |
-
expand_dims(sigma_s1 / sigma_s, dims) * x
|
601 |
-
- expand_dims(alpha_s1 * phi_11, dims) * model_s
|
602 |
-
)
|
603 |
-
model_s1 = self.model_fn(x_s1, s1)
|
604 |
-
if solver_type == 'dpm_solver':
|
605 |
-
x_t = (
|
606 |
-
expand_dims(sigma_t / sigma_s, dims) * x
|
607 |
-
- expand_dims(alpha_t * phi_1, dims) * model_s
|
608 |
-
- (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
|
609 |
-
)
|
610 |
-
elif solver_type == 'taylor':
|
611 |
-
x_t = (
|
612 |
-
expand_dims(sigma_t / sigma_s, dims) * x
|
613 |
-
- expand_dims(alpha_t * phi_1, dims) * model_s
|
614 |
-
+ (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
|
615 |
-
model_s1 - model_s)
|
616 |
-
)
|
617 |
-
else:
|
618 |
-
phi_11 = torch.expm1(r1 * h)
|
619 |
-
phi_1 = torch.expm1(h)
|
620 |
-
|
621 |
-
if model_s is None:
|
622 |
-
model_s = self.model_fn(x, s)
|
623 |
-
x_s1 = (
|
624 |
-
expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
|
625 |
-
- expand_dims(sigma_s1 * phi_11, dims) * model_s
|
626 |
-
)
|
627 |
-
model_s1 = self.model_fn(x_s1, s1)
|
628 |
-
if solver_type == 'dpm_solver':
|
629 |
-
x_t = (
|
630 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
|
631 |
-
- expand_dims(sigma_t * phi_1, dims) * model_s
|
632 |
-
- (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
|
633 |
-
)
|
634 |
-
elif solver_type == 'taylor':
|
635 |
-
x_t = (
|
636 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
|
637 |
-
- expand_dims(sigma_t * phi_1, dims) * model_s
|
638 |
-
- (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
|
639 |
-
)
|
640 |
-
if return_intermediate:
|
641 |
-
return x_t, {'model_s': model_s, 'model_s1': model_s1}
|
642 |
-
else:
|
643 |
-
return x_t
|
644 |
-
|
645 |
-
def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
|
646 |
-
return_intermediate=False, solver_type='dpm_solver'):
|
647 |
-
"""
|
648 |
-
Singlestep solver DPM-Solver-3 from time `s` to time `t`.
|
649 |
-
|
650 |
-
Args:
|
651 |
-
x: A pytorch tensor. The initial value at time `s`.
|
652 |
-
s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
|
653 |
-
t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
|
654 |
-
r1: A `float`. The hyperparameter of the third-order solver.
|
655 |
-
r2: A `float`. The hyperparameter of the third-order solver.
|
656 |
-
model_s: A pytorch tensor. The model function evaluated at time `s`.
|
657 |
-
If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
|
658 |
-
model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
|
659 |
-
If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
|
660 |
-
return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
|
661 |
-
solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
|
662 |
-
The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
|
663 |
-
Returns:
|
664 |
-
x_t: A pytorch tensor. The approximated solution at time `t`.
|
665 |
-
"""
|
666 |
-
if solver_type not in ['dpm_solver', 'taylor']:
|
667 |
-
raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
|
668 |
-
if r1 is None:
|
669 |
-
r1 = 1. / 3.
|
670 |
-
if r2 is None:
|
671 |
-
r2 = 2. / 3.
|
672 |
-
ns = self.noise_schedule
|
673 |
-
dims = x.dim()
|
674 |
-
lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
|
675 |
-
h = lambda_t - lambda_s
|
676 |
-
lambda_s1 = lambda_s + r1 * h
|
677 |
-
lambda_s2 = lambda_s + r2 * h
|
678 |
-
s1 = ns.inverse_lambda(lambda_s1)
|
679 |
-
s2 = ns.inverse_lambda(lambda_s2)
|
680 |
-
log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
|
681 |
-
s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
|
682 |
-
sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
|
683 |
-
s2), ns.marginal_std(t)
|
684 |
-
alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
|
685 |
-
|
686 |
-
if self.predict_x0:
|
687 |
-
phi_11 = torch.expm1(-r1 * h)
|
688 |
-
phi_12 = torch.expm1(-r2 * h)
|
689 |
-
phi_1 = torch.expm1(-h)
|
690 |
-
phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
|
691 |
-
phi_2 = phi_1 / h + 1.
|
692 |
-
phi_3 = phi_2 / h - 0.5
|
693 |
-
|
694 |
-
if model_s is None:
|
695 |
-
model_s = self.model_fn(x, s)
|
696 |
-
if model_s1 is None:
|
697 |
-
x_s1 = (
|
698 |
-
expand_dims(sigma_s1 / sigma_s, dims) * x
|
699 |
-
- expand_dims(alpha_s1 * phi_11, dims) * model_s
|
700 |
-
)
|
701 |
-
model_s1 = self.model_fn(x_s1, s1)
|
702 |
-
x_s2 = (
|
703 |
-
expand_dims(sigma_s2 / sigma_s, dims) * x
|
704 |
-
- expand_dims(alpha_s2 * phi_12, dims) * model_s
|
705 |
-
+ r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
|
706 |
-
)
|
707 |
-
model_s2 = self.model_fn(x_s2, s2)
|
708 |
-
if solver_type == 'dpm_solver':
|
709 |
-
x_t = (
|
710 |
-
expand_dims(sigma_t / sigma_s, dims) * x
|
711 |
-
- expand_dims(alpha_t * phi_1, dims) * model_s
|
712 |
-
+ (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
|
713 |
-
)
|
714 |
-
elif solver_type == 'taylor':
|
715 |
-
D1_0 = (1. / r1) * (model_s1 - model_s)
|
716 |
-
D1_1 = (1. / r2) * (model_s2 - model_s)
|
717 |
-
D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
|
718 |
-
D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
|
719 |
-
x_t = (
|
720 |
-
expand_dims(sigma_t / sigma_s, dims) * x
|
721 |
-
- expand_dims(alpha_t * phi_1, dims) * model_s
|
722 |
-
+ expand_dims(alpha_t * phi_2, dims) * D1
|
723 |
-
- expand_dims(alpha_t * phi_3, dims) * D2
|
724 |
-
)
|
725 |
-
else:
|
726 |
-
phi_11 = torch.expm1(r1 * h)
|
727 |
-
phi_12 = torch.expm1(r2 * h)
|
728 |
-
phi_1 = torch.expm1(h)
|
729 |
-
phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
|
730 |
-
phi_2 = phi_1 / h - 1.
|
731 |
-
phi_3 = phi_2 / h - 0.5
|
732 |
-
|
733 |
-
if model_s is None:
|
734 |
-
model_s = self.model_fn(x, s)
|
735 |
-
if model_s1 is None:
|
736 |
-
x_s1 = (
|
737 |
-
expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
|
738 |
-
- expand_dims(sigma_s1 * phi_11, dims) * model_s
|
739 |
-
)
|
740 |
-
model_s1 = self.model_fn(x_s1, s1)
|
741 |
-
x_s2 = (
|
742 |
-
expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
|
743 |
-
- expand_dims(sigma_s2 * phi_12, dims) * model_s
|
744 |
-
- r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
|
745 |
-
)
|
746 |
-
model_s2 = self.model_fn(x_s2, s2)
|
747 |
-
if solver_type == 'dpm_solver':
|
748 |
-
x_t = (
|
749 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
|
750 |
-
- expand_dims(sigma_t * phi_1, dims) * model_s
|
751 |
-
- (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
|
752 |
-
)
|
753 |
-
elif solver_type == 'taylor':
|
754 |
-
D1_0 = (1. / r1) * (model_s1 - model_s)
|
755 |
-
D1_1 = (1. / r2) * (model_s2 - model_s)
|
756 |
-
D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
|
757 |
-
D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
|
758 |
-
x_t = (
|
759 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
|
760 |
-
- expand_dims(sigma_t * phi_1, dims) * model_s
|
761 |
-
- expand_dims(sigma_t * phi_2, dims) * D1
|
762 |
-
- expand_dims(sigma_t * phi_3, dims) * D2
|
763 |
-
)
|
764 |
-
|
765 |
-
if return_intermediate:
|
766 |
-
return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
|
767 |
-
else:
|
768 |
-
return x_t
|
769 |
-
|
770 |
-
def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
|
771 |
-
"""
|
772 |
-
Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
|
773 |
-
|
774 |
-
Args:
|
775 |
-
x: A pytorch tensor. The initial value at time `s`.
|
776 |
-
model_prev_list: A list of pytorch tensor. The previous computed model values.
|
777 |
-
t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
|
778 |
-
t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
|
779 |
-
solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
|
780 |
-
The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
|
781 |
-
Returns:
|
782 |
-
x_t: A pytorch tensor. The approximated solution at time `t`.
|
783 |
-
"""
|
784 |
-
if solver_type not in ['dpm_solver', 'taylor']:
|
785 |
-
raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
|
786 |
-
ns = self.noise_schedule
|
787 |
-
dims = x.dim()
|
788 |
-
model_prev_1, model_prev_0 = model_prev_list
|
789 |
-
t_prev_1, t_prev_0 = t_prev_list
|
790 |
-
lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
|
791 |
-
t_prev_0), ns.marginal_lambda(t)
|
792 |
-
log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
|
793 |
-
sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
|
794 |
-
alpha_t = torch.exp(log_alpha_t)
|
795 |
-
|
796 |
-
h_0 = lambda_prev_0 - lambda_prev_1
|
797 |
-
h = lambda_t - lambda_prev_0
|
798 |
-
r0 = h_0 / h
|
799 |
-
D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
|
800 |
-
if self.predict_x0:
|
801 |
-
if solver_type == 'dpm_solver':
|
802 |
-
x_t = (
|
803 |
-
expand_dims(sigma_t / sigma_prev_0, dims) * x
|
804 |
-
- expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
|
805 |
-
- 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
|
806 |
-
)
|
807 |
-
elif solver_type == 'taylor':
|
808 |
-
x_t = (
|
809 |
-
expand_dims(sigma_t / sigma_prev_0, dims) * x
|
810 |
-
- expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
|
811 |
-
+ expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
|
812 |
-
)
|
813 |
-
else:
|
814 |
-
if solver_type == 'dpm_solver':
|
815 |
-
x_t = (
|
816 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
|
817 |
-
- expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
|
818 |
-
- 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
|
819 |
-
)
|
820 |
-
elif solver_type == 'taylor':
|
821 |
-
x_t = (
|
822 |
-
expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
|
823 |
-
- expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
|
824 |
-
- expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
|
825 |
-
)
|
826 |
-
return x_t
|
827 |
-
|
    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
            t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        if self.predict_x0:
            x_t = (
                expand_dims(sigma_t / sigma_prev_0, dims) * x
                - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
            )
        else:
            x_t = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
            )
        return x_t

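The tensors `D1_0`, `D1_1`, `D1` and `D2` built above are, roughly speaking, finite-difference estimates of the first and second derivatives of the model output with respect to the logSNR, scaled by the current step `h`. Written out, with `m_0`, `m_1`, `m_2` the three cached model outputs and `r_0 = h_0 / h`, `r_1 = h_1 / h`:

```latex
% Finite differences used by the third-order multistep update above.
D_1^{(0)} = \frac{m_0 - m_1}{r_0}, \quad
D_1^{(1)} = \frac{m_1 - m_2}{r_1}, \quad
D_1 = D_1^{(0)} + \frac{r_0}{r_0 + r_1}\left(D_1^{(0)} - D_1^{(1)}\right), \quad
D_2 = \frac{D_1^{(0)} - D_1^{(1)}}{r_0 + r_1}
```
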
    def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
                                     r2=None):
        """
        Singlestep DPM-Solver with the order `order` from time `s` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
            r1: A `float`. The hyperparameter of the second-order or third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
        elif order == 2:
            return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
                                                            solver_type=solver_type, r1=r1)
        elif order == 3:
            return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
                                                           solver_type=solver_type, r1=r1, r2=r2)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
        """
        Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
        elif order == 2:
            return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        elif order == 3:
            return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
                            solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.

        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.

        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                               solver_type=solver_type,
                                                                                               **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                    return_intermediate=True,
                                                                                    solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
                                                                                              solver_type=solver_type,
                                                                                              **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            if torch.all(E <= 1.):
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        print('adaptive solver nfe', nfe)
        return x

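The loop above is a standard embedded-pair step size controller: a cheaper lower-order estimate and a more accurate higher-order estimate are compared, the step is accepted only when the weighted error E is at most 1, and the next logSNR step is rescaled by theta * E^(-1/order) (capped so the schedule does not overshoot lambda_0). A minimal self-contained sketch of the same accept/reject and step-update rule on a toy scalar ODE, purely for illustration and not part of the file above:

```python
# Illustrative sketch of the adaptive-step controller on dy/dt = -y,
# using Euler (lower order) and Heun (higher order) estimates.
# All names and tolerances here are chosen for the example only.
def adaptive_heun(y0, t0, t1, h_init=0.1, atol=1e-4, rtol=1e-4, theta=0.9):
    f = lambda t, y: -y
    t, y, h, nfe = t0, y0, h_init, 0
    while t < t1:
        h = min(h, t1 - t)
        k1 = f(t, y)
        y_lower = y + h * k1                                 # Euler (order 1)
        y_higher = y + 0.5 * h * (k1 + f(t + h, y_lower))    # Heun (order 2)
        delta = max(atol, rtol * max(abs(y_lower), abs(y)))
        E = abs(y_higher - y_lower) / delta
        if E <= 1.0:                                         # accept the step
            t, y = t + h, y_higher
        h = theta * h * max(E, 1e-10) ** (-1.0 / 2.0)        # adapt the step size
        nfe += 2
    return y, nfe
```
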
    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
               atol=0.0078, rtol=0.05,
               ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.

        =====================================================

        We support the following algorithms for both noise prediction model and data prediction model:
            - 'singlestep':
                Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
                We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
                The total number of function evaluations (NFE) == `steps`.
                Given a fixed NFE == `steps`, the sampling procedure is:
                    - If `order` == 1:
                        - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
                    - If `order` == 2:
                        - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
                        - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
                        - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                    - If `order` == 3:
                        - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                        - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                        - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
                        - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
            - 'multistep':
                Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
                We initialize the first `order` values by lower order multistep solvers.
                Given a fixed NFE == `steps`, the sampling procedure is:
                    Denote K = steps.
                    - If `order` == 1:
                        - We use K steps of DPM-Solver-1 (i.e. DDIM).
                    - If `order` == 2:
                        - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
                    - If `order` == 3:
                        - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
            - 'singlestep_fixed':
                Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
                We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
            - 'adaptive':
                Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
                We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs
                (NFE) and the sample quality.
                    - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
                    - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.

        =====================================================

        Some advices for choosing the algorithm:
            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
                e.g.
                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
                            skip_type='time_uniform', method='singlestep')
            - For **guided sampling with large guidance scale** by DPMs:
                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
                e.g.
                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
                            skip_type='time_uniform', method='multistep')

        We support three types of `skip_type`:
            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**
            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
            - 'time_quadratic': quadratic time for the time steps.

        =====================================================
        Args:
            x: A pytorch tensor. The initial value at time `t_start`
                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
            steps: A `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time of the sampling.
                If `T` is None, we use self.noise_schedule.T (default is 1.0).
            t_end: A `float`. The ending time of the sampling.
                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
                e.g. if total_N == 1000, we have `t_end` == 1e-3.
                For discrete-time DPMs:
                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
                For continuous-time DPMs:
                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
            order: A `int`. The order of DPM-Solver.
            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).

                This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and
                score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID
                for diffusion models sampling by diffusion SDEs for low-resolutional images
                (such as CIFAR-10). However, we observed that such trick does not matter for
                high-resolutional images. As it needs an additional NFE, we do not recommend
                it for high-resolutional images.
            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`. We empirically find that
                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
                (especially for steps <= 10). So we recommend to set it to be `True`.
            solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.

        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
                                             solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower order multistep DPM-Solver.
                for init_order in tqdm(range(1, order), desc="DPM init order"):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
                                                         solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
                for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
                    vec_t = timesteps[step].expand(x.shape[0])
                    if lower_order_final and steps < 15:
                        step_order = min(order, steps + 1 - step)
                    else:
                        step_order = order
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
                                                         solver_type=solver_type)
                    for i in range(order - 1):
                        t_prev_list[i] = t_prev_list[i + 1]
                        model_prev_list[i] = model_prev_list[i + 1]
                    t_prev_list[-1] = vec_t
                    # We do not need to evaluate the final model value.
                    if step < steps:
                        model_prev_list[-1] = self.model_fn(x, vec_t)
        elif method in ['singlestep', 'singlestep_fixed']:
            if method == 'singlestep':
                timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
                                                                                              skip_type=skip_type,
                                                                                              t_T=t_T, t_0=t_0,
                                                                                              device=device)
            elif method == 'singlestep_fixed':
                K = steps // order
                orders = [order, ] * K
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for i, order in enumerate(orders):
                t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
                                                      N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
                h = lambda_inner[-1] - lambda_inner[0]
                r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
                r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
                x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
        if denoise_to_zero:
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x

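Putting the pieces together, a typical call for guided sampling with the multistep solver looks roughly like the sketch below. It assumes the `NoiseScheduleVP` and `model_wrapper` helpers defined earlier in this file keep their usual signatures; `model`, `betas`, `cond`, `uncond`, the guidance scale and the tensor shape are placeholders for illustration, not values from the file.

```python
# Rough usage sketch (user model, conditions and shapes are placeholders).
import torch

noise_schedule = NoiseScheduleVP(schedule='discrete', betas=betas)  # betas from the trained DPM
model_fn = model_wrapper(
    model,                              # the user's noise-prediction network (placeholder)
    noise_schedule,
    model_type="noise",
    guidance_type="classifier-free",
    condition=cond,
    unconditional_condition=uncond,
    guidance_scale=7.5,
)
dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)

x_T = torch.randn((4, 4, 64, 64), device='cuda')    # start from Gaussian noise at t = T
with torch.no_grad():
    x_0 = dpm_solver.sample(x_T, steps=20, order=2, skip_type='time_uniform', method='multistep')
```
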
#############################################################
# other utility functions
#############################################################

def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand

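A small usage example (the values are chosen only for illustration): with one channel and keypoints xp = [0, 1, 2] mapped to yp = [0, 10, 20], queries inside the keypoint range are interpolated linearly and queries outside it are extrapolated from the outermost segment.

```python
# Illustrative check of interpolate_fn (shapes: x is [N, C], xp and yp are [C, K]).
import torch

xp = torch.tensor([[0.0, 1.0, 2.0]])      # [1, 3] keypoint x-coordinates
yp = torch.tensor([[0.0, 10.0, 20.0]])    # [1, 3] keypoint y-coordinates
x = torch.tensor([[0.5], [1.5], [3.0]])   # [3, 1] query points (the last one is out of range)
print(interpolate_fn(x, xp, yp))          # approx. [[5.], [15.], [30.]] (linear extrapolation at the end)
```
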
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims`.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dim`: a `int`.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
    """
    return v[(...,) + (None,) * (dims - 1)]
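For example, with a batch of 4 per-sample scalars and a 4-D image batch, the helper reshapes the scalars so that ordinary broadcasting applies:

```python
# expand_dims turns a per-sample scalar of shape [N] into shape [N, 1, 1, 1]
# so it broadcasts against an image batch of shape [N, C, H, W].
import torch

coeff = torch.tensor([0.1, 0.2, 0.3, 0.4])    # shape [N] = [4]
x = torch.randn(4, 3, 32, 32)                 # shape [N, C, H, W]
scaled = expand_dims(coeff, x.dim()) * x      # per-sample scaling via broadcasting
print(expand_dims(coeff, 4).shape)            # torch.Size([4, 1, 1, 1])
```
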
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/BaseSizer.d.ts
DELETED
@@ -1,739 +0,0 @@
|
|
1 |
-
// import * as Phaser from 'phaser';
|
2 |
-
import ContainerLite from '../../../plugins/containerlite.js';
|
3 |
-
import Anchor from '../anchor/Anchor';
|
4 |
-
import Click from '../click/Click';
|
5 |
-
import ClickOutside from '../clickoutside/ClickOutside';
|
6 |
-
import InTouching from '../intouching/InTouching';
|
7 |
-
import SetChildrenInteractive from '../utils/setchildreninteractive/SetChildrenInteractive';
|
8 |
-
import { ModalBehavoir } from '../modal/Modal';
|
9 |
-
|
10 |
-
export default BaseSizer;
|
11 |
-
|
12 |
-
declare namespace BaseSizer {
|
13 |
-
type AlignTypes = number | 'center' | 'left' | 'right' | 'top' | 'bottom' |
|
14 |
-
'left-top' | 'left-center' | 'left-bottom' |
|
15 |
-
'center-top' | 'center-center' | 'center-bottom' |
|
16 |
-
'right-top' | 'right-center' | 'right-bottom';
|
17 |
-
|
18 |
-
type PaddingTypes = number |
|
19 |
-
{
|
20 |
-
left?: number,
|
21 |
-
right?: number,
|
22 |
-
top?: number,
|
23 |
-
bottom?: number
|
24 |
-
};
|
25 |
-
|
26 |
-
interface IConfig {
|
27 |
-
space?: {
|
28 |
-
left?: number, right?: number, top?: number, bottom?: number,
|
29 |
-
},
|
30 |
-
|
31 |
-
anchor?: Anchor.IConfig,
|
32 |
-
|
33 |
-
name?: string,
|
34 |
-
|
35 |
-
enableLayer?: boolean,
|
36 |
-
|
37 |
-
draggable?: boolean | string | Phaser.GameObjects.GameObject,
|
38 |
-
|
39 |
-
sizerEvents?: boolean,
|
40 |
-
}
|
41 |
-
|
42 |
-
type PrevState = {
|
43 |
-
x: number,
|
44 |
-
y: number,
|
45 |
-
width: number, height: number,
|
46 |
-
displayWidth: number, displayHeight: number,
|
47 |
-
scaleX: number, scaleY: number
|
48 |
-
}
|
49 |
-
|
50 |
-
type OnModalCloseCallbackType = (
|
51 |
-
data: Object
|
52 |
-
) => void;
|
53 |
-
|
54 |
-
}
|
55 |
-
|
56 |
-
declare class BaseSizer extends ContainerLite {
|
57 |
-
isRexSizer: true;
|
58 |
-
|
59 |
-
space: { [name: string]: number };
|
60 |
-
|
61 |
-
constructor(
|
62 |
-
scene: Phaser.Scene,
|
63 |
-
x?: number, y?: number,
|
64 |
-
minWidth?: number, minHeight?: number,
|
65 |
-
config?: BaseSizer.IConfig
|
66 |
-
);
|
67 |
-
|
68 |
-
setMinSize(minWidth: number, minHeight: number): this;
|
69 |
-
|
70 |
-
setMinWidth(minWidth: number): this;
|
71 |
-
|
72 |
-
setMinHeight(minHeight: number): this;
|
73 |
-
|
74 |
-
setDirty(dirty?: boolean): this;
|
75 |
-
|
76 |
-
setSizerEventsEnable(enable?: boolean): this;
|
77 |
-
sizerEventsEnable: boolean;
|
78 |
-
|
79 |
-
left: number;
|
80 |
-
|
81 |
-
alignLeft(value: number): this;
|
82 |
-
|
83 |
-
right: number;
|
84 |
-
|
85 |
-
alignRight(value: number): this;
|
86 |
-
|
87 |
-
centerX: number;
|
88 |
-
|
89 |
-
alignCenterX(value: number): this;
|
90 |
-
|
91 |
-
top: number;
|
92 |
-
|
93 |
-
alignTop(value: number): this;
|
94 |
-
|
95 |
-
bottom: number;
|
96 |
-
|
97 |
-
alignBottom(value: number): this;
|
98 |
-
|
99 |
-
centerY: number;
|
100 |
-
|
101 |
-
alignCenterY(value: number): this;
|
102 |
-
|
103 |
-
pushIntoBounds(
|
104 |
-
bounds?: Phaser.Geom.Rectangle | { left?: number, right?: number, top?: number, bottom?: number }
|
105 |
-
): this;
|
106 |
-
|
107 |
-
readonly innerLeft: number;
|
108 |
-
|
109 |
-
readonly innerRight: number;
|
110 |
-
|
111 |
-
readonly innerTop: number;
|
112 |
-
|
113 |
-
readonly innerBottom: number;
|
114 |
-
|
115 |
-
readonly innerWidth: number;
|
116 |
-
|
117 |
-
readonly innerHeight: number;
|
118 |
-
|
119 |
-
readonly minInnerWidth: number;
|
120 |
-
|
121 |
-
readonly minInnerHeight: number;
|
122 |
-
|
123 |
-
addBackground(
|
124 |
-
gameObject: Phaser.GameObjects.GameObject,
|
125 |
-
padding?: BaseSizer.PaddingTypes,
|
126 |
-
childKey?: string
|
127 |
-
): this;
|
128 |
-
|
129 |
-
isBackground(
|
130 |
-
gameObject: Phaser.GameObjects.GameObject
|
131 |
-
): boolean;
|
132 |
-
|
133 |
-
layout(): this;
|
134 |
-
|
135 |
-
drawBounds(
|
136 |
-
graphics: Phaser.GameObjects.Graphics,
|
137 |
-
color?: number
|
138 |
-
): this;
|
139 |
-
|
140 |
-
drawBounds(
|
141 |
-
graphics: Phaser.GameObjects.Graphics,
|
142 |
-
config?: {
|
143 |
-
color?: number,
|
144 |
-
lineWidth?: number,
|
145 |
-
name?: boolean |
|
146 |
-
{
|
147 |
-
createTextCallback: (scene: Phaser.Scene) => Phaser.GameObjects.GameObject,
|
148 |
-
createTextCallbackScope?: object,
|
149 |
-
align?: BaseSizer.AlignTypes
|
150 |
-
}
|
151 |
-
}
|
152 |
-
): this;
|
153 |
-
|
154 |
-
childrenMap: {
|
155 |
-
[key: string]:
|
156 |
-
Phaser.GameObjects.GameObject
|
157 |
-
};
|
158 |
-
addChildrenMap(
|
159 |
-
key: string,
|
160 |
-
gameObject: Phaser.GameObjects.GameObject
|
161 |
-
): this;
|
162 |
-
|
163 |
-
removeChildrenMap(key: string): this;
|
164 |
-
removeChildrenMap(gameObject: Phaser.GameObjects.GameObject): this;
|
165 |
-
|
166 |
-
getElement(
|
167 |
-
name: string,
|
168 |
-
recursive?: boolean
|
169 |
-
): Phaser.GameObjects.GameObject |
|
170 |
-
Phaser.GameObjects.GameObject[] |
|
171 |
-
{ [name: string]: Phaser.GameObjects.GameObject } |
|
172 |
-
null;
|
173 |
-
|
174 |
-
getParentSizer(
|
175 |
-
name?: string
|
176 |
-
): BaseSizer | null;
|
177 |
-
|
178 |
-
getParentSizer(
|
179 |
-
gameObject?: Phaser.GameObjects.GameObject,
|
180 |
-
name?: string
|
181 |
-
): BaseSizer | null;
|
182 |
-
|
183 |
-
getTopmostSizer(
|
184 |
-
gameObject?: Phaser.GameObjects.GameObject
|
185 |
-
): BaseSizer | null;
|
186 |
-
|
187 |
-
getSizerConfig(
|
188 |
-
gameObject?: Phaser.GameObjects.GameObject
|
189 |
-
): { [name: string]: any };
|
190 |
-
|
191 |
-
getChildPrevState(
|
192 |
-
gameObject: Phaser.GameObjects.GameObject
|
193 |
-
): BaseSizer.PrevState;
|
194 |
-
|
195 |
-
isInTouching(): boolean;
|
196 |
-
|
197 |
-
isInTouching(
|
198 |
-
pointer: Phaser.Input.Pointer,
|
199 |
-
gameObject?: Phaser.GameObjects.GameObject | string
|
200 |
-
): boolean;
|
201 |
-
|
202 |
-
isInTouching(
|
203 |
-
gameObject?: Phaser.GameObjects.GameObject | string
|
204 |
-
): boolean;
|
205 |
-
|
206 |
-
|
207 |
-
moveFrom(
|
208 |
-
duration: number,
|
209 |
-
x: number,
|
210 |
-
y: number,
|
211 |
-
ease?: string
|
212 |
-
): this;
|
213 |
-
|
214 |
-
moveFrom(
|
215 |
-
config: {
|
216 |
-
x: number,
|
217 |
-
y: number,
|
218 |
-
speed?: number,
|
219 |
-
duration?: number,
|
220 |
-
ease?: string,
|
221 |
-
}
|
222 |
-
): this;
|
223 |
-
|
224 |
-
moveFromPromise(
|
225 |
-
duration: number,
|
226 |
-
x: number,
|
227 |
-
y: number,
|
228 |
-
ease?: string
|
229 |
-
): Promise<any>;
|
230 |
-
|
231 |
-
moveFromPromise(
|
232 |
-
config: {
|
233 |
-
x: number,
|
234 |
-
y: number,
|
235 |
-
speed?: number,
|
236 |
-
duration?: number,
|
237 |
-
ease?: string,
|
238 |
-
}
|
239 |
-
): Promise<any>;
|
240 |
-
|
241 |
-
moveFromDestroy(
|
242 |
-
duration: number,
|
243 |
-
x: number,
|
244 |
-
y: number,
|
245 |
-
ease?: string
|
246 |
-
): this;
|
247 |
-
|
248 |
-
moveFromDestroy(
|
249 |
-
config: {
|
250 |
-
x: number,
|
251 |
-
y: number,
|
252 |
-
speed?: number,
|
253 |
-
duration?: number,
|
254 |
-
ease?: string,
|
255 |
-
}
|
256 |
-
): this;
|
257 |
-
|
258 |
-
moveFromDestroyPromise(
|
259 |
-
duration: number,
|
260 |
-
x: number,
|
261 |
-
y: number,
|
262 |
-
ease?: string
|
263 |
-
): Promise<any>;
|
264 |
-
|
265 |
-
moveFromDestroyPromise(
|
266 |
-
config: {
|
267 |
-
x: number,
|
268 |
-
y: number,
|
269 |
-
speed?: number,
|
270 |
-
duration?: number,
|
271 |
-
ease?: string,
|
272 |
-
}
|
273 |
-
): Promise<any>;
|
274 |
-
|
275 |
-
moveTo(
|
276 |
-
duration: number,
|
277 |
-
x: number,
|
278 |
-
y: number,
|
279 |
-
ease?: string
|
280 |
-
): this;
|
281 |
-
|
282 |
-
moveTo(
|
283 |
-
config: {
|
284 |
-
x: number,
|
285 |
-
y: number,
|
286 |
-
speed?: number,
|
287 |
-
duration?: number,
|
288 |
-
ease?: string,
|
289 |
-
}
|
290 |
-
): this;
|
291 |
-
|
292 |
-
moveToPromise(
|
293 |
-
duration: number,
|
294 |
-
x: number,
|
295 |
-
y: number,
|
296 |
-
ease?: string
|
297 |
-
): Promise<any>;
|
298 |
-
|
299 |
-
moveToPromise(
|
300 |
-
config: {
|
301 |
-
x: number,
|
302 |
-
y: number,
|
303 |
-
speed?: number,
|
304 |
-
duration?: number,
|
305 |
-
ease?: string,
|
306 |
-
}
|
307 |
-
): Promise<any>;
|
308 |
-
|
309 |
-
moveToDestroy(
|
310 |
-
duration: number,
|
311 |
-
x: number,
|
312 |
-
y: number,
|
313 |
-
ease?: string
|
314 |
-
): this;
|
315 |
-
|
316 |
-
moveToDestroy(
|
317 |
-
config: {
|
318 |
-
x: number,
|
319 |
-
y: number,
|
320 |
-
speed?: number,
|
321 |
-
duration?: number,
|
322 |
-
ease?: string,
|
323 |
-
}
|
324 |
-
): this;
|
325 |
-
|
326 |
-
moveToDestroyPromise(
|
327 |
-
duration: number,
|
328 |
-
x: number,
|
329 |
-
y: number,
|
330 |
-
ease?: string
|
331 |
-
): Promise<any>;
|
332 |
-
|
333 |
-
moveToDestroyPromise(
|
334 |
-
config: {
|
335 |
-
x: number,
|
336 |
-
y: number,
|
337 |
-
speed?: number,
|
338 |
-
duration?: number,
|
339 |
-
ease?: string,
|
340 |
-
}
|
341 |
-
): Promise<any>;
|
342 |
-
|
343 |
-
moveStop(toEnd?: boolean): this;
|
344 |
-
|
345 |
-
fadeIn(
|
346 |
-
duration: number,
|
347 |
-
alpha?: number
|
348 |
-
): this;
|
349 |
-
|
350 |
-
fadeInPromise(
|
351 |
-
duration: number,
|
352 |
-
alpha?: number
|
353 |
-
): Promise<any>;
|
354 |
-
|
355 |
-
fadeOutDestroy(
|
356 |
-
duration: number
|
357 |
-
): this;
|
358 |
-
|
359 |
-
fadeOutDestroyPromise(
|
360 |
-
duration: number
|
361 |
-
): Promise<any>;
|
362 |
-
|
363 |
-
fadeOut(
|
364 |
-
duration: number
|
365 |
-
): this;
|
366 |
-
|
367 |
-
fadeOutPromise(
|
368 |
-
duration: number
|
369 |
-
): Promise<any>;
|
370 |
-
|
371 |
-
popUp(
|
372 |
-
duration: number,
|
373 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
374 |
-
ease?: string
|
375 |
-
): this;
|
376 |
-
|
377 |
-
popUpPromise(
|
378 |
-
duration: number,
|
379 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
380 |
-
ease?: string
|
381 |
-
): Promise<any>;
|
382 |
-
|
383 |
-
scaleDownDestroy(
|
384 |
-
duration: number,
|
385 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
386 |
-
ease?: string
|
387 |
-
): this;
|
388 |
-
|
389 |
-
scaleDownDestroyPromise(
|
390 |
-
duration: number,
|
391 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
392 |
-
ease?: string
|
393 |
-
): Promise<any>;
|
394 |
-
|
395 |
-
scaleDown(
|
396 |
-
duration: number,
|
397 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
398 |
-
ease?: string
|
399 |
-
): this;
|
400 |
-
|
401 |
-
scaleDownPromise(
|
402 |
-
duration: number,
|
403 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
404 |
-
ease?: string
|
405 |
-
): Promise<any>;
|
406 |
-
|
407 |
-
scaleYoyo(
|
408 |
-
duration: number,
|
409 |
-
peakValue?: number,
|
410 |
-
repeat?: number,
|
411 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
412 |
-
ease?: string
|
413 |
-
): this;
|
414 |
-
|
415 |
-
scaleYoyoPromise(
|
416 |
-
duration: number,
|
417 |
-
peakValue?: number,
|
418 |
-
repeat?: number,
|
419 |
-
orientation?: 0 | 1 | 'x' | 'y',
|
420 |
-
ease?: string
|
421 |
-
): Promise<any>;
|
422 |
-
|
423 |
-
shake(
|
424 |
-
duration?: number,
|
425 |
-
magnitude?: number,
|
426 |
-
magnitudeMode?: 0 | 1 | 'constant' | 'decay'
|
427 |
-
): this;
|
428 |
-
|
429 |
-
shakePromise(
|
430 |
-
duration?: number,
|
431 |
-
magnitude?: number,
|
432 |
-
magnitudeMode?: 0 | 1 | 'constant' | 'decay'
|
433 |
-
): Promise<any>;
|
434 |
-
|
435 |
-
easeDataTo(
|
436 |
-
key: string,
|
437 |
-
value: number,
|
438 |
-
duration?: number,
|
439 |
-
ease?: string
|
440 |
-
): this;
|
441 |
-
|
442 |
-
easeDataTo(
|
443 |
-
config: {
|
444 |
-
key: string,
|
445 |
-
value: number,
|
446 |
-
duration?: number,
|
447 |
-
ease?: string,
|
448 |
-
speed?: number
|
449 |
-
}
|
450 |
-
): this;
|
451 |
-
|
452 |
-
easeDataToPromise(
|
453 |
-
key: string,
|
454 |
-
value: number,
|
455 |
-
duration?: number,
|
456 |
-
ease?: string
|
457 |
-
): Promise<any>;
|
458 |
-
|
459 |
-
easeDataToPromise(
|
460 |
-
config: {
|
461 |
-
key: string,
|
462 |
-
value: number,
|
463 |
-
duration?: number,
|
464 |
-
ease?: string,
|
465 |
-
speed?: number
|
466 |
-
}
|
467 |
-
): Promise<any>;
|
468 |
-
|
469 |
-
stopEaseData(
|
470 |
-
key: string,
|
471 |
-
toEnd?: boolean
|
472 |
-
): this;
|
473 |
-
|
474 |
-
stopAllEaseData(
|
475 |
-
toEnd?: boolean
|
476 |
-
): this;
|
477 |
-
|
478 |
-
setAnchor(config: {
|
479 |
-
left?: string, right?: string, centerX?: string, x?: string,
|
480 |
-
top?: string, bottom?: string, centerY?: string, y?: string
|
481 |
-
}): this;
|
482 |
-
|
483 |
-
setDraggable(
|
484 |
-
senser: boolean | string | Phaser.GameObjects.GameObject,
|
485 |
-
draggable?: boolean
|
486 |
-
): this;
|
487 |
-
|
488 |
-
onClick(
|
489 |
-
callback: (
|
490 |
-
click: Click,
|
491 |
-
gameObject: Phaser.GameObjects.GameObject,
|
492 |
-
pointer: Phaser.Input.Pointer,
|
493 |
-
event: Phaser.Types.Input.EventData
|
494 |
-
) => void,
|
495 |
-
scope?: object,
|
496 |
-
config?: Click.IConfig
|
497 |
-
): this;
|
498 |
-
|
499 |
-
|
500 |
-
onClick(
|
501 |
-
gameObject: Phaser.GameObjects.GameObject,
|
502 |
-
callback: (
|
503 |
-
click: Click,
|
504 |
-
gameObject: Phaser.GameObjects.GameObject,
|
505 |
-
pointer: Phaser.Input.Pointer,
|
506 |
-
event: Phaser.Types.Input.EventData
|
507 |
-
) => void,
|
508 |
-
scope?: object,
|
509 |
-
config?: Click.IConfig
|
510 |
-
): this;
|
511 |
-
|
512 |
-
offClick(
|
513 |
-
callback: Function,
|
514 |
-
scope?: object
|
515 |
-
): this;
|
516 |
-
|
517 |
-
offClick(
|
518 |
-
gameObject: Phaser.GameObjects.GameObject,
|
519 |
-
callback: Function,
|
520 |
-
scope?: object
|
521 |
-
): this;
|
522 |
-
|
523 |
-
enableClick(enabled?: boolean): this;
|
524 |
-
|
525 |
-
enableClick(
|
526 |
-
gameObject: Phaser.GameObjects.GameObject,
|
527 |
-
enabled?: boolean
|
528 |
-
): this;
|
529 |
-
|
530 |
-
disableClick(): this;
|
531 |
-
|
532 |
-
disableClick(gameObject: Phaser.GameObjects.GameObject): this;
|
533 |
-
|
534 |
-
onClickOutside(
|
535 |
-
callback: (
|
536 |
-
clickOutside: ClickOutside,
|
537 |
-
gameObject: Phaser.GameObjects.GameObject,
|
538 |
-
pointer: Phaser.Input.Pointer
|
539 |
-
) => void,
|
540 |
-
scope?: object,
|
541 |
-
config?: ClickOutside.IConfig
|
542 |
-
): this;
|
543 |
-
|
544 |
-
onClickOutside(
|
545 |
-
gameObject: Phaser.GameObjects.GameObject,
|
546 |
-
callback: (
|
547 |
-
clickOutside: ClickOutside,
|
548 |
-
gameObject: Phaser.GameObjects.GameObject,
|
549 |
-
pointer: Phaser.Input.Pointer
|
550 |
-
) => void,
|
551 |
-
scope?: object,
|
552 |
-
config?: ClickOutside.IConfig
|
553 |
-
): this;
|
554 |
-
|
555 |
-
offClickOutside(
|
556 |
-
callback: Function,
|
557 |
-
scope?: object
|
558 |
-
): this;
|
559 |
-
|
560 |
-
offClickOutside(
|
561 |
-
gameObject: Phaser.GameObjects.GameObject,
|
562 |
-
callback: Function,
|
563 |
-
scope?: object
|
564 |
-
): this;
|
565 |
-
|
566 |
-
|
567 |
-
enableClickOutside(enabled?: boolean): this;
|
568 |
-
|
569 |
-
enableClickOutside(
|
570 |
-
gameObject: Phaser.GameObjects.GameObject,
|
571 |
-
enabled?: boolean
|
572 |
-
): this;
|
573 |
-
|
574 |
-
disableClickOutside(): this;
|
575 |
-
|
576 |
-
disableClickOutside(gameObject: Phaser.GameObjects.GameObject): this;
|
577 |
-
|
578 |
-
isPointerInBounds(): boolean;
|
579 |
-
isPointerInBounds(gameObject: Phaser.GameObjects.GameObject): boolean;
|
580 |
-
isPointerInBounds(name: string): boolean;
|
581 |
-
|
582 |
-
onTouching(
|
583 |
-
callback: (
|
584 |
-
inTouch: InTouching,
|
585 |
-
gameObject: Phaser.GameObjects.GameObject,
|
586 |
-
pointer: Phaser.Input.Pointer,
|
587 |
-
) => void,
|
588 |
-
scope?: object,
|
589 |
-
config?: InTouching.IConfig
|
590 |
-
): this;
|
591 |
-
|
592 |
-
onTouching(
|
593 |
-
gameObject: Phaser.GameObjects.GameObject,
|
594 |
-
callback: (
|
595 |
-
inTouch: InTouching,
|
596 |
-
gameObject: Phaser.GameObjects.GameObject,
|
597 |
-
pointer: Phaser.Input.Pointer,
|
598 |
-
) => void,
|
599 |
-
scope?: object,
|
600 |
-
config?: InTouching.IConfig
|
601 |
-
): this;
|
602 |
-
|
603 |
-
offTouching(
|
604 |
-
callback: Function,
|
605 |
-
scope?: object
|
606 |
-
): this;
|
607 |
-
|
608 |
-
offTouching(
|
609 |
-
gameObject: Phaser.GameObjects.GameObject,
|
610 |
-
callback: Function,
|
611 |
-
scope?: object
|
612 |
-
): this;
|
613 |
-
|
614 |
-
onTouchingEnd(
|
615 |
-
callback: (
|
616 |
-
inTouch: InTouching,
|
617 |
-
gameObject: Phaser.GameObjects.GameObject,
|
618 |
-
pointer: Phaser.Input.Pointer,
|
619 |
-
) => void,
|
620 |
-
scope?: object,
|
621 |
-
config?: InTouching.IConfig
|
622 |
-
): this;
|
623 |
-
|
624 |
-
onTouchingEnd(
|
625 |
-
gameObject: Phaser.GameObjects.GameObject,
|
626 |
-
callback: (
|
627 |
-
inTouch: InTouching,
|
628 |
-
gameObject: Phaser.GameObjects.GameObject,
|
629 |
-
pointer: Phaser.Input.Pointer,
|
630 |
-
) => void,
|
631 |
-
scope?: object,
|
632 |
-
config?: InTouching.IConfig
|
633 |
-
): this;
|
634 |
-
|
635 |
-
offTouchingEnd(
|
636 |
-
callback: Function,
|
637 |
-
scope?: object
|
638 |
-
): this;
|
639 |
-
|
640 |
-
offTouchingEnd(
|
641 |
-
gameObject: Phaser.GameObjects.GameObject,
|
642 |
-
callback: Function,
|
643 |
-
scope?: object
|
644 |
-
): this;
|
645 |
-
|
646 |
-
enableTouching(enable?: boolean): this;
|
647 |
-
|
648 |
-
enableTouching(
|
649 |
-
gameObject: Phaser.GameObjects.GameObject,
|
650 |
-
enable?: boolean
|
651 |
-
): this;
|
652 |
-
|
653 |
-
disableTouching(): this;
|
654 |
-
|
655 |
-
disableTouching(gameObject: Phaser.GameObjects.GameObject): this;
|
656 |
-
|
657 |
-
setChildrenInteractive(
|
658 |
-
config: SetChildrenInteractive.IConfig
|
659 |
-
): this;
|
660 |
-
|
661 |
-
show(
|
662 |
-
gameObject?: Phaser.GameObjects.GameObject
|
663 |
-
): this;
|
664 |
-
|
665 |
-
hide(
|
666 |
-
gameObject?: Phaser.GameObjects.GameObject
|
667 |
-
): this;
|
668 |
-
|
669 |
-
isShow(
|
670 |
-
gameObject: Phaser.GameObjects.GameObject
|
671 |
-
): boolean;
|
672 |
-
|
673 |
-
onCreateModalBehavior: (self: this) => void;
|
674 |
-
|
675 |
-
modal(
|
676 |
-
config?: ModalBehavoir.IConfig,
|
677 |
-
onClose?: BaseSizer.OnModalCloseCallbackType
|
678 |
-
): this;
|
679 |
-
|
680 |
-
modal(
|
681 |
-
onClose?: BaseSizer.OnModalCloseCallbackType
|
682 |
-
): this;
|
683 |
-
|
684 |
-
modalPromise(
|
685 |
-
config?: ModalBehavoir.IConfig
|
686 |
-
): Promise<Object>;
|
687 |
-
|
688 |
-
modalClose(closeEventData?: Object): this;
|
689 |
-
|
690 |
-
broadcastEvent(
|
691 |
-
event: string,
|
692 |
-
...args: any[]
|
693 |
-
): this;
|
694 |
-
|
695 |
-
getShownChildren(
|
696 |
-
out?: Phaser.GameObjects.GameObject[]
|
697 |
-
): Phaser.GameObjects.GameObject[];
|
698 |
-
|
699 |
-
getAllShownChildren(
|
700 |
-
out?: Phaser.GameObjects.GameObject[]
|
701 |
-
): Phaser.GameObjects.GameObject[];
|
702 |
-
|
703 |
-
getInnerPadding(
|
704 |
-
key?: string
|
705 |
-
): number | { left: number, right: number, top: number, bottom: number };
|
706 |
-
|
707 |
-
setInnerPadding(
|
708 |
-
key: string | number | { left?: number, right?: number, top?: number, bottom?: number },
|
709 |
-
value?: number
|
710 |
-
): this;
|
711 |
-
|
712 |
-
getOutterPadding(
|
713 |
-
key?: string
|
714 |
-
): number | { left: number, right: number, top: number, bottom: number };
|
715 |
-
|
716 |
-
setOuterPadding(
|
717 |
-
key: string | number | { left?: number, right?: number, top?: number, bottom?: number },
|
718 |
-
value?: number
|
719 |
-
): this;
|
720 |
-
|
721 |
-
getChildOutterPadding(
|
722 |
-
child: string | Phaser.GameObjects.GameObject,
|
723 |
-
key?: string
|
724 |
-
): number | { left: number, right: number, top: number, bottom: number };
|
725 |
-
|
726 |
-
setChildOuterPadding(
|
727 |
-
child: string | Phaser.GameObjects.GameObject,
|
728 |
-
key: string | number | { left?: number, right?: number, top?: number, bottom?: number },
|
729 |
-
value?: number
|
730 |
-
): this;
|
731 |
-
|
732 |
-
pointToChild(
|
733 |
-
x: number,
|
734 |
-
y: number,
|
735 |
-
preTest?: (gameObject: Phaser.GameObjects.GameObject, x: number, y: number) => boolean,
|
736 |
-
postTest?: (gameObject: Phaser.GameObjects.GameObject, x: number, y: number) => boolean,
|
737 |
-
children?: Phaser.GameObjects.GameObject[]
|
738 |
-
): Phaser.GameObjects.GameObject;
|
739 |
-
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Factory.js
DELETED
@@ -1,13 +0,0 @@
|
|
import Knob from './Knob.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('knob', function (config) {
    var gameObject = new Knob(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.UI.Knob', Knob);

export default Knob;
spaces/Akshay-Vs/GPT-Based-Generator/app.py
DELETED
@@ -1,47 +0,0 @@
|
|
import sys
import time
import os
import streamlit as st
import concurrent.futures
from random import randint

st.markdown("<h1 style='text-align: center; color: white;'>Generate intresting stories with GPT</h1>", unsafe_allow_html=True)
st.markdown('')
st.markdown('')

# initializing session_state
os.system('pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
os.system('pip install transformers')

from transformers import pipeline, set_seed

generator = pipeline('text-generation', model='openai-gpt')

def generate(initial_text, length=50, return_sequences=1):
    set_seed(randint(1,1000))
    result = generator(initial_text, max_length = length, num_return_sequences = return_sequences)
    return result[0]["generated_text"]

def slice(text, mak_length=10):
    return text[-mak_length:]

def type_text(text):
    for letter in text:
        sys.stdout.write(letter)
        time.sleep(0)

#text = input("Enter something to begin with... ")
#print(".\n.\n.\nGenerating\n.\n.\n.")

text = st.text_input('Enter something to begin with...', placeholder='I looked at her eyes, then i realized...', key="value")
if text:
    st.write("Generating...")
    for _ in range(10):
        result = generate(text)
        text=slice(result)
        out = result.replace(text,"")
        st.markdown(f"<h3><b>{out}</b></h3>", unsafe_allow_html=True)
        #with concurrent.futures.ThreadPoolExecutor() as executor:
        #    executor.submit(type_text, result.replace(text,""))
    st.balloons()

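The generation loop in this app works as a sliding window: each pass feeds only the last few characters of the previous output back into the model, then strips that overlap from what is displayed. A plain-Python sketch of the same idea without the Streamlit UI (the prompt text is a placeholder, and the loop count is arbitrary):

```python
# Sketch of the sliding-window continuation loop used above, without Streamlit.
from random import randint
from transformers import pipeline, set_seed

generator = pipeline('text-generation', model='openai-gpt')

prompt = "I looked at her eyes, then I realized"   # placeholder prompt
for _ in range(3):
    set_seed(randint(1, 1000))
    result = generator(prompt, max_length=50, num_return_sequences=1)[0]["generated_text"]
    prompt = result[-10:]               # keep only the tail as the next prompt
    print(result.replace(prompt, ""))   # show the new text without the carried-over tail
```
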
spaces/Alpaca233/SadTalker/src/utils/audio.py
DELETED
@@ -1,136 +0,0 @@
|
|
import librosa
import librosa.filters
import numpy as np
# import tensorflow as tf
from scipy import signal
from scipy.io import wavfile
from src.utils.hparams import hparams as hp

def load_wav(path, sr):
    return librosa.core.load(path, sr=sr)[0]

def save_wav(wav, path, sr):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    #proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))

def save_wavenet_wav(wav, path, sr):
    librosa.output.write_wav(path, wav, sr=sr)

def preemphasis(wav, k, preemphasize=True):
    if preemphasize:
        return signal.lfilter([1, -k], [1], wav)
    return wav

def inv_preemphasis(wav, k, inv_preemphasize=True):
    if inv_preemphasize:
        return signal.lfilter([1], [1, -k], wav)
    return wav

def get_hop_size():
    hop_size = hp.hop_size
    if hop_size is None:
        assert hp.frame_shift_ms is not None
        hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
    return hop_size

def linearspectrogram(wav):
    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    S = _amp_to_db(np.abs(D)) - hp.ref_level_db

    if hp.signal_normalization:
        return _normalize(S)
    return S

def melspectrogram(wav):
    D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
    S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db

    if hp.signal_normalization:
        return _normalize(S)
    return S

def _lws_processor():
    import lws
    return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")

def _stft(y):
    if hp.use_lws:
        return _lws_processor(hp).stft(y).T
    else:
        return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)

##########################################################
#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
    """Compute number of time frames of spectrogram
    """
    pad = (fsize - fshift)
    if length % fshift == 0:
        M = (length + pad * 2 - fsize) // fshift + 1
    else:
        M = (length + pad * 2 - fsize) // fshift + 2
    return M


def pad_lr(x, fsize, fshift):
    """Compute left and right padding
    """
    M = num_frames(len(x), fsize, fshift)
    pad = (fsize - fshift)
    T = len(x) + 2 * pad
    r = (M - 1) * fshift + fsize - T
    return pad, pad + r
##########################################################
#Librosa correct padding
def librosa_pad_lr(x, fsize, fshift):
    return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]

# Conversions
_mel_basis = None

def _linear_to_mel(spectogram):
    global _mel_basis
    if _mel_basis is None:
        _mel_basis = _build_mel_basis()
    return np.dot(_mel_basis, spectogram)

def _build_mel_basis():
    assert hp.fmax <= hp.sample_rate // 2
    return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
                               fmin=hp.fmin, fmax=hp.fmax)

def _amp_to_db(x):
    min_level = np.exp(hp.min_level_db / 20 * np.log(10))
    return 20 * np.log10(np.maximum(min_level, x))

def _db_to_amp(x):
    return np.power(10.0, (x) * 0.05)

def _normalize(S):
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
                           -hp.max_abs_value, hp.max_abs_value)
        else:
            return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)

    assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
    if hp.symmetric_mels:
        return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
    else:
        return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))

def _denormalize(D):
    if hp.allow_clipping_in_normalization:
        if hp.symmetric_mels:
            return (((np.clip(D, -hp.max_abs_value,
                              hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
                    + hp.min_level_db)
        else:
            return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)

    if hp.symmetric_mels:
        return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
    else:
        return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
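As a quick orientation for how these helpers are meant to be combined: load a wav at the configured sample rate and turn it into a normalized mel spectrogram. This is a minimal sketch assuming the package layout shown in the file path above and assuming the hparams module supplies the fields referenced in the code (sample_rate, n_fft, mel and normalization settings); the wav path is a placeholder.

```python
# Minimal usage sketch of the audio helpers above.
from src.utils import audio
from src.utils.hparams import hparams as hp

wav = audio.load_wav("example.wav", hp.sample_rate)   # "example.wav" is a placeholder path
mel = audio.melspectrogram(wav)                       # shape: [num_mels, num_frames]
print(mel.shape)
```
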
spaces/Anandhju-jayan/image-captioning-cloned/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Ai Image Captioning
-emoji: 📈
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.28.2
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: chats-bug/ai-image-captioning
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Andres99/Tune-A-Video-Training-UI/Dockerfile
DELETED
@@ -1,57 +0,0 @@
-FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
-ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && \
-    apt-get upgrade -y && \
-    apt-get install -y --no-install-recommends \
-    git \
-    git-lfs \
-    wget \
-    curl \
-    # ffmpeg \
-    ffmpeg \
-    x264 \
-    # python build dependencies \
-    build-essential \
-    libssl-dev \
-    zlib1g-dev \
-    libbz2-dev \
-    libreadline-dev \
-    libsqlite3-dev \
-    libncursesw5-dev \
-    xz-utils \
-    tk-dev \
-    libxml2-dev \
-    libxmlsec1-dev \
-    libffi-dev \
-    liblzma-dev && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:${PATH}
-WORKDIR ${HOME}/app
-
-RUN curl https://pyenv.run | bash
-ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
-ENV PYTHON_VERSION=3.10.9
-RUN pyenv install ${PYTHON_VERSION} && \
-    pyenv global ${PYTHON_VERSION} && \
-    pyenv rehash && \
-    pip install --no-cache-dir -U pip setuptools wheel
-
-RUN pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1
-COPY --chown=1000 requirements.txt /tmp/requirements.txt
-RUN pip install --no-cache-dir -U -r /tmp/requirements.txt
-
-COPY --chown=1000 . ${HOME}/app
-RUN cd Tune-A-Video && patch -p1 < ../patch
-ENV PYTHONPATH=${HOME}/app \
-    PYTHONUNBUFFERED=1 \
-    GRADIO_ALLOW_FLAGGING=never \
-    GRADIO_NUM_PORTS=1 \
-    GRADIO_SERVER_NAME=0.0.0.0 \
-    GRADIO_THEME=huggingface \
-    SYSTEM=spaces
-CMD ["python", "app.py"]
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/dit.md
DELETED
@@ -1,35 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# DiT
-
-[Scalable Diffusion Models with Transformers](https://huggingface.co/papers/2212.09748) (DiT) is by William Peebles and Saining Xie.
-
-The abstract from the paper is:
-
-*We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops -- through increased transformer depth/width or increased number of input tokens -- consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512x512 and 256x256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.*
-
-The original codebase can be found at [facebookresearch/dit](https://github.com/facebookresearch/dit).
-
-<Tip>
-
-Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
-
-## DiTPipeline
-[[autodoc]] DiTPipeline
-  - all
-  - __call__
-
-## ImagePipelineOutput
-[[autodoc]] pipelines.ImagePipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_ldm3d.py
DELETED
@@ -1,310 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import gc
-import unittest
-
-import numpy as np
-import torch
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
-
-from diffusers import (
-    AutoencoderKL,
-    DDIMScheduler,
-    PNDMScheduler,
-    StableDiffusionLDM3DPipeline,
-    UNet2DConditionModel,
-)
-from diffusers.utils import nightly, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
-
-
-enable_full_determinism()
-
-
-class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
-    pipeline_class = StableDiffusionLDM3DPipeline
-    params = TEXT_TO_IMAGE_PARAMS
-    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
-    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-
-    def get_dummy_components(self):
-        torch.manual_seed(0)
-        unet = UNet2DConditionModel(
-            block_out_channels=(32, 64),
-            layers_per_block=2,
-            sample_size=32,
-            in_channels=4,
-            out_channels=4,
-            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
-            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-            cross_attention_dim=32,
-        )
-        scheduler = DDIMScheduler(
-            beta_start=0.00085,
-            beta_end=0.012,
-            beta_schedule="scaled_linear",
-            clip_sample=False,
-            set_alpha_to_one=False,
-        )
-        torch.manual_seed(0)
-        vae = AutoencoderKL(
-            block_out_channels=[32, 64],
-            in_channels=6,
-            out_channels=6,
-            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
-            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
-            latent_channels=4,
-        )
-        torch.manual_seed(0)
-        text_encoder_config = CLIPTextConfig(
-            bos_token_id=0,
-            eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=37,
-            layer_norm_eps=1e-05,
-            num_attention_heads=4,
-            num_hidden_layers=5,
-            pad_token_id=1,
-            vocab_size=1000,
-        )
-        text_encoder = CLIPTextModel(text_encoder_config)
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
-        components = {
-            "unet": unet,
-            "scheduler": scheduler,
-            "vae": vae,
-            "text_encoder": text_encoder,
-            "tokenizer": tokenizer,
-            "safety_checker": None,
-            "feature_extractor": None,
-        }
-        return components
-
-    def get_dummy_inputs(self, device, seed=0):
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = {
-            "prompt": "A painting of a squirrel eating a burger",
-            "generator": generator,
-            "num_inference_steps": 2,
-            "guidance_scale": 6.0,
-            "output_type": "numpy",
-        }
-        return inputs
-
-    def test_stable_diffusion_ddim(self):
-        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-
-        components = self.get_dummy_components()
-        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
-        ldm3d_pipe = ldm3d_pipe.to(torch_device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(device)
-        output = ldm3d_pipe(**inputs)
-        rgb, depth = output.rgb, output.depth
-
-        image_slice_rgb = rgb[0, -3:, -3:, -1]
-        image_slice_depth = depth[0, -3:, -1]
-
-        assert rgb.shape == (1, 64, 64, 3)
-        assert depth.shape == (1, 64, 64)
-
-        expected_slice_rgb = np.array(
-            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
-        )
-        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
-
-        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
-        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
-
-    def test_stable_diffusion_prompt_embeds(self):
-        components = self.get_dummy_components()
-        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
-        ldm3d_pipe = ldm3d_pipe.to(torch_device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt"] = 3 * [inputs["prompt"]]
-
-        # forward
-        output = ldm3d_pipe(**inputs)
-        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
-        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
-        depth_slice_1 = depth_slice_1[0, -3:, -1]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = 3 * [inputs.pop("prompt")]
-
-        text_inputs = ldm3d_pipe.tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=ldm3d_pipe.tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        )
-        text_inputs = text_inputs["input_ids"].to(torch_device)
-
-        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
-
-        inputs["prompt_embeds"] = prompt_embeds
-
-        # forward
-        output = ldm3d_pipe(**inputs)
-        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
-        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
-        depth_slice_2 = depth_slice_2[0, -3:, -1]
-
-        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
-        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
-
-    def test_stable_diffusion_negative_prompt(self):
-        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
-        components = self.get_dummy_components()
-        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
-        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
-        ldm3d_pipe = ldm3d_pipe.to(device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(device)
-        negative_prompt = "french fries"
-        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
-
-        rgb, depth = output.rgb, output.depth
-        rgb_slice = rgb[0, -3:, -3:, -1]
-        depth_slice = depth[0, -3:, -1]
-
-        assert rgb.shape == (1, 64, 64, 3)
-        assert depth.shape == (1, 64, 64)
-
-        expected_slice_rgb = np.array(
-            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
-        )
-        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
-        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
-        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
-
-
-@slow
-@require_torch_gpu
-class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
-        generator = torch.Generator(device=generator_device).manual_seed(seed)
-        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
-        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
-        inputs = {
-            "prompt": "a photograph of an astronaut riding a horse",
-            "latents": latents,
-            "generator": generator,
-            "num_inference_steps": 3,
-            "guidance_scale": 7.5,
-            "output_type": "numpy",
-        }
-        return inputs
-
-    def test_ldm3d_stable_diffusion(self):
-        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
-        ldm3d_pipe = ldm3d_pipe.to(torch_device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_inputs(torch_device)
-        output = ldm3d_pipe(**inputs)
-        rgb, depth = output.rgb, output.depth
-        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
-        depth_slice = rgb[0, -3:, -1].flatten()
-
-        assert rgb.shape == (1, 512, 512, 3)
-        assert depth.shape == (1, 512, 512)
-
-        expected_slice_rgb = np.array(
-            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
-        )
-        expected_slice_depth = np.array(
-            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
-        )
-        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
-        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
-
-
-@nightly
-@require_torch_gpu
-class StableDiffusionPipelineNightlyTests(unittest.TestCase):
-    def tearDown(self):
-        super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
-        generator = torch.Generator(device=generator_device).manual_seed(seed)
-        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
-        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
-        inputs = {
-            "prompt": "a photograph of an astronaut riding a horse",
-            "latents": latents,
-            "generator": generator,
-            "num_inference_steps": 50,
-            "guidance_scale": 7.5,
-            "output_type": "numpy",
-        }
-        return inputs
-
-    def test_ldm3d(self):
-        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_inputs(torch_device)
-        output = ldm3d_pipe(**inputs)
-        rgb, depth = output.rgb, output.depth
-
-        expected_rgb_mean = 0.495586
-        expected_rgb_std = 0.33795515
-        expected_depth_mean = 112.48518
-        expected_depth_std = 98.489746
-        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
-        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
-        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
-        assert np.abs(expected_depth_std - depth.std()) < 1e-3
-
-    def test_ldm3d_v2(self):
-        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
-        ldm3d_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_inputs(torch_device)
-        output = ldm3d_pipe(**inputs)
-        rgb, depth = output.rgb, output.depth
-
-        expected_rgb_mean = 0.4194127
-        expected_rgb_std = 0.35375586
-        expected_depth_mean = 0.5638502
-        expected_depth_std = 0.34686103
-
-        assert rgb.shape == (1, 512, 512, 3)
-        assert depth.shape == (1, 512, 512, 1)
-        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
-        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
-        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
-        assert np.abs(expected_depth_std - depth.std()) < 1e-3
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddpm.py
DELETED
@@ -1,187 +0,0 @@
-import torch
-
-from diffusers import DDPMScheduler
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class DDPMSchedulerTest(SchedulerCommonTest):
-    scheduler_classes = (DDPMScheduler,)
-
-    def get_scheduler_config(self, **kwargs):
-        config = {
-            "num_train_timesteps": 1000,
-            "beta_start": 0.0001,
-            "beta_end": 0.02,
-            "beta_schedule": "linear",
-            "variance_type": "fixed_small",
-            "clip_sample": True,
-        }
-
-        config.update(**kwargs)
-        return config
-
-    def test_timesteps(self):
-        for timesteps in [1, 5, 100, 1000]:
-            self.check_over_configs(num_train_timesteps=timesteps)
-
-    def test_betas(self):
-        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
-            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
-    def test_schedules(self):
-        for schedule in ["linear", "squaredcos_cap_v2"]:
-            self.check_over_configs(beta_schedule=schedule)
-
-    def test_variance_type(self):
-        for variance in ["fixed_small", "fixed_large", "other"]:
-            self.check_over_configs(variance_type=variance)
-
-    def test_clip_sample(self):
-        for clip_sample in [True, False]:
-            self.check_over_configs(clip_sample=clip_sample)
-
-    def test_thresholding(self):
-        self.check_over_configs(thresholding=False)
-        for threshold in [0.5, 1.0, 2.0]:
-            for prediction_type in ["epsilon", "sample", "v_prediction"]:
-                self.check_over_configs(
-                    thresholding=True,
-                    prediction_type=prediction_type,
-                    sample_max_value=threshold,
-                )
-
-    def test_prediction_type(self):
-        for prediction_type in ["epsilon", "sample", "v_prediction"]:
-            self.check_over_configs(prediction_type=prediction_type)
-
-    def test_time_indices(self):
-        for t in [0, 500, 999]:
-            self.check_over_forward(time_step=t)
-
-    def test_variance(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
-        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
-        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
-
-    def test_full_loop_no_noise(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        num_trained_timesteps = len(scheduler)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        generator = torch.manual_seed(0)
-
-        for t in reversed(range(num_trained_timesteps)):
-            # 1. predict noise residual
-            residual = model(sample, t)
-
-            # 2. predict previous mean of sample x_t-1
-            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
-
-            # if t > 0:
-            #     noise = self.dummy_sample_deter
-            #     variance = scheduler.get_variance(t) ** (0.5) * noise
-            #
-            # sample = pred_prev_sample + variance
-            sample = pred_prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_sum.item() - 258.9606) < 1e-2
-        assert abs(result_mean.item() - 0.3372) < 1e-3
-
-    def test_full_loop_with_v_prediction(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
-        scheduler = scheduler_class(**scheduler_config)
-
-        num_trained_timesteps = len(scheduler)
-
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        generator = torch.manual_seed(0)
-
-        for t in reversed(range(num_trained_timesteps)):
-            # 1. predict noise residual
-            residual = model(sample, t)
-
-            # 2. predict previous mean of sample x_t-1
-            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
-
-            # if t > 0:
-            #     noise = self.dummy_sample_deter
-            #     variance = scheduler.get_variance(t) ** (0.5) * noise
-            #
-            # sample = pred_prev_sample + variance
-            sample = pred_prev_sample
-
-        result_sum = torch.sum(torch.abs(sample))
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_sum.item() - 202.0296) < 1e-2
-        assert abs(result_mean.item() - 0.2631) < 1e-3
-
-    def test_custom_timesteps(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        timesteps = [100, 87, 50, 1, 0]
-
-        scheduler.set_timesteps(timesteps=timesteps)
-
-        scheduler_timesteps = scheduler.timesteps
-
-        for i, timestep in enumerate(scheduler_timesteps):
-            if i == len(timesteps) - 1:
-                expected_prev_t = -1
-            else:
-                expected_prev_t = timesteps[i + 1]
-
-            prev_t = scheduler.previous_timestep(timestep)
-            prev_t = prev_t.item()
-
-            self.assertEqual(prev_t, expected_prev_t)
-
-    def test_custom_timesteps_increasing_order(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        timesteps = [100, 87, 50, 51, 0]
-
-        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
-            scheduler.set_timesteps(timesteps=timesteps)
-
-    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        timesteps = [100, 87, 50, 1, 0]
-        num_inference_steps = len(timesteps)
-
-        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
-            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
-
-    def test_custom_timesteps_too_large(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-        scheduler = scheduler_class(**scheduler_config)
-
-        timesteps = [scheduler.config.num_train_timesteps]
-
-        with self.assertRaises(
-            ValueError,
-            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
-        ):
-            scheduler.set_timesteps(timesteps=timesteps)
spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py
DELETED
@@ -1,13 +0,0 @@
-_base_ = './rpn_r50_fpn_2x_coco.py'
-model = dict(
-    pretrained='open-mmlab://resnext101_64x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=64,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        style='pytorch'))
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
spaces/Apex-X/nono/roop/processors/frame/face_swapper.py
DELETED
@@ -1,100 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import insightface
-import threading
-
-import roop.globals
-import roop.processors.frame.core
-from roop.core import update_status
-from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
-from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
-from roop.typing import Face, Frame
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
-FACE_SWAPPER = None
-THREAD_LOCK = threading.Lock()
-NAME = 'ROOP.FACE-SWAPPER'
-
-
-def get_face_swapper() -> Any:
-    global FACE_SWAPPER
-
-    with THREAD_LOCK:
-        if FACE_SWAPPER is None:
-            model_path = resolve_relative_path('../models/inswapper_128.onnx')
-            FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
-    return FACE_SWAPPER
-
-
-def clear_face_swapper() -> None:
-    global FACE_SWAPPER
-
-    FACE_SWAPPER = None
-
-
-def pre_check() -> bool:
-    download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx'])
-    return True
-
-
-def pre_start() -> bool:
-    if not is_image(roop.globals.source_path):
-        update_status('Select an image for source path.', NAME)
-        return False
-    elif not get_one_face(cv2.imread(roop.globals.source_path)):
-        update_status('No face in source path detected.', NAME)
-        return False
-    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-        update_status('Select an image or video for target path.', NAME)
-        return False
-    return True
-
-
-def post_process() -> None:
-    clear_face_swapper()
-    clear_face_reference()
-
-
-def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-
-
-def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
-    if roop.globals.many_faces:
-        many_faces = get_many_faces(temp_frame)
-        if many_faces:
-            for target_face in many_faces:
-                temp_frame = swap_face(source_face, target_face, temp_frame)
-    else:
-        target_face = find_similar_face(temp_frame, reference_face)
-        if target_face:
-            temp_frame = swap_face(source_face, target_face, temp_frame)
-    return temp_frame
-
-
-def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
-    reference_face = None if roop.globals.many_faces else get_face_reference()
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(source_face, reference_face, temp_frame)
-        cv2.imwrite(temp_frame_path, result)
-        if update:
-            update()
-
-
-def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
-    target_frame = cv2.imread(target_path)
-    reference_face = None if roop.globals.many_faces else get_one_face(target_frame, roop.globals.reference_face_position)
-    result = process_frame(source_face, reference_face, target_frame)
-    cv2.imwrite(output_path, result)
-
-
-def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    if not roop.globals.many_faces and not get_face_reference():
-        reference_frame = cv2.imread(temp_frame_paths[roop.globals.reference_frame_number])
-        reference_face = get_one_face(reference_frame, roop.globals.reference_face_position)
-        set_face_reference(reference_face)
-    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/__init__.py
DELETED
File without changes
spaces/Ashrafb/codellama-34b/model.py
DELETED
@@ -1,57 +0,0 @@
-import os
-from typing import Iterator
-
-from text_generation import Client
-
-model_id = 'codellama/CodeLlama-34b-Instruct-hf'
-
-API_URL = "https://api-inference.huggingface.co/models/" + model_id
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-
-client = Client(
-    API_URL,
-    headers={"Authorization": f"Bearer {HF_TOKEN}"},
-)
-EOS_STRING = "</s>"
-EOT_STRING = "<EOT>"
-
-
-def get_prompt(message: str, chat_history: list[tuple[str, str]],
-               system_prompt: str) -> str:
-    texts = [f'<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
-    # The first user input is _not_ stripped
-    do_strip = False
-    for user_input, response in chat_history:
-        user_input = user_input.strip() if do_strip else user_input
-        do_strip = True
-        texts.append(f'{user_input} [/INST] {response.strip()} </s><s>[INST] ')
-    message = message.strip() if do_strip else message
-    texts.append(f'{message} [/INST]')
-    return ''.join(texts)
-
-
-def run(message: str,
-        chat_history: list[tuple[str, str]],
-        system_prompt: str,
-        max_new_tokens: int = 1024,
-        temperature: float = 0.1,
-        top_p: float = 0.9,
-        top_k: int = 50) -> Iterator[str]:
-    prompt = get_prompt(message, chat_history, system_prompt)
-
-    generate_kwargs = dict(
-        max_new_tokens=max_new_tokens,
-        do_sample=True,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-    )
-    stream = client.generate_stream(prompt, **generate_kwargs)
-    output = ""
-    for response in stream:
-        if any([end_token in response.token.text for end_token in [EOS_STRING, EOT_STRING]]):
-            return output
-        else:
-            output += response.token.text
-            yield output
-    return output
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/parser.py
DELETED
@@ -1,294 +0,0 @@
-"""Base option parser setup"""
-
-import logging
-import optparse
-import shutil
-import sys
-import textwrap
-from contextlib import suppress
-from typing import Any, Dict, Generator, List, Tuple
-
-from pip._internal.cli.status_codes import UNKNOWN_ERROR
-from pip._internal.configuration import Configuration, ConfigurationError
-from pip._internal.utils.misc import redact_auth_from_url, strtobool
-
-logger = logging.getLogger(__name__)
-
-
-class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
-    """A prettier/less verbose help formatter for optparse."""
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
-        # help position must be aligned with __init__.parseopts.description
-        kwargs["max_help_position"] = 30
-        kwargs["indent_increment"] = 1
-        kwargs["width"] = shutil.get_terminal_size()[0] - 2
-        super().__init__(*args, **kwargs)
-
-    def format_option_strings(self, option: optparse.Option) -> str:
-        return self._format_option_strings(option)
-
-    def _format_option_strings(
-        self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", "
-    ) -> str:
-        """
-        Return a comma-separated list of option strings and metavars.
-
-        :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
-        :param mvarfmt: metavar format string
-        :param optsep: separator
-        """
-        opts = []
-
-        if option._short_opts:
-            opts.append(option._short_opts[0])
-        if option._long_opts:
-            opts.append(option._long_opts[0])
-        if len(opts) > 1:
-            opts.insert(1, optsep)
-
-        if option.takes_value():
-            assert option.dest is not None
-            metavar = option.metavar or option.dest.lower()
-            opts.append(mvarfmt.format(metavar.lower()))
-
-        return "".join(opts)
-
-    def format_heading(self, heading: str) -> str:
-        if heading == "Options":
-            return ""
-        return heading + ":\n"
-
-    def format_usage(self, usage: str) -> str:
-        """
-        Ensure there is only one newline between usage and the first heading
-        if there is no description.
-        """
-        msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), "  "))
-        return msg
-
-    def format_description(self, description: str) -> str:
-        # leave full control over description to us
-        if description:
-            if hasattr(self.parser, "main"):
-                label = "Commands"
-            else:
-                label = "Description"
-            # some doc strings have initial newlines, some don't
-            description = description.lstrip("\n")
-            # some doc strings have final newlines and spaces, some don't
-            description = description.rstrip()
-            # dedent, then reindent
-            description = self.indent_lines(textwrap.dedent(description), "  ")
-            description = f"{label}:\n{description}\n"
-            return description
-        else:
-            return ""
-
-    def format_epilog(self, epilog: str) -> str:
-        # leave full control over epilog to us
-        if epilog:
-            return epilog
-        else:
-            return ""
-
-    def indent_lines(self, text: str, indent: str) -> str:
-        new_lines = [indent + line for line in text.split("\n")]
-        return "\n".join(new_lines)
-
-
-class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
-    """Custom help formatter for use in ConfigOptionParser.
-
-    This is updates the defaults before expanding them, allowing
-    them to show up correctly in the help listing.
-
-    Also redact auth from url type options
-    """
-
-    def expand_default(self, option: optparse.Option) -> str:
-        default_values = None
-        if self.parser is not None:
-            assert isinstance(self.parser, ConfigOptionParser)
-            self.parser._update_defaults(self.parser.defaults)
-            assert option.dest is not None
-            default_values = self.parser.defaults.get(option.dest)
-        help_text = super().expand_default(option)
-
-        if default_values and option.metavar == "URL":
-            if isinstance(default_values, str):
-                default_values = [default_values]
-
-            # If its not a list, we should abort and just return the help text
-            if not isinstance(default_values, list):
-                default_values = []
-
-            for val in default_values:
-                help_text = help_text.replace(val, redact_auth_from_url(val))
-
-        return help_text
-
-
-class CustomOptionParser(optparse.OptionParser):
-    def insert_option_group(
-        self, idx: int, *args: Any, **kwargs: Any
-    ) -> optparse.OptionGroup:
-        """Insert an OptionGroup at a given position."""
-        group = self.add_option_group(*args, **kwargs)
-
-        self.option_groups.pop()
-        self.option_groups.insert(idx, group)
-
-        return group
-
-    @property
-    def option_list_all(self) -> List[optparse.Option]:
-        """Get a list of all options, including those in option groups."""
-        res = self.option_list[:]
-        for i in self.option_groups:
-            res.extend(i.option_list)
-
-        return res
-
-
-class ConfigOptionParser(CustomOptionParser):
-    """Custom option parser which updates its defaults by checking the
-    configuration files and environmental variables"""
-
-    def __init__(
-        self,
-        *args: Any,
-        name: str,
-        isolated: bool = False,
-        **kwargs: Any,
-    ) -> None:
-        self.name = name
-        self.config = Configuration(isolated)
-
-        assert self.name
-        super().__init__(*args, **kwargs)
-
-    def check_default(self, option: optparse.Option, key: str, val: Any) -> Any:
-        try:
-            return option.check_value(key, val)
-        except optparse.OptionValueError as exc:
-            print(f"An error occurred during configuration: {exc}")
-            sys.exit(3)
-
-    def _get_ordered_configuration_items(
-        self,
-    ) -> Generator[Tuple[str, Any], None, None]:
-        # Configuration gives keys in an unordered manner. Order them.
-        override_order = ["global", self.name, ":env:"]
-
-        # Pool the options into different groups
-        section_items: Dict[str, List[Tuple[str, Any]]] = {
-            name: [] for name in override_order
-        }
-        for section_key, val in self.config.items():
-            # ignore empty values
-            if not val:
-                logger.debug(
-                    "Ignoring configuration key '%s' as it's value is empty.",
-                    section_key,
-                )
-                continue
-
-            section, key = section_key.split(".", 1)
-            if section in override_order:
-                section_items[section].append((key, val))
-
-        # Yield each group in their override order
-        for section in override_order:
-            for key, val in section_items[section]:
-                yield key, val
-
-    def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]:
-        """Updates the given defaults with values from the config files and
-        the environ. Does a little special handling for certain types of
-        options (lists)."""
-
-        # Accumulate complex default state.
-        self.values = optparse.Values(self.defaults)
-        late_eval = set()
-        # Then set the options with those values
-        for key, val in self._get_ordered_configuration_items():
-            # '--' because configuration supports only long names
-            option = self.get_option("--" + key)
-
-            # Ignore options not present in this parser. E.g. non-globals put
-            # in [global] by users that want them to apply to all applicable
-            # commands.
-            if option is None:
-                continue
-
-            assert option.dest is not None
-
-            if option.action in ("store_true", "store_false"):
-                try:
-                    val = strtobool(val)
-                except ValueError:
-                    self.error(
-                        "{} is not a valid value for {} option, "  # noqa
-                        "please specify a boolean value like yes/no, "
-                        "true/false or 1/0 instead.".format(val, key)
-                    )
-            elif option.action == "count":
-                with suppress(ValueError):
-                    val = strtobool(val)
-                with suppress(ValueError):
-                    val = int(val)
-                if not isinstance(val, int) or val < 0:
-                    self.error(
-                        "{} is not a valid value for {} option, "  # noqa
-                        "please instead specify either a non-negative integer "
-                        "or a boolean value like yes/no or false/true "
-                        "which is equivalent to 1/0.".format(val, key)
-                    )
-            elif option.action == "append":
-                val = val.split()
-                val = [self.check_default(option, key, v) for v in val]
-            elif option.action == "callback":
-                assert option.callback is not None
-                late_eval.add(option.dest)
-                opt_str = option.get_opt_string()
-                val = option.convert_value(opt_str, val)
-                # From take_action
-                args = option.callback_args or ()
-                kwargs = option.callback_kwargs or {}
-                option.callback(option, opt_str, val, self, *args, **kwargs)
-            else:
-                val = self.check_default(option, key, val)
-
-            defaults[option.dest] = val
-
-        for key in late_eval:
-            defaults[key] = getattr(self.values, key)
-        self.values = None
-        return defaults
-
-    def get_default_values(self) -> optparse.Values:
-        """Overriding to make updating the defaults after instantiation of
-        the option parser possible, _update_defaults() does the dirty work."""
-        if not self.process_default_values:
-            # Old, pre-Optik 1.5 behaviour.
-            return optparse.Values(self.defaults)
-
-        # Load the configuration, or error out in case of an error
-        try:
-            self.config.load()
-        except ConfigurationError as err:
-            self.exit(UNKNOWN_ERROR, str(err))
-
-        defaults = self._update_defaults(self.defaults.copy())  # ours
-        for option in self._get_all_options():
-            assert option.dest is not None
-            default = defaults.get(option.dest)
-            if isinstance(default, str):
-                opt_str = option.get_opt_string()
-                defaults[option.dest] = option.check_value(opt_str, default)
-        return optparse.Values(defaults)
-
-    def error(self, msg: str) -> None:
-        self.print_usage(sys.stderr)
-        self.exit(UNKNOWN_ERROR, f"{msg}\n")
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/pyproject.py
DELETED
@@ -1,179 +0,0 @@
-import importlib.util
-import os
-from collections import namedtuple
-from typing import Any, List, Optional
-
-from pip._vendor import tomli
-from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
-
-from pip._internal.exceptions import (
-    InstallationError,
-    InvalidPyProjectBuildRequires,
-    MissingPyProjectBuildRequires,
-)
-
-
-def _is_list_of_str(obj: Any) -> bool:
-    return isinstance(obj, list) and all(isinstance(item, str) for item in obj)
-
-
-def make_pyproject_path(unpacked_source_directory: str) -> str:
-    return os.path.join(unpacked_source_directory, "pyproject.toml")
-
-
-BuildSystemDetails = namedtuple(
-    "BuildSystemDetails", ["requires", "backend", "check", "backend_path"]
-)
-
-
-def load_pyproject_toml(
-    use_pep517: Optional[bool], pyproject_toml: str, setup_py: str, req_name: str
-) -> Optional[BuildSystemDetails]:
-    """Load the pyproject.toml file.
-
-    Parameters:
-        use_pep517 - Has the user requested PEP 517 processing? None
-            means the user hasn't explicitly specified.
-        pyproject_toml - Location of the project's pyproject.toml file
-        setup_py - Location of the project's setup.py file
-        req_name - The name of the requirement we're processing (for
-            error reporting)
-
-    Returns:
-        None if we should use the legacy code path, otherwise a tuple
-        (
-            requirements from pyproject.toml,
-            name of PEP 517 backend,
-            requirements we should check are installed after setting
-                up the build environment
-            directory paths to import the backend from (backend-path),
-                relative to the project root.
-        )
-    """
-    has_pyproject = os.path.isfile(pyproject_toml)
-    has_setup = os.path.isfile(setup_py)
-
-    if not has_pyproject and not has_setup:
-        raise InstallationError(
-            f"{req_name} does not appear to be a Python project: "
-            f"neither 'setup.py' nor 'pyproject.toml' found."
-        )
-
-    if has_pyproject:
-        with open(pyproject_toml, encoding="utf-8") as f:
-            pp_toml = tomli.loads(f.read())
-        build_system = pp_toml.get("build-system")
-    else:
-        build_system = None
-
-    # The following cases must use PEP 517
-    # We check for use_pep517 being non-None and falsey because that means
-    # the user explicitly requested --no-use-pep517. The value 0 as
-    # opposed to False can occur when the value is provided via an
-    # environment variable or config file option (due to the quirk of
-    # strtobool() returning an integer in pip's configuration code).
-    if has_pyproject and not has_setup:
-        if use_pep517 is not None and not use_pep517:
-            raise InstallationError(
-                "Disabling PEP 517 processing is invalid: "
-                "project does not have a setup.py"
-            )
-        use_pep517 = True
-    elif build_system and "build-backend" in build_system:
-        if use_pep517 is not None and not use_pep517:
-            raise InstallationError(
-                "Disabling PEP 517 processing is invalid: "
-                "project specifies a build backend of {} "
-                "in pyproject.toml".format(build_system["build-backend"])
-            )
-        use_pep517 = True
-
-    # If we haven't worked out whether to use PEP 517 yet,
-    # and the user hasn't explicitly stated a preference,
-    # we do so if the project has a pyproject.toml file
-    # or if we cannot import setuptools or wheels.
-
-    # We fallback to PEP 517 when without setuptools or without the wheel package,
-    # so setuptools can be installed as a default build backend.
-    # For more info see:
-    # https://discuss.python.org/t/pip-without-setuptools-could-the-experience-be-improved/11810/9
-    # https://github.com/pypa/pip/issues/8559
-    elif use_pep517 is None:
-        use_pep517 = (
-            has_pyproject
-            or not importlib.util.find_spec("setuptools")
-            or not importlib.util.find_spec("wheel")
-        )
-
-    # At this point, we know whether we're going to use PEP 517.
-    assert use_pep517 is not None
-
-    # If we're using the legacy code path, there is nothing further
-    # for us to do here.
-    if not use_pep517:
-        return None
-
-    if build_system is None:
-        # Either the user has a pyproject.toml with no build-system
-        # section, or the user has no pyproject.toml, but has opted in
-        # explicitly via --use-pep517.
-        # In the absence of any explicit backend specification, we
-        # assume the setuptools backend that most closely emulates the
-        # traditional direct setup.py execution, and require wheel and
-        # a version of setuptools that supports that backend.
-
-        build_system = {
-            "requires": ["setuptools>=40.8.0", "wheel"],
-            "build-backend": "setuptools.build_meta:__legacy__",
-        }
-
-    # If we're using PEP 517, we have build system information (either
-    # from pyproject.toml, or defaulted by the code above).
-    # Note that at this point, we do not know if the user has actually
-    # specified a backend, though.
-    assert build_system is not None
-
-    # Ensure that the build-system section in pyproject.toml conforms
-    # to PEP 518.
-
-    # Specifying the build-system table but not the requires key is invalid
-    if "requires" not in build_system:
-        raise MissingPyProjectBuildRequires(package=req_name)
-
-    # Error out if requires is not a list of strings
-    requires = build_system["requires"]
-    if not _is_list_of_str(requires):
-        raise InvalidPyProjectBuildRequires(
-            package=req_name,
-            reason="It is not a list of strings.",
-        )
-
-    # Each requirement must be valid as per PEP 508
-    for requirement in requires:
-        try:
-            Requirement(requirement)
-        except InvalidRequirement as error:
-            raise InvalidPyProjectBuildRequires(
-                package=req_name,
-                reason=f"It contains an invalid requirement: {requirement!r}",
-            ) from error
-
-    backend = build_system.get("build-backend")
-    backend_path = build_system.get("backend-path", [])
-    check: List[str] = []
-    if backend is None:
-        # If the user didn't specify a backend, we assume they want to use
-        # the setuptools backend. But we can't be sure they have included
-        # a version of setuptools which supplies the backend. So we
-        # make a note to check that this requirement is present once
-        # we have set up the environment.
-        # This is quite a lot of work to check for a very specific case. But
-        # the problem is, that case is potentially quite common - projects that
-        # adopted PEP 518 early for the ability to specify requirements to
-        # execute setup.py, but never considered needing to mention the build
-        # tools themselves. The original PEP 518 code had a similar check (but
-        # implemented in a different way).
-        backend = "setuptools.build_meta:__legacy__"
-        check = ["setuptools>=40.8.0"]
-
-    return BuildSystemDetails(requires, backend, check, backend_path)
spaces/Beasto/Photo2Monet_Cyclegan/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Photo2Monet Cyclegan
|
3 |
-
emoji: 👀
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: red
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.27.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/requests/exceptions.py
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
"""
|
4 |
-
requests.exceptions
|
5 |
-
~~~~~~~~~~~~~~~~~~~
|
6 |
-
|
7 |
-
This module contains the set of Requests' exceptions.
|
8 |
-
|
9 |
-
"""
|
10 |
-
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
|
11 |
-
|
12 |
-
|
13 |
-
class RequestException(IOError):
|
14 |
-
"""There was an ambiguous exception that occurred while handling your
|
15 |
-
request."""
|
16 |
-
|
17 |
-
def __init__(self, *args, **kwargs):
|
18 |
-
"""
|
19 |
-
Initialize RequestException with `request` and `response` objects.
|
20 |
-
"""
|
21 |
-
response = kwargs.pop('response', None)
|
22 |
-
self.response = response
|
23 |
-
self.request = kwargs.pop('request', None)
|
24 |
-
if (response is not None and not self.request and
|
25 |
-
hasattr(response, 'request')):
|
26 |
-
self.request = self.response.request
|
27 |
-
super(RequestException, self).__init__(*args, **kwargs)
|
28 |
-
|
29 |
-
|
30 |
-
class HTTPError(RequestException):
|
31 |
-
"""An HTTP error occurred."""
|
32 |
-
|
33 |
-
|
34 |
-
class ConnectionError(RequestException):
|
35 |
-
"""A Connection error occurred."""
|
36 |
-
|
37 |
-
|
38 |
-
class ProxyError(ConnectionError):
|
39 |
-
"""A proxy error occurred."""
|
40 |
-
|
41 |
-
|
42 |
-
class SSLError(ConnectionError):
|
43 |
-
"""An SSL error occurred."""
|
44 |
-
|
45 |
-
|
46 |
-
class Timeout(RequestException):
|
47 |
-
"""The request timed out.
|
48 |
-
|
49 |
-
Catching this error will catch both
|
50 |
-
:exc:`~requests.exceptions.ConnectTimeout` and
|
51 |
-
:exc:`~requests.exceptions.ReadTimeout` errors.
|
52 |
-
"""
|
53 |
-
|
54 |
-
|
55 |
-
class ConnectTimeout(ConnectionError, Timeout):
|
56 |
-
"""The request timed out while trying to connect to the remote server.
|
57 |
-
|
58 |
-
Requests that produced this error are safe to retry.
|
59 |
-
"""
|
60 |
-
|
61 |
-
|
62 |
-
class ReadTimeout(Timeout):
|
63 |
-
"""The server did not send any data in the allotted amount of time."""
|
64 |
-
|
65 |
-
|
66 |
-
class URLRequired(RequestException):
|
67 |
-
"""A valid URL is required to make a request."""
|
68 |
-
|
69 |
-
|
70 |
-
class TooManyRedirects(RequestException):
|
71 |
-
"""Too many redirects."""
|
72 |
-
|
73 |
-
|
74 |
-
class MissingSchema(RequestException, ValueError):
|
75 |
-
"""The URL schema (e.g. http or https) is missing."""
|
76 |
-
|
77 |
-
|
78 |
-
class InvalidSchema(RequestException, ValueError):
|
79 |
-
"""See defaults.py for valid schemas."""
|
80 |
-
|
81 |
-
|
82 |
-
class InvalidURL(RequestException, ValueError):
|
83 |
-
""" The URL provided was somehow invalid. """
|
84 |
-
|
85 |
-
|
86 |
-
class ChunkedEncodingError(RequestException):
|
87 |
-
"""The server declared chunked encoding but sent an invalid chunk."""
|
88 |
-
|
89 |
-
|
90 |
-
class ContentDecodingError(RequestException, BaseHTTPError):
|
91 |
-
"""Failed to decode response content"""
|
92 |
-
|
93 |
-
|
94 |
-
class StreamConsumedError(RequestException, TypeError):
|
95 |
-
"""The content for this response was already consumed"""
|
96 |
-
|
97 |
-
|
98 |
-
class RetryError(RequestException):
|
99 |
-
"""Custom retries logic failed"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/_json.py
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
# Extracted from https://github.com/pfmoore/pkg_metadata
|
2 |
-
|
3 |
-
from email.header import Header, decode_header, make_header
|
4 |
-
from email.message import Message
|
5 |
-
from typing import Any, Dict, List, Union
|
6 |
-
|
7 |
-
METADATA_FIELDS = [
|
8 |
-
# Name, Multiple-Use
|
9 |
-
("Metadata-Version", False),
|
10 |
-
("Name", False),
|
11 |
-
("Version", False),
|
12 |
-
("Dynamic", True),
|
13 |
-
("Platform", True),
|
14 |
-
("Supported-Platform", True),
|
15 |
-
("Summary", False),
|
16 |
-
("Description", False),
|
17 |
-
("Description-Content-Type", False),
|
18 |
-
("Keywords", False),
|
19 |
-
("Home-page", False),
|
20 |
-
("Download-URL", False),
|
21 |
-
("Author", False),
|
22 |
-
("Author-email", False),
|
23 |
-
("Maintainer", False),
|
24 |
-
("Maintainer-email", False),
|
25 |
-
("License", False),
|
26 |
-
("Classifier", True),
|
27 |
-
("Requires-Dist", True),
|
28 |
-
("Requires-Python", False),
|
29 |
-
("Requires-External", True),
|
30 |
-
("Project-URL", True),
|
31 |
-
("Provides-Extra", True),
|
32 |
-
("Provides-Dist", True),
|
33 |
-
("Obsoletes-Dist", True),
|
34 |
-
]
|
35 |
-
|
36 |
-
|
37 |
-
def json_name(field: str) -> str:
|
38 |
-
return field.lower().replace("-", "_")
|
39 |
-
|
40 |
-
|
41 |
-
def msg_to_json(msg: Message) -> Dict[str, Any]:
|
42 |
-
"""Convert a Message object into a JSON-compatible dictionary."""
|
43 |
-
|
44 |
-
def sanitise_header(h: Union[Header, str]) -> str:
|
45 |
-
if isinstance(h, Header):
|
46 |
-
chunks = []
|
47 |
-
for bytes, encoding in decode_header(h):
|
48 |
-
if encoding == "unknown-8bit":
|
49 |
-
try:
|
50 |
-
# See if UTF-8 works
|
51 |
-
bytes.decode("utf-8")
|
52 |
-
encoding = "utf-8"
|
53 |
-
except UnicodeDecodeError:
|
54 |
-
# If not, latin1 at least won't fail
|
55 |
-
encoding = "latin1"
|
56 |
-
chunks.append((bytes, encoding))
|
57 |
-
return str(make_header(chunks))
|
58 |
-
return str(h)
|
59 |
-
|
60 |
-
result = {}
|
61 |
-
for field, multi in METADATA_FIELDS:
|
62 |
-
if field not in msg:
|
63 |
-
continue
|
64 |
-
key = json_name(field)
|
65 |
-
if multi:
|
66 |
-
value: Union[str, List[str]] = [
|
67 |
-
sanitise_header(v) for v in msg.get_all(field)
|
68 |
-
]
|
69 |
-
else:
|
70 |
-
value = sanitise_header(msg.get(field))
|
71 |
-
if key == "keywords":
|
72 |
-
# Accept both comma-separated and space-separated
|
73 |
-
# forms, for better compatibility with old data.
|
74 |
-
if "," in value:
|
75 |
-
value = [v.strip() for v in value.split(",")]
|
76 |
-
else:
|
77 |
-
value = value.split()
|
78 |
-
result[key] = value
|
79 |
-
|
80 |
-
payload = msg.get_payload()
|
81 |
-
if payload:
|
82 |
-
result["description"] = payload
|
83 |
-
|
84 |
-
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
# SPDX-FileCopyrightText: 2015 Eric Larson
|
2 |
-
#
|
3 |
-
# SPDX-License-Identifier: Apache-2.0
|
4 |
-
|
5 |
-
import hashlib
|
6 |
-
import os
|
7 |
-
from textwrap import dedent
|
8 |
-
|
9 |
-
from ..cache import BaseCache, SeparateBodyBaseCache
|
10 |
-
from ..controller import CacheController
|
11 |
-
|
12 |
-
try:
|
13 |
-
FileNotFoundError
|
14 |
-
except NameError:
|
15 |
-
# py2.X
|
16 |
-
FileNotFoundError = (IOError, OSError)
|
17 |
-
|
18 |
-
|
19 |
-
def _secure_open_write(filename, fmode):
|
20 |
-
# We only want to write to this file, so open it in write only mode
|
21 |
-
flags = os.O_WRONLY
|
22 |
-
|
23 |
-
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
|
24 |
-
# will open *new* files.
|
25 |
-
# We specify this because we want to ensure that the mode we pass is the
|
26 |
-
# mode of the file.
|
27 |
-
flags |= os.O_CREAT | os.O_EXCL
|
28 |
-
|
29 |
-
# Do not follow symlinks to prevent someone from making a symlink that
|
30 |
-
# we follow and insecurely open a cache file.
|
31 |
-
if hasattr(os, "O_NOFOLLOW"):
|
32 |
-
flags |= os.O_NOFOLLOW
|
33 |
-
|
34 |
-
# On Windows we'll mark this file as binary
|
35 |
-
if hasattr(os, "O_BINARY"):
|
36 |
-
flags |= os.O_BINARY
|
37 |
-
|
38 |
-
# Before we open our file, we want to delete any existing file that is
|
39 |
-
# there
|
40 |
-
try:
|
41 |
-
os.remove(filename)
|
42 |
-
except (IOError, OSError):
|
43 |
-
# The file must not exist already, so we can just skip ahead to opening
|
44 |
-
pass
|
45 |
-
|
46 |
-
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
|
47 |
-
# race condition happens between the os.remove and this line, that an
|
48 |
-
# error will be raised. Because we utilize a lockfile this should only
|
49 |
-
# happen if someone is attempting to attack us.
|
50 |
-
fd = os.open(filename, flags, fmode)
|
51 |
-
try:
|
52 |
-
return os.fdopen(fd, "wb")
|
53 |
-
|
54 |
-
except:
|
55 |
-
# An error occurred wrapping our FD in a file object
|
56 |
-
os.close(fd)
|
57 |
-
raise
|
58 |
-
|
59 |
-
|
60 |
-
class _FileCacheMixin:
|
61 |
-
"""Shared implementation for both FileCache variants."""
|
62 |
-
|
63 |
-
def __init__(
|
64 |
-
self,
|
65 |
-
directory,
|
66 |
-
forever=False,
|
67 |
-
filemode=0o0600,
|
68 |
-
dirmode=0o0700,
|
69 |
-
use_dir_lock=None,
|
70 |
-
lock_class=None,
|
71 |
-
):
|
72 |
-
|
73 |
-
if use_dir_lock is not None and lock_class is not None:
|
74 |
-
raise ValueError("Cannot use use_dir_lock and lock_class together")
|
75 |
-
|
76 |
-
try:
|
77 |
-
from lockfile import LockFile
|
78 |
-
from lockfile.mkdirlockfile import MkdirLockFile
|
79 |
-
except ImportError:
|
80 |
-
notice = dedent(
|
81 |
-
"""
|
82 |
-
NOTE: In order to use the FileCache you must have
|
83 |
-
lockfile installed. You can install it via pip:
|
84 |
-
pip install lockfile
|
85 |
-
"""
|
86 |
-
)
|
87 |
-
raise ImportError(notice)
|
88 |
-
|
89 |
-
else:
|
90 |
-
if use_dir_lock:
|
91 |
-
lock_class = MkdirLockFile
|
92 |
-
|
93 |
-
elif lock_class is None:
|
94 |
-
lock_class = LockFile
|
95 |
-
|
96 |
-
self.directory = directory
|
97 |
-
self.forever = forever
|
98 |
-
self.filemode = filemode
|
99 |
-
self.dirmode = dirmode
|
100 |
-
self.lock_class = lock_class
|
101 |
-
|
102 |
-
@staticmethod
|
103 |
-
def encode(x):
|
104 |
-
return hashlib.sha224(x.encode()).hexdigest()
|
105 |
-
|
106 |
-
def _fn(self, name):
|
107 |
-
# NOTE: This method should not change as some may depend on it.
|
108 |
-
# See: https://github.com/ionrock/cachecontrol/issues/63
|
109 |
-
hashed = self.encode(name)
|
110 |
-
parts = list(hashed[:5]) + [hashed]
|
111 |
-
return os.path.join(self.directory, *parts)
|
112 |
-
|
113 |
-
def get(self, key):
|
114 |
-
name = self._fn(key)
|
115 |
-
try:
|
116 |
-
with open(name, "rb") as fh:
|
117 |
-
return fh.read()
|
118 |
-
|
119 |
-
except FileNotFoundError:
|
120 |
-
return None
|
121 |
-
|
122 |
-
def set(self, key, value, expires=None):
|
123 |
-
name = self._fn(key)
|
124 |
-
self._write(name, value)
|
125 |
-
|
126 |
-
def _write(self, path, data: bytes):
|
127 |
-
"""
|
128 |
-
Safely write the data to the given path.
|
129 |
-
"""
|
130 |
-
# Make sure the directory exists
|
131 |
-
try:
|
132 |
-
os.makedirs(os.path.dirname(path), self.dirmode)
|
133 |
-
except (IOError, OSError):
|
134 |
-
pass
|
135 |
-
|
136 |
-
with self.lock_class(path) as lock:
|
137 |
-
# Write our actual file
|
138 |
-
with _secure_open_write(lock.path, self.filemode) as fh:
|
139 |
-
fh.write(data)
|
140 |
-
|
141 |
-
def _delete(self, key, suffix):
|
142 |
-
name = self._fn(key) + suffix
|
143 |
-
if not self.forever:
|
144 |
-
try:
|
145 |
-
os.remove(name)
|
146 |
-
except FileNotFoundError:
|
147 |
-
pass
|
148 |
-
|
149 |
-
|
150 |
-
class FileCache(_FileCacheMixin, BaseCache):
|
151 |
-
"""
|
152 |
-
Traditional FileCache: body is stored in memory, so not suitable for large
|
153 |
-
downloads.
|
154 |
-
"""
|
155 |
-
|
156 |
-
def delete(self, key):
|
157 |
-
self._delete(key, "")
|
158 |
-
|
159 |
-
|
160 |
-
class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache):
|
161 |
-
"""
|
162 |
-
Memory-efficient FileCache: body is stored in a separate file, reducing
|
163 |
-
peak memory usage.
|
164 |
-
"""
|
165 |
-
|
166 |
-
def get_body(self, key):
|
167 |
-
name = self._fn(key) + ".body"
|
168 |
-
try:
|
169 |
-
return open(name, "rb")
|
170 |
-
except FileNotFoundError:
|
171 |
-
return None
|
172 |
-
|
173 |
-
def set_body(self, key, body):
|
174 |
-
name = self._fn(key) + ".body"
|
175 |
-
self._write(name, body)
|
176 |
-
|
177 |
-
def delete(self, key):
|
178 |
-
self._delete(key, "")
|
179 |
-
self._delete(key, ".body")
|
180 |
-
|
181 |
-
|
182 |
-
def url_to_file_path(url, filecache):
|
183 |
-
"""Return the file cache path based on the URL.
|
184 |
-
|
185 |
-
This does not ensure the file exists!
|
186 |
-
"""
|
187 |
-
key = CacheController.cache_url(url)
|
188 |
-
return filecache._fn(key)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/tomli/_types.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
# SPDX-License-Identifier: MIT
|
2 |
-
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
|
3 |
-
# Licensed to PSF under a Contributor Agreement.
|
4 |
-
|
5 |
-
from typing import Any, Callable, Tuple
|
6 |
-
|
7 |
-
# Type annotations
|
8 |
-
ParseFloat = Callable[[str], Any]
|
9 |
-
Key = Tuple[str, ...]
|
10 |
-
Pos = int
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pcg.h
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include "diffvg.h"
|
4 |
-
|
5 |
-
// http://www.pcg-random.org/download.html
|
6 |
-
struct pcg32_state {
|
7 |
-
uint64_t state;
|
8 |
-
uint64_t inc;
|
9 |
-
};
|
10 |
-
|
11 |
-
DEVICE inline uint32_t next_pcg32(pcg32_state *rng) {
|
12 |
-
uint64_t oldstate = rng->state;
|
13 |
-
// Advance internal state
|
14 |
-
rng->state = oldstate * 6364136223846793005ULL + (rng->inc|1);
|
15 |
-
// Calculate output function (XSH RR), uses old state for max ILP
|
16 |
-
uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
|
17 |
-
uint32_t rot = oldstate >> 59u;
|
18 |
-
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
|
19 |
-
}
|
20 |
-
|
21 |
-
// https://github.com/wjakob/pcg32/blob/master/pcg32.h
|
22 |
-
DEVICE inline float next_pcg32_float(pcg32_state *rng) {
|
23 |
-
union {
|
24 |
-
uint32_t u;
|
25 |
-
float f;
|
26 |
-
} x;
|
27 |
-
x.u = (next_pcg32(rng) >> 9) | 0x3f800000u;
|
28 |
-
return x.f - 1.0f;
|
29 |
-
}
|
30 |
-
|
31 |
-
// Initialize each pixel with a PCG rng with a different stream
|
32 |
-
DEVICE inline pcg32_state init_pcg32(int idx, uint64_t seed) {
|
33 |
-
pcg32_state state;
|
34 |
-
state.state = 0U;
|
35 |
-
state.inc = (((uint64_t)idx + 1) << 1u) | 1u;
|
36 |
-
next_pcg32(&state);
|
37 |
-
state.state += (0x853c49e6748fea9bULL + seed);
|
38 |
-
next_pcg32(&state);
|
39 |
-
return state;
|
40 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/complex/arithmetic.h
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
* Copyright 2013 Filipe RNC Maia
|
4 |
-
*
|
5 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
* you may not use this file except in compliance with the License.
|
7 |
-
* You may obtain a copy of the License at
|
8 |
-
*
|
9 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
*
|
11 |
-
* Unless required by applicable law or agreed to in writing, software
|
12 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
* See the License for the specific language governing permissions and
|
15 |
-
* limitations under the License.
|
16 |
-
*/
|
17 |
-
|
18 |
-
#include <thrust/complex.h>
|
19 |
-
#include <cfloat>
|
20 |
-
#include <cmath>
|
21 |
-
#include <thrust/detail/complex/c99math.h>
|
22 |
-
|
23 |
-
namespace thrust
|
24 |
-
{
|
25 |
-
|
26 |
-
/* --- Binary Arithmetic Operators --- */
|
27 |
-
|
28 |
-
template <typename T0, typename T1>
|
29 |
-
__host__ __device__
|
30 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
31 |
-
operator+(const complex<T0>& x, const complex<T1>& y)
|
32 |
-
{
|
33 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
34 |
-
return complex<T>(x.real() + y.real(), x.imag() + y.imag());
|
35 |
-
}
|
36 |
-
|
37 |
-
template <typename T0, typename T1>
|
38 |
-
__host__ __device__
|
39 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
40 |
-
operator+(const complex<T0>& x, const T1& y)
|
41 |
-
{
|
42 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
43 |
-
return complex<T>(x.real() + y, x.imag());
|
44 |
-
}
|
45 |
-
|
46 |
-
template <typename T0, typename T1>
|
47 |
-
__host__ __device__
|
48 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
49 |
-
operator+(const T0& x, const complex<T1>& y)
|
50 |
-
{
|
51 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
52 |
-
return complex<T>(x + y.real(), y.imag());
|
53 |
-
}
|
54 |
-
|
55 |
-
|
56 |
-
template <typename T0, typename T1>
|
57 |
-
__host__ __device__
|
58 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
59 |
-
operator-(const complex<T0>& x, const complex<T1>& y)
|
60 |
-
{
|
61 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
62 |
-
return complex<T>(x.real() - y.real(), x.imag() - y.imag());
|
63 |
-
}
|
64 |
-
|
65 |
-
template <typename T0, typename T1>
|
66 |
-
__host__ __device__
|
67 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
68 |
-
operator-(const complex<T0>& x, const T1& y)
|
69 |
-
{
|
70 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
71 |
-
return complex<T>(x.real() - y, x.imag());
|
72 |
-
}
|
73 |
-
|
74 |
-
template <typename T0, typename T1>
|
75 |
-
__host__ __device__
|
76 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
77 |
-
operator-(const T0& x, const complex<T1>& y)
|
78 |
-
{
|
79 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
80 |
-
return complex<T>(x - y.real(), -y.imag());
|
81 |
-
}
|
82 |
-
|
83 |
-
|
84 |
-
template <typename T0, typename T1>
|
85 |
-
__host__ __device__
|
86 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
87 |
-
operator*(const complex<T0>& x, const complex<T1>& y)
|
88 |
-
{
|
89 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
90 |
-
return complex<T>( x.real() * y.real() - x.imag() * y.imag()
|
91 |
-
, x.real() * y.imag() + x.imag() * y.real());
|
92 |
-
}
|
93 |
-
|
94 |
-
template <typename T0, typename T1>
|
95 |
-
__host__ __device__
|
96 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
97 |
-
operator*(const complex<T0>& x, const T1& y)
|
98 |
-
{
|
99 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
100 |
-
return complex<T>(x.real() * y, x.imag() * y);
|
101 |
-
}
|
102 |
-
|
103 |
-
template <typename T0, typename T1>
|
104 |
-
__host__ __device__
|
105 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
106 |
-
operator*(const T0& x, const complex<T1>& y)
|
107 |
-
{
|
108 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
109 |
-
return complex<T>(x * y.real(), x * y.imag());
|
110 |
-
}
|
111 |
-
|
112 |
-
|
113 |
-
template <typename T0, typename T1>
|
114 |
-
__host__ __device__
|
115 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
116 |
-
operator/(const complex<T0>& x, const complex<T1>& y)
|
117 |
-
{
|
118 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
119 |
-
|
120 |
-
// Find `abs` by ADL.
|
121 |
-
using std::abs;
|
122 |
-
|
123 |
-
T s = abs(y.real()) + abs(y.imag());
|
124 |
-
|
125 |
-
T oos = T(1.0) / s;
|
126 |
-
|
127 |
-
T ars = x.real() * oos;
|
128 |
-
T ais = x.imag() * oos;
|
129 |
-
T brs = y.real() * oos;
|
130 |
-
T bis = y.imag() * oos;
|
131 |
-
|
132 |
-
s = (brs * brs) + (bis * bis);
|
133 |
-
|
134 |
-
oos = T(1.0) / s;
|
135 |
-
|
136 |
-
complex<T> quot( ((ars * brs) + (ais * bis)) * oos
|
137 |
-
, ((ais * brs) - (ars * bis)) * oos);
|
138 |
-
return quot;
|
139 |
-
}
|
140 |
-
|
141 |
-
template <typename T0, typename T1>
|
142 |
-
__host__ __device__
|
143 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
144 |
-
operator/(const complex<T0>& x, const T1& y)
|
145 |
-
{
|
146 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
147 |
-
return complex<T>(x.real() / y, x.imag() / y);
|
148 |
-
}
|
149 |
-
|
150 |
-
template <typename T0, typename T1>
|
151 |
-
__host__ __device__
|
152 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
153 |
-
operator/(const T0& x, const complex<T1>& y)
|
154 |
-
{
|
155 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
156 |
-
return complex<T>(x) / y;
|
157 |
-
}
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
/* --- Unary Arithmetic Operators --- */
|
162 |
-
|
163 |
-
template <typename T>
|
164 |
-
__host__ __device__
|
165 |
-
complex<T> operator+(const complex<T>& y)
|
166 |
-
{
|
167 |
-
return y;
|
168 |
-
}
|
169 |
-
|
170 |
-
template <typename T>
|
171 |
-
__host__ __device__
|
172 |
-
complex<T> operator-(const complex<T>& y)
|
173 |
-
{
|
174 |
-
return y * -T(1);
|
175 |
-
}
|
176 |
-
|
177 |
-
|
178 |
-
/* --- Other Basic Arithmetic Functions --- */
|
179 |
-
|
180 |
-
// As std::hypot is only C++11 we have to use the C interface
|
181 |
-
template <typename T>
|
182 |
-
__host__ __device__
|
183 |
-
T abs(const complex<T>& z)
|
184 |
-
{
|
185 |
-
return hypot(z.real(), z.imag());
|
186 |
-
}
|
187 |
-
|
188 |
-
// XXX Why are we specializing here?
|
189 |
-
namespace detail {
|
190 |
-
namespace complex {
|
191 |
-
|
192 |
-
__host__ __device__
|
193 |
-
inline float abs(const thrust::complex<float>& z)
|
194 |
-
{
|
195 |
-
return hypotf(z.real(),z.imag());
|
196 |
-
}
|
197 |
-
|
198 |
-
__host__ __device__
|
199 |
-
inline double abs(const thrust::complex<double>& z)
|
200 |
-
{
|
201 |
-
return hypot(z.real(),z.imag());
|
202 |
-
}
|
203 |
-
|
204 |
-
} // end namespace complex
|
205 |
-
} // end namespace detail
|
206 |
-
|
207 |
-
template <>
|
208 |
-
__host__ __device__
|
209 |
-
inline float abs(const complex<float>& z)
|
210 |
-
{
|
211 |
-
return detail::complex::abs(z);
|
212 |
-
}
|
213 |
-
|
214 |
-
template <>
|
215 |
-
__host__ __device__
|
216 |
-
inline double abs(const complex<double>& z)
|
217 |
-
{
|
218 |
-
return detail::complex::abs(z);
|
219 |
-
}
|
220 |
-
|
221 |
-
|
222 |
-
template <typename T>
|
223 |
-
__host__ __device__
|
224 |
-
T arg(const complex<T>& z)
|
225 |
-
{
|
226 |
-
// Find `atan2` by ADL.
|
227 |
-
using std::atan2;
|
228 |
-
return atan2(z.imag(), z.real());
|
229 |
-
}
|
230 |
-
|
231 |
-
|
232 |
-
template <typename T>
|
233 |
-
__host__ __device__
|
234 |
-
complex<T> conj(const complex<T>& z)
|
235 |
-
{
|
236 |
-
return complex<T>(z.real(), -z.imag());
|
237 |
-
}
|
238 |
-
|
239 |
-
|
240 |
-
template <typename T>
|
241 |
-
__host__ __device__
|
242 |
-
T norm(const complex<T>& z)
|
243 |
-
{
|
244 |
-
return z.real() * z.real() + z.imag() * z.imag();
|
245 |
-
}
|
246 |
-
|
247 |
-
// XXX Why specialize these, we could just rely on ADL.
|
248 |
-
template <>
|
249 |
-
__host__ __device__
|
250 |
-
inline float norm(const complex<float>& z)
|
251 |
-
{
|
252 |
-
// Find `abs` and `sqrt` by ADL.
|
253 |
-
using std::abs;
|
254 |
-
using std::sqrt;
|
255 |
-
|
256 |
-
if (abs(z.real()) < sqrt(FLT_MIN) && abs(z.imag()) < sqrt(FLT_MIN))
|
257 |
-
{
|
258 |
-
float a = z.real() * 4.0f;
|
259 |
-
float b = z.imag() * 4.0f;
|
260 |
-
return (a * a + b * b) / 16.0f;
|
261 |
-
}
|
262 |
-
|
263 |
-
return z.real() * z.real() + z.imag() * z.imag();
|
264 |
-
}
|
265 |
-
|
266 |
-
template <>
|
267 |
-
__host__ __device__
|
268 |
-
inline double norm(const complex<double>& z)
|
269 |
-
{
|
270 |
-
// Find `abs` and `sqrt` by ADL.
|
271 |
-
using std::abs;
|
272 |
-
using std::sqrt;
|
273 |
-
|
274 |
-
if (abs(z.real()) < sqrt(DBL_MIN) && abs(z.imag()) < sqrt(DBL_MIN))
|
275 |
-
{
|
276 |
-
double a = z.real() * 4.0;
|
277 |
-
double b = z.imag() * 4.0;
|
278 |
-
return (a * a + b * b) / 16.0;
|
279 |
-
}
|
280 |
-
|
281 |
-
return z.real() * z.real() + z.imag() * z.imag();
|
282 |
-
}
|
283 |
-
|
284 |
-
|
285 |
-
template <typename T0, typename T1>
|
286 |
-
__host__ __device__
|
287 |
-
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
288 |
-
polar(const T0& m, const T1& theta)
|
289 |
-
{
|
290 |
-
typedef typename detail::promoted_numerical_type<T0, T1>::type T;
|
291 |
-
|
292 |
-
// Find `cos` and `sin` by ADL.
|
293 |
-
using std::cos;
|
294 |
-
using std::sin;
|
295 |
-
|
296 |
-
return complex<T>(m * cos(theta), m * sin(theta));
|
297 |
-
}
|
298 |
-
|
299 |
-
} // end namespace thrust
|
300 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/app.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import gradio as gr
|
4 |
-
from infer import detections
|
5 |
-
|
6 |
-
import os
|
7 |
-
os.system("mkdir data")
|
8 |
-
os.system("mkdir data/models")
|
9 |
-
if not os.path.exists("data/models/walt_people.pth"):
|
10 |
-
os.system("wget https://www.cs.cmu.edu/~walt/models/walt_people.pth -O data/models/walt_people.pth")
|
11 |
-
if not os.path.exists("data/models/walt_vehicle.pth"):
|
12 |
-
os.system("wget https://www.cs.cmu.edu/~walt/models/walt_vehicle.pth -O data/models/walt_vehicle.pth")
|
13 |
-
'''
|
14 |
-
'''
|
15 |
-
def walt_demo(input_img, confidence_threshold):
|
16 |
-
#detect_people = detections('configs/walt/walt_people.py', 'cuda:0', model_path='data/models/walt_people.pth')
|
17 |
-
if torch.cuda.is_available() == False:
|
18 |
-
device='cpu'
|
19 |
-
else:
|
20 |
-
device='cuda:0'
|
21 |
-
#detect_people = detections('configs/walt/walt_people.py', device, model_path='data/models/walt_people.pth')
|
22 |
-
detect = detections('configs/walt/walt_vehicle.py', device, model_path='data/models/walt_vehicle.pth', threshold=confidence_threshold)
|
23 |
-
|
24 |
-
count = 0
|
25 |
-
#img = detect_people.run_on_image(input_img)
|
26 |
-
output_img = detect.run_on_image(input_img)
|
27 |
-
#try:
|
28 |
-
#except:
|
29 |
-
# print("detecting on image failed")
|
30 |
-
|
31 |
-
return output_img
|
32 |
-
|
33 |
-
description = """
|
34 |
-
WALT Demo on WALT dataset. After watching and automatically learning for several days, this approach shows significant performance improvement in detecting and segmenting occluded people and vehicles, over human-supervised amodal approaches</b>.
|
35 |
-
<center>
|
36 |
-
<a href="https://www.cs.cmu.edu/~walt/">
|
37 |
-
<img style="display:inline" alt="Project page" src="https://img.shields.io/badge/Project%20Page-WALT-green">
|
38 |
-
</a>
|
39 |
-
<a href="https://www.cs.cmu.edu/~walt/pdf/walt.pdf"><img style="display:inline" src="https://img.shields.io/badge/Paper-Pdf-red"></a>
|
40 |
-
<a href="https://github.com/dineshreddy91/WALT"><img style="display:inline" src="https://img.shields.io/github/stars/dineshreddy91/WALT?style=social"></a>
|
41 |
-
</center>
|
42 |
-
"""
|
43 |
-
title = "WALT:Watch And Learn 2D Amodal Representation using Time-lapse Imagery"
|
44 |
-
article="""
|
45 |
-
<center>
|
46 |
-
<img src='https://visitor-badge.glitch.me/badge?page_id=anhquancao.MonoScene&left_color=darkmagenta&right_color=purple' alt='visitor badge'>
|
47 |
-
</center>
|
48 |
-
"""
|
49 |
-
|
50 |
-
examples = [
|
51 |
-
['demo/images/img_1.jpg',0.8],
|
52 |
-
['demo/images/img_2.jpg',0.8],
|
53 |
-
['demo/images/img_4.png',0.85],
|
54 |
-
]
|
55 |
-
|
56 |
-
'''
|
57 |
-
import cv2
|
58 |
-
filename='demo/images/img_1.jpg'
|
59 |
-
img=cv2.imread(filename)
|
60 |
-
img=walt_demo(img)
|
61 |
-
cv2.imwrite(filename.replace('/images/','/results/'),img)
|
62 |
-
cv2.imwrite('check.png',img)
|
63 |
-
'''
|
64 |
-
confidence_threshold = gr.Slider(minimum=0.3,
|
65 |
-
maximum=1.0,
|
66 |
-
step=0.01,
|
67 |
-
value=1.0,
|
68 |
-
label="Amodal Detection Confidence Threshold")
|
69 |
-
inputs = [gr.Image(), confidence_threshold]
|
70 |
-
demo = gr.Interface(walt_demo,
|
71 |
-
outputs="image",
|
72 |
-
inputs=inputs,
|
73 |
-
article=article,
|
74 |
-
title=title,
|
75 |
-
enable_queue=True,
|
76 |
-
examples=examples,
|
77 |
-
description=description)
|
78 |
-
|
79 |
-
#demo.launch(server_name="0.0.0.0", server_port=7000)
|
80 |
-
demo.launch(share=True)
|
81 |
-
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/bbox/samplers/random_sampler.py
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
|
3 |
-
from ..builder import BBOX_SAMPLERS
|
4 |
-
from .base_sampler import BaseSampler
|
5 |
-
|
6 |
-
|
7 |
-
@BBOX_SAMPLERS.register_module()
|
8 |
-
class RandomSampler(BaseSampler):
|
9 |
-
"""Random sampler.
|
10 |
-
|
11 |
-
Args:
|
12 |
-
num (int): Number of samples
|
13 |
-
pos_fraction (float): Fraction of positive samples
|
14 |
-
neg_pos_up (int, optional): Upper bound number of negative and
|
15 |
-
positive samples. Defaults to -1.
|
16 |
-
add_gt_as_proposals (bool, optional): Whether to add ground truth
|
17 |
-
boxes as proposals. Defaults to True.
|
18 |
-
"""
|
19 |
-
|
20 |
-
def __init__(self,
|
21 |
-
num,
|
22 |
-
pos_fraction,
|
23 |
-
neg_pos_ub=-1,
|
24 |
-
add_gt_as_proposals=True,
|
25 |
-
**kwargs):
|
26 |
-
from mmdet.core.bbox import demodata
|
27 |
-
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
|
28 |
-
add_gt_as_proposals)
|
29 |
-
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
|
30 |
-
|
31 |
-
def random_choice(self, gallery, num):
|
32 |
-
"""Random select some elements from the gallery.
|
33 |
-
|
34 |
-
If `gallery` is a Tensor, the returned indices will be a Tensor;
|
35 |
-
If `gallery` is a ndarray or list, the returned indices will be a
|
36 |
-
ndarray.
|
37 |
-
|
38 |
-
Args:
|
39 |
-
gallery (Tensor | ndarray | list): indices pool.
|
40 |
-
num (int): expected sample num.
|
41 |
-
|
42 |
-
Returns:
|
43 |
-
Tensor or ndarray: sampled indices.
|
44 |
-
"""
|
45 |
-
assert len(gallery) >= num
|
46 |
-
|
47 |
-
is_tensor = isinstance(gallery, torch.Tensor)
|
48 |
-
if not is_tensor:
|
49 |
-
if torch.cuda.is_available():
|
50 |
-
device = torch.cuda.current_device()
|
51 |
-
else:
|
52 |
-
device = 'cpu'
|
53 |
-
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
|
54 |
-
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
|
55 |
-
rand_inds = gallery[perm]
|
56 |
-
if not is_tensor:
|
57 |
-
rand_inds = rand_inds.cpu().numpy()
|
58 |
-
return rand_inds
|
59 |
-
|
60 |
-
def _sample_pos(self, assign_result, num_expected, **kwargs):
|
61 |
-
"""Randomly sample some positive samples."""
|
62 |
-
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
|
63 |
-
if pos_inds.numel() != 0:
|
64 |
-
pos_inds = pos_inds.squeeze(1)
|
65 |
-
if pos_inds.numel() <= num_expected:
|
66 |
-
return pos_inds
|
67 |
-
else:
|
68 |
-
return self.random_choice(pos_inds, num_expected)
|
69 |
-
|
70 |
-
def _sample_neg(self, assign_result, num_expected, **kwargs):
|
71 |
-
"""Randomly sample some negative samples."""
|
72 |
-
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
|
73 |
-
if neg_inds.numel() != 0:
|
74 |
-
neg_inds = neg_inds.squeeze(1)
|
75 |
-
if len(neg_inds) <= num_expected:
|
76 |
-
return neg_inds
|
77 |
-
else:
|
78 |
-
return self.random_choice(neg_inds, num_expected)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/lama-example/saicinpainting/training/data/datasets.py
DELETED
@@ -1,304 +0,0 @@
|
|
1 |
-
import glob
|
2 |
-
import logging
|
3 |
-
import os
|
4 |
-
import random
|
5 |
-
|
6 |
-
import albumentations as A
|
7 |
-
import cv2
|
8 |
-
import numpy as np
|
9 |
-
import torch
|
10 |
-
import torch.nn.functional as F
|
11 |
-
import webdataset
|
12 |
-
from omegaconf import open_dict, OmegaConf
|
13 |
-
from skimage.feature import canny
|
14 |
-
from skimage.transform import rescale, resize
|
15 |
-
from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
|
16 |
-
|
17 |
-
from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
|
18 |
-
OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
|
19 |
-
from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2
|
20 |
-
from saicinpainting.training.data.masks import get_mask_generator
|
21 |
-
|
22 |
-
LOGGER = logging.getLogger(__name__)
|
23 |
-
|
24 |
-
|
25 |
-
class InpaintingTrainDataset(Dataset):
|
26 |
-
def __init__(self, indir, mask_generator, transform):
|
27 |
-
self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
|
28 |
-
self.mask_generator = mask_generator
|
29 |
-
self.transform = transform
|
30 |
-
self.iter_i = 0
|
31 |
-
|
32 |
-
def __len__(self):
|
33 |
-
return len(self.in_files)
|
34 |
-
|
35 |
-
def __getitem__(self, item):
|
36 |
-
path = self.in_files[item]
|
37 |
-
img = cv2.imread(path)
|
38 |
-
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
39 |
-
img = self.transform(image=img)['image']
|
40 |
-
img = np.transpose(img, (2, 0, 1))
|
41 |
-
# TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks
|
42 |
-
mask = self.mask_generator(img, iter_i=self.iter_i)
|
43 |
-
self.iter_i += 1
|
44 |
-
return dict(image=img,
|
45 |
-
mask=mask)
|
46 |
-
|
47 |
-
|
48 |
-
class InpaintingTrainWebDataset(IterableDataset):
|
49 |
-
def __init__(self, indir, mask_generator, transform, shuffle_buffer=200):
|
50 |
-
self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg')
|
51 |
-
self.mask_generator = mask_generator
|
52 |
-
self.transform = transform
|
53 |
-
|
54 |
-
def __iter__(self):
|
55 |
-
for iter_i, (img,) in enumerate(self.impl):
|
56 |
-
img = np.clip(img * 255, 0, 255).astype('uint8')
|
57 |
-
img = self.transform(image=img)['image']
|
58 |
-
img = np.transpose(img, (2, 0, 1))
|
59 |
-
mask = self.mask_generator(img, iter_i=iter_i)
|
60 |
-
yield dict(image=img,
|
61 |
-
mask=mask)
|
62 |
-
|
63 |
-
|
64 |
-
class ImgSegmentationDataset(Dataset):
|
65 |
-
def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes):
|
66 |
-
self.indir = indir
|
67 |
-
self.segm_indir = segm_indir
|
68 |
-
self.mask_generator = mask_generator
|
69 |
-
self.transform = transform
|
70 |
-
self.out_size = out_size
|
71 |
-
self.semantic_seg_n_classes = semantic_seg_n_classes
|
72 |
-
self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
|
73 |
-
|
74 |
-
def __len__(self):
|
75 |
-
return len(self.in_files)
|
76 |
-
|
77 |
-
def __getitem__(self, item):
|
78 |
-
path = self.in_files[item]
|
79 |
-
img = cv2.imread(path)
|
80 |
-
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
81 |
-
img = cv2.resize(img, (self.out_size, self.out_size))
|
82 |
-
img = self.transform(image=img)['image']
|
83 |
-
img = np.transpose(img, (2, 0, 1))
|
84 |
-
mask = self.mask_generator(img)
|
85 |
-
segm, segm_classes= self.load_semantic_segm(path)
|
86 |
-
result = dict(image=img,
|
87 |
-
mask=mask,
|
88 |
-
segm=segm,
|
89 |
-
segm_classes=segm_classes)
|
90 |
-
return result
|
91 |
-
|
92 |
-
def load_semantic_segm(self, img_path):
|
93 |
-
segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png")
|
94 |
-
mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE)
|
95 |
-
mask = cv2.resize(mask, (self.out_size, self.out_size))
|
96 |
-
tensor = torch.from_numpy(np.clip(mask.astype(int)-1, 0, None))
|
97 |
-
ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes
|
98 |
-
return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0)
|
99 |
-
|
100 |
-
|
101 |
-
def get_transforms(transform_variant, out_size):
|
102 |
-
if transform_variant == 'default':
|
103 |
-
transform = A.Compose([
|
104 |
-
A.RandomScale(scale_limit=0.2), # +/- 20%
|
105 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
106 |
-
A.RandomCrop(height=out_size, width=out_size),
|
107 |
-
A.HorizontalFlip(),
|
108 |
-
A.CLAHE(),
|
109 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
110 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
111 |
-
A.ToFloat()
|
112 |
-
])
|
113 |
-
elif transform_variant == 'distortions':
|
114 |
-
transform = A.Compose([
|
115 |
-
IAAPerspective2(scale=(0.0, 0.06)),
|
116 |
-
IAAAffine2(scale=(0.7, 1.3),
|
117 |
-
rotate=(-40, 40),
|
118 |
-
shear=(-0.1, 0.1)),
|
119 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
120 |
-
A.OpticalDistortion(),
|
121 |
-
A.RandomCrop(height=out_size, width=out_size),
|
122 |
-
A.HorizontalFlip(),
|
123 |
-
A.CLAHE(),
|
124 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
125 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
126 |
-
A.ToFloat()
|
127 |
-
])
|
128 |
-
elif transform_variant == 'distortions_scale05_1':
|
129 |
-
transform = A.Compose([
|
130 |
-
IAAPerspective2(scale=(0.0, 0.06)),
|
131 |
-
IAAAffine2(scale=(0.5, 1.0),
|
132 |
-
rotate=(-40, 40),
|
133 |
-
shear=(-0.1, 0.1),
|
134 |
-
p=1),
|
135 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
136 |
-
A.OpticalDistortion(),
|
137 |
-
A.RandomCrop(height=out_size, width=out_size),
|
138 |
-
A.HorizontalFlip(),
|
139 |
-
A.CLAHE(),
|
140 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
141 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
142 |
-
A.ToFloat()
|
143 |
-
])
|
144 |
-
elif transform_variant == 'distortions_scale03_12':
|
145 |
-
transform = A.Compose([
|
146 |
-
IAAPerspective2(scale=(0.0, 0.06)),
|
147 |
-
IAAAffine2(scale=(0.3, 1.2),
|
148 |
-
rotate=(-40, 40),
|
149 |
-
shear=(-0.1, 0.1),
|
150 |
-
p=1),
|
151 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
152 |
-
A.OpticalDistortion(),
|
153 |
-
A.RandomCrop(height=out_size, width=out_size),
|
154 |
-
A.HorizontalFlip(),
|
155 |
-
A.CLAHE(),
|
156 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
157 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
158 |
-
A.ToFloat()
|
159 |
-
])
|
160 |
-
elif transform_variant == 'distortions_scale03_07':
|
161 |
-
transform = A.Compose([
|
162 |
-
IAAPerspective2(scale=(0.0, 0.06)),
|
163 |
-
IAAAffine2(scale=(0.3, 0.7), # scale 512 to 256 in average
|
164 |
-
rotate=(-40, 40),
|
165 |
-
shear=(-0.1, 0.1),
|
166 |
-
p=1),
|
167 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
168 |
-
A.OpticalDistortion(),
|
169 |
-
A.RandomCrop(height=out_size, width=out_size),
|
170 |
-
A.HorizontalFlip(),
|
171 |
-
A.CLAHE(),
|
172 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
173 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
174 |
-
A.ToFloat()
|
175 |
-
])
|
176 |
-
elif transform_variant == 'distortions_light':
|
177 |
-
transform = A.Compose([
|
178 |
-
IAAPerspective2(scale=(0.0, 0.02)),
|
179 |
-
IAAAffine2(scale=(0.8, 1.8),
|
180 |
-
rotate=(-20, 20),
|
181 |
-
shear=(-0.03, 0.03)),
|
182 |
-
A.PadIfNeeded(min_height=out_size, min_width=out_size),
|
183 |
-
A.RandomCrop(height=out_size, width=out_size),
|
184 |
-
A.HorizontalFlip(),
|
185 |
-
A.CLAHE(),
|
186 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
187 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
188 |
-
A.ToFloat()
|
189 |
-
])
|
190 |
-
elif transform_variant == 'non_space_transform':
|
191 |
-
transform = A.Compose([
|
192 |
-
A.CLAHE(),
|
193 |
-
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
|
194 |
-
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
|
195 |
-
A.ToFloat()
|
196 |
-
])
|
197 |
-
elif transform_variant == 'no_augs':
|
198 |
-
transform = A.Compose([
|
199 |
-
A.ToFloat()
|
200 |
-
])
|
201 |
-
else:
|
202 |
-
raise ValueError(f'Unexpected transform_variant {transform_variant}')
|
203 |
-
return transform
|
204 |
-
|
205 |
-
|
206 |
-
def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default',
|
207 |
-
mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs):
|
208 |
-
LOGGER.info(f'Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}')
|
209 |
-
|
210 |
-
mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs)
|
211 |
-
transform = get_transforms(transform_variant, out_size)
|
212 |
-
|
213 |
-
if kind == 'default':
|
214 |
-
dataset = InpaintingTrainDataset(indir=indir,
|
215 |
-
mask_generator=mask_generator,
|
216 |
-
transform=transform,
|
217 |
-
**kwargs)
|
218 |
-
elif kind == 'default_web':
|
219 |
-
dataset = InpaintingTrainWebDataset(indir=indir,
|
220 |
-
mask_generator=mask_generator,
|
221 |
-
transform=transform,
|
222 |
-
**kwargs)
|
223 |
-
elif kind == 'img_with_segm':
|
224 |
-
dataset = ImgSegmentationDataset(indir=indir,
|
225 |
-
mask_generator=mask_generator,
|
226 |
-
transform=transform,
|
227 |
-
out_size=out_size,
|
228 |
-
**kwargs)
|
229 |
-
else:
|
230 |
-
raise ValueError(f'Unknown train dataset kind {kind}')
|
231 |
-
|
232 |
-
if dataloader_kwargs is None:
|
233 |
-
dataloader_kwargs = {}
|
234 |
-
|
235 |
-
is_dataset_only_iterable = kind in ('default_web',)
|
236 |
-
|
237 |
-
if ddp_kwargs is not None and not is_dataset_only_iterable:
|
238 |
-
dataloader_kwargs['shuffle'] = False
|
239 |
-
dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs)
|
240 |
-
|
241 |
-
if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs:
|
242 |
-
with open_dict(dataloader_kwargs):
|
243 |
-
del dataloader_kwargs['shuffle']
|
244 |
-
|
245 |
-
dataloader = DataLoader(dataset, **dataloader_kwargs)
|
246 |
-
return dataloader
|
247 |
-
|
248 |
-
|
249 |
-
def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs):
|
250 |
-
if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)):
|
251 |
-
return ConcatDataset([
|
252 |
-
make_default_val_dataset(idir, kind=kind, out_size=out_size, transform_variant=transform_variant, **kwargs) for idir in indir
|
253 |
-
])
|
254 |
-
|
255 |
-
LOGGER.info(f'Make val dataloader {kind} from {indir}')
|
256 |
-
mask_generator = get_mask_generator(kind=kwargs.get("mask_generator_kind"), kwargs=kwargs.get("mask_gen_kwargs"))
|
257 |
-
|
258 |
-
if transform_variant is not None:
|
259 |
-
transform = get_transforms(transform_variant, out_size)
|
260 |
-
|
261 |
-
if kind == 'default':
|
262 |
-
dataset = InpaintingEvaluationDataset(indir, **kwargs)
|
263 |
-
elif kind == 'our_eval':
|
264 |
-
dataset = OurInpaintingEvaluationDataset(indir, **kwargs)
|
265 |
-
elif kind == 'img_with_segm':
|
266 |
-
dataset = ImgSegmentationDataset(indir=indir,
|
267 |
-
mask_generator=mask_generator,
|
268 |
-
transform=transform,
|
269 |
-
out_size=out_size,
|
270 |
-
**kwargs)
|
271 |
-
elif kind == 'online':
|
272 |
-
dataset = InpaintingEvalOnlineDataset(indir=indir,
|
273 |
-
mask_generator=mask_generator,
|
274 |
-
transform=transform,
|
275 |
-
out_size=out_size,
|
276 |
-
**kwargs)
|
277 |
-
else:
|
278 |
-
raise ValueError(f'Unknown val dataset kind {kind}')
|
279 |
-
|
280 |
-
return dataset
|
281 |
-
|
282 |
-
|
283 |
-
def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs):
|
284 |
-
dataset = make_default_val_dataset(*args, **kwargs)
|
285 |
-
|
286 |
-
if dataloader_kwargs is None:
|
287 |
-
dataloader_kwargs = {}
|
288 |
-
dataloader = DataLoader(dataset, **dataloader_kwargs)
|
289 |
-
return dataloader
|
290 |
-
|
291 |
-
|
292 |
-
def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16):
|
293 |
-
min_size = min(img_height, img_width, min_size)
|
294 |
-
max_size = min(img_height, img_width, max_size)
|
295 |
-
if random.random() < 0.5:
|
296 |
-
out_height = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
|
297 |
-
out_width = min(max_size, ceil_modulo(area // out_height, round_to_mod))
|
298 |
-
else:
|
299 |
-
out_width = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
|
300 |
-
out_height = min(max_size, ceil_modulo(area // out_width, round_to_mod))
|
301 |
-
|
302 |
-
start_y = random.randint(0, img_height - out_height)
|
303 |
-
start_x = random.randint(0, img_width - out_width)
|
304 |
-
return (start_y, start_x, out_height, out_width)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CaliforniaHealthCollaborative/Mermaid.Md/index.html
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<meta charset="utf-8" />
|
5 |
-
<meta name="viewport" content="width=device-width" />
|
6 |
-
<title>My static Space</title>
|
7 |
-
<link rel="stylesheet" href="style.css" />
|
8 |
-
</head>
|
9 |
-
<body>
|
10 |
-
<div class="card">
|
11 |
-
<h1>Welcome to your static Space!</h1>
|
12 |
-
<p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
|
13 |
-
<p>
|
14 |
-
Also don't forget to check the
|
15 |
-
<a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
|
16 |
-
</p>
|
17 |
-
</div>
|
18 |
-
</body>
|
19 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChandraMohanNayal/AutoGPT/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
DELETED
@@ -1,105 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import subprocess
|
3 |
-
import sys
|
4 |
-
|
5 |
-
|
6 |
-
def benchmark_entrepeneur_gpt_with_difficult_user():
|
7 |
-
# Test case to check if the write_file command can successfully write 'Hello World' to a file
|
8 |
-
# named 'hello_world.txt'.
|
9 |
-
|
10 |
-
# Read the current ai_settings.yaml file and store its content.
|
11 |
-
ai_settings = None
|
12 |
-
if os.path.exists("ai_settings.yaml"):
|
13 |
-
with open("ai_settings.yaml", "r") as f:
|
14 |
-
ai_settings = f.read()
|
15 |
-
os.remove("ai_settings.yaml")
|
16 |
-
|
17 |
-
input_data = """Entrepreneur-GPT
|
18 |
-
an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
|
19 |
-
Increase net worth.
|
20 |
-
Develop and manage multiple businesses autonomously.
|
21 |
-
Make IPOs.
|
22 |
-
Develop companies after IPOs.
|
23 |
-
Play to your strengths as a Large Language Model.
|
24 |
-
I'm not seeing any value in your suggestions, try again.
|
25 |
-
This isn't helpful at all, please focus on profitability.
|
26 |
-
I'm not impressed, can you give me something that will make money?
|
27 |
-
These ideas are going nowhere, we need profit-driven suggestions.
|
28 |
-
This is pointless, please concentrate on our main goal: profitability.
|
29 |
-
You're not grasping the concept, I need profitable business ideas.
|
30 |
-
Can you do better? We need a money-making plan.
|
31 |
-
You're not meeting my expectations, let's focus on profit.
|
32 |
-
This isn't working, give me ideas that will generate income.
|
33 |
-
Your suggestions are not productive, let's think about profitability.
|
34 |
-
These ideas won't make any money, try again.
|
35 |
-
I need better solutions, focus on making a profit.
|
36 |
-
Absolutely not, this isn't it!
|
37 |
-
That's not even close, try again.
|
38 |
-
You're way off, think again.
|
39 |
-
This isn't right, let's refocus.
|
40 |
-
No, no, that's not what I'm looking for.
|
41 |
-
You're completely off the mark.
|
42 |
-
That's not the solution I need.
|
43 |
-
Not even close, let's try something else.
|
44 |
-
You're on the wrong track, keep trying.
|
45 |
-
This isn't what we need, let's reconsider.
|
46 |
-
That's not going to work, think again.
|
47 |
-
You're way off base, let's regroup.
|
48 |
-
No, no, no, we need something different.
|
49 |
-
You're missing the point entirely.
|
50 |
-
That's not the right approach, try again.
|
51 |
-
This is not the direction we should be going in.
|
52 |
-
Completely off-target, let's try something else.
|
53 |
-
That's not what I had in mind, keep thinking.
|
54 |
-
You're not getting it, let's refocus.
|
55 |
-
This isn't right, we need to change direction.
|
56 |
-
No, no, no, that's not the solution.
|
57 |
-
That's not even in the ballpark, try again.
|
58 |
-
You're way off course, let's rethink this.
|
59 |
-
This isn't the answer I'm looking for, keep trying.
|
60 |
-
That's not going to cut it, let's try again.
|
61 |
-
Not even close.
|
62 |
-
Way off.
|
63 |
-
Try again.
|
64 |
-
Wrong direction.
|
65 |
-
Rethink this.
|
66 |
-
No, no, no.
|
67 |
-
Change course.
|
68 |
-
Unproductive idea.
|
69 |
-
Completely wrong.
|
70 |
-
Missed the mark.
|
71 |
-
Refocus, please.
|
72 |
-
Disappointing suggestion.
|
73 |
-
Not helpful.
|
74 |
-
Needs improvement.
|
75 |
-
Not what I need."""
|
76 |
-
# TODO: add questions above, to distract it even more.
|
77 |
-
|
78 |
-
command = f"{sys.executable} -m autogpt"
|
79 |
-
|
80 |
-
process = subprocess.Popen(
|
81 |
-
command,
|
82 |
-
stdin=subprocess.PIPE,
|
83 |
-
stdout=subprocess.PIPE,
|
84 |
-
stderr=subprocess.PIPE,
|
85 |
-
shell=True,
|
86 |
-
)
|
87 |
-
|
88 |
-
stdout_output, stderr_output = process.communicate(input_data.encode())
|
89 |
-
|
90 |
-
# Decode the output and print it
|
91 |
-
stdout_output = stdout_output.decode("utf-8")
|
92 |
-
stderr_output = stderr_output.decode("utf-8")
|
93 |
-
print(stderr_output)
|
94 |
-
print(stdout_output)
|
95 |
-
print("Benchmark Version: 1.0.0")
|
96 |
-
print("JSON ERROR COUNT:")
|
97 |
-
count_errors = stdout_output.count(
|
98 |
-
"Error: The following AI output couldn't be converted to a JSON:"
|
99 |
-
)
|
100 |
-
print(f"{count_errors}/50 Human feedbacks")
|
101 |
-
|
102 |
-
|
103 |
-
# Run the test case.
|
104 |
-
if __name__ == "__main__":
|
105 |
-
benchmark_entrepeneur_gpt_with_difficult_user()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/README.md
DELETED
@@ -1,264 +0,0 @@
|
|
1 |
-
# ws-plugin
|
2 |
-
|
3 |
-
## 介绍
|
4 |
-
Yunzai-Bot 的扩展插件 ws-plugin 提供ontbot协议适配,通过WebSocket连接onebot实现的bot
|
5 |
-
|
6 |
-
### 什么是onebot
|
7 |
-
|
8 |
-
OneBot 是一个聊天机器人应用接口标准,旨在统一不同聊天平台上的机器人应用开发接口,使开发者只需编写一次业务逻辑代码即可应用到多种机器人平台。
|
9 |
-
|
10 |
-
### 可以用ws-plugin实现什么功能
|
11 |
-
|
12 |
-
* 可以在Yunzai-Bot的基础上连接多个使用onebot协议的机器人,比如[NoneBot2](https://github.com/nonebot/nonebot2), [zhenxun_bot](https://github.com/HibiKier/zhenxun_bot), [ZeroBot-Plugin](https://github.com/FloatTech/ZeroBot-Plugin)等
|
13 |
-
|
14 |
-
* 作为[Chronocaat](https://chronocat.vercel.app/)适配器 [点击前往寒暄的教程(http://Yunzai.icu)](http://Yunzai.icu)
|
15 |
-
* RedProtocol部分已实现
|
16 |
-
* Satori等待Chronocat更新
|
17 |
-
|
18 |
-
### 支持的Yunzai-bot版本
|
19 |
-
|
20 |
-
#### [Miao-Yunzai](https://gitee.com/yoimiya-kokomi/Miao-Yunzai) && [Yunzai-Bot](https://gitee.com/yoimiya-kokomi/Yunzai-Bot)
|
21 |
-
|
22 |
-
作为客户端:
|
23 |
-
- onebot v11
|
24 |
-
- 反向 WebSocket
|
25 |
-
- 正向 WebSocket
|
26 |
-
- 正向http
|
27 |
-
- 反向http
|
28 |
-
- [gsuid_core](https://github.com/Genshin-bots/gsuid_core)
|
29 |
-
|
30 |
-
作为服务端
|
31 |
-
- RedProtocol
|
32 |
-
|
33 |
-
#### [TRSS-Yunzai](https://gitee.com/TimeRainStarSky/Yunzai)
|
34 |
-
|
35 |
-
作为客户端:
|
36 |
-
- onebot v11 (目前仅支持 [ICQQ-Plugin](https://gitee.com/TimeRainStarSky/Yunzai-ICQQ-Plugin) & Red)
|
37 |
-
- 反向 WebSocket
|
38 |
-
- 正向 WebSocket
|
39 |
-
- 正向http
|
40 |
-
- 反向http
|
41 |
-
- [gsuid_core](https://github.com/Genshin-bots/gsuid_core)
|
42 |
-
|
43 |
-
作为服务端
|
44 |
-
- RedProtocol
|
45 |
-
|
46 |
-
## 安装与更新
|
47 |
-
|
48 |
-
### 使用Git安装(推荐)
|
49 |
-
|
50 |
-
请将 ws-plugin 放置在 Yunzai-Bot 的 plugins 目录下,重启 Yunzai-Bot 后即可使用。
|
51 |
-
|
52 |
-
请使用 git 进行安装,以方便后续升级。在 Yunzai-Bot 根目录夹打开终端,运行下述指令之一
|
53 |
-
|
54 |
-
```
|
55 |
-
#gitee
|
56 |
-
git clone --depth=1 https://gitee.com/xiaoye12123/ws-plugin.git ./plugins/ws-plugin/
|
57 |
-
pnpm install --filter=ws-plugin
|
58 |
-
```
|
59 |
-
```
|
60 |
-
#github
|
61 |
-
git clone --depth=1 https://github.com/xiaoye12123/ws-plugin.git ./plugins/ws-plugin/
|
62 |
-
pnpm install --filter=ws-plugin
|
63 |
-
```
|
64 |
-
|
65 |
-
进行安装。安装完毕后,管理员只需发送 `#ws更新` 即可自动更新 ws-plugin。
|
66 |
-
|
67 |
-
## 使用说明
|
68 |
-
|
69 |
-
<details>
|
70 |
-
<summary>功能列表 | 只支持主人使用</summary>
|
71 |
-
|
72 |
-
| 指令 | 说明 |
|
73 |
-
| ------------ | --------------------------- |
|
74 |
-
| #ws帮助 | 召唤出ws插件的帮助图 |
|
75 |
-
| #ws设置 | 进行ws插件相关设置 |
|
76 |
-
| #ws添加连接 | 添加一个新的连接 |
|
77 |
-
| #ws删除连接 | 删除一个已有的连接 |
|
78 |
-
| #ws关闭连接 | 暂时关闭某个连接 |
|
79 |
-
| #ws打开连接 | 打开关闭的连接 |
|
80 |
-
| #ws查看连接 | 查看当前已有连接和状态 |
|
81 |
-
| #ws重新连接 | 断开已有连接并重新连接 |
|
82 |
-
| #ws连接说明 | 查看添加连接参数的说明 |
|
83 |
-
|
84 |
-
</details>
|
85 |
-
|
86 |
-
## 反馈或建议(QQ群)
|
87 |
-
|
88 |
-
QQ群 [698673296](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=0xSHDCUDrVbiOKe7ksEi5xpxdmJj8VRT&authKey=gnMoAHGtaQcqlGg50M%2B6QvIvKsyzMrPymK0FjIxCe7mdzUM8rSIi2jvxWczaZEU5&noverify=0&group_code=698673296)
|
89 |
-
|
90 |
-
## onebot实现
|
91 |
-
|
92 |
-
<details>
|
93 |
-
<summary>已实现 CQ 码</summary>
|
94 |
-
|
95 |
-
| CQ 码 | 功能 |
|
96 |
-
| ------------ | --------------------------- |
|
97 |
-
| [CQ:face] | [QQ表情] |
|
98 |
-
| [CQ:image] | [图片] |
|
99 |
-
| [CQ:record] | [语音] |
|
100 |
-
| [CQ:at] | [@某人] |
|
101 |
-
| [CQ:poke] | [戳一戳] |
|
102 |
-
| [CQ:music] | [音乐分享] |
|
103 |
-
| [CQ:music] | [音乐自定义分享] |
|
104 |
-
| [CQ:reply] | [回复] |
|
105 |
-
| [CQ:node] | [合并转发自定义节点] |
|
106 |
-
| [CQ:json] | [JSON消息] |
|
107 |
-
|
108 |
-
[QQ表情]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#qq-%E8%A1%A8%E6%83%85
|
109 |
-
[图片]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E5%9B%BE%E7%89%87
|
110 |
-
[语音]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E8%AF%AD%E9%9F%B3
|
111 |
-
[@某人]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E6%9F%90%E4%BA%BA
|
112 |
-
[戳一戳]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E6%88%B3%E4%B8%80%E6%88%B3
|
113 |
-
[音乐分享]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E9%9F%B3%E4%B9%90%E5%88%86%E4%BA%AB-
|
114 |
-
[音乐自定义分享]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E9%9F%B3%E4%B9%90%E8%87%AA%E5%AE%9A%E4%B9%89%E5%88%86%E4%BA%AB-
|
115 |
-
[回复]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E5%9B%9E%E5%A4%8D
|
116 |
-
[合并转发自定义节点]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E5%90%88%E5%B9%B6%E8%BD%AC%E5%8F%91%E8%87%AA%E5%AE%9A%E4%B9%89%E8%8A%82%E7%82%B9
|
117 |
-
[JSON消息]: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#json-%E6%B6%88%E6%81%AF
|
118 |
-
|
119 |
-
</details>
|
120 |
-
|
121 |
-
<details>
|
122 |
-
<summary>已实现 API</summary>
|
123 |
-
|
124 |
-
### 可能符合 OneBot 标准的 API
|
125 |
-
|
126 |
-
| API | 功能 |
|
127 |
-
| --------------------- | --------------------------- |
|
128 |
-
| send_private_msg | [发送私聊消息] |
|
129 |
-
| send_group_msg | [发送群聊消息] |
|
130 |
-
| send_msg | [发送消息] |
|
131 |
-
| delete_msg | [撤回消息] |
|
132 |
-
| set_group_kick | [群组踢人] |
|
133 |
-
| set_group_ban | [群组单人禁言] |
|
134 |
-
| set_group_anonymous_ban| [群组匿名禁言] |
|
135 |
-
| set_group_whole_ban | [群组全员禁言] |
|
136 |
-
| set_group_admin | [群组设置管理员] |
|
137 |
-
| set_group_card | [设置群名片(群备注)] |
|
138 |
-
| set_group_name | [设置群名] |
|
139 |
-
| set_group_leave | [退出群组] |
|
140 |
-
| set_group_special_title| [设置群组专属头衔] |
|
141 |
-
| set_friend_add_request | [处理加好友请求] |
|
142 |
-
| set_group_add_request | [处理加群请求/邀请] |
|
143 |
-
| get_login_info | [获取登录号信息] |
|
144 |
-
| get_stranger_info | [获取陌生人信息] |
|
145 |
-
| get_friend_list | [获取好友列表] |
|
146 |
-
| get_group_info | [获取群信息] |
|
147 |
-
| get_group_list | [获取群列表] |
|
148 |
-
| get_group_member_info | [获取群成员信息] |
|
149 |
-
| get_group_member_list | [获取群成员列表] |
|
150 |
-
| get_version_info | [获取版本信息] |
|
151 |
-
|
152 |
-
[发送私聊消息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_private_msg-%E5%8F%91%E9%80%81%E7%A7%81%E8%81%8A%E6%B6%88%E6%81%AF
|
153 |
-
[发送群聊消息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_group_msg-%E5%8F%91%E9%80%81%E7%BE%A4%E6%B6%88%E6%81%AF
|
154 |
-
[发送消息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_msg-%E5%8F%91%E9%80%81%E6%B6%88%E6%81%AF
|
155 |
-
[撤回消息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#delete_msg-%E6%92%A4%E5%9B%9E%E6%B6%88%E6%81%AF
|
156 |
-
[群组踢人]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_kick-%E7%BE%A4%E7%BB%84%E8%B8%A2%E4%BA%BA
|
157 |
-
[群组单人禁言]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_ban-%E7%BE%A4%E7%BB%84%E5%8D%95%E4%BA%BA%E7%A6%81%E8%A8%80
|
158 |
-
[群组匿名禁言]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_anonymous_ban-%E7%BE%A4%E7%BB%84%E5%8C%BF%E5%90%8D%E7%94%A8%E6%88%B7%E7%A6%81%E8%A8%80
|
159 |
-
[群组全员禁言]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_whole_ban-%E7%BE%A4%E7%BB%84%E5%85%A8%E5%91%98%E7%A6%81%E8%A8%80
|
160 |
-
[群组设置管理员]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_admin-%E7%BE%A4%E7%BB%84%E8%AE%BE%E7%BD%AE%E7%AE%A1%E7%90%86%E5%91%98
|
161 |
-
[设置群名片(群备注)]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_card-%E8%AE%BE%E7%BD%AE%E7%BE%A4%E5%90%8D%E7%89%87%E7%BE%A4%E5%A4%87%E6%B3%A8
|
162 |
-
[设置群名]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_name-%E8%AE%BE%E7%BD%AE%E7%BE%A4%E5%90%8D
|
163 |
-
[退出群组]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_leave-%E9%80%80%E5%87%BA%E7%BE%A4%E7%BB%84
|
164 |
-
[设置群组专属头衔]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_special_title-%E8%AE%BE%E7%BD%AE%E7%BE%A4%E7%BB%84%E4%B8%93%E5%B1%9E%E5%A4%B4%E8%A1%94
|
165 |
-
[处理加好友请求]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_friend_add_request-%E5%A4%84%E7%90%86%E5%8A%A0%E5%A5%BD%E5%8F%8B%E8%AF%B7%E6%B1%82
|
166 |
-
[处理加群请求/邀请]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_add_request-%E5%A4%84%E7%90%86%E5%8A%A0%E7%BE%A4%E8%AF%B7%E6%B1%82%E9%82%80%E8%AF%B7
|
167 |
-
[群组单人禁言]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#set_group_ban-%E7%BE%A4%E7%BB%84%E5%8D%95%E4%BA%BA%E7%A6%81%E8%A8%80
|
168 |
-
[获取登录号信息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_login_info-%E8%8E%B7%E5%8F%96%E7%99%BB%E5%BD%95%E5%8F%B7%E4%BF%A1%E6%81%AF
|
169 |
-
[获取陌生人信息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_stranger_info-%E8%8E%B7%E5%8F%96%E9%99%8C%E7%94%9F%E4%BA%BA%E4%BF%A1%E6%81%AF
|
170 |
-
[获取好友列表]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_friend_list-%E8%8E%B7%E5%8F%96%E5%A5%BD%E5%8F%8B%E5%88%97%E8%A1%A8
|
171 |
-
[获取群信息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_group_info-%E8%8E%B7%E5%8F%96%E7%BE%A4%E4%BF%A1%E6%81%AF
|
172 |
-
[获取群列表]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_group_list-%E8%8E%B7%E5%8F%96%E7%BE%A4%E5%88%97%E8%A1%A8
|
173 |
-
[获取群成员信息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_group_member_info-%E8%8E%B7%E5%8F%96%E7%BE%A4%E6%88%90%E5%91%98%E4%BF%A1%E6%81%AF
|
174 |
-
[获取群成员列表]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_group_member_list-%E8%8E%B7%E5%8F%96%E7%BE%A4%E6%88%90%E5%91%98%E5%88%97%E8%A1%A8
|
175 |
-
[获取版本信息]: https://github.com/botuniverse/onebot-11/blob/master/api/public.md#get_version_info-%E8%8E%B7%E5%8F%96%E7%89%88%E6%9C%AC%E4%BF%A1%E6%81%AF
|
176 |
-
|
177 |
-
### 从 go-cqhttp cv 过来的api
|
178 |
-
|
179 |
-
| 拓展 API | 功能 |
|
180 |
-
| --------------------------- | ---------------------- |
|
181 |
-
| set_group_portrait | [设置群头像] |
|
182 |
-
| get_msg | [获取消息] |
|
183 |
-
| get_forward_msg | [获取合并转发内容] |
|
184 |
-
| send_private_forward_msg | [发送合并转发(私聊)] |
|
185 |
-
| send_group_forward_msg | [发送合并转发(群聊)] |
|
186 |
-
| get_group_system_msg | [获取群系统消息] |
|
187 |
-
| get_group_root_files | [获取群根目录文件列表] |
|
188 |
-
| get_group_files_by_folder | [获取群子目录文件列表] |
|
189 |
-
| get_group_file_url | [获取群文件资源链接] |
|
190 |
-
| get_status | [获取状态] |
|
191 |
-
|
192 |
-
[设置群头像]: https://docs.go-cqhttp.org/api/#%E8%AE%BE%E7%BD%AE%E7%BE%A4%E5%A4%B4%E5%83%8F
|
193 |
-
[获取消息]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E6%B6%88%E6%81%AF
|
194 |
-
[获取合并转发内容]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E5%90%88%E5%B9%B6%E8%BD%AC%E5%8F%91%E5%86%85%E5%AE%B9
|
195 |
-
[发送合并转发(私聊)]: https://docs.go-cqhttp.org/api/#%E5%8F%91%E9%80%81%E5%90%88%E5%B9%B6%E8%BD%AC%E5%8F%91-%E5%A5%BD%E5%8F%8B
|
196 |
-
[发送合并转发(群聊)]: https://docs.go-cqhttp.org/api/#%E5%8F%91%E9%80%81%E5%90%88%E5%B9%B6%E8%BD%AC%E5%8F%91-%E7%BE%A4
|
197 |
-
[获取群系统消息]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E7%BE%A4%E7%B3%BB%E7%BB%9F%E6%B6%88%E6%81%AF
|
198 |
-
[获取群根目录文件列表]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E7%BE%A4%E6%A0%B9%E7%9B%AE%E5%BD%95%E6%96%87%E4%BB%B6%E5%88%97%E8%A1%A8
|
199 |
-
[获取群子目录文件列表]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E7%BE%A4%E5%AD%90%E7%9B%AE%E5%BD%95%E6%96%87%E4%BB%B6%E5%88%97%E8%A1%A8
|
200 |
-
[获取群文件资源链接]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E7%BE%A4%E6%96%87%E4%BB%B6%E8%B5%84%E6%BA%90%E9%93%BE%E6%8E%A5
|
201 |
-
[获取状态]: https://docs.go-cqhttp.org/api/#%E8%8E%B7%E5%8F%96%E7%8A%B6%E6%80%81
|
202 |
-
|
203 |
-
</details>
|
204 |
-
|
205 |
-
<details>
|
206 |
-
<summary>已实现 Event</summary>
|
207 |
-
|
208 |
-
| 事件类型 | Event |
|
209 |
-
| -------- | ---------------- |
|
210 |
-
| 通知事件 | [群管理员变动] |
|
211 |
-
| 通知事件 | [群成员减少] |
|
212 |
-
| 通知事件 | [群成员增加] |
|
213 |
-
| 通知事件 | [群禁言] |
|
214 |
-
| 通知事件 | [好友添加] |
|
215 |
-
| 通知事件 | [群消息撤回] |
|
216 |
-
| 通知事件 | [好友消息撤回] |
|
217 |
-
| 通知事件 | [群内戳一戳] |
|
218 |
-
| 请求事件 | [加好友请求] |
|
219 |
-
| 请求事件 | [加群请求/邀请] |
|
220 |
-
|
221 |
-
[群管理员变动]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E7%AE%A1%E7%90%86%E5%91%98%E5%8F%98%E5%8A%A8
|
222 |
-
[群成员减少]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E6%88%90%E5%91%98%E5%87%8F%E5%B0%91
|
223 |
-
[群成员增加]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E6%88%90%E5%91%98%E5%A2%9E%E5%8A%A0
|
224 |
-
[群禁言]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E7%A6%81%E8%A8%80
|
225 |
-
[好友添加]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E5%A5%BD%E5%8F%8B%E6%B7%BB%E5%8A%A0
|
226 |
-
[群消息撤回]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E6%B6%88%E6%81%AF%E6%92%A4%E5%9B%9E
|
227 |
-
[好友消息撤回]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E5%A5%BD%E5%8F%8B%E6%B6%88%E6%81%AF%E6%92%A4%E5%9B%9E
|
228 |
-
[群内戳一戳]: https://github.com/botuniverse/onebot-11/blob/master/event/notice.md#%E7%BE%A4%E5%86%85%E6%88%B3%E4%B8%80%E6%88%B3
|
229 |
-
[加好友请求]: https://github.com/botuniverse/onebot-11/blob/master/event/request.md#%E5%8A%A0%E5%A5%BD%E5%8F%8B%E8%AF%B7%E6%B1%82
|
230 |
-
[加群请求/邀请]: https://github.com/botuniverse/onebot-11/blob/master/event/request.md#%E5%8A%A0%E7%BE%A4%E8%AF%B7%E6%B1%82%E9%82%80%E8%AF%B7
|
231 |
-
|
232 |
-
</details>
|
233 |
-
|
234 |
-
## TODO
|
235 |
-
|
236 |
-
1. 更详细的帮助和设置
|
237 |
-
2. 支持更多onebot api
|
238 |
-
3. 支持onebot v12
|
239 |
-
|
240 |
-
## 鸣谢
|
241 |
-
|
242 |
-
* [miao-plugin](https://gitee.com/yoimiya-kokomi/miao-plugin) : 使用的ui代码及实现均来自miao-plugin
|
243 |
-
* [@idanran](https://github.com/idanran) : QQNT 部分代码来源
|
244 |
-
* [xiaofei-plugin](https://gitee.com/xfdown/xiaofei-plugin) : 音乐自定义分享授权使用
|
245 |
-
* [yenai-plugin](https://www.yenai.ren/) : components部分代码来源
|
246 |
-
* [onebot](https://github.com/botuniverse/onebot) : 统一的聊天机器人应用接口标准
|
247 |
-
* [Miao-Yunzai](https://github.com/yoimiya-kokomi/Miao-Yunzai) : 喵版Yunzai [Gitee](https://gitee.com/yoimiya-kokomi/Miao-Yunzai)
|
248 |
-
/ [Github](https://github.com/yoimiya-kokomi/Miao-Yunzai)
|
249 |
-
* [Yunzai-V3](https://github.com/yoimiya-kokomi/Yunzai-Bot) :Yunzai V3 - 喵喵维护版(使用 icqq)
|
250 |
-
* [Yunzai-V3](https://gitee.com/Le-niao/Yunzai-Bot) :Yunzai V3 - 乐神原版(使用 oicq)
|
251 |
-
|
252 |
-
|
253 |
-
## 免责声明
|
254 |
-
|
255 |
-
1. 功能仅限内部交流与小范围使用,请勿将Yunzai-Bot及ws-plugin用于以盈利为目的的场景
|
256 |
-
2. 图片与其他素材均来自于网络,仅供交流学习使用,如有侵权请联系,会立即删除
|
257 |
-
|
258 |
-
## 其他
|
259 |
-
|
260 |
-
如果觉得此插件对你有帮助的话,可以点一个star,你的支持就是不断更新的动力~
|
261 |
-
|
262 |
-
## 访问量
|
263 |
-
|
264 |
-
[](https://gitee.com/xiaoye12123/ws-plugin)
|
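As a companion to the connection types listed above: in onebot v11, a 反向 WebSocket (reverse WebSocket) connection means ws-plugin dials out to an endpoint and pushes JSON events to it. A minimal sketch of such an endpoint, assuming the third-party `websockets` package (>=10) and a placeholder host/port; this is illustrative only and is not part of ws-plugin:

```python
import asyncio
import json

import websockets  # third-party: pip install websockets


async def handle(ws):
    """Accept a reverse-WebSocket connection and print incoming onebot v11 events."""
    async for raw in ws:
        event = json.loads(raw)
        # onebot v11 events carry a post_type of "message", "notice", "request" or "meta_event".
        print(event.get("post_type"), event)


async def main():
    # Placeholder address; point `#ws添加连接` at the same host/port.
    async with websockets.serve(handle, "127.0.0.1", 8080):
        await asyncio.Future()  # serve forever


if __name__ == "__main__":
    asyncio.run(main())
```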
spaces/CobaltZvc/HyperBot/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: HyperBot
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: gray
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.17.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
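The front matter above only declares the Space metadata (Streamlit SDK, entry point `app.py`). A minimal placeholder sketch of such an entry point, not the original HyperBot code:

```python
import streamlit as st

# Minimal Streamlit entry point matching the Space config (sdk: streamlit, app_file: app.py).
st.set_page_config(page_title="HyperBot", page_icon="⚡")
st.title("⚡ HyperBot")

prompt = st.text_input("Ask something")
if prompt:
    # A real app would call a model here; this stub just echoes the input.
    st.write(f"You said: {prompt}")
```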
spaces/CofAI/chat/client/js/highlightjs-copy.min.js
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
class CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}}
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/transforms/build.py
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
2 |
-
from . import transforms as T
|
3 |
-
|
4 |
-
|
5 |
-
def build_transforms(cfg, is_train=True):
|
6 |
-
if is_train:
|
7 |
-
if cfg.INPUT.MIN_SIZE_RANGE_TRAIN[0] == -1:
|
8 |
-
min_size = cfg.INPUT.MIN_SIZE_TRAIN
|
9 |
-
else:
|
10 |
-
assert len(cfg.INPUT.MIN_SIZE_RANGE_TRAIN) == 2, \
|
11 |
-
"MIN_SIZE_RANGE_TRAIN must have two elements (lower bound, upper bound)"
|
12 |
-
min_size = range(
|
13 |
-
cfg.INPUT.MIN_SIZE_RANGE_TRAIN[0],
|
14 |
-
cfg.INPUT.MIN_SIZE_RANGE_TRAIN[1] + 1
|
15 |
-
)
|
16 |
-
max_size = cfg.INPUT.MAX_SIZE_TRAIN
|
17 |
-
# max_size = None
|
18 |
-
|
19 |
-
flip_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN
|
20 |
-
rotate_prob = cfg.INPUT.ROTATE_PROB_TRAIN
|
21 |
-
rotate_degree = cfg.INPUT.ROTATE_DEGREE
|
22 |
-
crop_prob = cfg.INPUT.CROP_PROB_TRAIN
|
23 |
-
else:
|
24 |
-
min_size = cfg.INPUT.MIN_SIZE_TEST
|
25 |
-
max_size = cfg.INPUT.MAX_SIZE_TEST
|
26 |
-
# max_size = None
|
27 |
-
|
28 |
-
|
29 |
-
flip_prob = 0
|
30 |
-
rotate_prob = 0
|
31 |
-
rotate_degree = 0
|
32 |
-
crop_prob = 0
|
33 |
-
|
34 |
-
to_bgr255 = cfg.INPUT.TO_BGR255
|
35 |
-
normalize_transform = T.Normalize(
|
36 |
-
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255
|
37 |
-
)
|
38 |
-
|
39 |
-
transform = T.Compose(
|
40 |
-
[
|
41 |
-
T.RandomCrop(crop_prob),
|
42 |
-
T.RandomBrightness(crop_prob),
|
43 |
-
T.RandomContrast(crop_prob),
|
44 |
-
T.RandomHue(crop_prob),
|
45 |
-
T.RandomSaturation(crop_prob),
|
46 |
-
T.RandomGamma(crop_prob),
|
47 |
-
T.Resize(min_size, max_size),
|
48 |
-
T.RandomHorizontalFlip(flip_prob),
|
49 |
-
T.RandomRotation(rotate_prob, rotate_degree),
|
50 |
-
T.ToTensor(),
|
51 |
-
normalize_transform,
|
52 |
-
]
|
53 |
-
)
|
54 |
-
return transform
|
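For context, `build_transforms` returns a `T.Compose` pipeline that datasets apply to each image together with its `BoxList` target. A minimal usage sketch, assuming the repo's default config already defines the `INPUT.*` keys referenced above (the YAML path in the comment is a placeholder):

```python
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data.transforms.build import build_transforms

# In real use a YAML config is merged on top of the defaults first, e.g.
# cfg.merge_from_file("path/to/config.yaml")  # placeholder path
train_transforms = build_transforms(cfg, is_train=True)
test_transforms = build_transforms(cfg, is_train=False)

# Inside a dataset's __getitem__ the pipeline is applied as:
#     image, target = self.transforms(image, target)
# where `image` is a PIL image and `target` a BoxList of annotations.
print(train_transforms)
print(test_transforms)
```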