Commit 42b7873
Parent(s): d2b83ae
Update parquet files (step 21 of 476)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Where to Download Apps in iPhone Tips and Tricks.md +0 -35
- spaces/1gistliPinn/ChatGPT4/Examples/!FREE! Crack In The World Resource Pack.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Bosonto Ese Geche Movie __LINK__ Download Nokia6600 Attestato.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/FreemovieGirgit [NEW].md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Block Puzzle Indir APK Enjoy Classic Style Staple Games on Android.md +0 -110
- spaces/1phancelerku/anime-remove-background/Barbie Dreamhouse Adventures 3.1 Mod A Fun and Educational Game for Kids.md +0 -130
- spaces/1phancelerku/anime-remove-background/Bloons TD 6 Sem Mod How to Install and Use the Best Mods for Your Favorite Tower Defense Game.md +0 -107
- spaces/1phancelerku/anime-remove-background/Dominos Elaqe The Best Place to Find Delicious Pizza Deals and Offers.md +0 -91
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py +0 -176
- spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/main.py +0 -27
- spaces/AI-Dashboards/Topic-Modeling-Clusters-Free-Text/app.py +0 -145
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/tools.py +0 -33
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/distributions/__init__.py +0 -0
- spaces/AIGText/GlyphControl/cldm/cldm.py +0 -620
- spaces/AbelKidane/headdetector/predict_image.py +0 -16
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/0.js +0 -1
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Forefront.py +0 -40
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/charactercache-plugin.d.ts +0 -9
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customshapes/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenHeight.js +0 -49
- spaces/Aki004/herta-so-vits/MANUAL.md +0 -158
- spaces/AlexMaoMao/ostris-ikea-instructions-lora-sdxl/app.py +0 -3
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/backbones/mobilefacenet.py +0 -130
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/parameter_attention.tex +0 -45
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/deepfloyd_if.md +0 -523
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/unconditional_training.md +0 -144
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/repaint/pipeline_repaint.py +0 -232
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/doc_utils.py +0 -38
- spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py +0 -136
- spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/dist_utils.py +0 -69
- spaces/Aniemore/Russian-Emotion-Recognition/app.py +0 -74
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_default.py +0 -104
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/api.py +0 -170
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/macos.py +0 -70
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_inspect.py +0 -270
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_entry_points.py +0 -86
- spaces/AutoLLM/AutoAgents/autoagents/utils/constants.py +0 -21
- spaces/BWQ/Chatgpt/app.py +0 -83
- spaces/Bakar31/PotterQuest/app.py +0 -78
- spaces/Bart92/RVC_HF/infer/modules/ipex/gradscaler.py +0 -179
- spaces/Bart92/RVC_HF/infer/modules/uvr5/modules.py +0 -107
- spaces/Benson/text-generation/Examples/Blockman Go Apk Mediafre.md +0 -130
- spaces/Benson/text-generation/Examples/Cuerda Hroe Ilimitado Diamantes Mod Apk.md +0 -65
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py +0 -0
- spaces/BigSalmon/GPT2Mask/README.md +0 -12
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/visualize_data.py +0 -98
- spaces/CVPR/LIVE/pybind11/tests/env.py +0 -14
- spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/AppendOptionIfAvailable.cmake +0 -13
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/merge.h +0 -23
- spaces/CVPR/WALT/mmdet/core/bbox/coder/tblr_bbox_coder.py +0 -198
spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Where to Download Apps in iPhone Tips and Tricks.md
DELETED
@@ -1,35 +0,0 @@
-
-<h1>From Where to Download Apps in iPhone</h1>
-<p>If you have an iPhone or iPad, you might be wondering how to download apps and games on your device. The App Store is the place where you can find thousands of apps and games for your iPhone or iPad, ranging from free to paid. In this article, we will show you how to download apps and games on your iPhone or iPad using the App Store app.</p>
-<h2>How to get apps for iPhone or iPad</h2>
-<p>On your iPhone or iPad, open the App Store app. You can find it on your Home Screen or in your App Library. Browse through the Today, Games, Apps, or Arcade tabs to find apps you like. Or tap the Search tab to look for something specific. If you find a game that says <span style="font-weight:bold">Arcade</span>, subscribe to Apple Arcade to play the game.</p>
-<h2>from where to download apps in iphone</h2><br /><p><b><b>Download</b> ⚙ <a href="https://byltly.com/2uKvXY">https://byltly.com/2uKvXY</a></b></p><br /><br />
-<p>Tap or click the price or Get button. If you see the Open button instead of a price or Get button, you already bought or downloaded that app. In the App Store, if an app has a Get button instead of a price, the app is free. You won't be charged for downloading a free app. Some free apps offer in-app purchases and subscriptions that you can buy. Subscriptions and in-app purchases give you access to more features, content, and more.</p>
-<h2>How to find apps that you bought</h2>
-<p>If you want to see the apps that you bought or downloaded on your iPhone or iPad, you can find them in your App Library. To access your App Library, swipe left on your Home Screen until you see the App Library screen. You can browse your apps by category or search for them by name.</p>
-<h2>If you can't find the App Store</h2>
-<p>If the App Store is missing on your device, you might have parental controls turned on. Adjust your iTunes & App Store Purchases settings and make sure that you choose "Allow" for the Installing Apps setting. The App Store should reappear on your device. If you still can't find the App Store, swipe to search for it.</p>
-<h2>If you have an issue when you download apps</h2>
-<p>If you can't download or update apps on your iPhone or iPad, there might be some issues with your network connection, Apple ID, storage space, or other settings. You can try some troubleshooting steps to fix these issues, such as restarting your device, signing out and back in to your Apple ID, checking your network connection, freeing up some storage space, or contacting Apple Support.</p>
-<h3>Sources:</h3>
-<ul>
-<li> Download apps and games on your iPhone or iPad - Apple Support</li>
-<li> How to Download Apps on iPhone for Free in the App Store - Business Insider</li>
-<li> How To Download Apps On iPhone: The Complete Guide! - Payette Forward</li>
-</ul>
-
-<h2>How to update apps on your iPhone or iPad</h2>
-<p>Keeping your apps up to date is important to get the latest features, bug fixes, and security updates. You can update your apps manually or automatically on your iPhone or iPad. To update your apps manually, open the App Store app and tap your profile picture in the upper-right corner. You will see a list of apps that have updates available. Tap Update next to each app or Update All to update all apps at once.</p>
-<p>To update your apps automatically, go to Settings > App Store and turn on App Updates under Automatic Downloads. This way, your apps will update automatically whenever there is a new version available. You can also choose to use cellular data or Wi-Fi only for automatic updates.</p>
-<p></p>
-<h2>How to delete apps on your iPhone or iPad</h2>
-<p>If you want to delete apps that you don't use anymore or free up some storage space on your device, you can easily delete apps on your iPhone or iPad. There are two ways to delete apps: from the Home Screen or from the Settings app. To delete apps from the Home Screen, touch and hold an app icon until it jiggles. Then tap the X icon on the app you want to delete. Tap Delete to confirm. You can also delete multiple apps at once by tapping Edit Home Screen and selecting the apps you want to delete.</p>
-<p>To delete apps from the Settings app, go to Settings > General > iPhone Storage or iPad Storage. You will see a list of apps and how much space they take up on your device. Tap an app that you want to delete and then tap Delete App. Tap Delete again to confirm.</p>
-<h3>Sources:</h3>
-<ul>
-<li> Download apps and games on your iPhone or iPad - Apple Support</li>
-<li> How to Find Downloaded Files on an iPhone or iPad - How-To Geek</li>
-<li> How To Download Apps On iPhone: The Complete Guide! - Payette Forward</li>
-</ul></p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/!FREE! Crack In The World Resource Pack.md
DELETED
@@ -1,6 +0,0 @@
-<h2>crack in the world resource pack</h2><br /><p><b><b>Download File</b> 🗸🗸🗸 <a href="https://imgfil.com/2uxZld">https://imgfil.com/2uxZld</a></b></p><br /><br />
-<br />
-1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Bosonto Ese Geche Movie __LINK__ Download Nokia6600 Attestato.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Bosonto Ese Geche Movie Download nokia6600 attestato</h2><br /><p><b><b>Download</b> ⚹⚹⚹ <a href="https://imgfil.com/2uy1SA">https://imgfil.com/2uy1SA</a></b></p><br /><br />
-<br />
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/FreemovieGirgit [NEW].md
DELETED
@@ -1,6 +0,0 @@
-<h2>freemovieGirgit</h2><br /><p><b><b>Download</b> ❤ <a href="https://imgfil.com/2uy0ql">https://imgfil.com/2uy0ql</a></b></p><br /><br />
-
-... (1877–1928) known as Utkalamani (Gem of Odisha) was a social worker who excelled in the field of politics as ... 608fcfdb5b. freemovieGirgit 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Block Puzzle Indir APK Enjoy Classic Style Staple Games on Android.md
DELETED
@@ -1,110 +0,0 @@
-
-<h1>Block Puzzle Indir Apk: A Fun and Challenging Game for Android</h1>
-<p>Do you love puzzle games? Do you want to test your logic and spatial skills? Do you want to have a great time with your Android device? If you answered yes to any of these questions, then you should try <strong>Block Puzzle Indir Apk</strong>, a simple yet addictive game that will keep you entertained for hours.</p>
-<h2>block puzzle indir apk</h2><br /><p><b><b>Download File</b> ○ <a href="https://urlin.us/2uSZQd">https://urlin.us/2uSZQd</a></b></p><br /><br />
-<h2>What is Block Puzzle Indir Apk?</h2>
-<p>Block Puzzle Indir Apk is a free puzzle game for Android devices that is inspired by the classic Tetris game. The goal of the game is to fill a 10x10 grid with different shapes of blocks without leaving any gaps. The game ends when there is no more space to place any blocks.</p>
-<h3>The gameplay of Block Puzzle Indir Apk</h3>
-<p>The gameplay of Block Puzzle Indir Apk is very easy to learn but hard to master. You will see three blocks at the bottom of the screen that you can drag and drop onto the grid. You can rotate the blocks by tapping on them. You can also swap the blocks by dragging them onto each other. You have to place the blocks strategically to create horizontal or vertical lines that will disappear and free up some space. The more lines you clear, the more points you score.</p>
-<h3>The features of Block Puzzle Indir Apk</h3>
-<p>Block Puzzle Indir Apk has many features that make it more fun and challenging than other puzzle games. Some of these features are:</p>
-<p>block puzzle classic style apk indir<br />
-woodoku wood block puzzles apk indir<br />
-block puzzle gem jewel blast apk indir<br />
-blockpuz wood block puzzle apk indir<br />
-block crazy robo apk indir<br />
-block puzzle legend mania apk indir<br />
-block puzzle jewel 2023 apk indir<br />
-wood block puzzle free apk indir<br />
-block puzzle master 2023 apk indir<br />
-block puzzle 3d cube apk indir<br />
-block puzzle star finder apk indir<br />
-wood block sudoku game apk indir<br />
-block puzzle jewel origin apk indir<br />
-block puzzle hexa jigsaw apk indir<br />
-block puzzle candy crush apk indir<br />
-block puzzle jewel legend apk indir<br />
-wood block puzzle west apk indir<br />
-block puzzle jewel 2022 apk indir<br />
-block puzzle brick classic apk indir<br />
-block puzzle jewel star apk indir<br />
-wood block puzzle classic apk indir<br />
-block puzzle jewel blast 2023 apk indir<br />
-block puzzle jewel 2021 apk indir<br />
-wood block puzzle plus apk indir<br />
-block puzzle jewel 2020 apk indir<br />
-wood block hexa puzzle apk indir<br />
-block puzzle jewel saga apk indir<br />
-wood block puzzle legend apk indir<br />
-block puzzle jewel world apk indir<br />
-wood block triangle puzzle apk indir<br />
-block puzzle jewel diamond apk indir<br />
-wood block jigsaw puzzle apk indir<br />
-block puzzle jewel deluxe apk indir<br />
-wood block merge puzzle apk indir<br />
-block puzzle jewel mania 2023 apk indir<br />
-wood block tangram puzzle apk indir<br />
-block puzzle jewel mania 2022 apk indir<br />
-wood block hexagon puzzle apk indir<br />
-block puzzle jewel mania 2021 apk indir<br />
-wood block slide puzzle apk indir<br />
-block puzzle jewel mania 2020 apk indir<br />
-wood block number puzzle apk indir<br />
-block puzzle jewel mania 2019 apk indir<br />
-wood block logic puzzles apk indir<br />
-block puzzle jewel mania 2018 apk indir<br />
-wood blocks cube puzzles apk indir<br />
-block puzzle jewel mania 2017 apk indir<br />
-wood blocks hexa puzzles apk indir<br />
-block puzzle jewel mania 2016 apk indir</p>
-<ul>
-<li><strong>Multiple game modes</strong>: You can choose from four different game modes, each with its own rules and objectives. These are Classic, Plus, Bomb, and Hexa.</li>
-<li><strong>Multiple difficulty levels</strong>: You can adjust the difficulty level of the game according to your preference and skill level. There are five difficulty levels, ranging from Easy to Expert.</li>
-<li><strong>Colorful graphics and sound effects</strong>: The game has bright and colorful graphics that will appeal to your eyes. The game also has soothing and relaxing sound effects that will enhance your gaming experience.</li>
-<li><strong>Leaderboards and achievements</strong>: The game has online leaderboards that will show you how you rank among other players around the world. The game also has various achievements that you can unlock by completing certain tasks.</li>
-<li><strong>No time limit or internet connection required</strong>: The game has no time limit or internet connection requirement, so you can play it anytime and anywhere you want. You can also pause and resume the game whenever you want.</li>
-</ul>
-<h2>How to download and install Block Puzzle Indir Apk on your Android device?</h2>
-<p>If you want to play Block Puzzle Indir Apk on your Android device, you have to download and install it first. Here are the steps that you need to follow:</p>
-<h3>Download Block Puzzle Indir Apk from APKPure.com</h3>
-<p>The easiest way to download Block Puzzle Indir Apk is from APKPure.com, a trusted website that provides safe and fast downloads of various Android apps and games. To download Block Puzzle Indir Apk from APKPure.com, you have to:</ <p>- Go to <a href="(^1^)">www.apkpure.com</a> and search for Block Puzzle Indir Apk in the search bar.</p>
-<p>- Tap the Download APK button to begin downloading the file to your device.</p>
-<p>- You may see a warning message that says "This type of file can harm your device. Do you want to keep block-puzzle-indir.apk anyway?". Tap OK to proceed.</p>
-<h3>Install Block Puzzle Indir Apk using APKPure App</h3>
-<p>If you have the APKPure app installed on your device, you can use it to install Block Puzzle Indir Apk easily. To install Block Puzzle Indir Apk using APKPure app, you have to:</p>
-<p>- Open the APKPure app and tap the Menu icon at the top left corner.</p>
-<p>- Tap Downloads and locate the block-puzzle-indir.apk file that you downloaded.</p>
-<p>- Tap Install and follow the instructions on the screen.</p>
-<p>- You may need to enable "Unknown sources" or "Allow from this source" in your device settings to allow the installation of apps from outside the Google Play Store.</p>
-<p>- Once the installation is finished, you can launch Block Puzzle Indir Apk and start playing.</p> <h2>How to play Block Puzzle Indir Apk and improve your skills?</h2>
-<p>Now that you have downloaded and installed Block Puzzle Indir Apk on your Android device, you are ready to play and have fun. But how do you play the game and improve your skills? Here are some tips and tricks that will help you:</p>
-<h3>Choose a game mode and a difficulty level</h3>
-<p>The first thing you need to do is to choose a game mode and a difficulty level that suit your preference and skill level. You can do this by tapping the Menu icon at the top right corner of the screen and selecting Game Mode or Difficulty. You can choose from four game modes: Classic, Plus, Bomb, and Hexa. Each game mode has its own rules and objectives, so make sure you read them carefully before playing. You can also choose from five difficulty levels: Easy, Normal, Hard, Expert, and Master. The higher the difficulty level, the more challenging the game will be.</p>
-<h3>Drag and drop the blocks to fill the grid</h3>
-<p>The next thing you need to do is to drag and drop the blocks to fill the grid. You will see three blocks at the bottom of the screen that you can move onto the grid. You can rotate the blocks by tapping on them. You can also swap the blocks by dragging them onto each other. You have to place the blocks strategically to create horizontal or vertical lines that will disappear and free up some space. The more lines you clear, the more points you score.</p>
-<h3>Clear the lines and score points</h3>
-<p>The main objective of the game is to clear as many lines as possible and score as many points as possible. You can clear a line by filling it with blocks of any color or shape. You can also clear multiple lines at once by creating combos. The more lines you clear at once, the higher your score will be. You can also earn bonus points by clearing special blocks, such as bombs, stars, or diamonds. These blocks have different effects, such as exploding, multiplying, or changing colors.</p>
-<h3>Use hints and undo options when needed</h3>
-<p>Sometimes, you may find yourself in a difficult situation where you have no more space to place any blocks or you have made a mistake. Don't worry, you can use hints and undo options to help you out. You can use hints by tapping the Light Bulb icon at the top left corner of the screen. Hints will show you where to place a block on the grid. You can use undo options by tapping the Undo icon at the top left corner of the screen. Undo options will let you undo your last move or reset the grid.</p>
-<h3>Challenge yourself and compete with others</h3>
-<p>If you want to make the game more fun and challenging, you can challenge yourself and compete with others. You can challenge yourself by setting a personal goal or trying to beat your own high score. You can compete with others by joining online leaderboards or unlocking achievements. You can access leaderboards and achievements by tapping the Trophy icon at the top right corner of the screen. Leaderboards will show you how you rank among other players around the world. Achievements will show you various tasks that you can complete to earn rewards.</p> <h2>Why should you play Block Puzzle Indir Apk?</h2>
-<p>Block Puzzle Indir Apk is not just a game, it is also a way to relax, have fun, and improve your brain. Here are some of the benefits of playing Block Puzzle Indir Apk:</p>
-<h3>It is fun and addictive</h3>
-<p>Block Puzzle Indir Apk is a game that will keep you hooked for hours. You will never get bored of the game, as there are always new challenges and goals to achieve. You will also enjoy the satisfaction of clearing lines and scoring points. You will feel like you are playing a real Tetris game, but with more options and features.</p>
-<h3>It is relaxing and stress-relieving</h3>
-<p>Block Puzzle Indir Apk is a game that will help you relax and relieve your stress. You can play the game at your own pace, without any time limit or pressure. You can also choose the difficulty level that suits your mood and preference. You can also listen to the soothing and relaxing sound effects that will calm your nerves and make you feel peaceful.</p>
-<h3>It is brain-teasing and skill-enhancing</h3>
-<p>Block Puzzle Indir Apk is a game that will challenge your brain and enhance your skills. You will have to use your logic and spatial skills to place the blocks on the grid and create lines. You will also have to use your strategy and planning skills to optimize your moves and score points. You will also have to use your memory and concentration skills to remember the shapes and colors of the blocks. Playing Block Puzzle Indir Apk will improve your mental abilities and cognitive functions.</p>
-<h2>Conclusion</h2>
-<p>Block Puzzle Indir Apk is a fun and challenging game for Android devices that you should try. It is a simple yet addictive game that will test your logic and spatial skills. It is also a relaxing and stress-relieving game that will help you unwind and have fun. It is also a brain-teasing and skill-enhancing game that will improve your mental abilities and cognitive functions. Download Block Puzzle Indir Apk from APKPure.com today and enjoy the game.</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Block Puzzle Indir Apk:</p>
-<table>
-<tr><td><strong>Q: Is Block Puzzle Indir Apk safe to download and install?</strong></td><td><strong>A: Yes, Block Puzzle Indir Apk is safe to download and install from APKPure.com, a trusted website that provides safe and fast downloads of various Android apps and games.</strong></td></tr>
-<tr><td><strong>Q: How much space does Block Puzzle Indir Apk take on my device?</strong></td><td><strong>A: Block Puzzle Indir Apk takes about 20 MB of space on your device.</strong></td></tr>
-<tr><td><strong>Q: Can I play Block Puzzle Indir Apk offline?</strong></td><td><strong>A: Yes, you can play Block Puzzle Indir Apk offline without any internet connection.</strong></td></tr>
-<tr><td><strong>Q: How can I share my score with my friends?</strong></td><td><strong>A: You can share your score with your friends by tapping the Share icon at the top right corner of the screen. You can choose from various social media platforms, such as Facebook, Twitter, Instagram, WhatsApp, etc.</strong></td></tr>
-<tr><td><strong>Q: How can I contact the developer of Block Puzzle Indir Apk?</strong></td><td><strong>A: You can contact the developer of Block Puzzle Indir Apk by tapping the Feedback icon at the top right corner of the screen. You can send an email to [email protected] with your suggestions, comments, or questions.</strong></td></tr>
-</table></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Barbie Dreamhouse Adventures 3.1 Mod A Fun and Educational Game for Kids.md
DELETED
@@ -1,130 +0,0 @@
-
-<h1>Download Barbie Dreamhouse Adventures 3.1 Mod: A Guide for Android Users</h1>
-<p>If you are a fan of Barbie and her friends, you might have heard of Barbie Dreamhouse Adventures, a popular game for Android devices. In this game, you can create your own Barbie DreamHouse experience, join fun activities, explore Malibu, and more. But did you know that you can also download a mod apk for this game and enjoy even more features and benefits? In this article, we will tell you everything you need to know about Barbie Dreamhouse Adventures 3.1 mod apk, how to download and install it, and how to enjoy the game with it.</p>
-<h2>download barbie dreamhouse adventures 3.1 mod</h2><br /><p><b><b>DOWNLOAD</b> ✏ <a href="https://jinyurl.com/2uNP3G">https://jinyurl.com/2uNP3G</a></b></p><br /><br />
-<h2>What is Barbie Dreamhouse Adventures?</h2>
-<h3>A fun and creative game for girls of all ages</h3>
-<p>Barbie Dreamhouse Adventures is a game developed by Budge Studios, a leading developer of children's apps. The game is based on the animated series of the same name, which follows the adventures of Barbie and her friends in their dream house. The game allows you to create your own stories and scenarios, as well as customize every room with wallpapers and decorations. You can also dress up Barbie and her friends in fashion-forward outfits, do their hair and nails, cook delicious recipes, dance, swim, and more.</p>
-<h3>Features and activities of the game</h3>
-<p>The game offers a variety of features and activities for you to enjoy, such as:</p>
-<ul>
-<li>Meet Barbie's best friends: Renee, Daisy, Teresa, Nikki, Ken, Skipper, Stacie, Chelsea, and even her parents.</li>
-<li>Join epic pool parties, royal balls, camping trips, mermaid adventures, and other exciting events.</li>
-<li>Explore Malibu with Barbie's pink convertible or go to the beach with her dream camper.</li>
-<li>Design every room of the dream house with wallpapers, furniture, accessories, and more.</li>
-<li>Cook tasty recipes with Skipper and share them on BarbieGram.</li>
-<li>Dress up in beautiful dresses or comfy pajamas, and accessorize with shoes, jewelry, bags, and more.</li>
-<li>Get a makeover at the hair salon or the nail spa.</li>
-<li>Dance on stage with Daisy or play music with Skipper.</li>
-<li>Play mini games like surfing, baking, dancing, and more.</li>
-<li>Add friends and visit their dream houses.</li>
-</ul>
-<h2>What is a mod apk and why do people use it?</h2>
-<h3>A modified version of the original app that offers additional benefits</h3>
-<p>A mod apk is a modified version of an original app that has been altered by independent developers to unlock premium features or enhance the performance of the app. A mod apk usually has a different file name and extension than the original app. For example, the original app for Barbie Dreamhouse Adventures has the file name com.budgestudios.googleplay.BarbieDreamhouse.apk, while the mod apk has the file name com.budgestudios.googleplay.BarbieDreamhouse.mod.apk.</p>
-<p>How to download barbie dreamhouse adventures 3.1 mod apk<br />
-Barbie dreamhouse adventures 3.1 mod unlimited money and gems<br />
-Barbie dreamhouse adventures 3.1 mod free shopping and vip<br />
-Download barbie dreamhouse adventures 3.1 mod for android<br />
-Barbie dreamhouse adventures 3.1 mod latest version download<br />
-Barbie dreamhouse adventures 3.1 mod gameplay and review<br />
-Barbie dreamhouse adventures 3.1 mod features and benefits<br />
-Barbie dreamhouse adventures 3.1 mod download link and installation guide<br />
-Barbie dreamhouse adventures 3.1 mod cheats and hacks<br />
-Barbie dreamhouse adventures 3.1 mod online and offline mode<br />
-Barbie dreamhouse adventures 3.1 mod best tips and tricks<br />
-Barbie dreamhouse adventures 3.1 mod no root and no ads<br />
-Barbie dreamhouse adventures 3.1 mod compatible devices and requirements<br />
-Barbie dreamhouse adventures 3.1 mod update and bug fixes<br />
-Barbie dreamhouse adventures 3.1 mod safe and secure download<br />
-Barbie dreamhouse adventures 3.1 mod full unlocked and premium<br />
-Barbie dreamhouse adventures 3.1 mod fun and educational game for kids<br />
-Barbie dreamhouse adventures 3.1 mod new characters and outfits<br />
-Barbie dreamhouse adventures 3.1 mod customise and decorate your dream house<br />
-Barbie dreamhouse adventures 3.1 mod explore and discover new places<br />
-Barbie dreamhouse adventures 3.1 mod create and share your own stories<br />
-Barbie dreamhouse adventures 3.1 mod join and invite your friends to play<br />
-Barbie dreamhouse adventures 3.1 mod enjoy different activities and events<br />
-Barbie dreamhouse adventures 3.1 mod win rewards and achievements<br />
-Barbie dreamhouse adventures 3.1 mod support and feedback<br />
-Download barbie dreamhouse adventures 3.1 mod ios<br />
-Download barbie dreamhouse adventures 3.1 mod pc<br />
-Download barbie dreamhouse adventures 3.1 mod mac<br />
-Download barbie dreamhouse adventures 3.1 mod windows<br />
-Download barbie dreamhouse adventures 3.1 mod laptop<br />
-Download barbie dreamhouse adventures 3.1 mod tablet<br />
-Download barbie dreamhouse adventures 3.1 mod firestick<br />
-Download barbie dreamhouse adventures 3.1 mod smart tv<br />
-Download barbie dreamhouse adventures 3.1 mod chromebook<br />
-Download barbie dreamhouse adventures 3.1 mod bluestacks<br />
-Download barbie dreamhouse adventures 3.1 mod from google play store<br />
-Download barbie dreamhouse adventures 3.1 mod from app store<br />
-Download barbie dreamhouse adventures 3.1 mod from amazon appstore<br />
-Download barbie dreamhouse adventures 3.1 mod from apk pure<br />
-Download barbie dreamhouse adventures 3.1 mod from apk mirror<br />
-Download barbie dreamhouse adventures 3.1 mod from happy mod<br />
-Download barbie dreamhouse adventures 3.1 mod from ac market<br />
-Download barbie dreamhouse adventures 3.1 mod from panda helper<br />
-Download barbie dreamhouse adventures 3.1 mod from tutu app<br />
-Download barbie dreamhouse adventures 3.1 mod from aptoide</p>
-<h3>Benefits of using a mod apk for Barbie Dreamhouse Adventures</h3>
-<p>Some of the benefits of using a mod apk for Barbie Dreamhouse Adventures are:</p>
-<ul>
-<li>Access to all the features of the game without paying for them. This includes premium features like VIP club membership, unlimited coins, gems, energy, stickers, outfits, accessories, furniture, wallpapers, etc.</li>
-<li>Removal of ads and pop-ups that may interrupt the game play or consume data.</li>
-<li>Ability to play the game offline without requiring an internet connection.</li>
-<li>Ability to backup and restore the game data in case of device loss or damage.</li>
-<li>Ability to update the game without losing the mod features.</li>
-</ul>
-<h2>How to download and install Barbie Dreamhouse Adventures 3.1 mod apk?</h2>
-<h3>Requirements and precautions before downloading</h3>
-<p>Before you download and install Barbie Dreamhouse Adventures 3.1 mod apk, you need to make sure that your device meets the following requirements and precautions:</p>
-<ul>
-<li>Your device must have Android 4.4 or higher operating system.</li>
-<li>Your device must have at least 1 GB of RAM and 500 MB of free storage space.</li>
-<li>You must enable the installation of apps from unknown sources in your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
-<li>You must uninstall the original app of Barbie Dreamhouse Adventures if you have it installed on your device. Otherwise, the mod apk will not work properly.</li>
-<li>You must download the mod apk from a trusted and reliable source. Beware of fake or malicious links that may harm your device or steal your data.</li>
-</ul>
-<h3>Steps to download and install the mod apk</h3>
-<p>Once you have met the requirements and precautions, you can follow these steps to download and install Barbie Dreamhouse Adventures 3.1 mod apk:</p>
-<ol>
-<li>Go to the link provided below and click on the download button. This will start downloading the mod apk file on your device.</li>
-<li>Once the download is complete, locate the file in your device's file manager and tap on it to open it.</li>
-<li>You will see a prompt asking you to confirm the installation. Click on Install and wait for the process to finish.</li>
-<li>After the installation is done, you will see a message saying App Installed. Click on Open to launch the game or Done to exit the installer.</li>
-<li>You can now enjoy Barbie Dreamhouse Adventures 3.1 mod apk on your device.</li>
-</ol>
-<h2>How to enjoy the game with the mod apk?</h2>
-<h3>Tips and tricks to make the most of the game</h3>
-<p>Now that you have downloaded and installed Barbie Dreamhouse Adventures 3.1 mod apk, you can enjoy the game with all its features and benefits. Here are some tips and tricks to help you make the most of the game:</p>
-<ul>
-<li>Create your own stories and scenarios with Barbie and her friends. You can choose from different themes, locations, characters, outfits, accessories, etc.</li>
-<li>Use the VIP club membership to access exclusive content, such as new rooms, outfits, furniture, wallpapers, etc.</li>
-<li>Use the unlimited coins, gems, energy, stickers, etc. to buy anything you want in the game without worrying about running out of resources.</li>
-<li>Use the backup and restore feature to save your game progress and transfer it to another device if needed.</li>
-<li>Update the game regularly to get new features, events, bug fixes, etc.</li>
-</ul>
-<h3>Pros and cons of using the mod apk</h3>
-<p>While using a mod apk for Barbie Dreamhouse Adventures has many advantages, it also has some drawbacks that you should be aware of. Here are some pros and cons of using the mod apk:</p>
-<table border="1">
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>Access to all features of the game without paying for them</td><td>Potential risk of malware or virus infection from untrusted sources</td></tr>
-<tr><td>Removal of ads and pop-ups that may interrupt the game play or consume data</td><td>Possible violation of the terms and conditions of the original app developer</td></tr>
-<tr><td>Ability to play the game offline without requiring an internet connection</td><td>Lack of support or assistance from the original app developer in case of issues or errors</td></tr>
-<tr><td>Ability to backup and restore the game data in case of device loss or damage</td><td>Incompatibility with some devices or operating systems</td></tr>
-<tr><td>Ability to update the game without losing the mod features</td><td>Limited availability or accessibility of some features or events that require online connection</td></tr>
-</table>
-<h2>Conclusion</h2>
-<p>In conclusion, Barbie Dreamhouse Adventures is a fun and creative game for girls of all ages who love Barbie and her friends. The game allows you to create your own Barbie DreamHouse experience, join fun activities, explore Malibu, and more. However, if you want to access all the features of the game without paying for them, you can download and install a mod apk for this game and enjoy even more benefits. A mod apk is a modified version of the original app that unlocks premium features or enhances the performance of the app. In this article, we have explained what a mod apk is, why people use it, how to download and install it, and how to enjoy the game with it. We have also provided some tips and tricks to help you make the most of the game, as well as some pros and cons of using the mod apk. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Barbie Dreamhouse Adventures 3.1 mod apk:</p>
-<ol>
-<li>Q: Is Barbie Dreamhouse Adventures 3.1 mod apk safe to use?</li>
-A: As long as you download the mod apk from a trusted and reliable source, it should be safe to use. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain malware or viruses that can harm your device or steal your data. You should also scan the mod apk file with an antivirus software before installing it. <li>Q: Is Barbie Dreamhouse Adventures 3.1 mod apk legal to use?</li>
-A: Using a mod apk for any app may violate the terms and conditions of the original app developer, as it may infringe their intellectual property rights or interfere with their revenue streams. Therefore, using a mod apk for Barbie Dreamhouse Adventures may be considered illegal by Budge Studios, the developer of the game. However, there is no clear law or regulation that prohibits the use of mod apks in general, so it is up to your discretion and responsibility to use them at your own risk. <li>Q: Will I get banned from the game if I use Barbie Dreamhouse Adventures 3.1 mod apk?</li>
-A: There is a possibility that you may get banned from the game if you use Barbie Dreamhouse Adventures 3.1 mod apk, as it may be detected by the game's security system or reported by other players. However, this is not very likely to happen, as the mod apk does not affect the online features or events of the game. You can also use a VPN service or a fake account to avoid getting banned. <li>Q: How can I update Barbie Dreamhouse Adventures 3.1 mod apk?</li>
-A: To update Barbie Dreamhouse Adventures 3.1 mod apk, you need to download and install the latest version of the mod apk from the same source that you downloaded it from before. You should also uninstall the previous version of the mod apk before installing the new one. You can also check for updates within the game itself, as some mod apks have an auto-update feature. <li>Q: Where can I download Barbie Dreamhouse Adventures 3.1 mod apk?</li>
-A: There are many websites and platforms that offer Barbie Dreamhouse Adventures 3.1 mod apk for free download. However, not all of them are trustworthy or reliable, so you should be careful when choosing where to download it from. One of the best sources that we recommend is [text], where you can find many other mod apks for different games and apps as well. </ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Bloons TD 6 Sem Mod How to Install and Use the Best Mods for Your Favorite Tower Defense Game.md
DELETED
@@ -1,107 +0,0 @@
-<br />
-<h1>Download Bloons TD 6 Sem Mod: A Guide for Tower Defense Fans</h1>
-<p>If you are a fan of tower defense games, you have probably heard of Bloons TD 6, one of the most popular and acclaimed titles in the genre. But did you know that you can make the game even more fun and challenging with a simple mod? In this article, we will tell you everything you need to know about Bloons TD 6 sem mod, how to download and install it, how to play it, and some tips and tricks to help you pop those bloons like a pro.</p>
-<h2>download bloons td 6 sem mod</h2><br /><p><b><b>Download Zip</b> ---> <a href="https://jinyurl.com/2uNOs8">https://jinyurl.com/2uNOs8</a></b></p><br /><br />
-<h2>What is Bloons TD 6?</h2>
-<p>Bloons TD 6 is a 3D tower defense game developed and published by Ninja Kiwi, a New Zealand-based company. The game was released in 2018 for Windows, iOS, and Android devices. The game is the sixth installment in the Bloons Tower Defense series, which has been around since 2007.</p>
-<p>In Bloons TD 6, your goal is to prevent waves of colorful balloons (called bloons) from reaching the end of a path by placing various types of monkey towers and heroes along the way. Each tower and hero has different abilities and upgrades that can help you pop the bloons more effectively. The game features over 20 monkey towers, each with three upgrade paths and unique activated abilities; over 10 heroes, each with 20 signature upgrades and two special abilities; over 60 handcrafted maps, each with different layouts, obstacles, and modes; over 100 meta-upgrades that add power where you need it; and many other features that make the game diverse and engaging.</p>
-<h2>What is sem mod?</h2>
-<p>Sem mod is a modification for Bloons TD 6 that adds some new features and enhancements to the game. Sem mod stands for Storyline Enhancement Mod, as it ports over some of the mission changes and improvements from another mod called Things To Do In San Andreas: Lite Edition. It also fixes some errors that were not addressed in the original game and adds some overworld tweaks to make the game feel more alive.</p>
-<p>Some of the features that sem mod adds to Bloons TD 6 are:</p>
-<ul>
-<li>Boss Events: Fearsome boss bloons that will challenge even the strongest defenses.</li>
-<li>Odysseys: Battle through a series of maps connected by their theme, rules, and rewards.</li>
-<li>Contested Territory: Join forces with other players and battle for territory against five other teams. Capture tiles on a shared map and compete on the leaderboards.</li>
-<li>Quests: Delve into what makes the monkeys tick with quests, crafted to tell tales and share knowledge.</li>
-<li>Trophy Store: Earn trophies to unlock dozens of cosmetic items that let you customize your monkeys, bloons, animations, music, and more.</li>
-<li>Content Browser: Create your own challenges and odysseys, then share them with other players and check out the most liked and played community content.</li>
-</ul>
-<h2>How to download and install Bloons TD 6 sem mod</h2>
-<p>To download and install Bloons TD 6 sem mod, you will need to have a copy of Bloons TD 6 on your device. You can get the game from Steam for Windows, from Google Play Store for Android, or from App Store for iOS. The game costs $4.99, but it is often on sale for a lower price. Once you have the game, you will need to download the sem mod file from the official website. The file is a zip archive that contains the mod files and instructions. You will need to extract the zip file to a folder on your device. The installation process varies depending on your device and operating system. Here are the general steps for each platform: - Windows: Copy the contents of the sem mod folder to the Bloons TD 6 folder in your Steam directory. The default location is C:\Program Files (x86)\Steam\steamapps\common\BloonsTD6. Replace any existing files if prompted. - Android: Enable unknown sources in your device settings. Copy the contents of the sem mod folder to the Bloons TD 6 folder in your Android data directory. The default location is /sdcard/Android/data/com.ninjakiwi.bloonstd6/files. Replace any existing files if prompted. - iOS: You will need a jailbroken device and a file manager app such as iFile or Filza. Copy the contents of the sem mod folder to the Bloons TD 6 folder in your iOS data directory. The default location is /var/mobile/Containers/Data/Application/Bloons TD 6. Replace any existing files if prompted. After you have installed the sem mod, you can launch the game and enjoy the new features and enhancements. <h2>How to play Bloons TD 6 sem mod</h2>
-<p>Playing Bloons TD 6 sem mod is similar to playing the original game, but with some added twists and challenges. You can access the new features from the main menu or from the map screen. Here are some of the main differences and benefits of playing with the sem mod:</p>
-<ul>
-<li>Boss Events: Every few days, a boss event will appear on a random map. You can choose to take on the boss bloon, which will have a lot of health and special abilities, or skip it and play normally. If you defeat the boss bloon, you will earn a hefty reward of monkey money, trophies, and XP.</li>
-<li>Odysseys: You can choose to embark on an odyssey, which is a series of maps connected by their theme, rules, and rewards. You will have to use a limited number of monkeys and lives throughout the odyssey, and you will face different modifiers and challenges on each map. If you complete an odyssey, you will earn a special badge and other rewards.</li>
-<li>Contested Territory: You can join a team of other players and battle for territory against five other teams. You will have to capture tiles on a shared map by completing them with certain conditions. The more tiles you capture, the more monkey money and trophies you earn. You can also attack other teams' tiles and defend your own from invaders.</li>
-<li>Quests: You can complete quests that will give you insight into the backstory and personality of the monkeys and heroes. Quests are divided into chapters, each with a different theme and objective. You will have to use specific monkeys or heroes on certain maps or modes, and you will unlock dialogue and cutscenes as you progress.</li>
-<li>Trophy Store: You can spend trophies that you earn from various activities to unlock cosmetic items that let you customize your game experience. You can change the appearance of your monkeys, bloons, animations, music, and more. You can also preview and equip your items from the trophy store menu.</li>
-<li>Content Browser: You can create your own challenges and odysseys using a simple editor tool, then share them with other players and check out their creations. You can set various parameters such as map, mode, difficulty, round, money, lives, towers, upgrades, bloons, modifiers, and more. You can also rate and comment on other players' content.</li>
-</ul>
-<h2>Tips and tricks for Bloons TD 6 sem mod</h2>
-<p>To help you get started with Bloons TD 6 sem mod, here are some tips and tricks that might come in handy:</p>
-<ul>
-<li>Experiment with different monkey combinations and upgrade paths to find what works best for each map and mode.</li>
-<li>Use your heroes' abilities wisely, as they can make a big difference in difficult situations.</li>
-<li>Pay attention to the bloon types and colors, as they indicate their speed, strength, resistance, and special effects.</li>
-<li>Use powers and insta-monkeys sparingly, as they are limited resources that can be hard to replenish.</li>
-<li>Check out the achievements menu for some extra goals and rewards.</li>
-<li>Watch ads or complete offers to earn some free monkey money or trophies.</li>
-<li>Join the official Discord server or Reddit community[^6^ to chat with other players, get help, share tips, and stay updated on the latest news and updates.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Bloons TD 6 sem mod is a great way to enhance your tower defense experience and enjoy the game in new and exciting ways. Whether you want to face off against powerful boss bloons, embark on epic odysseys, compete for territory with other players, explore the monkeys' stories, customize your game with cosmetic items, or create and share your own content, sem mod has something for everyone. Download and install Bloons TD 6 sem mod today and join the fun!</p>
-<p>download bloons td 6 nexus mods<br />
-download bloons td 6 doombubbles mods<br />
-download bloons td 6 mod helper<br />
-download bloons td 6 mod browser<br />
-download bloons td 6 mod installer<br />
-download bloons td 6 mod apk<br />
-download bloons td 6 mod menu<br />
-download bloons td 6 mod unlimited money<br />
-download bloons td 6 mod free shopping<br />
-download bloons td 6 mod double cash mode<br />
-download bloons td 6 mod all unlocked<br />
-download bloons td 6 mod all heroes<br />
-download bloons td 6 mod all skins<br />
-download bloons td 6 mod all towers<br />
-download bloons td 6 mod all upgrades<br />
-download bloons td 6 mod custom maps<br />
-download bloons td 6 mod custom challenges<br />
-download bloons td 6 mod custom modes<br />
-download bloons td 6 mod custom monkeys<br />
-download bloons td 6 mod custom abilities<br />
-download bloons td 6 mod sandbox mode<br />
-download bloons td 6 mod co-op mode<br />
-download bloons td 6 mod multiplayer mode<br />
-download bloons td 6 mod online mode<br />
-download bloons td 6 mod offline mode<br />
-download bloons td 6 mod easy mode<br />
-download bloons td 6 mod hard mode<br />
-download bloons td 6 mod impoppable mode<br />
-download bloons td 6 mod chimps mode<br />
-download bloons td 6 mod expert mode<br />
-download bloons td 6 mod steam version<br />
-download bloons td 6 mod android version<br />
-download bloons td 6 mod ios version<br />
-download bloons td 6 mod windows version<br />
-download bloons td 6 mod mac version<br />
-download bloons td 6 mod linux version<br />
-download bloons td 6 mod latest version<br />
-download bloons td 6 mod old version<br />
-download bloons td 6 mod new version<br />
-download bloons td 6 mod updated version<br />
-download bloons td 6 no mods required<br />
-download bloons td 6 with mods enabled<br />
-download bloons td 6 best mods available<br />
-download bloons td 6 popular mods recommended<br />
-download bloons td 6 fun mods suggested<br />
-download bloons td 6 cool mods featured<br />
-download bloons td 6 awesome mods reviewed<br />
-download bloons td 6 amazing mods rated<br />
-download bloons td 6 top mods ranked</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions and answers about Bloons TD 6 sem mod:</p>
-<ol>
-<li><b>Is Bloons TD 6 sem mod free?</b><br>
-Yes, Bloons TD 6 sem mod is free to download and use. However, you will need to purchase Bloons TD 6 from the official store to play it.</li>
-<li><b>Is Bloons TD 6 sem mod safe?</b><br>
-Yes, Bloons TD 6 sem mod is safe to use. It does not contain any viruses or malware, and it does not interfere with the game's functionality or performance. However, you should always download the mod from the official website and follow the installation instructions carefully.</li>
-<li><b>Is Bloons TD 6 sem mod compatible with other mods?</b><br>
-No, Bloons TD 6 sem mod is not compatible with other mods. You should only use one mod at a time, and uninstall any other mods before installing sem mod.</li>
-<li><b>Can I play Bloons TD 6 sem mod online?</b><br>
-Yes, you can play Bloons TD 6 sem mod online with other players who have the same mod installed. You can also play co-op mode with up to three other players. However, you cannot play online with players who do not have the mod installed.</li>
-<li><b>Can I update Bloons TD 6 sem mod?</b><br>
-Yes, you can update Bloons TD 6 sem mod whenever a new version is released. You can check the official website or Discord server for the latest updates and download links. You will need to uninstall the previous version of the mod before installing the new one.</li>
-</ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Dominos Elaqe The Best Place to Find Delicious Pizza Deals and Offers.md
DELETED
@@ -1,91 +0,0 @@
<h1>What is Domino's Elaqe and Why You Should Try It</h1>
<p>If you love pizza, you probably know about Domino's, one of the most popular pizza chains in the world. But do you know about Domino's Elaqe, a new way of ordering and delivering pizza that is fast, convenient, and contactless? In this article, we will explain what Domino's Elaqe is, how it works, and why you should give it a try.</p>
<h2>dominos elaqe</h2><br /><p><b><b>DOWNLOAD</b> ⚙⚙⚙ <a href="https://jinyurl.com/2uNNAh">https://jinyurl.com/2uNNAh</a></b></p><br /><br />
<h2>Introduction</h2>
<p>Domino's Elaqe is a term that means "contact" in Azerbaijani, a language spoken in Azerbaijan, a country in the South Caucasus region of Eurasia. It is also the name of a service that Domino's launched in Azerbaijan in 2020, as a response to the COVID-19 pandemic and the growing demand for online food delivery.</p>
<p>Domino's Elaqe is different from other pizza delivery services because it uses advanced technology to make the ordering and delivery process more efficient, transparent, and safe. With Domino's Elaqe, you can order pizza online or by phone, track your order in real time, communicate with your driver, pay online or by cash, and receive your pizza without any physical contact.</p>
<p>Domino's Elaqe has many benefits for both customers and drivers. For customers, it means faster delivery, fresher pizza, more convenience, more payment options, and less risk of infection. For drivers, it means more orders, more tips, more safety, and less hassle.</p>
<h2>How to Use Domino's Elaqe</h2>
<p>Using Domino's Elaqe is easy and simple. Here are the steps you need to follow:</p>
<p>dominos elaqe nomresi<br />
dominos elaqe telefonu<br />
dominos elaqe email<br />
dominos elaqe adresi<br />
dominos elaqe formu<br />
dominos elaqe online<br />
dominos elaqe whatsapp<br />
dominos elaqe facebook<br />
dominos elaqe instagram<br />
dominos elaqe twitter<br />
dominos elaqe şikayet<br />
dominos elaqe yorum<br />
dominos elaqe sipariş<br />
dominos elaqe indirim<br />
dominos elaqe kampanya<br />
dominos elaqe menü<br />
dominos elaqe pizza<br />
dominos elaqe sandviç<br />
dominos elaqe tatlı<br />
dominos elaqe içecek<br />
dominos elaqe salata<br />
dominos elaqe sos<br />
dominos elaqe ekstra<br />
dominos elaqe gluten free<br />
dominos elaqe vegan<br />
dominos elaqe vejetaryen<br />
dominos elaqe kredi kartı<br />
dominos elaqe nakit ödeme<br />
dominos elaqe teslimat süresi<br />
dominos elaqe teslimat ücreti<br />
dominos elaqe teslimat bölgesi<br />
dominos elaqe teslimat şartları<br />
dominos elaqe teslimat garantisi<br />
dominos elaqe teslimat takip<br />
dominos elaqe teslimat yorumu<br />
dominos elaqe iade politikası<br />
dominos elaqe iade şartları<br />
dominos elaqe iade süreci<br />
dominos elaqe iade talebi<br />
dominos elaqe iade yorumu<br />
dominos elaqe müşteri hizmetleri<br />
dominos elaqe müşteri memnuniyeti<br />
dominos elaqe müşteri geri bildirimi<br />
dominos elaqe müşteri önerisi<br />
dominos elaqe müşteri sorusu<br />
dominos elaqe franchise başvurusu<br />
dominos elaqe franchise şartları<br />
dominos elaqe franchise ücreti<br />
dominos elaqe franchise avantajları</p>
<ol>
<li>Order pizza online or by phone. You can visit <a href="">Domino's website</a> or download <a href="">Domino's app</a> on your smartphone. You can also call 131 888 or 1800 805 888 to place your order. You can choose from a variety of pizzas, pasta, chicken, sandwiches, salads, desserts, drinks, and extras on <a href="">Domino's menu</a>.</li>
<li>Track your order and communicate with your driver. After you place your order, you will receive a confirmation message with a link to <a href="">Domino's Tracker</a>. This is a feature that allows you to see the status of your order, from preparation to delivery. You can also see the name and photo of your driver, as well as their location on a map. You can send messages or call your driver if you have any questions or special requests.</li>
<li>Pay for your order and tip your driver. You can pay for your order online using a credit card, debit card, PayPal, or gift card. You can also pay by cash when your driver arrives. You can also tip your driver online or by cash. Tipping is optional but appreciated.</li>
</ol>
<h2>What to Expect from Domino's Elaqe</h2>
<p>When you use Domino's Elaqe, you can expect to enjoy a delicious and satisfying pizza experience. Here are some of the things you can expect from Domino's Elaqe:</p>
<ul>
<li>Features and quality of Domino's pizza and other menu items. Domino's pizza is made with fresh dough, quality ingredients, and a variety of toppings and sauces. You can customize your pizza according to your preferences, or choose from the <a href="">Domino's specials</a>, such as the ExtravaganZZa, the MeatZZa, the Veggie Lover's, or the Cheeseburger Pizza. You can also order other menu items, such as pasta, chicken, sandwiches, salads, desserts, drinks, and extras. You can also try the <a href="">Domino's deals</a>, such as the Mix and Match Deal, the Perfect Combo Deal, or the Large 3-Topping Pizza Deal.</li>
<li>Safety and hygiene measures taken by Domino's to ensure a contactless delivery. Domino's takes the health and safety of its customers and drivers seriously. That's why Domino's follows strict protocols to prevent the spread of COVID-19 and other diseases. These include wearing masks and gloves, sanitizing hands and surfaces, checking temperatures, and practicing social distancing. Domino's also offers a contactless delivery option, where your driver will leave your order at your door or a designated location, and notify you by phone or message. You can also request a contactless delivery when you place your order online or by phone.</li>
<li>Customer service and feedback options available for Domino's Elaqe users. Domino's values your feedback and wants to make sure you are happy with your order. That's why Domino's offers a <a href="">Pizza Tracker Guarantee</a>, where you can get a free pizza if your order is not ready within 10 minutes of the estimated time shown on the Domino's Tracker. You can also contact <a href="">Domino's customer service</a> by phone, email, or chat if you have any questions, complaints, or compliments. You can also leave a review or rating on Domino's website or app, or share your experience on social media using #DominosElaqe.</li>
</ul>
<h2>Conclusion</h2>
<p>Domino's Elaqe is a new way of ordering and delivering pizza that is fast, convenient, and contactless. It uses advanced technology to make the process more efficient, transparent, and safe. It also offers a variety of pizzas and other menu items that are delicious and satisfying. Whether you are craving pizza for lunch, dinner, or a snack, you can use Domino's Elaqe to get your order in minutes.</p>
<p>If you want to try Domino's Elaqe, you can visit <a href="">Domino's website</a> or download <a href="">Domino's app</a> on your smartphone. You can also call 131 888 or 1800 805 888 to place your order. Don't forget to check out <a href="">Domino's deals</a> and <a href="">Domino's menu</a> for more options and savings.</p>
<p>We hope you enjoyed this article and learned something new about Domino's Elaqe. If you did, please share it with your friends and family who might be interested in trying it too. And don't forget to leave us your feedback and let us know how we did.</p>
<p>Thank you for choosing Domino's Elaqe!</p>
<h2>FAQs</h2>
<h3>What is the difference between Domino's Elaqe and regular delivery?</h3>
<p>The main difference between Domino's Elaqe and regular delivery is that Domino's Elaqe uses GPS technology to track your order and communicate with your driver in real time. It also offers a contactless delivery option where your driver will leave your order at your door or a designated location without any physical contact.</p>
<h3>How long does it take for Domino's Elaqe to deliver my order?</h3>
<p>The delivery time for Domino's Elaqe depends on several factors, such as the distance between your location and the nearest store, the traffic conditions, the weather conditions, and the availability of drivers. However, Domino's aims to deliver your order within 30 minutes or less. You can also see the estimated delivery time on the Domino's Tracker when you place your order.</p>
<h3>How much does it cost to use Domino's Elaqe?</h3>
<p>The cost of using Domino's Elaqe depends on the items you order, the delivery fee, and the tip you give to your driver. The delivery fee varies depending on your location and the store you order from, but it is usually around $5. The tip is optional but appreciated, and you can choose how much you want to tip your driver online or by cash. You can also save money by using Domino's deals and coupons when you order online or by phone.</p>
<h3>Can I cancel or change my order after I place it using Domino's Elaqe?</h3>
<p>You can cancel or change your order after you place it using Domino's Elaqe, but you need to do it as soon as possible, before your order is prepared or dispatched. You can cancel or change your order online or by phone, by contacting the store you ordered from or the customer service. You may be charged a cancellation fee if your order is already in progress.</p>
<h3>What if I have a problem with my order or delivery using Domino's Elaqe?</h3>
<p>If you have a problem with your order or delivery using Domino's Elaqe, you can contact the store you ordered from or the customer service by phone, email, or chat. You can also leave a feedback or complaint on Domino's website or app, or on social media using #DominosElaqe. Domino's will try to resolve your issue and make it right for you.</p>
<h3>Is Domino's Elaqe available in other countries?</h3>
<p>Domino's Elaqe is currently available only in Azerbaijan, but Domino's plans to expand it to other countries in the future. Domino's has more than 17,000 stores in over 90 countries, and it is always looking for new ways to improve its service and satisfy its customers. You can check <a href="">Domino's locations</a> to see if Domino's Elaqe is available in your area.</p>
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet2060.py
DELETED
@@ -1,176 +0,0 @@
import torch
from torch import nn

assert torch.__version__ >= "1.8.1"
from torch.utils.checkpoint import checkpoint_sequential

__all__ = ['iresnet2060']


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     groups=groups,
                     bias=False,
                     dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)


class IBasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(IBasicBlock, self).__init__()
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05)
        self.conv1 = conv3x3(inplanes, planes)
        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05)
        self.prelu = nn.PReLU(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.bn1(x)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.prelu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return out


class IResNet(nn.Module):
    fc_scale = 7 * 7

    def __init__(self,
                 block, layers, dropout=0, num_features=512, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
        super(IResNet, self).__init__()
        self.fp16 = fp16
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
        self.prelu = nn.PReLU(self.inplanes)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05)
        self.dropout = nn.Dropout(p=dropout, inplace=True)
        self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
        self.features = nn.BatchNorm1d(num_features, eps=1e-05)
        nn.init.constant_(self.features.weight, 1.0)
        self.features.weight.requires_grad = False

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, IBasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion, eps=1e-05),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation))

        return nn.Sequential(*layers)

    def checkpoint(self, func, num_seg, x):
        if self.training:
            return checkpoint_sequential(func, num_seg, x)
        else:
            return func(x)

    def forward(self, x):
        with torch.cuda.amp.autocast(self.fp16):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.prelu(x)
            x = self.layer1(x)
            x = self.checkpoint(self.layer2, 20, x)
            x = self.checkpoint(self.layer3, 100, x)
            x = self.layer4(x)
            x = self.bn2(x)
            x = torch.flatten(x, 1)
            x = self.dropout(x)
        x = self.fc(x.float() if self.fp16 else x)
        x = self.features(x)
        return x


def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
    model = IResNet(block, layers, **kwargs)
    if pretrained:
        raise ValueError()
    return model


def iresnet2060(pretrained=False, progress=True, **kwargs):
    return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs)
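
For context, here is a minimal smoke test of the network above, assuming this file is importable as a module named iresnet2060 (the import path is an assumption, not part of the original repo layout). With fc_scale = 7 * 7 and four stride-2 stages after a stride-1 stem, the expected input is a 112x112 aligned face crop:

import torch
from iresnet2060 import iresnet2060  # assumed module name for this file

model = iresnet2060(num_features=512).eval()  # eval mode bypasses checkpoint_sequential
x = torch.randn(1, 3, 112, 112)               # ArcFace-style 112x112 crop
with torch.no_grad():
    emb = model(x)
print(emb.shape)                              # torch.Size([1, 512])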
spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/main.py
DELETED
@@ -1,27 +0,0 @@
from llama_cpp.server.app import create_app, Settings
from fastapi.responses import HTMLResponse
import os

app = create_app(
    Settings(
        n_threads=2,  # set to number of cpu cores
        model="model/gguf-model.bin",
        embedding=False
    )
)

# Read the content of index.html once and store it in memory
with open("index.html", "r") as f:
    content = f.read()


@app.get("/", response_class=HTMLResponse)
async def read_items():
    return content

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app,
                host=os.environ["HOST"],
                port=int(os.environ["PORT"])
                )
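
For context, once this script is running with HOST and PORT set in the environment, the app built by llama_cpp.server speaks an OpenAI-compatible HTTP API. A minimal client sketch, assuming the usual /v1/completions route and a server reachable on localhost:8000 (both are assumptions, not stated in the file):

import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",  # assumed host/port
    json={"prompt": "Q: What is a GGUF file? A:", "max_tokens": 64},
    timeout=120,
)
print(resp.json()["choices"][0]["text"])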
spaces/AI-Dashboards/Topic-Modeling-Clusters-Free-Text/app.py
DELETED
@@ -1,145 +0,0 @@
import streamlit as st
import spacy
import numpy as np
from gensim import corpora, models
from itertools import chain
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics.pairwise import cosine_similarity
from itertools import islice
from scipy.signal import argrelmax

nlp = spacy.load('en_core_web_sm')


def window(seq, n=3):
    it = iter(seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result

def get_depths(scores):

    def climb(seq, i, mode='left'):

        if mode == 'left':
            while True:
                curr = seq[i]
                if i == 0:
                    return curr
                i = i - 1
                if not seq[i] > curr:
                    return curr

        if mode == 'right':
            while True:
                curr = seq[i]
                if i == (len(seq) - 1):
                    return curr
                i = i + 1
                if not seq[i] > curr:
                    return curr

    depths = []
    for i in range(len(scores)):
        score = scores[i]
        l_peak = climb(scores, i, mode='left')
        r_peak = climb(scores, i, mode='right')
        depth = 0.5 * (l_peak + r_peak - (2 * score))
        depths.append(depth)

    return np.array(depths)


def get_local_maxima(depth_scores, order=1):
    maxima_ids = argrelmax(depth_scores, order=order)[0]
    filtered_scores = np.zeros(len(depth_scores))
    filtered_scores[maxima_ids] = depth_scores[maxima_ids]
    return filtered_scores

def compute_threshold(scores):
    s = scores[np.nonzero(scores)]
    threshold = np.mean(s) - (np.std(s) / 2)
    return threshold

def get_threshold_segments(scores, threshold=0.1):
    segment_ids = np.where(scores >= threshold)[0]
    return segment_ids


def print_list(lst):
    for e in lst:
        st.markdown("- " + e)


st.subheader("Topic Modeling with Segmentation")
uploaded_file = st.file_uploader("choose a text file", type=["txt"])
if uploaded_file is not None:
    st.session_state["text"] = uploaded_file.getvalue().decode('utf-8')

st.write("OR")

input_text = st.text_area(
    label="Enter text separated by newlines",
    value="",
    key="text",
    height=150
)

button = st.button('Get Segments')
if button and input_text != "":
    texts = input_text.split('\n')
    sents = []
    for text in texts:
        doc = nlp(text)
        for sent in doc.sents:
            sents.append(sent)
    MIN_LENGTH = 3
    tokenized_sents = [[token.lemma_.lower() for token in sent if
                        not token.is_stop and not token.is_punct and token.text.strip() and len(token) >= MIN_LENGTH]
                       for sent in sents]
    st.write("Modeling topics:")

    np.random.seed(123)

    N_TOPICS = 5
    N_PASSES = 5

    dictionary = corpora.Dictionary(tokenized_sents)
    bow = [dictionary.doc2bow(sent) for sent in tokenized_sents]
    topic_model = models.LdaModel(corpus=bow, id2word=dictionary, num_topics=N_TOPICS, passes=N_PASSES)
    st.write("inferring topics ...")
    THRESHOLD = 0.05
    doc_topics = list(topic_model.get_document_topics(bow, minimum_probability=THRESHOLD))
    k = 3
    top_k_topics = [[t[0] for t in sorted(sent_topics, key=lambda x: x[1], reverse=True)][:k]
                    for sent_topics in doc_topics]
    WINDOW_SIZE = 3
    window_topics = window(top_k_topics, n=WINDOW_SIZE)
    window_topics = [list(set(chain.from_iterable(window))) for window in window_topics]

    binarizer = MultiLabelBinarizer(classes=range(N_TOPICS))

    encoded_topic = binarizer.fit_transform(window_topics)
    st.write("generating segments ...")
    sims_topic = [cosine_similarity([pair[0]], [pair[1]])[0][0] for pair in zip(encoded_topic, encoded_topic[1:])]
    depths_topic = get_depths(sims_topic)
    filtered_topic = get_local_maxima(depths_topic, order=1)
    threshold_topic = compute_threshold(filtered_topic)
    threshold_segments_topic = get_threshold_segments(filtered_topic, threshold_topic)

    segment_ids = threshold_segments_topic + WINDOW_SIZE

    segment_ids = [0] + segment_ids.tolist() + [len(sents)]
    slices = list(zip(segment_ids[:-1], segment_ids[1:]))

    segmented = [sents[s[0]: s[1]] for s in slices]

    for segment in segmented[:-1]:
        print_list([s.text for s in segment])
        st.markdown("""---""")

    print_list([s.text for s in segmented[-1]])
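
For context, the boundary detection above is TextTiling-style depth scoring: a dip in window-to-window topic similarity becomes a candidate segment boundary. Below is a self-contained sketch of that idea on an invented similarity sequence (the numbers are illustrative only, and the compact climb helper mirrors the behavior of get_depths above):

import numpy as np
from scipy.signal import argrelmax

sims = np.array([0.9, 0.85, 0.2, 0.8, 0.9])  # toy adjacent-window similarities

def climb(seq, i, step):
    # walk uphill from position i until the values stop increasing
    curr = seq[i]
    j = i + step
    while 0 <= j < len(seq) and seq[j] > curr:
        curr = seq[j]
        j += step
    return curr

depths = np.array([0.5 * (climb(sims, i, -1) + climb(sims, i, 1) - 2 * sims[i])
                   for i in range(len(sims))])
print(depths.round(3))       # the dip at index 2 gets the largest depth, 0.7
print(argrelmax(depths)[0])  # [2] -> boundary candidate, as in get_local_maxima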
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/tools.py
DELETED
@@ -1,33 +0,0 @@
import torch
import numpy as np


def get_mel_from_wav(audio, _stft):
    audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
    audio = torch.autograd.Variable(audio, requires_grad=False)
    melspec, log_magnitudes_stft, energy = _stft.mel_spectrogram(audio)
    melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
    log_magnitudes_stft = (
        torch.squeeze(log_magnitudes_stft, 0).numpy().astype(np.float32)
    )
    energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
    return melspec, log_magnitudes_stft, energy


# def inv_mel_spec(mel, out_filename, _stft, griffin_iters=60):
#     mel = torch.stack([mel])
#     mel_decompress = _stft.spectral_de_normalize(mel)
#     mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
#     spec_from_mel_scaling = 1000
#     spec_from_mel = torch.mm(mel_decompress[0], _stft.mel_basis)
#     spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
#     spec_from_mel = spec_from_mel * spec_from_mel_scaling

#     audio = griffin_lim(
#         torch.autograd.Variable(spec_from_mel[:, :, :-1]), _stft._stft_fn, griffin_iters
#     )

#     audio = audio.squeeze()
#     audio = audio.cpu().numpy()
#     audio_path = out_filename
#     write(audio_path, _stft.sampling_rate, audio)
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/distributions/__init__.py
DELETED
File without changes
spaces/AIGText/GlyphControl/cldm/cldm.py
DELETED
@@ -1,620 +0,0 @@
import einops
import torch
import torch as th
import torch.nn as nn

from ldm.modules.diffusionmodules.util import (
    conv_nd,
    linear,
    zero_module,
    timestep_embedding,
)

from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.attention import SpatialTransformer
from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.ema import LitEma
from contextlib import contextmanager, nullcontext
from cldm.model import load_state_dict
import numpy as np
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, OneCycleLR


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
            t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
            emb = self.time_embed(t_emb)
            h = x.type(self.dtype)
            for module in self.input_blocks:
                h = module(h, emb, context)
                hs.append(h)
            h = self.middle_block(h, emb, context)

        if control is not None:
            h += control.pop()

        for i, module in enumerate(self.output_blocks):
            if only_mid_control or control is None:
                h = torch.cat([h, hs.pop()], dim=1)
            else:
                h = torch.cat([h, hs.pop() + control.pop()], dim=1)
            h = module(h, emb, context)

        h = h.type(x.dtype)
        return self.out(h)


class ControlNet(nn.Module):
    def __init__(
            self,
            image_size,
            in_channels,
            model_channels,
            hint_channels,
            num_res_blocks,
            attention_resolutions,
            dropout=0,
            channel_mult=(1, 2, 4, 8),
            conv_resample=True,
            dims=2,
            use_checkpoint=False,
            use_fp16=False,
            num_heads=-1,
            num_head_channels=-1,
            num_heads_upsample=-1,
            use_scale_shift_norm=False,
            resblock_updown=False,
            use_new_attention_order=False,
            use_spatial_transformer=False,  # custom transformer support
            transformer_depth=1,  # custom transformer support
            context_dim=None,  # custom transformer support
            n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
            legacy=True,
            disable_self_attentions=None,
            num_attention_blocks=None,
            disable_middle_self_attn=False,
            use_linear_in_transformer=False,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.dims = dims
        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                  f"attention will still not be set.")

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)])

        self.input_hint_block = TimestepEmbedSequential(
            conv_nd(dims, hint_channels, 16, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 16, 16, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 16, 32, 3, padding=1, stride=2),
            nn.SiLU(),
            conv_nd(dims, 32, 32, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 32, 96, 3, padding=1, stride=2),
            nn.SiLU(),
            conv_nd(dims, 96, 96, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 96, 256, 3, padding=1, stride=2),
            nn.SiLU(),
            zero_module(conv_nd(dims, 256, model_channels, 3, padding=1))
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        # num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self.zero_convs.append(self.make_zero_conv(ch))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                self.zero_convs.append(self.make_zero_conv(ch))
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            # num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self.middle_block_out = self.make_zero_conv(ch)
        self._feature_size += ch

    def make_zero_conv(self, channels):
        return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0)))

    def forward(self, x, hint, timesteps, context, **kwargs):
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        guided_hint = self.input_hint_block(hint, emb, context)

        outs = []

        h = x.type(self.dtype)
        for module, zero_conv in zip(self.input_blocks, self.zero_convs):
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            outs.append(zero_conv(h, emb, context))

        h = self.middle_block(h, emb, context)
        outs.append(self.middle_block_out(h, emb, context))

        return outs


class ControlLDM(LatentDiffusion):

    def __init__(self,
                 control_stage_config,
                 control_key, only_mid_control,
                 learnable_conscale=False, guess_mode=False,
                 sd_locked=True, sep_lr=False, decoder_lr=1e-4,
                 sep_cond_txt=True, exchange_cond_txt=False, concat_all_textemb=False,
                 *args, **kwargs
                 ):
        use_ema = kwargs.pop("use_ema", False)
        ckpt_path = kwargs.pop("ckpt_path", None)
        reset_ema = kwargs.pop("reset_ema", False)
        only_model = kwargs.pop("only_model", False)
        reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
        keep_num_ema_updates = kwargs.pop("keep_num_ema_updates", False)
        ignore_keys = kwargs.pop("ignore_keys", [])

        super().__init__(*args, use_ema=False, **kwargs)

        # Glyph ControlNet
        self.control_model = instantiate_from_config(control_stage_config)
        self.control_key = control_key
        self.only_mid_control = only_mid_control

        self.learnable_conscale = learnable_conscale
        conscale_init = [1.0] * 13 if not guess_mode else [(0.825 ** float(12 - i)) for i in range(13)]
        if learnable_conscale:
            # self.control_scales = nn.Parameter(torch.ones(13), requires_grad=True)
            self.control_scales = nn.Parameter(torch.Tensor(conscale_init), requires_grad=True)
        else:
            self.control_scales = conscale_init  # [1.0] * 13

        self.optimizer = torch.optim.AdamW
        # whether to unlock (fine-tune) the decoder parts of SD U-Net
        self.sd_locked = sd_locked
        self.sep_lr = sep_lr
        self.decoder_lr = decoder_lr

        # specify the input text embedding of two branches (SD branch and Glyph ControlNet branch)
        self.sep_cond_txt = sep_cond_txt
        self.concat_all_textemb = concat_all_textemb
        self.exchange_cond_txt = exchange_cond_txt

        # ema
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.control_model, init_num_updates=0)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
            if not self.sd_locked:
                self.model_diffoutblock_ema = LitEma(self.model.diffusion_model.output_blocks, init_num_updates=0)
                print(f"Keeping diffoutblock EMAs of {len(list(self.model_diffoutblock_ema.buffers()))}.")
                self.model_diffout_ema = LitEma(self.model.diffusion_model.out, init_num_updates=0)
                print(f"Keeping diffout EMAs of {len(list(self.model_diffout_ema.buffers()))}.")

        # initialize the model from the checkpoint
        if ckpt_path is not None:
            ema_num_updates = self.init_from_ckpt(ckpt_path, ignore_keys, only_model=only_model)
            self.restarted_from_ckpt = True
            if self.use_ema and reset_ema:
                print(
                    f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
                self.model_ema = LitEma(self.control_model, init_num_updates=ema_num_updates if keep_num_ema_updates else 0)
                if not self.sd_locked:
                    self.model_diffoutblock_ema = LitEma(self.model.diffusion_model.output_blocks, init_num_updates=ema_num_updates if keep_num_ema_updates else 0)
                    self.model_diffout_ema = LitEma(self.model.diffusion_model.out, init_num_updates=ema_num_updates if keep_num_ema_updates else 0)

        if reset_num_ema_updates:
            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
            assert self.use_ema
            self.model_ema.reset_num_updates()
            if not self.sd_locked:  # Update
                self.model_diffoutblock_ema.reset_num_updates()
                self.model_diffout_ema.reset_num_updates()

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:  # TODO: fix the bug while adding transemb_model or trainable control scales
            self.model_ema.store(self.control_model.parameters())
            self.model_ema.copy_to(self.control_model)
            if not self.sd_locked:  # Update
                self.model_diffoutblock_ema.store(self.model.diffusion_model.output_blocks.parameters())
                self.model_diffoutblock_ema.copy_to(self.model.diffusion_model.output_blocks)
                self.model_diffout_ema.store(self.model.diffusion_model.out.parameters())
                self.model_diffout_ema.copy_to(self.model.diffusion_model.out)

            if context is not None:
                print(f"{context}: Switched ControlNet to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.control_model.parameters())
                if not self.sd_locked:  # Update
                    self.model_diffoutblock_ema.restore(self.model.diffusion_model.output_blocks.parameters())
                    self.model_diffout_ema.restore(self.model.diffusion_model.out.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights of ControlNet")

    @torch.no_grad()
    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):

        if path.endswith("model_states.pt"):
            sd = torch.load(path, map_location='cpu')["module"]
        else:
            # sd = load_state_dict(path, location='cpu')  # abandoned
            sd = torch.load(path, map_location="cpu")
            if "state_dict" in list(sd.keys()):
                sd = sd["state_dict"]

        keys_ = list(sd.keys())[:]
        for k in keys_:
            if k.startswith("module."):
                nk = k[7:]
                sd[nk] = sd[k]
                del sd[k]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)

        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys:\n {missing}")
        if len(unexpected) > 0:
            print(f"\nUnexpected Keys:\n {unexpected}")

        if "model_ema.num_updates" in sd and "model_ema.num_updates" not in unexpected:
            return sd["model_ema.num_updates"].item()
        else:
            return 0

    @torch.no_grad()
    def get_input(self, batch, k, bs=None, *args, **kwargs):
        x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs)
        control = batch[self.control_key]
        if bs is not None:
            control = control[:bs]
        control = control.to(self.device)
        control = einops.rearrange(control, 'b h w c -> b c h w')
        control = control.to(memory_format=torch.contiguous_format).float()
        return x, dict(c_crossattn=[c] if not isinstance(c, list) else c, c_concat=[control])

    def apply_model(self, x_noisy, t, cond, *args, **kwargs):
        assert isinstance(cond, dict)
        diffusion_model = self.model.diffusion_model
        cond_txt_list = cond["c_crossattn"]

        assert len(cond_txt_list) > 0
        # cond_txt: input text embedding of the pretrained SD branch
        # cond_txt_2: input text embedding of the Glyph ControlNet branch
        cond_txt = cond_txt_list[0]
        if len(cond_txt_list) == 1:
            cond_txt_2 = None
        else:
            if self.sep_cond_txt:
                # use each embedding for each branch separately
                cond_txt_2 = cond_txt_list[1]
            else:
                # concat the embedding for Glyph ControlNet branch
                if not self.concat_all_textemb:
                    cond_txt_2 = torch.cat(cond_txt_list[1:], 1)
                else:
                    cond_txt_2 = torch.cat(cond_txt_list, 1)

        if self.exchange_cond_txt:
            # exchange the input text embedding of two branches
            txt_buffer = cond_txt
            cond_txt = cond_txt_2
            cond_txt_2 = txt_buffer

        if cond['c_concat'] is None:
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)
        else:
            control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt if cond_txt_2 is None else cond_txt_2)
            control = [c * scale for c, scale in zip(control, self.control_scales)]
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)

        return eps

    @torch.no_grad()
    def get_unconditional_conditioning(self, N):
        return self.get_learned_conditioning([""] * N)

    def training_step(self, batch, batch_idx, optimizer_idx=0):
        loss = super().training_step(batch, batch_idx, optimizer_idx)
        if self.use_scheduler and not self.sd_locked and self.sep_lr:
            decoder_lr = self.optimizers().param_groups[1]["lr"]
            self.log('decoder_lr_abs', decoder_lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
        return loss

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.control_model.parameters())
        if self.learnable_conscale:
            params += [self.control_scales]

        params_wlr = []
        decoder_params = None
        if not self.sd_locked:
            decoder_params = list(self.model.diffusion_model.output_blocks.parameters())
            decoder_params += list(self.model.diffusion_model.out.parameters())
            if not self.sep_lr:
                params.extend(decoder_params)
                decoder_params = None

        params_wlr.append({"params": params, "lr": lr})
        if decoder_params is not None:
            params_wlr.append({"params": decoder_params, "lr": self.decoder_lr})

        # opt = torch.optim.AdamW(params_wlr)
        opt = self.optimizer(params_wlr)
        opts = [opt]

        # updated
        schedulers = []
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler_func = instantiate_from_config(self.scheduler_config)
            print("Setting up LambdaLR scheduler...")
            schedulers = [
                {
                    'scheduler': LambdaLR(
                        opt,
                        lr_lambda=[scheduler_func.schedule] * len(params_wlr)  # if not self.sep_lr else [scheduler_func.schedule, scheduler_func.schedule]
                    ),
                    'interval': 'step',
                    'frequency': 1
                }]

        return opts, schedulers

    def low_vram_shift(self, is_diffusing):
        if is_diffusing:
            self.model = self.model.cuda()
            self.control_model = self.control_model.cuda()
            self.first_stage_model = self.first_stage_model.cpu()
            self.cond_stage_model = self.cond_stage_model.cpu()
        else:
            self.model = self.model.cpu()
            self.control_model = self.control_model.cpu()
            self.first_stage_model = self.first_stage_model.cuda()
            self.cond_stage_model = self.cond_stage_model.cuda()

    # ema
    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.control_model)
            if not self.sd_locked:  # Update
                self.model_diffoutblock_ema(self.model.diffusion_model.output_blocks)
                self.model_diffout_ema(self.model.diffusion_model.out)
        if self.log_all_grad_norm:
            zeroconvs = list(self.control_model.input_hint_block.named_parameters())[-2:]
            zeroconvs.extend(
                list(self.control_model.zero_convs.named_parameters())
            )
            for item in zeroconvs:
                self.log(
                    "zero_convs/{}_norm".format(item[0]),
                    item[1].cpu().detach().norm().item(),
                    prog_bar=False, logger=True, on_step=True, on_epoch=False
                )
                self.log(
                    "zero_convs/{}_max".format(item[0]),
                    torch.max(item[1].cpu().detach()).item(),  # TODO: lack torch.abs
                    prog_bar=False, logger=True, on_step=True, on_epoch=False
                )
            gradnorm_list = []
            for param_group in self.trainer.optimizers[0].param_groups:
                for p in param_group['params']:
                    # assert p.requires_grad and p.grad is not None
                    if p.requires_grad and p.grad is not None:
                        grad_norm_v = p.grad.cpu().detach().norm().item()
                        gradnorm_list.append(grad_norm_v)
            if len(gradnorm_list):
                self.log("all_gradients/grad_norm_mean",
                         np.mean(gradnorm_list),
                         prog_bar=False, logger=True, on_step=True, on_epoch=False
                         )
                self.log("all_gradients/grad_norm_max",
                         np.max(gradnorm_list),
                         prog_bar=False, logger=True, on_step=True, on_epoch=False
                         )
                self.log("all_gradients/grad_norm_min",
                         np.min(gradnorm_list),
                         prog_bar=False, logger=True, on_step=True, on_epoch=False
                         )
                self.log("all_gradients/param_num",
                         len(gradnorm_list),
                         prog_bar=False, logger=True, on_step=True, on_epoch=False
                         )

            if self.learnable_conscale:
                for i in range(len(self.control_scales)):
                    self.log(
                        "control_scale/control_{}".format(i),
                        self.control_scales[i],
                        prog_bar=False, logger=True, on_step=True, on_epoch=False
                    )
            del gradnorm_list
            del zeroconvs
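
For context, the heart of the control path above is small: each ControlNet output is scaled by an entry of control_scales and injected into the matching U-Net skip connection (see ControlledUnetModel.forward and apply_model). A toy sketch of just that injection pattern, with stand-in tensors and a channel slice standing in for the decoder block (shapes and the "projection" are invented for illustration, not the real feature sizes):

import torch

hs = [torch.randn(2, 4, 8, 8) for _ in range(3)]       # stand-in U-Net skip features
control = [torch.randn(2, 4, 8, 8) for _ in range(3)]  # stand-in ControlNet outputs
scales = [1.0] * 3                                      # cf. self.control_scales

control = [c * s for c, s in zip(control, scales)]
h = torch.randn(2, 4, 8, 8)                             # stand-in decoder feature
while hs:
    skip = hs.pop() + control.pop()                     # inject control into the skip
    h = torch.cat([h, skip], dim=1)[:, :4]              # cat, then toy 'decoder block'
print(h.shape)                                          # torch.Size([2, 4, 8, 8])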
spaces/AbelKidane/headdetector/predict_image.py
DELETED
@@ -1,16 +0,0 @@
from prediction import prediction
import matplotlib.pyplot as plt
import fire

def predictFromTerminal(image_path):
    annotatedImage = prediction(image_path)
    plt.imshow(annotatedImage)
    plt.grid(False)
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    print("Starting execution:")
    fire.Fire(predictFromTerminal)
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/0.js
DELETED
@@ -1 +0,0 @@
export { default as component } from "../../../../src/routes/+layout.svelte";
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Forefront.py
DELETED
@@ -1,40 +0,0 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Forefront(BaseProvider):
    url = "https://forefront.com"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        json_data = {
            "text"          : messages[-1]["content"],
            "action"        : "noauth",
            "id"            : "",
            "parentId"      : "",
            "workspaceId"   : "",
            "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
            "model"         : "gpt-4",
            "messages"      : messages[:-1] if len(messages) > 1 else [],
            "internetMode"  : "auto",
        }

        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
                                 json=json_data, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"delta" in token:
                yield json.loads(token.decode().split("data: ")[1])["delta"]
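
For context, providers in this codebase are normally driven as streaming generators. A minimal usage sketch (the message payload is hypothetical, and the upstream endpoint this provider targets may no longer respond):

messages = [{"role": "user", "content": "Hello"}]
for delta in Forefront.create_completion(model="gpt-4", messages=messages, stream=True):
    print(delta, end="", flush=True)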
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/charactercache-plugin.d.ts
DELETED
@@ -1,9 +0,0 @@
import CharacterCache from './charactercache';

export default class CharacterCachePlugin extends Phaser.Plugins.BasePlugin {
    add(
        scene: Phaser.Scene,
        config: CharacterCache.IConfig
    ): CharacterCache;
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customshapes/Factory.js
DELETED
@@ -1,13 +0,0 @@
-import CustomShapes from './CustomShapes.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('customShapes', function (x, y, width, height, config) {
-    var gameObject = new CustomShapes(this.scene, x, y, width, height, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.CustomShapes', CustomShapes);
-
-export default CustomShapes;
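
The file follows rex-ui's register-then-construct convention: a string key is bound to a creator function, and callers build objects by name. A language-agnostic sketch of that registry idea in Python (names and the dict-based "game object" are illustrative, not the plugin's API):

```python
# Minimal name-to-creator registry, mirroring ObjectFactory.register above.
_registry = {}

def register(name, creator):
    _registry[name] = creator

def create(name, *args, **kwargs):
    return _registry[name](*args, **kwargs)

register("customShapes", lambda x, y, w, h: {"type": "customShapes", "rect": (x, y, w, h)})
print(create("customShapes", 0, 0, 100, 50))
```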
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenHeight.js
DELETED
@@ -1,49 +0,0 @@
-import { GetDisplayHeight } from '../../../plugins/utils/size/GetDisplaySize.js';
-import Sum from '../../../plugins/utils/math/Sum.js';
-
-var GetChildrenHeight = function (minimumMode) {
-    if (this.rexSizer.hidden) {
-        return 0;
-    }
-
-    if (minimumMode === undefined) {
-        minimumMode = true;
-    }
-
-    var result = 0,
-        rowHeight;
-    var children = this.sizerChildren;
-    var child, padding, childHeight, proportion;
-
-    for (var i = 0; i < this.rowCount; i++) {
-        proportion = this.rowProportions[i];
-        rowHeight = 0;
-        if ((proportion === 0) || minimumMode) {
-            for (var j = 0; j < this.columnCount; j++) {
-                child = children[(i * this.columnCount) + j];
-                if (!child) {
-                    continue;
-                }
-                if (child.rexSizer.hidden) {
-                    continue;
-                }
-
-                childHeight = (child.isRexSizer) ?
-                    Math.max(child.minHeight, child.childrenHeight) :
-                    ((child.hasOwnProperty('minHeight')) ? child.minHeight : GetDisplayHeight(child));
-                padding = child.rexSizer.padding;
-                childHeight += (padding.top + padding.bottom);
-                rowHeight = Math.max(rowHeight, childHeight);
-            }
-            result += rowHeight;
-        }
-        // else (proportion > 0): rowHeight stays 0
-        this.rowHeight[i] = rowHeight;
-    }
-
-    var space = this.space;
-    var indentTop = Math.max(space.indentTopOdd, space.indentTopEven);
-    return result + Sum(space.top, indentTop, ...space.row, space.bottom);
-}
-
-export default GetChildrenHeight;
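
The height pass above has a simple shape: rows with `proportion == 0` (or all rows, in minimum mode) take the maximum padded child height in that row, proportional rows contribute nothing here and are stretched later, and spacing is added once at the end. A simplified numeric sketch of that rule, stripped of the plugin's sizer objects (the flat `rows`/`spacing` inputs are illustrative):

```python
def children_height(rows, row_proportions, spacing, minimum_mode=True):
    # rows: per-row lists of child heights; proportional rows are skipped
    # unless minimum_mode forces every row to report its minimum height.
    total = 0
    for heights, proportion in zip(rows, row_proportions):
        if proportion == 0 or minimum_mode:
            total += max(heights, default=0)
    # One gap above, below, and between each pair of rows.
    return total + spacing * (len(rows) + 1)

print(children_height([[10, 24], [8]], [0, 0], spacing=4))  # 24 + 8 + 12 = 44
```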
spaces/Aki004/herta-so-vits/MANUAL.md
DELETED
@@ -1,158 +0,0 @@
-# Herta Voice Changer
-
-## Introduction
-
-This AI model is based on **SoftVC VITS Singing Voice Conversion**. Refer to this [Github Repository](https://github.com/svc-develop-team/so-vits-svc/tree/4.0) from the 4.0 branch. This model was inspired by [Herta](https://honkai-star-rail.fandom.com/wiki/Herta) from [Honkai Star Rail](https://hsr.hoyoverse.com/en-us/). This model can be used to convert the original voice from an audio file into this character's voice.
-
-## How to Prepare Audio Files
-
-Your audio files should be `shorter than 10 seconds`, have no `BGM`, and have a sampling rate of `44100 Hz`.
-
-1. Create a new folder inside the `dataset_raw` folder (this folder name will be your `SpeakerID`).
-2. Put your audio files into the folder you created above.
-
-### Note:
-
-1. Your audio files should be in `.wav` format.
-2. If your audio files are longer than 10 seconds, I suggest you trim them down using your desired software or [audio slicer GUI](https://github.com/flutydeer/audio-slicer).
-3. If your audio files have **BGM**, please remove it using a program such as [Ultimate Vocal Remover](https://ultimatevocalremover.com/). The `3_HP-Vocal-UVR.pth` or `UVR-MDX-NET Main` model is recommended.
-4. If your audio files have a sampling rate different from 44100 Hz, I suggest you resample them using [Audacity](https://www.audacityteam.org/) or by running `python resample.py` in your `CMD`.
-
-## How to Build Locally
-
-1. Clone the repository from the 4.0 branch: `git clone https://github.com/svc-develop-team/so-vits-svc.git`
-2. Put your `prepared audio` into the `dataset_raw` folder.
-3. Open your **Command Line** and install the `so-vits-svc` library: `pip install -U so-vits-svc-fork`
-4. Navigate to your project directory using the **Command Line**.
-5. Run `svc pre-resample` in your prompt.
-6. After completing the step above, run `svc pre-config`.
-7. After completing the step above, run `svc pre-hubert` **(this step may take a while)**.
-8. After completing the step above, run `svc train -t` **(this step will take a while based on your `GPU` and the number of `epochs` you want)**.
-
-### How to Change Epoch Value Locally
-The `epoch` value is the number of training iterations for your model. **Example: if you set the epoch value to 10000, your model will take 10000 steps to finish** (the default epoch value is 10000). To change your `epoch value`:
-
-1. Go to your project folder.
-2. Find the folder named `config`.
-3. Inside that folder, you should see `config.json`.
-4. In `config.json`, there should be a section that looks like this:
-
-```json
-"train": {
-    "log_interval": 200,
-    "eval_interval": 800,
-    "seed": 1234,
-    "epochs": <PUT YOUR VALUE HERE>,
-    "learning_rate": 0.0001,
-    "betas": [0.8, 0.99]
-}
-```
-
-This can be done after `svc pre-config` has already finished.
-
-
-### How to Perform Inference Locally
-To perform inference locally, navigate to the project directory, create a Python file, and copy the following lines of code:
-
-```python
-your_audio_file = 'your_audio.wav'
-
-audio, sr = librosa.load(your_audio_file, sr=16000, mono=True)
-raw_path = io.BytesIO()
-soundfile.write(raw_path, audio, 16000, format='wav')
-raw_path.seek(0)
-
-model = Svc('logs/44k/your_model.pth', 'logs/44k/config.json')
-
-out_audio, out_sr = model.infer('<YOUR SPEAKER ID>', 0, raw_path, auto_predict_f0=True)
-soundfile.write('out_audio.wav', out_audio.cpu().numpy(), 44100)
-```
-
-The output file, `out_audio.wav`, will be written to the directory you run the script from.
-
-## How to Build in Google Colab
-
-Refer to [My Google Colab](https://colab.research.google.com/drive/1V91RM-2xzSqbmTIlaEzWZovca8stErk0?authuser=3#scrollTo=hhJ2MG1i1vfl) or the [Official Google Colab](https://colab.research.google.com/github/34j/so-vits-svc-fork/blob/main/notebooks/so-vits-svc-fork-4.0.ipynb) for the steps.
-
-### Google Drive Setup
-
-1. Create an empty folder (this will be your project folder).
-2. Inside the project folder, create a folder named `dataset_raw`.
-3. Create another folder inside `dataset_raw` (this folder name will be your `SpeakerID`).
-4. Upload your prepared audio files into the folder created in the previous step.
-
-### Google Colab Setup
-
-1. Mount your Google Drive:
-```python
-from google.colab import drive
-drive.mount('/content/drive')
-```
-
-2. Install dependencies:
-```python
-!python -m pip install -U pip setuptools wheel
-%pip install -U ipython
-%pip install -U torch torchaudio --index-url https://download.pytorch.org/whl/cu118
-```
-
-3. Install the `so-vits-svc` library:
-`%pip install -U so-vits-svc-fork`
-
-4. Resample your audio files:
-`!svc pre-resample`
-
-5. Pre-config:
-`!svc pre-config`
-
-6. Pre-hubert (this step may take a while):
-`!svc pre-hubert`
-
-7. Train your model (this step will take a while based on your Google Colab GPU and the number of epochs you want):
-`!svc train -t`
-
-### How to Change Epoch Value in Google Colab
-
-The term "epoch" refers to the number of times you want to train your model. For example, if you set the epoch value to 10,000, your model will take 10,000 steps to complete (the default epoch value is 10,000).
-
-To change the epoch value:
-
-1. Go to your project folder.
-2. Find the folder named `config`.
-3. Inside that folder, you should see `config.json`.
-4. In `config.json`, there should be a section that looks like this:
-
-```json
-"train": {
-    "log_interval": 200,
-    "eval_interval": 800,
-    "seed": 1234,
-    "epochs": <PUT YOUR VALUE HERE>,
-    "learning_rate": 0.0001,
-    "betas": [0.8, 0.99]
-}
-```
-
-This can be done after `svc pre-config` has already finished.
-
-
-### How to Perform Inference in Google Colab
-
-After training your model, you can use it to convert any original voice to your model voice by running the following command:
-
-```shell
-!svc infer drive/MyDrive/your_model_name/your_audio_file.wav --model-path drive/MyDrive/your_model_name/logs/44k/your_model.pth --config-path drive/MyDrive/your_model_name/logs/44k/your_config.json
-```
-The output file will be named `your_audio_file.out.wav`.
-
-### Note:
-
-1. Your Google Drive must have at least 5 GB of free space. If you don't have enough space, consider registering a new Google account.
-2. Google Colab's Free Subscription is sufficient, but using the Pro version is recommended.
-3. Set your Google Colab Hardware Accelerator to `GPU`.
-
-## Credits
-
-1. [zomehwh/sovits-models](https://huggingface.co/spaces/zomehwh/sovits-models) from Hugging Face Space
-2. [svc-develop-team/so-vits-svc](https://github.com/svc-develop-team/so-vits-svc) from GitHub repository
-3. [voicepaw/so-vits-svc-fork](https://github.com/voicepaw/so-vits-svc-fork) from GitHub repository
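
The local inference snippet in the manual above assumes several imports it never states. If you reconstruct it, a header roughly like the following is needed; the `Svc` import path is an assumption based on the so-vits-svc 4.0 repo layout (`inference/infer_tool.py`), not something stated in the deleted file:

```python
import io

import librosa
import soundfile
# Assumed: in the so-vits-svc 4.0 repo, the Svc wrapper lives in inference/infer_tool.py.
from inference.infer_tool import Svc
```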
spaces/AlexMaoMao/ostris-ikea-instructions-lora-sdxl/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/ostris/ikea-instructions-lora-sdxl").launch()
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/backbones/mobilefacenet.py
DELETED
@@ -1,130 +0,0 @@
-'''
-Adapted from https://github.com/cavalleria/cavaface.pytorch/blob/master/backbone/mobilefacenet.py
-Original author cavalleria
-'''
-
-import torch.nn as nn
-from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Sequential, Module
-import torch
-
-
-class Flatten(Module):
-    def forward(self, x):
-        return x.view(x.size(0), -1)
-
-
-class ConvBlock(Module):
-    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
-        super(ConvBlock, self).__init__()
-        self.layers = nn.Sequential(
-            Conv2d(in_c, out_c, kernel, groups=groups, stride=stride, padding=padding, bias=False),
-            BatchNorm2d(num_features=out_c),
-            PReLU(num_parameters=out_c)
-        )
-
-    def forward(self, x):
-        return self.layers(x)
-
-
-class LinearBlock(Module):
-    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
-        super(LinearBlock, self).__init__()
-        self.layers = nn.Sequential(
-            Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False),
-            BatchNorm2d(num_features=out_c)
-        )
-
-    def forward(self, x):
-        return self.layers(x)
-
-
-class DepthWise(Module):
-    def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
-        super(DepthWise, self).__init__()
-        self.residual = residual
-        self.layers = nn.Sequential(
-            ConvBlock(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)),
-            ConvBlock(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride),
-            LinearBlock(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
-        )
-
-    def forward(self, x):
-        short_cut = None
-        if self.residual:
-            short_cut = x
-        x = self.layers(x)
-        if self.residual:
-            output = short_cut + x
-        else:
-            output = x
-        return output
-
-
-class Residual(Module):
-    def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
-        super(Residual, self).__init__()
-        modules = []
-        for _ in range(num_block):
-            modules.append(DepthWise(c, c, True, kernel, stride, padding, groups))
-        self.layers = Sequential(*modules)
-
-    def forward(self, x):
-        return self.layers(x)
-
-
-class GDC(Module):
-    def __init__(self, embedding_size):
-        super(GDC, self).__init__()
-        self.layers = nn.Sequential(
-            LinearBlock(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0)),
-            Flatten(),
-            Linear(512, embedding_size, bias=False),
-            BatchNorm1d(embedding_size))
-
-    def forward(self, x):
-        return self.layers(x)
-
-
-class MobileFaceNet(Module):
-    def __init__(self, fp16=False, num_features=512):
-        super(MobileFaceNet, self).__init__()
-        scale = 2
-        self.fp16 = fp16
-        self.layers = nn.Sequential(
-            ConvBlock(3, 64 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1)),
-            ConvBlock(64 * scale, 64 * scale, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64),
-            DepthWise(64 * scale, 64 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128),
-            Residual(64 * scale, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
-            DepthWise(64 * scale, 128 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256),
-            Residual(128 * scale, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
-            DepthWise(128 * scale, 128 * scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512),
-            Residual(128 * scale, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)),
-        )
-        self.conv_sep = ConvBlock(128 * scale, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
-        self.features = GDC(num_features)
-        self._initialize_weights()
-
-    def _initialize_weights(self):
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-                if m.bias is not None:
-                    m.bias.data.zero_()
-            elif isinstance(m, nn.BatchNorm2d):
-                m.weight.data.fill_(1)
-                m.bias.data.zero_()
-            elif isinstance(m, nn.Linear):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-                if m.bias is not None:
-                    m.bias.data.zero_()
-
-    def forward(self, x):
-        with torch.cuda.amp.autocast(self.fp16):
-            x = self.layers(x)
-        x = self.conv_sep(x.float() if self.fp16 else x)
-        x = self.features(x)
-        return x
-
-
-def get_mbf(fp16, num_features):
-    return MobileFaceNet(fp16, num_features)
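
If the deleted module were still importable, a smoke test of the backbone would look roughly like this. The 112x112 input size follows the ArcFace crop convention used by this repo family; this is a sketch under that assumption, not code from the original file:

```python
import torch

# get_mbf is the factory defined at the bottom of the deleted module.
net = get_mbf(fp16=False, num_features=512)
net.eval()
with torch.no_grad():
    emb = net(torch.randn(1, 3, 112, 112))  # one fake 112x112 RGB face crop
print(emb.shape)  # torch.Size([1, 512]) -- the face embedding
```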
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/parameter_attention.tex
DELETED
@@ -1,45 +0,0 @@
-\pagebreak
-\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}
-
-In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):
-
-\begin{align*}
-    FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
-    A(q, K, V) = Softmax(qK^T)V
-\end{align*}
-
-Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.
-
-%the compatability function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK^T)_i$.
-
-Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.
-
-In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.
-
-In our second experiment, we used $h_p=8$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.
-
-Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.
-
-\begin{table}[h]
-\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
-\label{tab:parameter_attention}
-\begin{center}
-\vspace{-2mm}
-%\scalebox{1.0}{
-\begin{tabular}{c|cccccc|cccc}
-\hline\rule{0pt}{2.0ex}
- & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
-\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
-\multirow{2}{*}{$n_p$} &
-PPL & BLEU & params & training\\
- & & & & & & & (dev) & (dev) & $\times10^6$ & time \\
-\hline\rule{0pt}{2.0ex}
-base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
-\hline\rule{0pt}{2.0ex}
-AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92 & 25.5 & 65 & 16 hours\\
-AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
-\hline
-\end{tabular}
-%}
-\end{center}
-\end{table}
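
The equivalence this appendix draws is literal: `ReLU(x W_1) W_2` is one matrix of "keys" ($W_1$, acting on $x$), one matrix of "values" ($W_2$), and ReLU in place of softmax as the compatibility function. A tiny NumPy check of that reading (dimensions are arbitrary; this only restates the two formulas above, not the paper's multi-head variant):

```python
import numpy as np

rng = np.random.default_rng(0)
d_model, d_ff = 8, 32
x = rng.normal(size=(1, d_model))
W1 = rng.normal(size=(d_model, d_ff))    # each column of W1 acts as a "key"
W2 = rng.normal(size=(d_ff, d_model))    # each row of W2 acts as a "value"

ffn = np.maximum(x @ W1, 0.0) @ W2       # FFN(x, W1, W2) = ReLU(x W1) W2
weights = np.maximum(x @ W1, 0.0)        # ReLU compatibilities, one per key
attention_view = weights @ W2            # weighted sum of the "values"
print(np.allclose(ffn, attention_view))  # True: same computation, two readings
```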
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/deepfloyd_if.md
DELETED
@@ -1,523 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# DeepFloyd IF
-
-## Overview
-
-DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
-The model is modular, composed of a frozen text encoder and three cascaded pixel diffusion modules:
-- Stage 1: a base model that generates a 64x64 px image based on a text prompt,
-- Stage 2: a 64x64 px => 256x256 px super-resolution model, and
-- Stage 3: a 256x256 px => 1024x1024 px super-resolution model
-Stage 1 and Stage 2 utilize a frozen text encoder based on the T5 transformer to extract text embeddings,
-which are then fed into a UNet architecture enhanced with cross-attention and attention pooling.
-Stage 3 is [Stability's x4 Upscaling model](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler).
-The result is a highly efficient model that outperforms current state-of-the-art models, achieving a zero-shot FID score of 6.66 on the COCO dataset.
-Our work underscores the potential of larger UNet architectures in the first stage of cascaded diffusion models and depicts a promising future for text-to-image synthesis.
-
-## Usage
-
-Before you can use IF, you need to accept its usage conditions. To do so:
-1. Make sure to have a [Hugging Face account](https://huggingface.co/join) and be logged in
-2. Accept the license on the model card of [DeepFloyd/IF-I-XL-v1.0](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0). Accepting the license on the stage I model card will auto accept for the other IF models.
-3. Make sure to login locally. Install `huggingface_hub`
-```sh
-pip install huggingface_hub --upgrade
-```
-
-run the login function in a Python shell
-
-```py
-from huggingface_hub import login
-
-login()
-```
-
-and enter your [Hugging Face Hub access token](https://huggingface.co/docs/hub/security-tokens#what-are-user-access-tokens).
-
-Next we install `diffusers` and dependencies:
-
-```sh
-pip install diffusers accelerate transformers safetensors
-```
-
-The following sections give more detailed examples of how to use IF. Specifically:
-
-- [Text-to-Image Generation](#text-to-image-generation)
-- [Image-to-Image Generation](#text-guided-image-to-image-generation)
-- [Inpainting](#text-guided-inpainting-generation)
-- [Reusing model weights](#converting-between-different-pipelines)
-- [Speed optimization](#optimizing-for-speed)
-- [Memory optimization](#optimizing-for-memory)
-
-**Available checkpoints**
-- *Stage-1*
-  - [DeepFloyd/IF-I-XL-v1.0](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0)
-  - [DeepFloyd/IF-I-L-v1.0](https://huggingface.co/DeepFloyd/IF-I-L-v1.0)
-  - [DeepFloyd/IF-I-M-v1.0](https://huggingface.co/DeepFloyd/IF-I-M-v1.0)
-
-- *Stage-2*
-  - [DeepFloyd/IF-II-L-v1.0](https://huggingface.co/DeepFloyd/IF-II-L-v1.0)
-  - [DeepFloyd/IF-II-M-v1.0](https://huggingface.co/DeepFloyd/IF-II-M-v1.0)
-
-- *Stage-3*
-  - [stabilityai/stable-diffusion-x4-upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler)
-
-**Demo**
-https://huggingface.co/spaces/DeepFloyd/IF
-
-**Google Colab**
-https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/deepfloyd_if_free_tier_google_colab.ipynb
-
-### Text-to-Image Generation
-
-By default diffusers makes use of [model cpu offloading](https://huggingface.co/docs/diffusers/optimization/fp16#model-offloading-for-fast-inference-and-memory-savings)
-to run the whole IF pipeline with as little as 14 GB of VRAM.
-
-```python
-from diffusers import DiffusionPipeline
-from diffusers.utils import pt_to_pil
-import torch
-
-# stage 1
-stage_1 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-stage_1.enable_model_cpu_offload()
-
-# stage 2
-stage_2 = DiffusionPipeline.from_pretrained(
-    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
-)
-stage_2.enable_model_cpu_offload()
-
-# stage 3
-safety_modules = {
-    "feature_extractor": stage_1.feature_extractor,
-    "safety_checker": stage_1.safety_checker,
-    "watermarker": stage_1.watermarker,
-}
-stage_3 = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
-)
-stage_3.enable_model_cpu_offload()
-
-prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
-generator = torch.manual_seed(1)
-
-# text embeds
-prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
-
-# stage 1
-image = stage_1(
-    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type="pt"
-).images
-pt_to_pil(image)[0].save("./if_stage_I.png")
-
-# stage 2
-image = stage_2(
-    image=image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    generator=generator,
-    output_type="pt",
-).images
-pt_to_pil(image)[0].save("./if_stage_II.png")
-
-# stage 3
-image = stage_3(prompt=prompt, image=image, noise_level=100, generator=generator).images
-image[0].save("./if_stage_III.png")
-```
-
-### Text Guided Image-to-Image Generation
-
-The same IF model weights can be used for text-guided image-to-image translation or image variation.
-In this case just make sure to load the weights using the [`IFImg2ImgPipeline`] and [`IFImg2ImgSuperResolutionPipeline`] pipelines.
-
-**Note**: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines
-without loading them twice by making use of the [`~DiffusionPipeline.components()`] function as explained [here](#converting-between-different-pipelines).
-
-```python
-from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
-from diffusers.utils import pt_to_pil
-
-import torch
-
-from PIL import Image
-import requests
-from io import BytesIO
-
-# download image
-url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-response = requests.get(url)
-original_image = Image.open(BytesIO(response.content)).convert("RGB")
-original_image = original_image.resize((768, 512))
-
-# stage 1
-stage_1 = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-stage_1.enable_model_cpu_offload()
-
-# stage 2
-stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained(
-    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
-)
-stage_2.enable_model_cpu_offload()
-
-# stage 3
-safety_modules = {
-    "feature_extractor": stage_1.feature_extractor,
-    "safety_checker": stage_1.safety_checker,
-    "watermarker": stage_1.watermarker,
-}
-stage_3 = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
-)
-stage_3.enable_model_cpu_offload()
-
-prompt = "A fantasy landscape in style minecraft"
-generator = torch.manual_seed(1)
-
-# text embeds
-prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
-
-# stage 1
-image = stage_1(
-    image=original_image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    generator=generator,
-    output_type="pt",
-).images
-pt_to_pil(image)[0].save("./if_stage_I.png")
-
-# stage 2
-image = stage_2(
-    image=image,
-    original_image=original_image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    generator=generator,
-    output_type="pt",
-).images
-pt_to_pil(image)[0].save("./if_stage_II.png")
-
-# stage 3
-image = stage_3(prompt=prompt, image=image, generator=generator, noise_level=100).images
-image[0].save("./if_stage_III.png")
-```
-
-### Text Guided Inpainting Generation
-
-The same IF model weights can be used for text-guided inpainting.
-In this case just make sure to load the weights using the [`IFInpaintingPipeline`] and [`IFInpaintingSuperResolutionPipeline`] pipelines.
-
-**Note**: You can also directly move the weights of the text-to-image pipelines to the inpainting pipelines
-without loading them twice by making use of the [`~DiffusionPipeline.components()`] function as explained [here](#converting-between-different-pipelines).
-
-```python
-from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
-from diffusers.utils import pt_to_pil
-import torch
-
-from PIL import Image
-import requests
-from io import BytesIO
-
-# download image
-url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"
-response = requests.get(url)
-original_image = Image.open(BytesIO(response.content)).convert("RGB")
-original_image = original_image
-
-# download mask
-url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"
-response = requests.get(url)
-mask_image = Image.open(BytesIO(response.content))
-mask_image = mask_image
-
-# stage 1
-stage_1 = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-stage_1.enable_model_cpu_offload()
-
-# stage 2
-stage_2 = IFInpaintingSuperResolutionPipeline.from_pretrained(
-    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
-)
-stage_2.enable_model_cpu_offload()
-
-# stage 3
-safety_modules = {
-    "feature_extractor": stage_1.feature_extractor,
-    "safety_checker": stage_1.safety_checker,
-    "watermarker": stage_1.watermarker,
-}
-stage_3 = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
-)
-stage_3.enable_model_cpu_offload()
-
-prompt = "blue sunglasses"
-generator = torch.manual_seed(1)
-
-# text embeds
-prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
-
-# stage 1
-image = stage_1(
-    image=original_image,
-    mask_image=mask_image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    generator=generator,
-    output_type="pt",
-).images
-pt_to_pil(image)[0].save("./if_stage_I.png")
-
-# stage 2
-image = stage_2(
-    image=image,
-    original_image=original_image,
-    mask_image=mask_image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    generator=generator,
-    output_type="pt",
-).images
-pt_to_pil(image)[0].save("./if_stage_II.png")
-
-# stage 3
-image = stage_3(prompt=prompt, image=image, generator=generator, noise_level=100).images
-image[0].save("./if_stage_III.png")
-```
-
-### Converting between different pipelines
-
-In addition to being loaded with `from_pretrained`, pipelines can also be loaded directly from each other.
-
-```python
-from diffusers import IFPipeline, IFSuperResolutionPipeline
-
-pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
-pipe_2 = IFSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0")
-
-
-from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline
-
-pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
-pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
-
-
-from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline
-
-pipe_1 = IFInpaintingPipeline(**pipe_1.components)
-pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
-```
-
-### Optimizing for speed
-
-The simplest optimization to run IF faster is to move all model components to the GPU.
-
-```py
-pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-pipe.to("cuda")
-```
-
-You can also run the diffusion process for a shorter number of timesteps.
-
-This can either be done with the `num_inference_steps` argument
-
-```py
-pipe("<prompt>", num_inference_steps=30)
-```
-
-Or with the `timesteps` argument
-
-```py
-from diffusers.pipelines.deepfloyd_if import fast27_timesteps
-
-pipe("<prompt>", timesteps=fast27_timesteps)
-```
-
-When doing image variation or inpainting, you can also decrease the number of timesteps
-with the strength argument. The strength argument is the amount of noise to add to
-the input image which also determines how many steps to run in the denoising process.
-A smaller number will vary the image less but run faster.
-
-```py
-pipe = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-pipe.to("cuda")
-
-image = pipe(image=image, prompt="<prompt>", strength=0.3).images
-```
-
-You can also use [`torch.compile`](../../optimization/torch2.0). Note that we have not exhaustively tested `torch.compile`
-with IF and it might not give expected results.
-
-```py
-import torch
-
-pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-pipe.to("cuda")
-
-pipe.text_encoder = torch.compile(pipe.text_encoder)
-pipe.unet = torch.compile(pipe.unet)
-```
-
-### Optimizing for memory
-
-When optimizing for GPU memory, we can use the standard diffusers cpu offloading APIs.
-
-Either the model based CPU offloading,
-
-```py
-pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-pipe.enable_model_cpu_offload()
-```
-
-or the more aggressive layer based CPU offloading.
-
-```py
-pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
-pipe.enable_sequential_cpu_offload()
-```
-
-Additionally, T5 can be loaded in 8bit precision
-
-```py
-from transformers import T5EncoderModel
-
-text_encoder = T5EncoderModel.from_pretrained(
-    "DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder", device_map="auto", load_in_8bit=True, variant="8bit"
-)
-
-from diffusers import DiffusionPipeline
-
-pipe = DiffusionPipeline.from_pretrained(
-    "DeepFloyd/IF-I-XL-v1.0",
-    text_encoder=text_encoder,  # pass the previously instantiated 8bit text encoder
-    unet=None,
-    device_map="auto",
-)
-
-prompt_embeds, negative_embeds = pipe.encode_prompt("<prompt>")
-```
-
-For CPU RAM constrained machines like google colab free tier where we can't load all
-model components to the CPU at once, we can manually only load the pipeline with
-the text encoder or unet when the respective model components are needed.
-
-```py
-from diffusers import IFPipeline, IFSuperResolutionPipeline
-import torch
-import gc
-from transformers import T5EncoderModel
-from diffusers.utils import pt_to_pil
-
-text_encoder = T5EncoderModel.from_pretrained(
-    "DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder", device_map="auto", load_in_8bit=True, variant="8bit"
-)
-
-# text to image
-
-pipe = DiffusionPipeline.from_pretrained(
-    "DeepFloyd/IF-I-XL-v1.0",
-    text_encoder=text_encoder,  # pass the previously instantiated 8bit text encoder
-    unet=None,
-    device_map="auto",
-)
-
-prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
-prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
-
-# Remove the pipeline so we can re-load the pipeline with the unet
-del text_encoder
-del pipe
-gc.collect()
-torch.cuda.empty_cache()
-
-pipe = IFPipeline.from_pretrained(
-    "DeepFloyd/IF-I-XL-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16, device_map="auto"
-)
-
-generator = torch.Generator().manual_seed(0)
-image = pipe(
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    output_type="pt",
-    generator=generator,
-).images
-
-pt_to_pil(image)[0].save("./if_stage_I.png")
-
-# Remove the pipeline so we can load the super-resolution pipeline
-del pipe
-gc.collect()
-torch.cuda.empty_cache()
-
-# First super resolution
-
-pipe = IFSuperResolutionPipeline.from_pretrained(
-    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16, device_map="auto"
-)
-
-generator = torch.Generator().manual_seed(0)
-image = pipe(
-    image=image,
-    prompt_embeds=prompt_embeds,
-    negative_prompt_embeds=negative_embeds,
-    output_type="pt",
-    generator=generator,
-).images
-
-pt_to_pil(image)[0].save("./if_stage_II.png")
-```
-
-
-## Available Pipelines:
-
-| Pipeline | Tasks | Colab |
-|---|---|:---:|
-| [pipeline_if.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py) | *Text-to-Image Generation* | - |
-| [pipeline_if_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py) | *Text-to-Image Generation* | - |
-| [pipeline_if_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py) | *Image-to-Image Generation* | - |
-| [pipeline_if_img2img_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py) | *Image-to-Image Generation* | - |
-| [pipeline_if_inpainting.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py) | *Inpainting* | - |
-| [pipeline_if_inpainting_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py) | *Inpainting* | - |
-
-## IFPipeline
-[[autodoc]] IFPipeline
-	- all
-	- __call__
-
-## IFSuperResolutionPipeline
-[[autodoc]] IFSuperResolutionPipeline
-	- all
-	- __call__
-
-## IFImg2ImgPipeline
-[[autodoc]] IFImg2ImgPipeline
-	- all
-	- __call__
-
-## IFImg2ImgSuperResolutionPipeline
-[[autodoc]] IFImg2ImgSuperResolutionPipeline
-	- all
-	- __call__
-
-## IFInpaintingPipeline
-[[autodoc]] IFInpaintingPipeline
-	- all
-	- __call__
-
-## IFInpaintingSuperResolutionPipeline
-[[autodoc]] IFInpaintingSuperResolutionPipeline
-	- all
-	- __call__
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/unconditional_training.md
DELETED
@@ -1,144 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Unconditional image generation
-
-Unconditional image generation, unlike text-to-image or image-to-image models, is not conditioned on text or images; it only generates images that resemble the training data distribution.
-
-<iframe
-    src="https://stevhliu-ddpm-butterflies-128.hf.space"
-    frameborder="0"
-    width="850"
-    height="550"
-></iframe>
-
-
-This guide explains how to train an unconditional image generation model on existing datasets as well as on your own custom dataset. All training scripts for unconditional image generation can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) if you want to learn more about the training details.
-
-Before running the script, install the dependencies:
-
-```bash
-pip install diffusers[training] accelerate datasets
-```
-
-Then initialize the 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment:
-
-```bash
-accelerate config
-```
-
-To initialize a default 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment without any custom configuration:
-
-```bash
-accelerate config default
-```
-
-For environments such as notebooks that don't support an interactive shell, you can also use:
-
-```py
-from accelerate.utils import write_basic_config
-
-write_basic_config()
-```
-
-## Uploading the model to the Hub
-
-You can upload your model to the Hub by adding the following argument to the training script:
-
-```bash
---push_to_hub
-```
-
-## Saving and loading checkpoints
-
-It is a good idea to save checkpoints regularly in case anything goes wrong during training. To save a checkpoint, pass the following argument to the training script:
-
-```bash
---checkpointing_steps=500
-```
-
-The full training state is saved to a subfolder of `output_dir` every 500 steps, and you can load a checkpoint and resume training by passing the `--resume_from_checkpoint` argument to the training script:
-
-```bash
---resume_from_checkpoint="checkpoint-1500"
-```
-
-## Fine-tuning
-
-You're now ready to launch the training script! Specify the name of the dataset to fine-tune on with the `--dataset_name` argument, and save the output to the path given by the `--output_dir` argument. To use your own dataset, see the [Create a dataset for training](create_dataset) guide.
-
-The training script creates a `diffusion_pytorch_model.bin` file and saves it to your repository.
-
-<Tip>
-
-💡 A full training run takes 2 hours on 4xV100 GPUs.
-
-</Tip>
-
-For example, to fine-tune on the [Oxford Flowers](https://huggingface.co/datasets/huggan/flowers-102-categories) dataset:
-
-```bash
-accelerate launch train_unconditional.py \
-  --dataset_name="huggan/flowers-102-categories" \
-  --resolution=64 \
-  --output_dir="ddpm-ema-flowers-64" \
-  --train_batch_size=16 \
-  --num_epochs=100 \
-  --gradient_accumulation_steps=1 \
-  --learning_rate=1e-4 \
-  --lr_warmup_steps=500 \
-  --mixed_precision=no \
-  --push_to_hub
-```
-
-<div class="flex justify-center">
-<img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png"/>
-</div>
-To use the [Pokemon](https://huggingface.co/datasets/huggan/pokemon) dataset:
-
-```bash
-accelerate launch train_unconditional.py \
-  --dataset_name="huggan/pokemon" \
-  --resolution=64 \
-  --output_dir="ddpm-ema-pokemon-64" \
-  --train_batch_size=16 \
-  --num_epochs=100 \
-  --gradient_accumulation_steps=1 \
-  --learning_rate=1e-4 \
-  --lr_warmup_steps=500 \
-  --mixed_precision=no \
-  --push_to_hub
-```
-
-<div class="flex justify-center">
-<img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png"/>
-</div>
-
-### Training with multiple GPUs
-
-`accelerate` allows seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:
-
-```bash
-accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \
-  --dataset_name="huggan/pokemon" \
-  --resolution=64 --center_crop --random_flip \
-  --output_dir="ddpm-ema-pokemon-64" \
-  --train_batch_size=16 \
-  --num_epochs=100 \
-  --gradient_accumulation_steps=1 \
-  --use_ema \
-  --learning_rate=1e-4 \
-  --lr_warmup_steps=500 \
-  --mixed_precision="fp16" \
-  --logger="wandb" \
-  --push_to_hub
-```
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/repaint/pipeline_repaint.py
DELETED
@@ -1,232 +0,0 @@
-# Copyright 2023 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import warnings
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import PIL
-import torch
-
-from ...models import UNet2DModel
-from ...schedulers import RePaintScheduler
-from ...utils import PIL_INTERPOLATION, logging, randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
-logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
-def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
-    warnings.warn(
-        "The preprocess method is deprecated and will be removed in a future version. Please"
-        " use VaeImageProcessor.preprocess instead",
-        FutureWarning,
-    )
-    if isinstance(image, torch.Tensor):
-        return image
-    elif isinstance(image, PIL.Image.Image):
-        image = [image]
-
-    if isinstance(image[0], PIL.Image.Image):
-        w, h = image[0].size
-        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
-
-        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
-        image = np.concatenate(image, axis=0)
-        image = np.array(image).astype(np.float32) / 255.0
-        image = image.transpose(0, 3, 1, 2)
-        image = 2.0 * image - 1.0
-        image = torch.from_numpy(image)
-    elif isinstance(image[0], torch.Tensor):
-        image = torch.cat(image, dim=0)
-    return image
-
-
-def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
-    if isinstance(mask, torch.Tensor):
-        return mask
-    elif isinstance(mask, PIL.Image.Image):
-        mask = [mask]
-
-    if isinstance(mask[0], PIL.Image.Image):
-        w, h = mask[0].size
-        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
-        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
-        mask = np.concatenate(mask, axis=0)
-        mask = mask.astype(np.float32) / 255.0
-        mask[mask < 0.5] = 0
-        mask[mask >= 0.5] = 1
-        mask = torch.from_numpy(mask)
-    elif isinstance(mask[0], torch.Tensor):
-        mask = torch.cat(mask, dim=0)
-    return mask
-
-
-class RePaintPipeline(DiffusionPipeline):
-    r"""
-    Pipeline for image inpainting using RePaint.
-
-    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
-    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-    Parameters:
-        unet ([`UNet2DModel`]):
-            A `UNet2DModel` to denoise the encoded image latents.
-        scheduler ([`RePaintScheduler`]):
-            A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image.
-    """
-
-    unet: UNet2DModel
-    scheduler: RePaintScheduler
-
-    def __init__(self, unet, scheduler):
-        super().__init__()
-        self.register_modules(unet=unet, scheduler=scheduler)
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        image: Union[torch.Tensor, PIL.Image.Image],
-        mask_image: Union[torch.Tensor, PIL.Image.Image],
-        num_inference_steps: int = 250,
-        eta: float = 0.0,
-        jump_length: int = 10,
-        jump_n_sample: int = 10,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-    ) -> Union[ImagePipelineOutput, Tuple]:
-        r"""
-        The call function to the pipeline for generation.
-
-        Args:
-            image (`torch.FloatTensor` or `PIL.Image.Image`):
-                The original image to inpaint on.
-            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
-                The mask_image where 0.0 defines which part of the original image to inpaint.
-            num_inference_steps (`int`, *optional*, defaults to 250):
-                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                expense of slower inference.
-            eta (`float`):
-                The weight of the added noise in a diffusion step. Its value is between 0.0 and 1.0; 0.0 corresponds to
-                DDIM and 1.0 is the DDPM scheduler.
-            jump_length (`int`, *optional*, defaults to 10):
-                The number of steps taken forward in time before going backward in time for a single jump ("j" in
-                RePaint paper). Take a look at Figure 9 and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf).
-            jump_n_sample (`int`, *optional*, defaults to 10):
-                The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9
-                and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf).
-            generator (`torch.Generator`, *optional*):
-                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
-                generation deterministic.
-            output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
-
-        Example:
-
-        ```py
-        >>> from io import BytesIO
-        >>> import torch
-        >>> import PIL
-        >>> import requests
-        >>> from diffusers import RePaintPipeline, RePaintScheduler
-
-
-        >>> def download_image(url):
-        ...     response = requests.get(url)
-        ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
-
-
-        >>> img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
-        >>> mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
-
-        >>> # Load the original image and the mask as PIL images
-        >>> original_image = download_image(img_url).resize((256, 256))
-        >>> mask_image = download_image(mask_url).resize((256, 256))
|
162 |
-
|
163 |
-
>>> # Load the RePaint scheduler and pipeline based on a pretrained DDPM model
|
164 |
-
>>> scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
|
165 |
-
>>> pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
|
166 |
-
>>> pipe = pipe.to("cuda")
|
167 |
-
|
168 |
-
>>> generator = torch.Generator(device="cuda").manual_seed(0)
|
169 |
-
>>> output = pipe(
|
170 |
-
... image=original_image,
|
171 |
-
... mask_image=mask_image,
|
172 |
-
... num_inference_steps=250,
|
173 |
-
... eta=0.0,
|
174 |
-
... jump_length=10,
|
175 |
-
... jump_n_sample=10,
|
176 |
-
... generator=generator,
|
177 |
-
... )
|
178 |
-
>>> inpainted_image = output.images[0]
|
179 |
-
```
|
180 |
-
|
181 |
-
Returns:
|
182 |
-
[`~pipelines.ImagePipelineOutput`] or `tuple`:
|
183 |
-
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
|
184 |
-
returned where the first element is a list with the generated images.
|
185 |
-
"""
|
186 |
-
|
187 |
-
original_image = image
|
188 |
-
|
189 |
-
original_image = _preprocess_image(original_image)
|
190 |
-
original_image = original_image.to(device=self._execution_device, dtype=self.unet.dtype)
|
191 |
-
mask_image = _preprocess_mask(mask_image)
|
192 |
-
mask_image = mask_image.to(device=self._execution_device, dtype=self.unet.dtype)
|
193 |
-
|
194 |
-
batch_size = original_image.shape[0]
|
195 |
-
|
196 |
-
# sample gaussian noise to begin the loop
|
197 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
198 |
-
raise ValueError(
|
199 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
200 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
201 |
-
)
|
202 |
-
|
203 |
-
image_shape = original_image.shape
|
204 |
-
image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)
|
205 |
-
|
206 |
-
# set step values
|
207 |
-
self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self._execution_device)
|
208 |
-
self.scheduler.eta = eta
|
209 |
-
|
210 |
-
t_last = self.scheduler.timesteps[0] + 1
|
211 |
-
generator = generator[0] if isinstance(generator, list) else generator
|
212 |
-
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
|
213 |
-
if t < t_last:
|
214 |
-
# predict the noise residual
|
215 |
-
model_output = self.unet(image, t).sample
|
216 |
-
# compute previous image: x_t -> x_t-1
|
217 |
-
image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
|
218 |
-
|
219 |
-
else:
|
220 |
-
# compute the reverse: x_t-1 -> x_t
|
221 |
-
image = self.scheduler.undo_step(image, t_last, generator)
|
222 |
-
t_last = t
|
223 |
-
|
224 |
-
image = (image / 2 + 0.5).clamp(0, 1)
|
225 |
-
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
226 |
-
if output_type == "pil":
|
227 |
-
image = self.numpy_to_pil(image)
|
228 |
-
|
229 |
-
if not return_dict:
|
230 |
-
return (image,)
|
231 |
-
|
232 |
-
return ImagePipelineOutput(images=image)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
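For reference, a rough standalone sketch of how the jump schedule consumed by the loop above can be constructed; the real logic lives in RePaintScheduler.set_timesteps, and the function name here is invented for illustration.

def repaint_timesteps(num_inference_steps=20, jump_length=5, jump_n_sample=2):
    # Walk backward from t = num_inference_steps - 1 to 0; at every
    # jump_length-th step, jump forward jump_length steps
    # (jump_n_sample - 1 times) before resuming the descent.
    jumps = {j: jump_n_sample - 1 for j in range(0, num_inference_steps - jump_length, jump_length)}
    t, timesteps = num_inference_steps, []
    while t >= 1:
        t -= 1
        timesteps.append(t)
        if jumps.get(t, 0) > 0:
            jumps[t] -= 1
            for _ in range(jump_length):
                t += 1
                timesteps.append(t)
    return timesteps

print(repaint_timesteps())  # descends, jumps back up once per segment, then descends again

This interleaving is what drives the `t < t_last` branch (denoise) versus the `undo_step` branch (re-noise) in `__call__`.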
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/doc_utils.py
DELETED
@@ -1,38 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Doc utilities: Utilities related to documentation
-"""
-import re
-
-
-def replace_example_docstring(example_docstring):
-    def docstring_decorator(fn):
-        func_doc = fn.__doc__
-        lines = func_doc.split("\n")
-        i = 0
-        while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None:
-            i += 1
-        if i < len(lines):
-            lines[i] = example_docstring
-            func_doc = "\n".join(lines)
-        else:
-            raise ValueError(
-                f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, "
-                f"current docstring is:\n{func_doc}"
-            )
-        fn.__doc__ = func_doc
-        return fn
-
-    return docstring_decorator
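A minimal usage sketch of `replace_example_docstring`, assuming the decorator above is importable; `greet` and `EXAMPLE_DOC_STRING` are invented for illustration.

EXAMPLE_DOC_STRING = """
        Examples:
            >>> greet()
            'hello'
"""

@replace_example_docstring(EXAMPLE_DOC_STRING)
def greet():
    """Say hello.

    Examples:
    """
    return "hello"

print(greet.__doc__)  # the empty 'Examples:' placeholder is replaced by the example block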
spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r50_fpn_1x_coco.py
DELETED
@@ -1,136 +0,0 @@
-_base_ = '../htc/htc_r50_fpn_1x_coco.py'
-# model settings
-model = dict(
-    type='SCNet',
-    roi_head=dict(
-        _delete_=True,
-        type='SCNetRoIHead',
-        num_stages=3,
-        stage_loss_weights=[1, 0.5, 0.25],
-        bbox_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
-        bbox_head=[
-            dict(
-                type='SCNetBBoxHead',
-                num_shared_fcs=2,
-                in_channels=256,
-                fc_out_channels=1024,
-                roi_feat_size=7,
-                num_classes=80,
-                bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.1, 0.1, 0.2, 0.2]),
-                reg_class_agnostic=True,
-                loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                               loss_weight=1.0)),
-            dict(
-                type='SCNetBBoxHead',
-                num_shared_fcs=2,
-                in_channels=256,
-                fc_out_channels=1024,
-                roi_feat_size=7,
-                num_classes=80,
-                bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.05, 0.05, 0.1, 0.1]),
-                reg_class_agnostic=True,
-                loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                               loss_weight=1.0)),
-            dict(
-                type='SCNetBBoxHead',
-                num_shared_fcs=2,
-                in_channels=256,
-                fc_out_channels=1024,
-                roi_feat_size=7,
-                num_classes=80,
-                bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.033, 0.033, 0.067, 0.067]),
-                reg_class_agnostic=True,
-                loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
-        ],
-        mask_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
-        mask_head=dict(
-            type='SCNetMaskHead',
-            num_convs=12,
-            in_channels=256,
-            conv_out_channels=256,
-            num_classes=80,
-            conv_to_res=True,
-            loss_mask=dict(
-                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
-        semantic_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[8]),
-        semantic_head=dict(
-            type='SCNetSemanticHead',
-            num_ins=5,
-            fusion_level=1,
-            num_convs=4,
-            in_channels=256,
-            conv_out_channels=256,
-            num_classes=183,
-            ignore_label=255,
-            loss_weight=0.2,
-            conv_to_res=True),
-        glbctx_head=dict(
-            type='GlobalContextHead',
-            num_convs=4,
-            in_channels=256,
-            conv_out_channels=256,
-            num_classes=80,
-            loss_weight=3.0,
-            conv_to_res=True),
-        feat_relay_head=dict(
-            type='FeatureRelayHead',
-            in_channels=1024,
-            out_conv_channels=256,
-            roi_feat_size=7,
-            scale_factor=2)))
-
-# uncomment below code to enable test time augmentations
-# img_norm_cfg = dict(
-#     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-# test_pipeline = [
-#     dict(type='LoadImageFromFile'),
-#     dict(
-#         type='MultiScaleFlipAug',
-#         img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800),
-#                    (1400, 2100)],
-#         flip=True,
-#         transforms=[
-#             dict(type='Resize', keep_ratio=True),
-#             dict(type='RandomFlip', flip_ratio=0.5),
-#             dict(type='Normalize', **img_norm_cfg),
-#             dict(type='Pad', size_divisor=32),
-#             dict(type='ImageToTensor', keys=['img']),
-#             dict(type='Collect', keys=['img']),
-#         ])
-# ]
-# data = dict(
-#     val=dict(pipeline=test_pipeline),
-#     test=dict(pipeline=test_pipeline))
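A sketch of how such a config is typically consumed, assuming an mmdetection 2.x checkout with this file in place; the API names follow mmcv/mmdet conventions of that era and are not part of this diff.

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/scnet/scnet_r50_fpn_1x_coco.py')
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # SCNet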
spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/dist_utils.py
DELETED
@@ -1,69 +0,0 @@
-import warnings
-from collections import OrderedDict
-
-import torch.distributed as dist
-from mmcv.runner import OptimizerHook
-from torch._utils import (_flatten_dense_tensors, _take_tensors,
-                          _unflatten_dense_tensors)
-
-
-def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
-    if bucket_size_mb > 0:
-        bucket_size_bytes = bucket_size_mb * 1024 * 1024
-        buckets = _take_tensors(tensors, bucket_size_bytes)
-    else:
-        buckets = OrderedDict()
-        for tensor in tensors:
-            tp = tensor.type()
-            if tp not in buckets:
-                buckets[tp] = []
-            buckets[tp].append(tensor)
-        buckets = buckets.values()
-
-    for bucket in buckets:
-        flat_tensors = _flatten_dense_tensors(bucket)
-        dist.all_reduce(flat_tensors)
-        flat_tensors.div_(world_size)
-        for tensor, synced in zip(
-                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
-            tensor.copy_(synced)
-
-
-def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
-    """Allreduce gradients.
-
-    Args:
-        params (list[torch.Parameters]): List of parameters of a model.
-        coalesce (bool, optional): Whether to allreduce parameters as a whole.
-            Defaults to True.
-        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
-            Defaults to -1.
-    """
-    grads = [
-        param.grad.data for param in params
-        if param.requires_grad and param.grad is not None
-    ]
-    world_size = dist.get_world_size()
-    if coalesce:
-        _allreduce_coalesced(grads, world_size, bucket_size_mb)
-    else:
-        for tensor in grads:
-            dist.all_reduce(tensor.div_(world_size))
-
-
-class DistOptimizerHook(OptimizerHook):
-    """Deprecated optimizer hook for distributed training."""
-
-    def __init__(self, *args, **kwargs):
-        warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
-                      '"mmcv.runner.OptimizerHook".')
-        super().__init__(*args, **kwargs)
-
-
-def reduce_mean(tensor):
-    """Obtain the mean of a tensor across different GPUs."""
-    if not (dist.is_available() and dist.is_initialized()):
-        return tensor
-    tensor = tensor.clone()
-    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
-    return tensor
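A minimal sketch of `reduce_mean` in use: inside an initialized `torch.distributed` process group it averages the tensor across ranks; outside one, as below, it is the identity.

import torch

loss = torch.tensor(1.0)
mean_loss = reduce_mean(loss)  # identical to `loss` when not running distributed
print(mean_loss.item())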
spaces/Aniemore/Russian-Emotion-Recognition/app.py
DELETED
@@ -1,74 +0,0 @@
-from transformers import pipeline
-import gradio as gr
-from pyctcdecode import BeamSearchDecoderCTC
-import os
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchaudio
-from transformers import AutoConfig, AutoModel, Wav2Vec2FeatureExtractor
-import librosa
-import numpy as np
-import subprocess
-import time
-
-TRUST = True
-SR = 16000
-
-
-def resample(speech_array, sampling_rate):
-    speech = torch.from_numpy(speech_array)
-    print(speech, speech.shape, sampling_rate)
-    resampler = torchaudio.transforms.Resample(sampling_rate)
-    speech = resampler(speech).squeeze().numpy()
-    return speech
-
-
-def predict(speech_array, sampling_rate):
-    speech = resample(speech_array, sampling_rate)
-    print(speech, speech.shape)
-    inputs = feature_extractor(speech, sampling_rate=SR, return_tensors="pt", padding=True)
-    inputs = {key: inputs[key].to(device) for key in inputs}
-
-    with torch.no_grad():
-        logits = model.to(device)(**inputs).logits
-
-    scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
-    outputs = {config.id2label[i]: round(float(score), 3) for i, score in enumerate(scores)}
-    return outputs
-
-
-config = AutoConfig.from_pretrained('Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition', trust_remote_code=TRUST)
-model = AutoModel.from_pretrained("Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition", trust_remote_code=TRUST)
-feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition")
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print(device)
-
-
-def recognize(audio):
-    sr, audio_array = audio
-    audio_array = audio_array.astype(np.float32)
-    state = predict(audio_array, sr)
-    return state
-
-
-def test_some(audio):
-    sr, audio_array = audio
-    audio_array = audio_array.astype(np.float32)
-
-    return (sr, audio_array)
-
-
-interface = gr.Interface(
-    fn=recognize,
-    inputs=[
-        gr.Audio(source="microphone", label="Say something...")
-    ],
-    outputs=[
-        gr.Label(num_top_classes=7)
-    ],
-    live=False
-)
-
-gr.TabbedInterface([interface], ["Russian Emotion Recognition"]).launch(debug=True)
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_default.py
DELETED
@@ -1,104 +0,0 @@
-import gradio as gr
-
-from modules import logits, shared, ui, utils
-from modules.prompts import count_tokens, load_prompt
-from modules.text_generation import (
-    generate_reply_wrapper,
-    get_token_ids,
-    stop_everything_event
-)
-from modules.utils import gradio
-
-inputs = ('textbox-default', 'interface_state')
-outputs = ('output_textbox', 'html-default')
-
-
-def create_ui():
-    mu = shared.args.multi_user
-    with gr.Tab('Default', elem_id='default-tab'):
-        shared.gradio['last_input-default'] = gr.State('')
-        with gr.Row():
-            with gr.Column():
-                with gr.Row():
-                    shared.gradio['textbox-default'] = gr.Textbox(value='', lines=27, label='Input', elem_classes=['textbox_default', 'add_scrollbar'])
-                    shared.gradio['token-counter-default'] = gr.HTML(value="<span>0</span>", elem_classes=["token-counter", "default-token-counter"])
-
-                with gr.Row():
-                    shared.gradio['Generate-default'] = gr.Button('Generate', variant='primary')
-                    shared.gradio['Stop-default'] = gr.Button('Stop', elem_id='stop')
-                    shared.gradio['Continue-default'] = gr.Button('Continue')
-
-                with gr.Row():
-                    shared.gradio['prompt_menu-default'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt', elem_classes='slim-dropdown')
-                    ui.create_refresh_button(shared.gradio['prompt_menu-default'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, 'refresh-button', interactive=not mu)
-                    shared.gradio['save_prompt-default'] = gr.Button('💾', elem_classes='refresh-button', interactive=not mu)
-                    shared.gradio['delete_prompt-default'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
-
-            with gr.Column():
-                with gr.Tab('Raw'):
-                    shared.gradio['output_textbox'] = gr.Textbox(lines=27, label='Output', elem_id='textbox-default', elem_classes=['textbox_default_output', 'add_scrollbar'])
-
-                with gr.Tab('Markdown'):
-                    shared.gradio['markdown_render-default'] = gr.Button('Render')
-                    shared.gradio['markdown-default'] = gr.Markdown()
-
-                with gr.Tab('HTML'):
-                    shared.gradio['html-default'] = gr.HTML()
-
-                with gr.Tab('Logits'):
-                    with gr.Row():
-                        with gr.Column(scale=10):
-                            shared.gradio['get_logits-default'] = gr.Button('Get next token probabilities')
-                        with gr.Column(scale=1):
-                            shared.gradio['use_samplers-default'] = gr.Checkbox(label='Use samplers', value=True, elem_classes=['no-background'])
-
-                    with gr.Row():
-                        shared.gradio['logits-default'] = gr.Textbox(lines=23, label='Output', elem_classes=['textbox_logits', 'add_scrollbar'])
-                        shared.gradio['logits-default-previous'] = gr.Textbox(lines=23, label='Previous output', elem_classes=['textbox_logits', 'add_scrollbar'])
-
-                with gr.Tab('Tokens'):
-                    shared.gradio['get_tokens-default'] = gr.Button('Get token IDs for the input')
-                    shared.gradio['tokens-default'] = gr.Textbox(lines=23, label='Tokens', elem_classes=['textbox_logits', 'add_scrollbar', 'monospace'])
-
-
-def create_event_handlers():
-    shared.gradio['Generate-default'].click(
-        lambda x: x, gradio('textbox-default'), gradio('last_input-default')).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
-
-    shared.gradio['textbox-default'].submit(
-        lambda x: x, gradio('textbox-default'), gradio('last_input-default')).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
-
-    shared.gradio['markdown_render-default'].click(lambda x: x, gradio('output_textbox'), gradio('markdown-default'), queue=False)
-    shared.gradio['Continue-default'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        generate_reply_wrapper, [shared.gradio['output_textbox']] + gradio(inputs)[1:], gradio(outputs), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
-
-    shared.gradio['Stop-default'].click(stop_everything_event, None, None, queue=False)
-    shared.gradio['prompt_menu-default'].change(load_prompt, gradio('prompt_menu-default'), gradio('textbox-default'), show_progress=False)
-    shared.gradio['save_prompt-default'].click(
-        lambda x: x, gradio('textbox-default'), gradio('save_contents')).then(
-        lambda: 'prompts/', None, gradio('save_root')).then(
-        lambda: utils.current_time() + '.txt', None, gradio('save_filename')).then(
-        lambda: gr.update(visible=True), None, gradio('file_saver'))
-
-    shared.gradio['delete_prompt-default'].click(
-        lambda: 'prompts/', None, gradio('delete_root')).then(
-        lambda x: x + '.txt', gradio('prompt_menu-default'), gradio('delete_filename')).then(
-        lambda: gr.update(visible=True), None, gradio('file_deleter'))
-
-    shared.gradio['textbox-default'].change(lambda x: f"<span>{count_tokens(x)}</span>", gradio('textbox-default'), gradio('token-counter-default'), show_progress=False)
-    shared.gradio['get_logits-default'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        logits.get_next_logits, gradio('textbox-default', 'interface_state', 'use_samplers-default', 'logits-default'), gradio('logits-default', 'logits-default-previous'), show_progress=False)
-
-    shared.gradio['get_tokens-default'].click(get_token_ids, gradio('textbox-default'), gradio('tokens-default'), show_progress=False)
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/api.py
DELETED
@@ -1,170 +0,0 @@
-# based on https://github.com/isl-org/MiDaS
-
-import cv2
-import torch
-import torch.nn as nn
-from torchvision.transforms import Compose
-
-from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
-from ldm.modules.midas.midas.midas_net import MidasNet
-from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
-from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
-
-
-ISL_PATHS = {
-    "dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
-    "dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
-    "midas_v21": "",
-    "midas_v21_small": "",
-}
-
-
-def disabled_train(self, mode=True):
-    """Overwrite model.train with this function to make sure train/eval mode
-    does not change anymore."""
-    return self
-
-
-def load_midas_transform(model_type):
-    # https://github.com/isl-org/MiDaS/blob/master/run.py
-    # load transform only
-    if model_type == "dpt_large":  # DPT-Large
-        net_w, net_h = 384, 384
-        resize_mode = "minimal"
-        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
-    elif model_type == "dpt_hybrid":  # DPT-Hybrid
-        net_w, net_h = 384, 384
-        resize_mode = "minimal"
-        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
-    elif model_type == "midas_v21":
-        net_w, net_h = 384, 384
-        resize_mode = "upper_bound"
-        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
-    elif model_type == "midas_v21_small":
-        net_w, net_h = 256, 256
-        resize_mode = "upper_bound"
-        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
-    else:
-        assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
-
-    transform = Compose(
-        [
-            Resize(
-                net_w,
-                net_h,
-                resize_target=None,
-                keep_aspect_ratio=True,
-                ensure_multiple_of=32,
-                resize_method=resize_mode,
-                image_interpolation_method=cv2.INTER_CUBIC,
-            ),
-            normalization,
-            PrepareForNet(),
-        ]
-    )
-
-    return transform
-
-
-def load_model(model_type):
-    # https://github.com/isl-org/MiDaS/blob/master/run.py
-    # load network
-    model_path = ISL_PATHS[model_type]
-    if model_type == "dpt_large":  # DPT-Large
-        model = DPTDepthModel(
-            path=model_path,
-            backbone="vitl16_384",
-            non_negative=True,
-        )
-        net_w, net_h = 384, 384
-        resize_mode = "minimal"
-        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
-    elif model_type == "dpt_hybrid":  # DPT-Hybrid
-        model = DPTDepthModel(
-            path=model_path,
-            backbone="vitb_rn50_384",
-            non_negative=True,
-        )
-        net_w, net_h = 384, 384
-        resize_mode = "minimal"
-        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-
-    elif model_type == "midas_v21":
-        model = MidasNet(model_path, non_negative=True)
-        net_w, net_h = 384, 384
-        resize_mode = "upper_bound"
-        normalization = NormalizeImage(
-            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-        )
-
-    elif model_type == "midas_v21_small":
-        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
-                               non_negative=True, blocks={'expand': True})
-        net_w, net_h = 256, 256
-        resize_mode = "upper_bound"
-        normalization = NormalizeImage(
-            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-        )
-
-    else:
-        print(f"model_type '{model_type}' not implemented, use: --model_type large")
-        assert False
-
-    transform = Compose(
-        [
-            Resize(
-                net_w,
-                net_h,
-                resize_target=None,
-                keep_aspect_ratio=True,
-                ensure_multiple_of=32,
-                resize_method=resize_mode,
-                image_interpolation_method=cv2.INTER_CUBIC,
-            ),
-            normalization,
-            PrepareForNet(),
-        ]
-    )
-
-    return model.eval(), transform
-
-
-class MiDaSInference(nn.Module):
-    MODEL_TYPES_TORCH_HUB = [
-        "DPT_Large",
-        "DPT_Hybrid",
-        "MiDaS_small"
-    ]
-    MODEL_TYPES_ISL = [
-        "dpt_large",
-        "dpt_hybrid",
-        "midas_v21",
-        "midas_v21_small",
-    ]
-
-    def __init__(self, model_type):
-        super().__init__()
-        assert (model_type in self.MODEL_TYPES_ISL)
-        model, _ = load_model(model_type)
-        self.model = model
-        self.model.train = disabled_train
-
-    def forward(self, x):
-        # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
-        # NOTE: we expect that the correct transform has been called during dataloading.
-        with torch.no_grad():
-            prediction = self.model(x)
-            prediction = torch.nn.functional.interpolate(
-                prediction.unsqueeze(1),
-                size=x.shape[2:],
-                mode="bicubic",
-                align_corners=False,
-            )
-            assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
-        return prediction
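A minimal usage sketch, assuming the `ldm` package is importable and the dpt_hybrid checkpoint exists under `midas_models/` (both assumptions; the checkpoint is not bundled).

import torch

midas = MiDaSInference(model_type="dpt_hybrid")
x = torch.rand(1, 3, 384, 384)  # already normalized, as produced by load_midas_transform
with torch.no_grad():
    depth = midas(x)  # -> shape (1, 1, 384, 384)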
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/platformdirs/macos.py
DELETED
@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-import os
-
-from .api import PlatformDirsABC
-
-
-class MacOS(PlatformDirsABC):
-    """
-    Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
-    <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
-    Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`,
-    `version <platformdirs.api.PlatformDirsABC.version>`,
-    `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
-    """
-
-    @property
-    def user_data_dir(self) -> str:
-        """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
-        return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support"))
-
-    @property
-    def site_data_dir(self) -> str:
-        """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
-        return self._append_app_name_and_version("/Library/Application Support")
-
-    @property
-    def user_config_dir(self) -> str:
-        """:return: config directory tied to the user, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def site_config_dir(self) -> str:
-        """:return: config directory shared by the users, same as `site_data_dir`"""
-        return self.site_data_dir
-
-    @property
-    def user_cache_dir(self) -> str:
-        """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
-        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))
-
-    @property
-    def site_cache_dir(self) -> str:
-        """:return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``"""
-        return self._append_app_name_and_version("/Library/Caches")
-
-    @property
-    def user_state_dir(self) -> str:
-        """:return: state directory tied to the user, same as `user_data_dir`"""
-        return self.user_data_dir
-
-    @property
-    def user_log_dir(self) -> str:
-        """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
-        return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))
-
-    @property
-    def user_documents_dir(self) -> str:
-        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
-        return os.path.expanduser("~/Documents")
-
-    @property
-    def user_runtime_dir(self) -> str:
-        """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
-        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))
-
-
-__all__ = [
-    "MacOS",
-]
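A usage sketch; the app name and version are illustrative placeholders.

dirs = MacOS(appname="MyApp", version="1.0")
print(dirs.user_data_dir)   # ~/Library/Application Support/MyApp/1.0
print(dirs.user_cache_dir)  # ~/Library/Caches/MyApp/1.0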
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_inspect.py
DELETED
@@ -1,270 +0,0 @@
-from __future__ import absolute_import
-
-import inspect
-from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
-from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
-
-from .console import Group, RenderableType
-from .control import escape_control_codes
-from .highlighter import ReprHighlighter
-from .jupyter import JupyterMixin
-from .panel import Panel
-from .pretty import Pretty
-from .table import Table
-from .text import Text, TextType
-
-
-def _first_paragraph(doc: str) -> str:
-    """Get the first paragraph from a docstring."""
-    paragraph, _, _ = doc.partition("\n\n")
-    return paragraph
-
-
-class Inspect(JupyterMixin):
-    """A renderable to inspect any Python Object.
-
-    Args:
-        obj (Any): An object to inspect.
-        title (str, optional): Title to display over inspect result, or None to use the type. Defaults to None.
-        help (bool, optional): Show full help text rather than just the first paragraph. Defaults to False.
-        methods (bool, optional): Enable inspection of callables. Defaults to False.
-        docs (bool, optional): Also render doc strings. Defaults to True.
-        private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
-        dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
-        sort (bool, optional): Sort attributes alphabetically. Defaults to True.
-        all (bool, optional): Show all attributes. Defaults to False.
-        value (bool, optional): Pretty print value of object. Defaults to True.
-    """
-
-    def __init__(
-        self,
-        obj: Any,
-        *,
-        title: Optional[TextType] = None,
-        help: bool = False,
-        methods: bool = False,
-        docs: bool = True,
-        private: bool = False,
-        dunder: bool = False,
-        sort: bool = True,
-        all: bool = True,
-        value: bool = True,
-    ) -> None:
-        self.highlighter = ReprHighlighter()
-        self.obj = obj
-        self.title = title or self._make_title(obj)
-        if all:
-            methods = private = dunder = True
-        self.help = help
-        self.methods = methods
-        self.docs = docs or help
-        self.private = private or dunder
-        self.dunder = dunder
-        self.sort = sort
-        self.value = value
-
-    def _make_title(self, obj: Any) -> Text:
-        """Make a default title."""
-        title_str = (
-            str(obj)
-            if (isclass(obj) or callable(obj) or ismodule(obj))
-            else str(type(obj))
-        )
-        title_text = self.highlighter(title_str)
-        return title_text
-
-    def __rich__(self) -> Panel:
-        return Panel.fit(
-            Group(*self._render()),
-            title=self.title,
-            border_style="scope.border",
-            padding=(0, 1),
-        )
-
-    def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
-        """Get a signature for a callable."""
-        try:
-            _signature = str(signature(obj)) + ":"
-        except ValueError:
-            _signature = "(...)"
-        except TypeError:
-            return None
-
-        source_filename: Optional[str] = None
-        try:
-            source_filename = getfile(obj)
-        except (OSError, TypeError):
-            # OSError is raised if obj has no source file, e.g. when defined in REPL.
-            pass
-
-        callable_name = Text(name, style="inspect.callable")
-        if source_filename:
-            callable_name.stylize(f"link file://{source_filename}")
-        signature_text = self.highlighter(_signature)
-
-        qualname = name or getattr(obj, "__qualname__", name)
-
-        # If obj is a module, there may be classes (which are callable) to display
-        if inspect.isclass(obj):
-            prefix = "class"
-        elif inspect.iscoroutinefunction(obj):
-            prefix = "async def"
-        else:
-            prefix = "def"
-
-        qual_signature = Text.assemble(
-            (f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
-            (qualname, "inspect.callable"),
-            signature_text,
-        )
-
-        return qual_signature
-
-    def _render(self) -> Iterable[RenderableType]:
-        """Render object."""
-
-        def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
-            key, (_error, value) = item
-            return (callable(value), key.strip("_").lower())
-
-        def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
-            """Get attribute or any exception."""
-            try:
-                return (None, getattr(obj, attr_name))
-            except Exception as error:
-                return (error, None)
-
-        obj = self.obj
-        keys = dir(obj)
-        total_items = len(keys)
-        if not self.dunder:
-            keys = [key for key in keys if not key.startswith("__")]
-        if not self.private:
-            keys = [key for key in keys if not key.startswith("_")]
-        not_shown_count = total_items - len(keys)
-        items = [(key, safe_getattr(key)) for key in keys]
-        if self.sort:
-            items.sort(key=sort_items)
-
-        items_table = Table.grid(padding=(0, 1), expand=False)
-        items_table.add_column(justify="right")
-        add_row = items_table.add_row
-        highlighter = self.highlighter
-
-        if callable(obj):
-            signature = self._get_signature("", obj)
-            if signature is not None:
-                yield signature
-                yield ""
-
-        if self.docs:
-            _doc = self._get_formatted_doc(obj)
-            if _doc is not None:
-                doc_text = Text(_doc, style="inspect.help")
-                doc_text = highlighter(doc_text)
-                yield doc_text
-                yield ""
-
-        if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
-            yield Panel(
-                Pretty(obj, indent_guides=True, max_length=10, max_string=60),
-                border_style="inspect.value.border",
-            )
-            yield ""
-
-        for key, (error, value) in items:
-            key_text = Text.assemble(
-                (
-                    key,
-                    "inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
-                ),
-                (" =", "inspect.equals"),
-            )
-            if error is not None:
-                warning = key_text.copy()
-                warning.stylize("inspect.error")
-                add_row(warning, highlighter(repr(error)))
-                continue
-
-            if callable(value):
-                if not self.methods:
-                    continue
-
-                _signature_text = self._get_signature(key, value)
-                if _signature_text is None:
-                    add_row(key_text, Pretty(value, highlighter=highlighter))
-                else:
-                    if self.docs:
-                        docs = self._get_formatted_doc(value)
-                        if docs is not None:
-                            _signature_text.append("\n" if "\n" in docs else " ")
-                            doc = highlighter(docs)
-                            doc.stylize("inspect.doc")
-                            _signature_text.append(doc)
-
-                    add_row(key_text, _signature_text)
-            else:
-                add_row(key_text, Pretty(value, highlighter=highlighter))
-        if items_table.row_count:
-            yield items_table
-        elif not_shown_count:
-            yield Text.from_markup(
-                f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
-                f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
-            )
-
-    def _get_formatted_doc(self, object_: Any) -> Optional[str]:
-        """
-        Extract the docstring of an object, process it, and return it.
-        The processing consists of cleaning up the docstring's indentation,
-        taking only its first paragraph if `self.help` is not True,
-        and escaping its control codes.
-
-        Args:
-            object_ (Any): the object to get the docstring from.
-
-        Returns:
-            Optional[str]: the processed docstring, or None if no docstring was found.
-        """
-        docs = getdoc(object_)
-        if docs is None:
-            return None
-        docs = cleandoc(docs).strip()
-        if not self.help:
-            docs = _first_paragraph(docs)
-        return escape_control_codes(docs)
-
-
-def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
-    """Returns the MRO of an object's class, or of the object itself if it's a class."""
-    if not hasattr(obj, "__mro__"):
-        # N.B. we cannot use `if type(obj) is type` here because it doesn't work with
-        # some types of classes, such as the ones that use abc.ABCMeta.
-        obj = type(obj)
-    return getattr(obj, "__mro__", ())
-
-
-def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
-    """
-    Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
-
-    Examples:
-        `object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
-    """
-    return [
-        f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
-        for type_ in get_object_types_mro(obj)
-    ]
-
-
-def is_object_one_of_types(
-    obj: object, fully_qualified_types_names: Collection[str]
-) -> bool:
-    """
-    Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
-    fully qualified names in its MRO.
-    """
-    for type_name in get_object_types_mro_as_strings(obj):
-        if type_name in fully_qualified_types_names:
-            return True
-    return False
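A sketch of rendering an `Inspect` panel with a rich `Console`; the vendored import path below is an assumption tied to this file's location inside pip, and outside pip's vendoring it would simply be `rich.console`.

from pip._vendor.rich.console import Console

console = Console()
console.print(Inspect([1, 2, 3], methods=True))  # renders a panel describing the list and its methods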
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_entry_points.py
DELETED
@@ -1,86 +0,0 @@
-import functools
-import operator
-import itertools
-
-from .extern.jaraco.text import yield_lines
-from .extern.jaraco.functools import pass_none
-from ._importlib import metadata
-from ._itertools import ensure_unique
-from .extern.more_itertools import consume
-
-
-def ensure_valid(ep):
-    """
-    Exercise one of the dynamic properties to trigger
-    the pattern match.
-    """
-    ep.extras
-
-
-def load_group(value, group):
-    """
-    Given a value of an entry point or series of entry points,
-    return each as an EntryPoint.
-    """
-    # normalize to a single sequence of lines
-    lines = yield_lines(value)
-    text = f'[{group}]\n' + '\n'.join(lines)
-    return metadata.EntryPoints._from_text(text)
-
-
-def by_group_and_name(ep):
-    return ep.group, ep.name
-
-
-def validate(eps: metadata.EntryPoints):
-    """
-    Ensure entry points are unique by group and name and validate each.
-    """
-    consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
-    return eps
-
-
-@functools.singledispatch
-def load(eps):
-    """
-    Given a Distribution.entry_points, produce EntryPoints.
-    """
-    groups = itertools.chain.from_iterable(
-        load_group(value, group)
-        for group, value in eps.items())
-    return validate(metadata.EntryPoints(groups))
-
-
-@load.register(str)
-def _(eps):
-    r"""
-    >>> ep, = load('[console_scripts]\nfoo=bar')
-    >>> ep.group
-    'console_scripts'
-    >>> ep.name
-    'foo'
-    >>> ep.value
-    'bar'
-    """
-    return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))
-
-
-load.register(type(None), lambda x: x)
-
-
-@pass_none
-def render(eps: metadata.EntryPoints):
-    by_group = operator.attrgetter('group')
-    groups = itertools.groupby(sorted(eps, key=by_group), by_group)
-
-    return '\n'.join(
-        f'[{group}]\n{render_items(items)}\n'
-        for group, items in groups
-    )
-
-
-def render_items(eps):
-    return '\n'.join(
-        f'{ep.name} = {ep.value}'
-        for ep in sorted(eps)
-    )
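The string form of `load` mirrors the doctest embedded in the module above:

ep, = load('[console_scripts]\nfoo=bar')
print(ep.group, ep.name, ep.value)  # console_scripts foo bar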
spaces/AutoLLM/AutoAgents/autoagents/utils/constants.py
DELETED
@@ -1,21 +0,0 @@
-MAIN_HEADER = "Web Search Agent"
-
-MAIN_CAPTION = """This is a proof-of-concept search agent that reasons, plans,
-and executes web searches to collect information on your behalf. It aims to
-resolve your question by breaking it down into step-by-step subtasks. All the
-intermediate results will be presented.
-
-*DISCLAIMER*: We are collecting search queries, so please refrain from
-providing any personal information. If you wish to avoid this, you can run the
-app locally by following the instructions on our
-[Github](https://github.com/AutoLLM/AutoAgents)."""
-
-SAMPLE_QUESTIONS = [
-    "Recommend me a movie in theater now to watch with kids.",
-    "Who is the most recent NBA MVP? Which team does he play for? What are his career stats?",
-    "Who is the head coach of AC Milan now? How long has he been coaching the team?",
-    "What is the mortgage rate right now and how does that compare to the past two years?",
-    "What is the weather like in San Francisco today? What about tomorrow?",
-    "When and where is the upcoming concert for Taylor Swift? Share a link to purchase tickets.",
-    "Find me recent studies focusing on hallucination in large language models. Provide the link to each study found.",
-]
spaces/BWQ/Chatgpt/app.py
DELETED
@@ -1,83 +0,0 @@
-import gradio as gr
-import openai
-import time
-
-with gr.Blocks() as demo:
-    with gr.Row():
-        key = gr.Textbox(placeholder="API_KEY")
-    with gr.Row():
-        with gr.Column():
-            msg = gr.Textbox(placeholder="Question")
-            submit = gr.Button("Submit")
-            clear = gr.Button("Clear")
-        with gr.Column():
-            chatbot = gr.Chatbot()
-
-    # state = gr.State([])
-
-    def user(user_message, history):
-        return "", history + [[user_message, None]]
-
-    def bot(history, key):
-        openai.api_key = key
-        bot_message = ask_gpt(history)
-        print(history)
-        history[-1][1] = bot_message
-        time.sleep(1)
-        return history
-
-    def ask_gpt(history):
-        messages = []
-        for i in range(len(history) - 1):
-            messages.append({"role": "user", "content": history[i][0]})
-            messages.append({"role": "assistant", "content": history[i][1]})
-        messages.append({"role": "user", "content": history[-1][0]})
-        try:
-            response = openai.ChatCompletion.create(
-                model="gpt-3.5-turbo",
-                messages=messages
-            )
-            return response['choices'][0]['message']['content'].replace("```", "")
-        except Exception as e:
-            print(e)
-            return e
-
-    # def bot(history, messages_history, key):
-    #     openai.api_key = key
-    #     user_message = history[-1][0]
-    #     bot_message, messages_history = ask_gpt(user_message, messages_history)
-    #     messages_history += [{"role": "assistant", "content": bot_message}]
-    #     history[-1][1] = bot_message
-    #     time.sleep(1)
-    #     return history, messages_history
-    #
-    #
-    # def ask_gpt(message, messages_history):
-    #     try:
-    #         messages_history += [{"role": "user", "content": message}]
-    #         response = openai.ChatCompletion.create(
-    #             model="gpt-3.5-turbo",
-    #             messages=messages_history
-    #         )
-    #         return response['choices'][0]['message']['content'], messages_history
-    #     except Exception as e:
-    #         print(e)
-    #         return e, messages_history
-
-    # def init_history(messages_history):
-    #     messages_history = []
-    #     return messages_history
-
-    submit.click(user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=True, api_name="submit").then(
-        bot, [chatbot, key], chatbot, api_name="bot_response"
-    )
-    clear.click(lambda: None, None, chatbot, queue=True, api_name="clear")
-
-    # clear.click(lambda: None, None, chatbot, queue=False, api_name="clear").then(init_history, [state], [state], api_name="init_history")
-
-demo.queue()
-demo.launch()
spaces/Bakar31/PotterQuest/app.py
DELETED
@@ -1,78 +0,0 @@
import os
import gradio as gr
from pathlib import Path
from typing import Union
from langchain import VectorDBQA
from langchain.llms import HuggingFaceHub
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain import PromptTemplate

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
index_path = 'index/'

def load_document_store(path: Union[str, Path]) -> FAISS:
    embeddings = HuggingFaceEmbeddings()
    document_store = FAISS.load_local(path, embeddings)
    return document_store


examples = [
    "Why harry potter is famous?",
    "When is Harry Potter's birthday?",
    "How would you sneak into Hogwarts without being detected?",
    "Who is the most badass wizard in the world?",
    "Why are the Dursleys so mean to Harry?",
    "What is the name of the spell used to disarm an opponent?",
    'What position does Harry play in Quidditch?',
    "What is the name of the wizarding bank in Diagon Alley?",
    "Why is Voldemort afraid of Harry Potter?",
    'Whom do Harry and Ron accidentally lock in the bathroom with the troll?',
    "Where do Harry and the Dursleys go for Dudley's birthday?",
    'What did Dobby catch that set him free from Mr. Malfoy?',
    "The Hogwarts motto is “Draco dormiens nunquan titillandus.” What does it mean?",
    "How many presents did Dudley Dursley receive on his birthday in total?",
    "What was the Fat Lady’s password to get into the Gryffindor common room?",
    "When Harry, Ron and Hermione make Polyjuice Potion, who steals the ingredients from Professor Snape’s office?",
    "What two creatures are Hippogriffs a mix of?",
    "What is Draco Malfoy’s mother’s name?",
    "Which of Voldemort’s Horcruxes do Harry and Dumbledore track down—but it turns out to be a fake?",
    "What is Professor Snape’s Patronus?",
    "who killed dumboldore?",
    'What was the last horcrux?'
]

def ask(question, repo_id="google/flan-ul2"):

    if len(question) == 0:
        return ""

    document_store = load_document_store(index_path)
    chain = VectorDBQA.from_chain_type(
        llm=HuggingFaceHub(repo_id=repo_id),
        chain_type="stuff",
        vectorstore=document_store,
        return_source_documents=True
    )

    response = chain(question)
    return response["result"].strip()


demo = gr.Blocks()

with demo:
    gr.Markdown("# PotterQuest: Your One-Line Wizardry Encyclopedia")
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(lines=2, label="Question")
            with gr.Row():
                clear = gr.Button("Clear")
                btn = gr.Button("Submit", variant="primary")
        with gr.Column():
            answer = gr.Textbox(lines=2, label="Answer")
    btn.click(ask, [question], answer)
    clear.click(lambda _: "", question, question)
    gr.Examples(examples, question)
    gr.Markdown("💻 Checkout the source code on [GitHub](https://github.com/Bakar31/PotterQuest).")
demo.launch()
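The app loads a prebuilt FAISS index from `index/`, but the build step is not part of this file. A minimal sketch of producing one with the same langchain APIs the app already imports (the sample texts are hypothetical stand-ins for chunks of the book corpus):

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS

texts = [
    "Harry Potter was born on 31 July.",  # hypothetical corpus chunks
    "Gringotts is the wizarding bank in Diagon Alley.",
]
embeddings = HuggingFaceEmbeddings()
document_store = FAISS.from_texts(texts, embeddings)
document_store.save_local("index/")  # the path load_document_store() reads back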
spaces/Bart92/RVC_HF/infer/modules/ipex/gradscaler.py
DELETED
@@ -1,179 +0,0 @@
from collections import defaultdict
import torch
import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import
import intel_extension_for_pytorch._C as core  # pylint: disable=import-error, unused-import

# pylint: disable=protected-access, missing-function-docstring, line-too-long

OptState = ipex.cpu.autocast._grad_scaler.OptState
_MultiDeviceReplicator = ipex.cpu.autocast._grad_scaler._MultiDeviceReplicator
_refresh_per_optimizer_state = ipex.cpu.autocast._grad_scaler._refresh_per_optimizer_state

def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):  # pylint: disable=unused-argument
    per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
    per_device_found_inf = _MultiDeviceReplicator(found_inf)

    # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
    # There could be hundreds of grads, so we'd like to iterate through them just once.
    # However, we don't know their devices or dtypes in advance.

    # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
    # Google says mypy struggles with defaultdicts type annotations.
    per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))  # type: ignore[var-annotated]
    # sync grad to master weight
    if hasattr(optimizer, "sync_grad"):
        optimizer.sync_grad()
    with torch.no_grad():
        for group in optimizer.param_groups:
            for param in group["params"]:
                if param.grad is None:
                    continue
                if (not allow_fp16) and param.grad.dtype == torch.float16:
                    raise ValueError("Attempting to unscale FP16 gradients.")
                if param.grad.is_sparse:
                    # is_coalesced() == False means the sparse grad has values with duplicate indices.
                    # coalesce() deduplicates indices and adds all values that have the same index.
                    # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                    # so we should check the coalesced _values().
                    if param.grad.dtype is torch.float16:
                        param.grad = param.grad.coalesce()
                    to_unscale = param.grad._values()
                else:
                    to_unscale = param.grad

                # -: is there a way to split by device and dtype without appending in the inner loop?
                to_unscale = to_unscale.to("cpu")
                per_device_and_dtype_grads[to_unscale.device][
                    to_unscale.dtype
                ].append(to_unscale)

    for _, per_dtype_grads in per_device_and_dtype_grads.items():
        for grads in per_dtype_grads.values():
            core._amp_foreach_non_finite_check_and_unscale_(
                grads,
                per_device_found_inf.get("cpu"),
                per_device_inv_scale.get("cpu"),
            )

    return per_device_found_inf._per_device_tensors

def unscale_(self, optimizer):
    """
    Divides ("unscales") the optimizer's gradient tensors by the scale factor.
    :meth:`unscale_` is optional, serving cases where you need to
    :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
    between the backward pass(es) and :meth:`step`.
    If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
    Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
        ...
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        scaler.step(optimizer)
        scaler.update()
    Args:
        optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
    .. warning::
        :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
        and only after all gradients for that optimizer's assigned parameters have been accumulated.
        Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
    .. warning::
        :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
    """
    if not self._enabled:
        return

    self._check_scale_growth_tracker("unscale_")

    optimizer_state = self._per_optimizer_states[id(optimizer)]

    if optimizer_state["stage"] is OptState.UNSCALED:  # pylint: disable=no-else-raise
        raise RuntimeError(
            "unscale_() has already been called on this optimizer since the last update()."
        )
    elif optimizer_state["stage"] is OptState.STEPPED:
        raise RuntimeError("unscale_() is being called after step().")

    # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
    assert self._scale is not None
    inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
    found_inf = torch.full(
        (1,), 0.0, dtype=torch.float32, device=self._scale.device
    )

    optimizer_state["found_inf_per_device"] = self._unscale_grads_(
        optimizer, inv_scale, found_inf, False
    )
    optimizer_state["stage"] = OptState.UNSCALED

def update(self, new_scale=None):
    """
    Updates the scale factor.
    If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
    to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
    the scale is multiplied by ``growth_factor`` to increase it.
    Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
    used directly, it's used to fill GradScaler's internal scale tensor. So if
    ``new_scale`` was a tensor, later in-place changes to that tensor will not further
    affect the scale GradScaler uses internally.)
    Args:
        new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor.
    .. warning::
        :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
        been invoked for all optimizers used this iteration.
    """
    if not self._enabled:
        return

    _scale, _growth_tracker = self._check_scale_growth_tracker("update")

    if new_scale is not None:
        # Accept a new user-defined scale.
        if isinstance(new_scale, float):
            self._scale.fill_(new_scale)  # type: ignore[union-attr]
        else:
            reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False."
            assert isinstance(new_scale, torch.FloatTensor), reason  # type: ignore[attr-defined]
            assert new_scale.numel() == 1, reason
            assert new_scale.requires_grad is False, reason
            self._scale.copy_(new_scale)  # type: ignore[union-attr]
    else:
        # Consume shared inf/nan data collected from optimizers to update the scale.
        # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
        found_infs = [
            found_inf.to(device="cpu", non_blocking=True)
            for state in self._per_optimizer_states.values()
            for found_inf in state["found_inf_per_device"].values()
        ]

        assert len(found_infs) > 0, "No inf checks were recorded prior to update."

        found_inf_combined = found_infs[0]
        if len(found_infs) > 1:
            for i in range(1, len(found_infs)):
                found_inf_combined += found_infs[i]

        to_device = _scale.device
        _scale = _scale.to("cpu")
        _growth_tracker = _growth_tracker.to("cpu")

        core._amp_update_scale_(
            _scale,
            _growth_tracker,
            found_inf_combined,
            self._growth_factor,
            self._backoff_factor,
            self._growth_interval,
        )

        _scale = _scale.to(to_device)
        _growth_tracker = _growth_tracker.to(to_device)
    # To prepare for next iteration, clear the data collected from optimizers this iteration.
    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)

def gradscaler_init():
    torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
    torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_
    torch.xpu.amp.GradScaler.unscale_ = unscale_
    torch.xpu.amp.GradScaler.update = update
    return torch.xpu.amp.GradScaler
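For context, the `unscale_` docstring above already sketches the intended call order. A minimal end-to-end training step using these overrides, with a toy model and random data as illustrative assumptions (it presupposes an IPEX/XPU environment where `gradscaler_init` can run):

import torch

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = gradscaler_init()()  # the patched torch.xpu.amp.GradScaler class

for _ in range(3):
    inputs, targets = torch.randn(4, 8), torch.randn(4, 1)
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)   # optional: expose true gradients for clipping
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    scaler.step(optimizer)       # skipped internally if infs/nans were found
    scaler.update()              # grow or back off the scale factor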
spaces/Bart92/RVC_HF/infer/modules/uvr5/modules.py
DELETED
@@ -1,107 +0,0 @@
import os
import traceback
import logging

logger = logging.getLogger(__name__)

import ffmpeg
import torch

from configs.config import Config
from infer.modules.uvr5.mdxnet import MDXNetDereverb
from infer.modules.uvr5.preprocess import AudioPre, AudioPreDeEcho

config = Config()


def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
    infos = []
    try:
        inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        save_root_vocal = (
            save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )
        save_root_ins = (
            save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )
        if model_name == "onnx_dereverb_By_FoxJoy":
            pre_fun = MDXNetDereverb(15, config.device)
        else:
            func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho
            pre_fun = func(
                agg=int(agg),
                model_path=os.path.join(
                    os.getenv("weight_uvr5_root"), model_name + ".pth"
                ),
                device=config.device,
                is_half=config.is_half,
            )
        if inp_root != "":
            paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
        else:
            paths = [path.name for path in paths]
        for path in paths:
            inp_path = os.path.join(inp_root, path)
            need_reformat = 1
            done = 0
            try:
                info = ffmpeg.probe(inp_path, cmd="ffprobe")
                if (
                    info["streams"][0]["channels"] == 2
                    and info["streams"][0]["sample_rate"] == "44100"
                ):
                    need_reformat = 0
                    pre_fun._path_audio_(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                    done = 1
            except:
                need_reformat = 1
                traceback.print_exc()
            if need_reformat == 1:
                tmp_path = "%s/%s.reformatted.wav" % (
                    os.path.join(os.environ["TEMP"]),
                    os.path.basename(inp_path),
                )
                os.system(
                    "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
                    % (inp_path, tmp_path)
                )
                inp_path = tmp_path
            try:
                if done == 0:
                    pre_fun.path_audio(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                infos.append("%s->Success" % (os.path.basename(inp_path)))
                yield "\n".join(infos)
            except:
                try:
                    if done == 0:
                        pre_fun._path_audio_(
                            inp_path, save_root_ins, save_root_vocal, format0
                        )
                    infos.append("%s->Success" % (os.path.basename(inp_path)))
                    yield "\n".join(infos)
                except:
                    infos.append(
                        "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
                    )
                    yield "\n".join(infos)
    except:
        infos.append(traceback.format_exc())
        yield "\n".join(infos)
    finally:
        try:
            if model_name == "onnx_dereverb_By_FoxJoy":
                del pre_fun.pred.model
                del pre_fun.pred.model_
            else:
                del pre_fun.model
            del pre_fun
        except:
            traceback.print_exc()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            logger.info("Executed torch.cuda.empty_cache()")
        yield "\n".join(infos)
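The ffprobe gate above is the part most worth testing in isolation: only 44.1 kHz stereo inputs skip the re-encode. A standalone sketch of that check (the input path is hypothetical; assumes the ffmpeg-python package and an ffprobe binary on PATH):

import ffmpeg

def needs_reformat(inp_path):
    # Mirrors the condition in uvr(): stereo 44100 Hz files can be fed
    # to the separator directly, everything else gets re-encoded first.
    info = ffmpeg.probe(inp_path, cmd="ffprobe")
    stream = info["streams"][0]
    return not (stream["channels"] == 2 and stream["sample_rate"] == "44100")

print(needs_reformat("song.mp3"))  # hypothetical input file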
spaces/Benson/text-generation/Examples/Blockman Go Apk Mediafre.md
DELETED
@@ -1,130 +0,0 @@
<br />
<h1>Blockman Go APK Mediafıre: How to Download and Play the Ultimate Sandbox Game</h1>
<p>Do you love sandbox games where you can unleash your creativity and have fun with your friends? If so, you should try Blockman Go, a free game that lets you play, create, and share your experiences with millions of players around the world. But what if you want to play Blockman Go on your Android device without using the Google Play Store? Don't worry, there is a way to do that. In this article, we will show you how to download and play the Blockman Go APK from Mediafıre, a popular file-sharing platform. We will also tell you why you should play Blockman Go APK Mediafıre, how to play it, and some tips and tricks to make your gaming experience more enjoyable.</p>
<h2>What Is Blockman Go?</h2>
<p>Blockman Go is a sandbox game developed by Garena, a leading online game developer and publisher. It was released in 2018 and has since gained a huge fan base, with over 100 million downloads on the Google Play Store. Blockman Go is a game that lets you create your own world, explore different game modes, and interact with other players in real time. You can choose from various mini-games such as Bed Wars, Sky Wars, Murder Mystery, Parkour, Survival Games, and more. You can also customize your avatar with hundreds of outfits, accessories, hairstyles, and skins. You can chat with other players using voice or text messages, join clans, make friends, and even get married. Blockman Go is a game that offers endless possibilities for fun and creativity.</p>
<h2>blockman go apk mediafıre</h2><br /><p><b><b>Download Zip</b> ✓✓✓ <a href="https://bltlly.com/2v6IS7">https://bltlly.com/2v6IS7</a></b></p><br /><br />
<h3>Features of Blockman Go</h3>
<p>Some of the features that make Blockman Go stand out from other sandbox games are:</p>
<ul>
<li>It has stunning graphics and smooth gameplay that run well on most Android devices.</li>
<li>It has a user-friendly interface and easy controls that make it suitable for players of all ages.</li>
<li>It has a variety of game modes that cater to different tastes and preferences.</li>
<li>It has a reward system that gives you gold and gems for playing, logging in daily, completing tasks, and more.</li>
<li>It has an online shop that lets you buy items with gold, gems, or real money.</li>
<li>It has regular updates that add new features, bug fixes, and improvements.</li>
</ul>
<h3>How to Download the Blockman Go APK from Mediafıre</h3>
<p>If you want to play Blockman Go on your Android device without using the Google Play Store, you can download the APK file from Mediafıre. Mediafıre is a file-sharing platform that lets you upload and download files for free. Here are the steps to download the Blockman Go APK from Mediafıre:</p>
<ol>
<li>Go to <a href="( 1 )">this link</a> in your browser. This will take you to the Mediafıre page where the Blockman Go APK file is hosted.</li>
<li>Click the green "Download" button. This will start downloading the APK file to your device.</li>
<li>Once the download has finished, locate the APK file in your device's file manager or downloads folder.</li>
<li>Tap the APK file to install it. You may need to enable "Unknown sources" or "Allow from this source" in your device settings to allow installing apps from outside the Google Play Store.</li>
<li>Wait for the installation to finish. You may see a confirmation message that says "App installed".</li>
<li>Tap "Open" to launch the Blockman Go app. You can also find the app icon on your device's home screen or in the app drawer.</li>
</ol>
<p>Congratulations, you have successfully downloaded and installed the Blockman Go APK from Mediafıre. Now you can enjoy playing the ultimate sandbox game on your Android device.</p>
<h2>Why Play Blockman Go APK Mediafıre?</h2>
<h3>Advantages of Playing Blockman Go APK Mediafıre</h3>
<ul>
<li>You can play Blockman Go without using the Google Play Store, which may be blocked or restricted in some regions or on some devices.</li>
<li>You can play Blockman Go without signing in with your Google account, which can protect your privacy and security.</li>
<li>You can play Blockman Go without updating the app every time a new version comes out, which can save your data and storage space.</li>
<li>You can play Blockman Go with older versions of the app, which may be more compatible with your device or preferred features.</li>
</ul>
<h3>Disadvantages of Playing Blockman Go APK Mediafıre</h3>
<ul>
<li>You may not be able to access some features or services that require a Google account, such as cloud saves, achievements, leaderboards, and more.</li>
<li>You may not receive the latest updates, bug fixes, and improvements that are available in the official version.</li>
<li>You may run into bugs or glitches that have already been fixed in the official version.</li>
<li>You risk downloading a fake or modified APK file that contains malware or viruses that can damage your device or steal your information.</li>
</ul>
<p>As you can see, playing Blockman Go APK Mediafıre has its pros and cons. You should weigh them carefully and decide what is best for you. If you decide to play Blockman Go APK Mediafıre, make sure to download it from a trusted source such as Mediafıre and scan it with an antivirus app before installing it.</p>
<h2>How to Play Blockman Go APK Mediafıre</h2>
<p>Now that you have downloaded and installed Blockman Go APK Mediafıre, you are ready to play. But how do you play? Don't worry, we will walk you through the basics of playing Blockman Go APK Mediafıre. Here are some steps to follow:</p>
<h3>How to Create an Account and Sign In</h3>
<ol>
<li>Open the Blockman Go app on your device. You will see a welcome screen with two options: "Guest" and "Sign In".</li>
<li>If you want to play as a guest, tap "Guest". This lets you play without creating an account, but you will not be able to save your progress or access some features.</li>
<li>If you want to create an account, tap "Sign In". This takes you to a sign-in screen with four options: "Facebook", "Google", "Twitter", and "Blockman".</li>
<li>If you want to use your Facebook, Google, or Twitter account to sign in, tap the corresponding icon and follow the instructions. This links your social media account to your Blockman Go account.</li>
<li>If you want to use a Blockman account to sign in, tap "Blockman". This takes you to a registration screen where you need to enter your username, password, email address, and a verification code. Then tap "Register". This creates your Blockman account and signs you in.</li>
</ol>
<p>Congratulations, you have created an account and signed in to Blockman Go. Now you can start playing the game.</p>
<h3>How to Choose a Game Mode and Join a Server</h3>
<p>The next thing you need to do is choose a game mode and join a server. This lets you play with other players in different mini-games. There are many game modes to choose from, such as Bed Wars, Sky Wars, Murder Mystery, Parkour, Survival Games, and more. Each game mode has its own rules, objectives, and challenges. Here are the steps to choose a game mode and join a server:</p>
<ol>
<li>On the main screen of Blockman Go, tap the "Games" icon at the bottom. This takes you to a game-selection screen where you can see the different game modes available.</li>
<li>Swipe left or right to browse the game modes. You can also use the search bar at the top to find a specific game mode by name or keyword.</li>
<li>Swipe up or down to browse the servers. You can also use the filter button at the top to sort servers by region, language, players, and more.</li>
<li>Tap the server you want to join. This takes you to a lobby screen where you can see the server's details, such as its name, description, rules, map, and players.</li>
<li>Tap the "Join" button at the bottom. This starts loading the game and connects you to the server.</li>
</ol>
<p>Congratulations, you have chosen a game mode and joined a server. Now you can play with other players in that game mode.</p>
<h3>How to Create, Build, and Share Your Creations</h3>
<p>The last thing you need to do is create, build, and share your creations. This is the main feature of Blockman Go that lets you express your creativity and imagination. You can use various blocks, items, tools, and props to create whatever you want. You can also share your creations with other players and get feedback and ratings. Here are the steps to create, build, and share your creations:</p>
<ol>
<li>On the main screen of Blockman Go, tap the "Create" icon at the bottom. This takes you to a creation screen where you can see your inventory and options.</li>
<li>Tap the "New" button at the top. This lets you create a new world where you can build your creation.</li>
<li>Enter a name for your world and choose a template from the list. You can also customize your world's settings, such as difficulty, weather, time, and more.</li>
<li>Tap the "Create" button at the bottom. This starts creating your world and loads it for you.</li>
<li>Once your world has loaded, you can start creating and building. You can use the buttons at the bottom to switch between different modes, such as move, rotate, scale, erase, copy, paste, undo, redo, and more.</li>
<li>You can also use the buttons at the top to access your inventory, toolbox, object library, chat box, menu, and more.</li>
<li>To place an item or block in your world, tap it in your inventory and then tap an empty space in your world. You can also drag it or use the buttons at the bottom to adjust its position, rotation, and scale.</li>
<li>To build a structure or a scene, repeat the steps above until you are happy with your creation. You can also use the toolbox button at the top to access useful tools such as fill, replace, hollow, and more.</li>
<li>To share your creation with other players, tap the menu button at the top and select "Share". This uploads your world to the Blockman Go server and gives you a link you can share with others.</li>
<li>To see other players' creations, tap the "Explore" icon at the bottom. This takes you to an explore screen where you can see featured, popular, and recent creations from other players.</li>
<li>To join another player's creation, tap it and select "Join". This starts loading the world and connects you to it.</li>
</ol>
<p>Congratulations, you have created, built, and shared your creation. Now you can enjoy viewing your masterpiece and other players' creations in Blockman Go.</p>
<h2>Tips and Tricks for Playing Blockman Go APK Mediafıre</h2>
<p>Playing Blockman Go APK Mediafıre can be fun and rewarding, but it can also be challenging and frustrating at times. To help you have a better gaming experience, here are some tips and tricks you can use:</p>
<h3>How to Earn Gold and Gems</h3>
<p>Gold and gems are the main currencies in Blockman Go, used to buy items, outfits, skins, and more. You can earn gold and gems by playing games, logging in daily, completing tasks, watching ads, inviting friends, and more. Here are some ways to earn more gold and gems:</p>
<ul>
<li>Play games that have high rewards or bonuses. Some games offer more gold or gems than others, depending on the game's difficulty, length, or popularity.</li>
<li>Complete tasks and achievements. You can get gold, gems, items, or even coupons by completing various tasks and achievements in the game.</li>
<li>Watch ads and videos. You can get gold or gems by watching ads or videos in the game. You can also get free items or coupons by watching sponsored videos.</li>
<li>Invite friends and join clans. You can get gold or gems by inviting your friends to play Blockman Go using your referral code. You can also get gold or gems by joining clans and taking part in clan activities.</li>
</ul>
<h3>How to Customize Your Avatar and Chat with Other Players</h3>
<p>Customizing your avatar and chatting with other players are some of the fun parts of playing Blockman Go. You can make your avatar look unique and express your personality by changing its appearance and outfit. You can also communicate and socialize with other players using voice or text messages. Here are some ways to customize your avatar and chat with other players:</p>
<ul>
<li>Change your avatar's appearance. You can change your avatar's gender, face shape, eye color, hairstyle, hair color, skin tone, and more by tapping the "Avatar" icon at the bottom of the main screen.</li>
<li>Change your avatar's outfit. You can change your avatar's clothes, shoes, hats, glasses, masks, backpacks, wings, tails, pets, and more by tapping the "Closet" icon at the bottom of the main screen. You can buy new outfits using gold, gems, or real money in the online shop.</li>
</ul>
<h3>How to Use Cheats and Hacks</h3>
<p>Cheats and hacks are methods some players use to gain an unfair advantage or bypass certain restrictions in Blockman Go. They can involve modifying the game files, using third-party apps or tools, exploiting glitches or bugs, or using codes or commands. Some examples of cheats and hacks are unlimited gold or gems, god mode, speed hack, fly hack, invisibility, teleportation, and more. Here are some ways cheats and hacks are used in Blockman Go:</p>
<ul>
<li>Modify the game files. You can modify the game files using a file manager app or a root explorer app on your device. You can change certain values or parameters in the game files to alter the game's behavior or appearance. For example, you could change the amount of gold or gems you have by editing the data file. However, this method is risky and may cause your game to crash or fail.</li>
<li>Use third-party apps or tools. You can use third-party apps or tools designed to hack or cheat Blockman Go. These apps or tools may require you to install them on your device or connect your device to a computer. They may also require you to grant them permissions or access to your device's system. For example, you could use a game hacker app or a modified APK file to hack Blockman Go. However, this method is dangerous and may expose your device to malware or viruses.</li>
<li>Exploit glitches or bugs. You can exploit glitches or bugs present in Blockman Go. These glitches or bugs are errors or flaws in the game that make it behave in unintended ways. For example, you could take advantage of a glitch that lets you fly or walk through walls in some game modes. However, this method is unreliable and may be fixed or patched by the developers.</li>
</ul>
<p>As you can see, using cheats and hacks in Blockman Go can be tempting but also risky and unethical. You should be careful and responsible when using them and respect the rules and the rights of other players. You should also keep in mind that using cheats and hacks can get your account banned, suspended, or deleted by the developers.</p>
<h2>Conclusion</h2>
<p>Blockman Go is a sandbox game that lets you play, create, and share your experiences with millions of players around the world. You can download and play the Blockman Go APK from Mediafıre, a file-sharing platform that lets you play Blockman Go without using the Google Play Store. However, you should also consider the advantages and disadvantages of playing Blockman Go APK Mediafıre and decide what is best for you. You should also learn how to play Blockman Go APK Mediafıre and use some tips and tricks to make your gaming experience more enjoyable. We hope this article has helped you understand more about Blockman Go APK Mediafıre and how to download and play it.</p>
<h4>Frequently Asked Questions</h4>
<p>Here are some frequently asked questions about Blockman Go APK Mediafıre:</p>
<ol>
<li>Is Blockman Go APK Mediafıre safe?</li>
<p>Blockman Go APK Mediafıre is safe as long as you download it from a trusted source such as Mediafıre and scan it with an antivirus app before installing it. However, you should watch out for fake or modified APK files that may contain malware or viruses that can damage your device or steal your information.</p>
<li>Is Blockman Go APK Mediafıre free?</li>
<p>Blockman Go APK Mediafıre is free to download and play. However, some features or items may require gold, gems, or real money to unlock or purchase.</p>
<li>Can I play Blockman Go APK Mediafıre with my friends?</li>
<li>Can I play Blockman Go APK Mediafıre offline?</li>
<p>No, you cannot play Blockman Go APK Mediafıre offline. You need an Internet connection to play Blockman Go APK Mediafıre, since it is an online game that requires data transfer and synchronization.</p>
<li>Can I transfer my progress from Blockman Go APK Mediafıre to the official version?</li>
<p>No, you cannot transfer your progress from Blockman Go APK Mediafıre to the official version. You need to create a new account and start from scratch if you want to play the official version from the Google Play Store.</p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Cuerda Hroe Ilimitado Diamantes Mod Apk.md
DELETED
@@ -1,65 +0,0 @@
<h1>Rope Hero Unlimited Diamonds Mod APK: A Guide for Superhero Fans</h1>
<p>If you are a fan of superhero games, you may have heard of Rope Hero, a popular third-person action game with a sci-fi twist. In this game, you play as a stylish hero in a protective suit who can use superpowers and weapons to fight crime and gangsters in a big city. But what if you want to enjoy the game without limitations? That is where the Rope Hero Unlimited Diamonds Mod APK comes in. In this article, we will tell you everything you need to know about this modified version of the game, including its features, benefits, and how to download and install it. Read on to learn more.</p>
<h2>cuerda héroe ilimitado diamantes mod apk</h2><br /><p><b><b>Download File</b> ⚡ <a href="https://bltlly.com/2v6Mky">https://bltlly.com/2v6Mky</a></b></p><br /><br />
<h2>What Is Rope Hero?</h2>
<p>Rope Hero is a game developed by Naxeex Action & RPG Games, with more than 10 million downloads on the Google Play Store. It is a third-person action game that lets you play as a superhero in a sci-fi suit who can use superpowers and weapons to fight crime and gangsters in a big city. The game has amazing graphics, realistic physics, and many fun features.</p>
<h3>A Third-Person Action Game with a Sci-Fi Superhero</h3>
<p>In Rope Hero, you control your hero with simple touch controls. You can move around the city, jump, run, climb, swing, and fly with your super rope. You can also use your arms and legs to punch, kick, and throw enemies. You can use various guns, melee weapons, and super weapons to deal with different situations. You can even drive any car you want in the city.</p>
<h3>Game Features</h3>
<p>Rope Hero has many features that make it an exciting and addictive game. Some of them are:</p>
<h4>Superpowers and Weapons</h4>
<h4>Open World and Missions</h4>
<p>The game has an open world you can explore freely. The city is full of dangerous and exciting adventures. You can find various secrets and mini-games in different places. You can also complete missions that give you rewards and advance your hero's story. You can fight street gangs, car thieves, corrupt cops, military forces, aliens, zombies, robots, monsters, and more.</p>
<h4>Customization and Skins</h4>
<p>You can customize your hero with different outfits and accessories. You can find various skins for your hero in the game shop or by completing missions. Each skin has its own set of boosts that enhance your hero's abilities. You can also create your own unique skin by combining pieces from other outfits.</p>
<h2>What Is the Rope Hero Unlimited Diamonds Mod APK?</h2>
<p>The Rope Hero Unlimited Diamonds Mod APK is a modified version of the game that gives you unlimited resources and access to everything in the game. It is a file you can download and install on your Android device to enjoy the game with more fun and freedom.</p>
<h3>A Modified Version of the Game with Unlimited Resources</h3>
<p>The mod apk is a file that has been altered by third-party developers to change certain aspects of the game. In this case, the mod apk gives you unlimited diamonds and money, which are the main currencies in the game. You can use them to buy whatever you want in the game shop, such as weapons, skins, cars, and more. You can also upgrade your hero's skills and abilities with them, and use them to skip ads and speed up the game.</p>
<h3>Benefits of Using the Mod APK</h3>
<p>There are many benefits to using the mod apk for Rope Hero. Some of them are:</p>
<h4>Unlock Everything in the Game</h4>
<h4>Enjoy Unlimited Diamonds and Money</h4>
<p>With the mod apk, you can enjoy unlimited diamonds and money, the main currencies in the game. You can use them to buy whatever you want in the game shop, such as weapons, skins, cars, and more. You can also upgrade your hero's skills and abilities with them, and use them to skip ads and speed up the game.</p>
<h4>No Ads or Root Required</h4>
<p>With the mod apk, you can play the game without any annoying ads interrupting your gameplay. You can also play the game without rooting your device, which means you do not have to risk damaging your device or voiding its warranty. The mod apk is safe and easy to use.</p>
<h2>How to Download and Install the Rope Hero Unlimited Diamonds Mod APK</h2>
<p>If you want to download and install the Rope Hero Unlimited Diamonds Mod APK on your Android device, follow these simple steps:</p>
<h3>Steps to Download and Install the Mod APK</h3>
<ol>
<li>First, uninstall the original version of Rope Hero from your device if you have it installed.</li>
<li>Second, enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn it on.</li>
<li>Third, download the mod apk file from a reliable source. Many websites offer the mod apk file for free; make sure to download it from a trusted site that does not contain viruses or malware.</li>
<li>Fourth, locate the downloaded file on your device and tap it to start the installation. Follow the on-screen instructions and wait for it to finish.</li>
<li>Fifth, launch the game and enjoy playing with unlimited diamonds and money.</li>
</ol>
<h3>Tips and Tricks for Playing the Game with the Mod APK</h3>
<h4>Use Your Superpowers Wisely</h4>
<p>Your hero has superpowers that let him do amazing things. You can use your super rope to swing from building to building or to grab enemies and objects. You can use your frog jump to leap high into the air, and your super vision to see through walls and objects. However, these superpowers have a cooldown, which means you cannot use them continuously. You need to wait for them to recharge before using them again, so use them wisely and strategically, depending on the situation.</p>
<h4>Explore the City and Find Secrets</h4>
<p>The game has an open world you can explore freely. The city is full of dangerous and exciting adventures. You can find various secrets and mini-games in different places. For example, you can find hidden chests that contain diamonds or money, hidden buttons that trigger traps or events, hidden portals that transport you to other worlds or dimensions, and hidden Easter eggs that reference other games or movies. Exploring the city will not only give you rewards but also make your gameplay more fun and interesting.</p>
<h4>Complete Missions and Challenges</h4>
<p>The game has many missions and challenges that give you rewards and advance your hero's story. You can find them on the map or by talking to NPCs. Some are main missions tied to the game's plot. Some are side missions that are optional but still fun and rewarding. Some are daily missions that reset every day and give you diamonds or money. Some are special missions that are only available for a limited time or during events. Completing missions and challenges will not only give you resources but also improve your hero's reputation and skills.</p>
<h2>Conclusion</h2>
<h3>Frequently Asked Questions</h3>
<p>Here are some frequently asked questions about the Rope Hero Unlimited Diamonds Mod APK:</p>
<ol>
<li><b>Is the Rope Hero Unlimited Diamonds Mod APK free?</b></li>
<p>Yes, the Rope Hero Unlimited Diamonds Mod APK is free to download and play. You do not have to pay anything to enjoy the game with unlimited resources.</p>
<li><b>Is the Rope Hero Unlimited Diamonds Mod APK safe?</b></li>
<p>Yes, the Rope Hero Unlimited Diamonds Mod APK is safe to use. It does not contain viruses or malware that can damage your device or data. However, you should always download it from a reliable source and scan it with an antivirus before installing it.</p>
<li><b>Is the Rope Hero Unlimited Diamonds Mod APK compatible with my device?</b></li>
<p>The Rope Hero Unlimited Diamonds Mod APK is compatible with most Android devices running Android 4.4 or higher. However, some devices may not support the game or the mod apk because of different specifications or settings. You should check your device's compatibility before downloading and installing the mod apk.</p>
<li><b>Can I play the Rope Hero Unlimited Diamonds Mod APK online?</b></li>
<p>No, the Rope Hero Unlimited Diamonds Mod APK is an offline game that does not require an Internet connection to play. You can play it anywhere and anytime without worrying about data usage or connection problems.</p>
<li><b>Can I update the Rope Hero Unlimited Diamonds Mod APK?</b></li>
<p>No, the Rope Hero Unlimited Diamonds Mod APK is not an official version of the game, so it does not receive regular updates from the developers. If you want to update the game, you have to uninstall the mod apk and install the original version from the Google Play Store. However, you may lose your progress and resources if you do so.</p></ol> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py
DELETED
File without changes
spaces/BigSalmon/GPT2Mask/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: GPT2Mask
emoji: 🐢
colorFrom: red
colorTo: yellow
sdk: streamlit
sdk_version: 1.2.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/visualize_data.py
DELETED
@@ -1,98 +0,0 @@
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import numpy as np
import os
from itertools import chain
import cv2
import tqdm
from PIL import Image

from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.data import detection_utils as utils
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer


def setup(args):
    cfg = get_cfg()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg


def parse_args(in_args=None):
    parser = argparse.ArgumentParser(description="Visualize ground-truth data")
    parser.add_argument(
        "--source",
        choices=["annotation", "dataloader"],
        required=True,
        help="visualize the annotations or the data loader (with pre-processing)",
    )
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
    parser.add_argument("--output-dir", default="./", help="path to output directory")
    parser.add_argument("--show", action="store_true", help="show output in a window")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser.parse_args(in_args)


if __name__ == "__main__":
    args = parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup(args)

    dirname = args.output_dir
    os.makedirs(dirname, exist_ok=True)
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])

    def output(vis, fname):
        if args.show:
            print(fname)
            cv2.imshow("window", vis.get_image()[:, :, ::-1])
            cv2.waitKey()
        else:
            filepath = os.path.join(dirname, fname)
            print("Saving to {} ...".format(filepath))
            vis.save(filepath)

    scale = 2.0 if args.show else 1.0
    if args.source == "dataloader":
        train_data_loader = build_detection_train_loader(cfg)
        for batch in train_data_loader:
            for per_image in batch:
                # Pytorch tensor is in (C, H, W) format
                img = per_image["image"].permute(1, 2, 0)
                if cfg.INPUT.FORMAT == "BGR":
                    img = img[:, :, [2, 1, 0]]
                else:
                    img = np.asarray(Image.fromarray(img, mode=cfg.INPUT.FORMAT).convert("RGB"))

                visualizer = Visualizer(img, metadata=metadata, scale=scale)
                target_fields = per_image["instances"].get_fields()
                labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
                vis = visualizer.overlay_instances(
                    labels=labels,
                    boxes=target_fields.get("gt_boxes", None),
                    masks=target_fields.get("gt_masks", None),
                    keypoints=target_fields.get("gt_keypoints", None),
                )
                output(vis, str(per_image["image_id"]) + ".jpg")
    else:
        dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
        if cfg.MODEL.KEYPOINT_ON:
            dicts = filter_images_with_few_keypoints(dicts, 1)
        for dic in tqdm.tqdm(dicts):
            img = utils.read_image(dic["file_name"], "RGB")
            visualizer = Visualizer(img, metadata=metadata, scale=scale)
            vis = visualizer.draw_dataset_dict(dic)
            output(vis, os.path.basename(dic["file_name"]))
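For reference, the annotation branch boils down to three detectron2 calls. A minimal standalone sketch (assumes detectron2 is installed and the builtin coco_2017_train dataset is registered and present on disk; the output filename is arbitrary):

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.utils.visualizer import Visualizer

dic = DatasetCatalog.get("coco_2017_train")[0]  # first annotated image
img = utils.read_image(dic["file_name"], "RGB")
vis = Visualizer(img, metadata=MetadataCatalog.get("coco_2017_train"), scale=1.0)
vis.draw_dataset_dict(dic).save("vis_sample.jpg")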
spaces/CVPR/LIVE/pybind11/tests/env.py
DELETED
@@ -1,14 +0,0 @@
# -*- coding: utf-8 -*-
import platform
import sys

LINUX = sys.platform.startswith("linux")
MACOS = sys.platform.startswith("darwin")
WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin")

CPYTHON = platform.python_implementation() == "CPython"
PYPY = platform.python_implementation() == "PyPy"

PY2 = sys.version_info.major == 2

PY = sys.version_info
spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/AppendOptionIfAvailable.cmake
DELETED
@@ -1,13 +0,0 @@
include_guard(GLOBAL)
include(CheckCXXCompilerFlag)

macro (APPEND_OPTION_IF_AVAILABLE _FLAG _LIST)

string(MAKE_C_IDENTIFIER "CXX_FLAG_${_FLAG}" _VAR)
check_cxx_compiler_flag(${_FLAG} ${_VAR})

if (${${_VAR}})
  list(APPEND ${_LIST} ${_FLAG})
endif ()

endmacro ()
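For context, callers would invoke the macro as something like `append_option_if_available("-Wextra" MY_CXX_FLAGS)` (flag and list name hypothetical): it probes the compiler once, caches the result under the sanitized `CXX_FLAG_*` variable, and appends the flag only when the probe succeeds.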
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/merge.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits merge
#include <thrust/system/detail/sequential/merge.h>
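In other words, the cpp backend supplies no specialized merge of its own; including the sequential header makes thrust's generic single-threaded implementation the one dispatched for this system.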
spaces/CVPR/WALT/mmdet/core/bbox/coder/tblr_bbox_coder.py
DELETED
@@ -1,198 +0,0 @@
-import mmcv
-import torch
-
-from ..builder import BBOX_CODERS
-from .base_bbox_coder import BaseBBoxCoder
-
-
-@BBOX_CODERS.register_module()
-class TBLRBBoxCoder(BaseBBoxCoder):
-    """TBLR BBox coder.
-
-    Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,
-    this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
-    right) and decodes them back to the original form.
-
-    Args:
-        normalizer (list | float): Normalization factor to be
-            divided with when coding the coordinates. If it is a list, it
-            should have length 4, indicating the normalization factor in
-            tblr dims. Otherwise it is a unified float factor for all dims.
-            Default: 4.0
-        clip_border (bool, optional): Whether to clip objects outside the
-            border of the image. Defaults to True.
-    """
-
-    def __init__(self, normalizer=4.0, clip_border=True):
-        super(BaseBBoxCoder, self).__init__()
-        self.normalizer = normalizer
-        self.clip_border = clip_border
-
-    def encode(self, bboxes, gt_bboxes):
-        """Get box regression transformation deltas that can be used to
-        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
-        left, right) order.
-
-        Args:
-            bboxes (torch.Tensor): Source boxes, e.g., object proposals.
-            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
-                ground-truth boxes.
-
-        Returns:
-            torch.Tensor: Box transformation deltas
-        """
-        assert bboxes.size(0) == gt_bboxes.size(0)
-        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
-        encoded_bboxes = bboxes2tblr(
-            bboxes, gt_bboxes, normalizer=self.normalizer)
-        return encoded_bboxes
-
-    def decode(self, bboxes, pred_bboxes, max_shape=None):
-        """Apply transformation `pred_bboxes` to `bboxes`.
-
-        Args:
-            bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
-            pred_bboxes (torch.Tensor): Encoded boxes with shape
-                (B, N, 4) or (N, 4)
-            max_shape (Sequence[int] or torch.Tensor or Sequence[
-                Sequence[int]], optional): Maximum bounds for boxes, specifies
-                (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
-                max_shape should be a Sequence[Sequence[int]]
-                whose length is also B.
-
-        Returns:
-            torch.Tensor: Decoded boxes.
-        """
-        decoded_bboxes = tblr2bboxes(
-            bboxes,
-            pred_bboxes,
-            normalizer=self.normalizer,
-            max_shape=max_shape,
-            clip_border=self.clip_border)
-
-        return decoded_bboxes
-
-
-@mmcv.jit(coderize=True)
-def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
-    """Encode ground truth boxes to tblr coordinates.
-
-    It first converts the gt coordinates to tblr format,
-    (top, bottom, left, right), relative to prior box centers.
-    The tblr coordinates may be normalized by the side lengths of the prior
-    bboxes if `normalize_by_wh` is True, and they are then divided by the
-    `normalizer` factor.
-
-    Args:
-        priors (Tensor): Prior boxes in point form
-            Shape: (num_proposals, 4).
-        gts (Tensor): Coords of ground truth for each prior in point form
-            Shape: (num_proposals, 4).
-        normalizer (Sequence[float] | float): Normalization parameter of
-            encoded boxes. If it is a list, it has to have length = 4.
-            Default: 4.0
-        normalize_by_wh (bool): Whether to normalize tblr coordinates by the
-            side lengths (wh) of prior bboxes.
-
-    Returns:
-        encoded boxes (Tensor), Shape: (num_proposals, 4)
-    """
-
-    # distance between the matched gt box sides and the prior's center
-    if not isinstance(normalizer, float):
-        normalizer = torch.tensor(normalizer, device=priors.device)
-        assert len(normalizer) == 4, 'Normalizer must have length = 4'
-    assert priors.size(0) == gts.size(0)
-    prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
-    xmin, ymin, xmax, ymax = gts.split(1, dim=1)
-    top = prior_centers[:, 1].unsqueeze(1) - ymin
-    bottom = ymax - prior_centers[:, 1].unsqueeze(1)
-    left = prior_centers[:, 0].unsqueeze(1) - xmin
-    right = xmax - prior_centers[:, 0].unsqueeze(1)
-    loc = torch.cat((top, bottom, left, right), dim=1)
-    if normalize_by_wh:
-        # Normalize tblr by anchor width and height
-        wh = priors[:, 2:4] - priors[:, 0:2]
-        w, h = torch.split(wh, 1, dim=1)
-        loc[:, :2] /= h  # tb is normalized by h
-        loc[:, 2:] /= w  # lr is normalized by w
-    # Normalize tblr by the given normalization factor
-    return loc / normalizer
-
-
-@mmcv.jit(coderize=True)
-def tblr2bboxes(priors,
-                tblr,
-                normalizer=4.0,
-                normalize_by_wh=True,
-                max_shape=None,
-                clip_border=True):
-    """Decode tblr outputs to prediction boxes.
-
-    The process includes 3 steps: 1) De-normalize tblr coordinates by
-    multiplying them with `normalizer`; 2) De-normalize tblr coordinates by
-    the prior bbox width and height if `normalize_by_wh` is `True`;
-    3) Convert the tblr (top, bottom, left, right) pair relative to the
-    center of priors back to (xmin, ymin, xmax, ymax) coordinates.
-
-    Args:
-        priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
-            Shape: (N, 4) or (B, N, 4).
-        tblr (Tensor): Coords of network output in tblr form
-            Shape: (N, 4) or (B, N, 4).
-        normalizer (Sequence[float] | float): Normalization parameter of
-            encoded boxes. As a list, it represents the normalization factors
-            at tblr dims. As a float, it is the unified normalization factor
-            for all dims. Default: 4.0
-        normalize_by_wh (bool): Whether the tblr coordinates have been
-            normalized by the side lengths (wh) of prior bboxes.
-        max_shape (Sequence[int] or torch.Tensor or Sequence[
-            Sequence[int]], optional): Maximum bounds for boxes, specifies
-            (H, W, C) or (H, W). If priors shape is (B, N, 4), then
-            max_shape should be a Sequence[Sequence[int]]
-            whose length is also B.
-        clip_border (bool, optional): Whether to clip objects outside the
-            border of the image. Defaults to True.
-
-    Returns:
-        decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
-    """
-    if not isinstance(normalizer, float):
-        normalizer = torch.tensor(normalizer, device=priors.device)
-        assert len(normalizer) == 4, 'Normalizer must have length = 4'
-    assert priors.size(0) == tblr.size(0)
-    if priors.ndim == 3:
-        assert priors.size(1) == tblr.size(1)
-
-    loc_decode = tblr * normalizer
-    prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
-    if normalize_by_wh:
-        wh = priors[..., 2:4] - priors[..., 0:2]
-        w, h = torch.split(wh, 1, dim=-1)
-        # In-place operations on slices would fail when exporting to ONNX
-        th = h * loc_decode[..., :2]  # tb
-        tw = w * loc_decode[..., 2:]  # lr
-        loc_decode = torch.cat([th, tw], dim=-1)
-    # loc_decode.split(1, dim=-1) cannot be exported to ONNX
-    top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
-    xmin = prior_centers[..., 0].unsqueeze(-1) - left
-    xmax = prior_centers[..., 0].unsqueeze(-1) + right
-    ymin = prior_centers[..., 1].unsqueeze(-1) - top
-    ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
-
-    bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
-
-    if clip_border and max_shape is not None:
-        if not isinstance(max_shape, torch.Tensor):
-            max_shape = priors.new_tensor(max_shape)
-        max_shape = max_shape[..., :2].type_as(priors)
-        if max_shape.ndim == 2:
-            assert bboxes.ndim == 3
-            assert max_shape.size(0) == bboxes.size(0)
-
-        min_xy = priors.new_tensor(0)
-        max_xy = torch.cat([max_shape, max_shape],
-                           dim=-1).flip(-1).unsqueeze(-2)
-        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
-        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
-
-    return bboxes
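Because `tblr2bboxes` exactly inverts `bboxes2tblr` when given the same priors and normalizer, a quick round-trip check is a useful sanity test. A minimal sketch, assuming torch is installed and the two helpers are importable from this module (the import path mirrors the file's location in the repo):

```python
import torch

from mmdet.core.bbox.coder.tblr_bbox_coder import bboxes2tblr, tblr2bboxes

priors = torch.tensor([[10., 10., 50., 90.]])  # prior box (x1, y1, x2, y2)
gts = torch.tensor([[12., 8., 48., 100.]])     # matched ground-truth box

deltas = bboxes2tblr(priors, gts, normalizer=4.0)       # -> (t, b, l, r) deltas
restored = tblr2bboxes(priors, deltas, normalizer=4.0)  # -> (x1, y1, x2, y2)

assert torch.allclose(restored, gts)  # encode followed by decode is identity
```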