Commit ce64552
Parent(s): f3e8bf4
Update parquet files (step 65 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/? Equalizer Bass Booster Pro V1.2.6 Apk.md +0 -38
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IDM Cracking City A Simple and Effective Solution to Get a Free License Code for Your IDM Software.md +0 -29
- spaces/1gistliPinn/ChatGPT4/Examples/2pm Tokyo Dome Concert Full 11 Watch the Legendary Performance of the K-pop Icons.md +0 -5
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe Flash Builder 4.6 Premium Crack [TOP] Download.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Buddha.dll Hitman Sniper Challenge.rar VERIFIED.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Crack !EXCLUSIVE!.Nitro.Pro.8.0.4.6.x86x64.rar.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Evanescence My Immortal Mp3 Torrent Download [BETTER].md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/4 Images 1 Mot The Ultimate Word Game for French Speakers.md +0 -104
- spaces/1phancelerku/anime-remove-background/Caa Palavras Brasileiro Um jogo de palavras divertido e educativo.md +0 -105
- spaces/1phancelerku/anime-remove-background/Clash Royale Elixir Infinito APK Cmo conseguir recursos ilimitados en el juego de estrategia ms popular.md +0 -126
- spaces/1phancelerku/anime-remove-background/Dmod APK Download Everything You Need to Know About the New Action Game.md +0 -152
- spaces/1phancelerku/anime-remove-background/Download SuperStar JYPNATION and Collect Over 700 Cards of Your Favorite Artists.md +0 -210
- spaces/A666sxr/Genshin_TTS/commons.py +0 -161
- spaces/AIConsultant/MusicGen/audiocraft/models/musicgen.py +0 -409
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/__init__.py +0 -0
- spaces/AILab-CVC/SEED-LLaMA/scripts/seed_tokenizer_inference.py +0 -33
- spaces/AIxPha/Real-CUGAN/README.md +0 -14
- spaces/Aaaaaaaabdualh/topic2poem/README.md +0 -14
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/sde_team.py +0 -137
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/dropdown-plugin.js +0 -18
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/CircleMaskImage.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/Factory.js +0 -13
- spaces/Amrrs/DragGan-Inversion/stylegan_human/alignment.py +0 -233
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/__init__.py +0 -3
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/commands/__init__.py +0 -27
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_ema.py +0 -159
- spaces/AndyCer/TheBloke-stable-vicuna-13B-HF/app.py +0 -3
- spaces/Anthony7906/MengHuiMXD_GPT/readme/README_en.md +0 -127
- spaces/AriaMei/TTSdemo/losses.py +0 -61
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/backbone/utils.py +0 -186
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Detection/fcos_R_50_FPN_1x.py +0 -11
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/build.py +0 -542
- spaces/Benson/text-generation/Dockerfile +0 -28
- spaces/Benson/text-generation/Examples/Baloncesto Estrellas Multijugador Mod Apk Dinero Ilimitado Y Oro.md +0 -50
- spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/method.py +0 -78
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/fields.py +0 -274
- spaces/CVPR/LIVE/thrust/thrust/limits.h +0 -19
- spaces/CVPR/WALT/mmdet/models/losses/utils.py +0 -100
- spaces/CVPR/lama-example/fetch_data/places_standard_evaluation_prepare_data.sh +0 -52
- spaces/CVPR/lama-example/saicinpainting/training/losses/perceptual.py +0 -113
- spaces/CVPR/regionclip-demo/detectron2/solver/__init__.py +0 -5
- spaces/ChrisPreston/diff-svc_minato_aqua/run.py +0 -17
- spaces/CofAI/chat/client/css/theme-toggler.css +0 -33
- spaces/CristianGonzalez281098/Cheto/README.md +0 -13
- spaces/DHEIVER/analise_imagem_mama/README.md +0 -12
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/pipelines.py +0 -225
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_state.py +0 -367
- spaces/DaleChen/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md +0 -40
- spaces/Datasculptor/MusicGen/audiocraft/data/audio_utils.py +0 -174
- spaces/Datasculptor/sd-prism/share_btn.py +0 -100
spaces/1acneusushi/gradio-2dmoleculeeditor/data/? Equalizer Bass Booster Pro V1.2.6 Apk.md
DELETED
@@ -1,38 +0,0 @@
-<br />
-<h1>How to Enhance Your Music Experience with Equalizer Bass Booster Pro v1.2.6 Apk</h1>
-<p>If you are looking for a way to improve the sound quality of your music on your Android device, you might want to try Equalizer Bass Booster Pro v1.2.6 Apk. This is a powerful app that lets you adjust the sound level and frequency of your music with a five band equalizer, a bass boost effect, and a virtualizer effect. You can also choose from 22 equalizer presets or customize your own effects.</p>
-<h2>– Equalizer Bass Booster Pro v1.2.6 Apk</h2><br /><p><b><b>DOWNLOAD</b> ––– <a href="https://byltly.com/2uKwn0">https://byltly.com/2uKwn0</a></b></p><br /><br />
-<p>Equalizer Bass Booster Pro v1.2.6 Apk works with all music and video players, as well as streaming music services. You can easily control the equalizer effect on and off with a notification shortcut or a desktop widget. You don't need to root your device to use this app.</p>
-<p>With Equalizer Bass Booster Pro v1.2.6 Apk, you can enjoy a more immersive and realistic music experience with 3D surround sound and enhanced bass. Whether you are listening to music with headphones or speakers, you will notice the difference in sound quality and clarity.</p>
-<p>To download Equalizer Bass Booster Pro v1.2.6 Apk, you can visit the link below and follow the instructions to install it on your device. You will need Android 4.4 or higher to run this app.</p>
-<p><a href="https://apkpure.com/equalizer-bass-booster-pro/musicplayer.bassbooster.equalizer">Download Equalizer Bass Booster Pro v1.2.6 Apk</a></p>
-
-<p>How to Use Equalizer Bass Booster Pro v1.2.6 Apk</p>
-<p>Once you have installed Equalizer Bass Booster Pro v1.2.6 Apk on your device, you can start using it to enhance your music experience. Here are some steps to follow:</p>
-<p></p>
-<ol>
-<li>Open the app and grant the necessary permissions.</li>
-<li>Select the music or video player that you want to use with the app.</li>
-<li>Play your music or video and adjust the volume level with the slider on the app.</li>
-<li>Tap on the equalizer icon to open the five band equalizer. You can drag the sliders to change the sound frequency or tap on the presets to choose from different sound effects.</li>
-<li>Tap on the bass boost icon to activate the bass boost effect. You can adjust the intensity of the bass with the slider.</li>
-<li>Tap on the virtualizer icon to activate the virtualizer effect. This will create a 3D surround sound effect for your music or video.</li>
-<li>You can also access the app settings by tapping on the menu icon. Here you can customize the notification shortcut, the desktop widget, and other options.</li>
-</ol>
-<p>Enjoy your enhanced music experience with Equalizer Bass Booster Pro v1.2.6 Apk!</p>
-
-<p>Why Choose Equalizer Bass Booster Pro v1.2.6 Apk</p>
-<p>There are many reasons why you should choose Equalizer Bass Booster Pro v1.2.6 Apk over other similar apps. Here are some of them:</p>
-<ul>
-<li>It is easy to use and compatible with all music and video players.</li>
-<li>It has a powerful five band equalizer that lets you adjust the sound frequency to your preference.</li>
-<li>It has a bass boost effect that enhances the low frequencies and makes your music more punchy and dynamic.</li>
-<li>It has a virtualizer effect that creates a 3D surround sound effect and makes your music more immersive and realistic.</li>
-<li>It has 22 equalizer presets that you can choose from or customize your own effects.</li>
-<li>It has a volume booster that increases the sound level and clarity of your music or video.</li>
-<li>It has a notification shortcut and a desktop widget that let you access the app quickly and easily.</li>
-<li>It does not require root access and does not affect your device performance or battery life.</li>
-</ul>
-<p>With Equalizer Bass Booster Pro v1.2.6 Apk, you can enjoy a better music experience on your Android device. Download it now and see the difference for yourself!</p> 81aa517590<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IDM Cracking City A Simple and Effective Solution to Get a Free License Code for Your IDM Software.md
DELETED
@@ -1,29 +0,0 @@
-
-<h1>How to Download IDM Cracking City for Free and Enjoy Fast Downloads</h1>
-<p>If you are looking for a way to download IDM Cracking City for free, you are in the right place. IDM Cracking City is a website that provides you with the latest IDM crack or patch that can activate your Internet Download Manager (IDM) software and let you enjoy fast and reliable downloads. In this article, we will show you how to download IDM Cracking City for free and how to use it to activate your IDM software.</p>
-<h2>What is IDM and Why You Need It</h2>
-<p>IDM is a popular download manager that can increase your download speed up to five times, resume and schedule downloads, and handle various types of files and protocols. IDM can also integrate with most browsers and support proxy servers, firewalls, redirects, cookies, authorization, audio, and video content processing. With IDM, you can download anything from the internet with ease and efficiency.</p>
-<h2>download idm cracking city</h2><br /><p><b><b>Download</b> ⏩ <a href="https://byltly.com/2uKvpW">https://byltly.com/2uKvpW</a></b></p><br /><br />
-<p>However, IDM is not a free software and you need a license code to activate it. If you do not have a license code, you will get a fake serial number or key error message and your IDM will stop working. That is why you need IDM Cracking City to get a free license code for your IDM software.</p>
-<h2>What is IDM Cracking City and How It Works</h2>
-<p>IDM Cracking City is a website that provides you with the best IDM crack or patch that can bypass the original registration of the software and generate a valid license code for your IDM software. The IDM crack or patch is a small program that modifies the original files of the software and makes it think that it is registered with a genuine license code. This way, you can use all the features and modules of IDM without any limitations or errors.</p>
-<p>To use IDM Cracking City, you need to follow these steps:</p>
-<ol>
-<li>Download the latest version of IDM from the official website or from <a href="https://www.crackingcity.com/idm-crack/">here</a>.</li>
-<li>Install IDM on your computer and close it if it is running.</li>
-<li>Go to <a href="https://www.crackingcity.com/idm-crack/">IDM Cracking City</a> and download the latest IDM crack or patch from there.</li>
-<li>Extract the downloaded file and run the IDM crack or patch as administrator.</li>
-<li>Select "1" to activate or "2" to reset your IDM software.</li>
-<li>Wait for the process to complete and enjoy your activated IDM software.</li>
-</ol>
-<h2>Tips and Warnings</h2>
-<p>Here are some tips and warnings that you should keep in mind when using IDM Cracking City:</p>
-<ul>
-<li>Before downloading the IDM crack or patch, make sure to disable your antivirus or firewall as they may detect it as a virus or trojan. This is because the IDM crack or patch modifies the original files of the software, which may trigger some security alerts. However, the IDM crack or patch is safe to use and does not harm your computer.</li>
-<li>After activating your IDM software, do not update it as it may revert back to the trial version. If you want to update your IDM software, you need to download and apply the latest IDM crack or patch again.</li>
-<li>If you have any problems with using IDM Cracking City, you can contact them through their website or leave a comment on their posts. They will try to help you as soon as possible.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download IDM Cracking City for free and how to use it to activate your IDM software. With IDM Cracking City, you can enjoy fast and reliable downloads without paying anything. However, we recommend that you support the developers of IDM by purchasing a genuine license code if you can afford it. This way, you can get regular updates and support from them.</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/2pm Tokyo Dome Concert Full 11 Watch the Legendary Performance of the K-pop Icons.md
DELETED
@@ -1,5 +0,0 @@
-
-<p>No one is able to deny the popularity of TVXQ as they are considered one of the longest standing K-Pop idols who have been constantly filling the dome with their fans. 2012 marked the year where they first held their concert in the Tokyo Dome.</p>
-<h2>2pm Tokyo Dome Concert Full 11</h2><br /><p><b><b>Download File</b> <a href="https://imgfil.com/2uy05J">https://imgfil.com/2uy05J</a></b></p><br /><br /> aaccfb2cb3<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Flash Builder 4.6 Premium Crack [TOP] Download.md
DELETED
@@ -1,8 +0,0 @@
-<br />
-<p> <strong>Download Full Ultimate 2019 Latest Version 'Adobe Flash Builder Premium Version' Premium Crack/ License Key</strong> This tool is specially designed to help developers quickly and easily create fully-working mobile games and applications for Android, iOS and BlackBerry devices. This solution provides developers with full support for Adobe Flash Builder 4.7, including the creation of ActionScript code and increased automation and customization of mobile games and applications, mobile security, debugging and application testing capabilities, and access to the Flash Builder Marketplace.</p>
-<p> <strong>Adobe Flash Builder 4.6.1 Premium Keygen</strong>Codes includes FFT and FMOD. The application includes a powerful set of tools that can be used to work with both your game and mobile applications.</p>
-<h2>adobe flash builder 4.6 premium crack download</h2><br /><p><b><b>Download File</b> ☆☆☆☆☆ <a href="https://imgfil.com/2uy1Hv">https://imgfil.com/2uy1Hv</a></b></p><br /><br />
-<p> <strong>Adobe Flash Builder 4.6 Premium 3.0.1.667 Serial Key</strong>Adobe Flex Builder environment together with a robust code editor. The package places the Flex Builder environment within Eclipse and provides unparalleled set of tools for creating advanced mobile applications. You can use the IDE to edit your code, debug, profile, manage the build process, and run your application on iOS or Android. This environment includes a mobile emulator that allows users to simulate the operation of the application on the target device. In addition, the application supports a local connection to a test mobile device.</p>
-<p> <strong>Adobe Flash Builder 4.6.1 License Key Latest</strong>Adobe Flash Builder 4.6.1 license key is a powerful environment that is used to develop highly compatible mobile applications and games. It also provides an intuitive interface in the form of a dashboard. When you develop this application, all the features and functions can be used from a single place. As a result, the work can be done more easily and faster.</p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Buddha.dll Hitman Sniper Challenge.rar VERIFIED.md
DELETED
@@ -1,8 +0,0 @@
-<h2>buddha.dll Hitman Sniper Challenge.rar</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://imgfil.com/2uxXUB">https://imgfil.com/2uxXUB</a></b></p><br /><br />
-<br />
-, we grant
-
-3 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Crack !EXCLUSIVE!.Nitro.Pro.8.0.4.6.x86x64.rar.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Crack.Nitro.Pro.8.0.4.6.x86x64.rar</h2><br /><p><b><b>DOWNLOAD</b> ››››› <a href="https://imgfil.com/2uy0h4">https://imgfil.com/2uy0h4</a></b></p><br /><br />
-
-Crack.Nitro.Pro.8.0.4.6.x86x64.rar · Bhabhi Pedia movie in hindi torrent download · mathrubhumi malayalam calendar 1994 with stars 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Evanescence My Immortal Mp3 Torrent Download [BETTER].md
DELETED
@@ -1,6 +0,0 @@
-<h2>evanescence my immortal mp3 torrent download</h2><br /><p><b><b>Download</b> >>>>> <a href="https://imgfil.com/2uy0Ei">https://imgfil.com/2uy0Ei</a></b></p><br /><br />
-<br />
-evanescence my immortal free mp3 download - موسيقى mp3 mp4 download songs and music. Numbered Musical Notation Preview 1. Numbered Musical ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/4 Images 1 Mot The Ultimate Word Game for French Speakers.md
DELETED
@@ -1,104 +0,0 @@
-<br />
-<h1>4 Images 1 Mot: A Fun and Challenging Word Game</h1>
-<p>Do you love word games? Do you enjoy solving puzzles and testing your brain? If you answered yes, then you should try <strong>4 Images 1 Mot</strong>, a game that will keep you entertained and challenged for hours. In this game, you have to guess the word that connects four images that have something in common. Sounds easy, right? Well, not always. Some puzzles are easy, but some are very tricky and require a lot of thinking. Are you ready to take on the challenge?</p>
-<h2>What is 4 Images 1 Mot?</h2>
-<p>4 Images 1 Mot is a game developed by Lotum GmbH, a German company that specializes in creating mobile games. The game was first released in 2013 and has since become a worldwide phenomenon, with more than 250 million players in 9 languages. The game is also known as <em>4 Pics 1 Word</em> in English, <em>4 Fotos 1 Palabra</em> in Spanish, <em>4 Immagini 1 Parola</em> in Italian, and so on.</p>
-<h2>4 images 1 mot download</h2><br /><p><b><b>DOWNLOAD</b> ⇒⇒⇒ <a href="https://urlin.us/2uSS93">https://urlin.us/2uSS93</a></b></p><br /><br />
-<h3>How to play 4 Images 1 Mot</h3>
-<p>The game is very simple to play. You will see four images on the screen, and below them, a set of letters. Your task is to use those letters to form the word that links the four images. For example, if you see a picture of a dog, a bone, a tooth, and a dentist, the word would be <strong>DENT</strong>. If you get stuck, you can use hints to reveal some letters or remove some letters that are not part of the word. You can also skip the puzzle and move on to the next one.</p>
-<h3>Why you should download 4 Images 1 Mot</h3>
-<p>If you are looking for a fun and challenging word game, then you should definitely download 4 Images 1 Mot. Here are some reasons why:</p>
-<ul>
-<li>The game is free to download and play. You can enjoy it without spending any money.</li>
-<li>The game is suitable for all ages and levels. You can play it with your family, friends, or by yourself.</li>
-<li>The game is offline-friendly. You can play it without an internet connection.</li>
-<li>The game is educational. You can learn new words and improve your vocabulary.</li>
-<li>The game is addictive. You will never get bored with the endless puzzles and levels.</li>
-</ul>
-<h2>Features of 4 Images 1 Mot</h2>
-<p>4 Images 1 Mot has many features that make it one of the best word games on the market. Here are some of them:</p>
-<h3>French puzzles made to measure just for you</h3>
-<p>If you are playing the French version of the game, you will be happy to know that the puzzles are specially designed for you. The developers have created puzzles that are relevant to your culture, language, and interests. You will see images of famous landmarks, celebrities, food, animals, and more.</p>
-<h3>Endless fun with new puzzles</h3>
-<p>The game has thousands of puzzles for you to solve, and new ones are added regularly. You will never run out of words to guess and images to discover. The game also has different modes and categories for you to choose from, such as daily puzzles, seasonal puzzles, themed puzzles, and more.</p>
-<h3>Simple <h3>Simple and addictive gameplay</h3>
-<p>The game is very easy to play, but hard to master. You just need to look at the four images and find the word that connects them. The game will test your logic, creativity, and intuition. You will also have fun trying to guess the words and seeing the images. The game is so addictive that you will want to play it every day.</p>
-<h3>One of the most popular brain games in the world</h3>
-<p>4 Images 1 Mot is not only a game, but also a brain exercise. The game will challenge your mental skills and improve your cognitive abilities. You will have to think fast, use your memory, and expand your vocabulary. The game will also stimulate your curiosity and imagination. You will learn new things and discover new perspectives. 4 Images 1 Mot is one of the most popular brain games in the world, with millions of fans and positive reviews.</p>
-<h2>How to download 4 Images 1 Mot</h2>
-<p>If you are interested in playing 4 Images 1 Mot, you can download it easily from different sources. Here are some of them:</p>
-<p>4 images 1 mot jeu gratuit<br />
-4 images 1 mot solution<br />
-4 images 1 mot apk<br />
-4 images 1 mot android<br />
-4 images 1 mot ios<br />
-4 images 1 mot en ligne<br />
-4 images 1 mot pc<br />
-4 images 1 mot niveau<br />
-4 images 1 mot français<br />
-4 images 1 mot astuces<br />
-4 images 1 mot réponses<br />
-4 images 1 mot devinettes<br />
-4 images 1 mot mots cachés<br />
-4 images 1 mot énigmes<br />
-4 images 1 mot lettres<br />
-4 images 1 mot fun<br />
-4 images 1 mot quiz<br />
-4 images 1 mot casse-tête<br />
-4 images 1 mot facile<br />
-4 images 1 mot difficile<br />
-4 images 1 mot avis<br />
-4 images 1 mot commentaires<br />
-4 images 1 mot conseils<br />
-4 images 1 mot stratégies<br />
-4 images 1 mot trucs<br />
-Télécharger gratuitement le jeu de mots "4 Images et un Mot"<br />
-Télécharger la dernière version de "4 Images et un Mot" pour Android<br />
-Télécharger l'application "4 Images et un Mot" sur l'App Store<br />
-Jouer à "4 Images et un Mot" sur le navigateur web<br />
-Installer "4 Images et un Mot" sur le PC avec un émulateur Android<br />
-Trouver les solutions de tous les niveaux de "4 Images et un Mot"<br />
-Chercher les mots cachés dans les images de "4 Images et un Mot"<br />
-Résoudre les énigmes de "4 Images et un Mot" avec des indices<br />
-Former des mots avec les lettres proposées dans "4 Images et un Mot"<br />
-S'amuser avec le jeu de devinettes "4 Images et un Mot"<br />
-Tester sa culture générale avec le quiz "4 Images et un Mot"<br />
-Stimuler son cerveau avec les casse-tête de "4 Images et un Mot"<br />
-Choisir le niveau de difficulté de "4 Images et un Mot"<br />
-Lire les avis des utilisateurs de "4 Images et un Mot"<br />
-Donner son avis sur le jeu "4 Images et un Mot"<br />
-Suivre les conseils des experts pour réussir à "4 Images et un Mot"<br />
-Appliquer les stratégies gagnantes pour "4 Images et un Mot"<br />
-Utiliser les trucs et astuces pour "4 Images et un Mot"</p>
-<h3>Download from Google Play Store</h3>
-<p>The easiest way to download 4 Images 1 Mot is from the Google Play Store. You just need to open the app on your Android device and search for <em>4 Images 1 Mot</em>. You will see the game icon with a blue background and four white squares. Tap on it and then tap on <em>Install</em>. The game will be downloaded and installed on your device in a few minutes. You can then open it and start playing.</p>
-<h3>Download from APKCombo</h3>
-<p>If you want to download 4 Images 1 Mot from a third-party source, you can use APKCombo. This is a website that offers free APK files for Android apps and games. You can visit the website at <a href="">https://apkcombo.com/</a> and search for <em>4 Images 1 Mot</em>. You will see the game icon with a blue background and four white squares. Tap on it and then tap on <em>Download APK</em>. You will be asked to choose a version and a server. Choose the latest version and a fast server. The APK file will be downloaded to your device. You can then open it and install it manually.</p>
-<h3>Download from Poki.com</h3>
-<p>If you want to play 4 Images 1 Mot on your computer, you can use Poki.com. This is a website that offers free online games for different platforms. You can visit the website at <a href="">https://poki.com/</a> and search for <em>4 Pics 1 Word</em>. You will see the game icon with a blue background and four white squares. Click on it and then click on <em>Play</em>. The game will load on your browser and you can start playing.</p>
-<h2>Tips and tricks for 4 Images 1 Mot</h2>
-<p>If you want to improve your performance and enjoy the game more, here are some tips and tricks for you:</p>
-<h3>Use hints wisely</h3>
-<p>The game offers you two types of hints: reveal a letter or remove letters. You can use them when you are stuck or unsure of the word. However, you should use them wisely, as they cost coins that you earn by solving puzzles or watching ads. You should save your coins for harder puzzles or when you really need them.</p>
-<h3>Ask your friends for help</h3>
-<p>The game also allows you to ask your friends for help when you are stuck or unsure of the word. You can do this by tapping on the share button at the bottom of the screen. You can then choose to send the puzzle to your friends via Facebook, WhatsApp, Messenger, or other apps. Your friends can then reply with their guesses or hints.</p>
-<h3>Learn new words and improve your vocabulary</h3>
-<p>The game is not only fun, but also educational. You can learn new words and improve your vocabulary by playing it regularly. You can also use a dictionary or an online translator to look up the meaning of unfamiliar words or check their spelling. You can also try to guess the word before looking at the letters or using hints.</p>
-<h2>Conclusion</h2>
-<p>4 Images 1 Mot is a fun and challenging word game that will keep you entertained and challenged for hours. You have to guess the word that connects four images that have something in common. The game has thousands of puzzles for you to solve, with different modes and categories. The game is also simple, addictive, educational, and popular among millions of players around the world. You can download the game from different sources, such as Google Play Store, APKCombo, or Poki.com. You can also use hints, ask your friends, or learn new words to help you with the game. 4 Images 1 Mot is a game that you should not miss if you love word games. Download it now and have fun!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about 4 Images 1 Mot:</p>
-<ol>
-<li><strong>What is the difference between 4 Images 1 Mot and 4 Pics 1 Word?</strong></li>
-<p>4 Images 1 Mot and 4 Pics 1 Word are the same game, but with different names depending on the language. 4 Images 1 Mot is the French version, while 4 Pics 1 Word is the English version. The game has other versions in other languages, such as Spanish, Italian, German, and more.</p>
-<li><strong>How many levels are there in 4 Images 1 Mot?</strong></li>
-<p>There are thousands of levels in 4 Images 1 Mot, and new ones are added regularly. The game also has different modes and categories for you to choose from, such as daily puzzles, seasonal puzzles, themed puzzles, and more.</p>
-<li><strong>How can I get more coins in 4 Images 1 Mot?</strong></li>
-<p>You can get more coins in 4 Images 1 Mot by solving puzzles, watching ads, or buying them with real money. You can use coins to buy hints or skip puzzles.</p>
-<li><strong>How can I contact the developer of 4 Images 1 Mot?</strong></li>
-<p>You can contact the developer of 4 Images 1 Mot by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You can also visit their website at <a href="">https://www.lotum.de/</a> or follow them on Facebook at <a href="">https://www.facebook.com/4pics1word/</a>.</p>
-<li><strong>Is 4 Images 1 Mot safe to download and play?</strong></li>
-<p>Yes, 4 Images 1 Mot is safe to download and play. The game does not contain any viruses, malware, or inappropriate content. However, you should always download the game from official sources, such as Google Play Store, APKCombo, or Poki.com. You should also avoid downloading any modded or hacked versions of the game, as they may harm your device or compromise your privacy.</p>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Caa Palavras Brasileiro Um jogo de palavras divertido e educativo.md
DELETED
@@ -1,105 +0,0 @@
-<br />
-<h1>Caça Palavras Brasileiro APK: A Fun and Educational Game for All Ages</h1>
-<p>If you are looking for a fun and educational game that can keep you entertained for hours, you should try Caça Palavras Brasileiro APK. This is a word search game that will challenge your brain, improve your vocabulary, and teach you Portuguese. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, how to play it, and what are the benefits of playing it.</p>
-<h2>What is Caça Palavras Brasileiro APK?</h2>
-<p>Caça Palavras Brasileiro APK is a word search game developed by Berni Mobile. It is available for Android devices and can be downloaded for free from various sources. The game has five levels of difficulty: Easy, Normal, Hard, Big, and Huge. It also has different categories of words, such as animals, fruits, colors, countries, sports, etc. The game is in Portuguese, so it is ideal for native speakers or learners of this language.</p>
-<h2>caça palavras brasileiro apk</h2><br /><p><b><b>Download File</b> ✫✫✫ <a href="https://jinyurl.com/2uNPcd">https://jinyurl.com/2uNPcd</a></b></p><br /><br />
-<h3>A word search game with different levels of difficulty</h3>
-<p>A word search game is a type of puzzle game where you have to find hidden words in a grid of letters. The words can be horizontal, vertical, diagonal, or backwards. The game has different levels of difficulty depending on the size of the grid and the number of words to find. The easy level has a 9x9 grid with 10 words, while the huge level has a 20x20 grid with 40 words. You can choose the level that suits your preference and skill.</p>
-<h3>A game that helps you learn Portuguese vocabulary and spelling</h3>
-<p>One of the main benefits of playing Caça Palavras Brasileiro APK is that it helps you learn Portuguese vocabulary and spelling. The game has hundreds of words from different categories, so you can expand your knowledge of this language. You can also learn how to spell the words correctly by looking at the grid and checking if they match the letters. The game will also show you the meaning of each word when you select it, so you can understand what it means.</p>
-<p>caça palavras brasileiro apk download<br />
-caça palavras brasileiro apk mod<br />
-caça palavras brasileiro apk offline<br />
-caça palavras brasileiro apk atualizado<br />
-caça palavras brasileiro apk gratis<br />
-caça palavras brasileiro apk para pc<br />
-caça palavras brasileiro apk android<br />
-caça palavras brasileiro apk online<br />
-caça palavras brasileiro apk 2023<br />
-caça palavras brasileiro apk hack<br />
-caça palavras brasileiro apk premium<br />
-caça palavras brasileiro apk infinito<br />
-caça palavras brasileiro apk sem anúncios<br />
-caça palavras brasileiro apk com dicas<br />
-caça palavras brasileiro apk fácil<br />
-caça palavras brasileiro apk difícil<br />
-caça palavras brasileiro apk com som<br />
-caça palavras brasileiro apk com temas<br />
-caça palavras brasileiro apk educativo<br />
-caça palavras brasileiro apk divertido<br />
-caça palavras brasileiro apk desafio<br />
-caça palavras brasileiro apk relaxante<br />
-caça palavras brasileiro apk inteligente<br />
-caça palavras brasileiro apk criativo<br />
-caça palavras brasileiro apk clássico<br />
-caça palavras brasileiro apk moderno<br />
-caça palavras brasileiro apk colorido<br />
-caça palavras brasileiro apk animado<br />
-caça palavras brasileiro apk viciante<br />
-caça palavras brasileiro apk gratuito<br />
-baixar caça palavras brasileiro apk<br />
-instalar caça palavras brasileiro apk<br />
-jogar caça palavras brasileiro apk<br />
-como baixar caça palavras brasileiro apk<br />
-como jogar caça palavras brasileiro apk<br />
-como instalar caça palavras brasileiro apk<br />
-melhor caça palavras brasileiro apk<br />
-novo caça palavras brasileiro apk<br />
-jogo de caça palavras brasileiro apk<br />
-aplicativo de caça palavras brasileiro apk<br />
-app de caça palavras brasileiro apk<br />
-versão mais recente de caça palavras brasileiro apk<br />
-versão antiga de caça palavras brasileiro apk<br />
-resenha de caça palavras brasileiro apk<br />
-avaliação de caça palavras brasileiro apk<br />
-comentários de caça palavras brasileiro apk<br />
-dúvidas sobre caça palavras brasileiro apk<br />
-suporte para caça palavras brasileiro apk<br />
-contato para caça palavras brasileiro apk</p>
-<h3>A game that challenges your brain and improves your concentration</h3>
-<p>Another benefit of playing Caça Palavras Brasileiro APK is that it challenges your brain and improves your concentration. The game requires you to scan the grid carefully and find the hidden words as fast as possible. You have to pay attention to every letter and avoid getting distracted by other words or letters. The game also tests your memory, as you have to remember where you saw a word or letter before. The game will keep your brain active and alert.</p>
-<h2>How to download and install Caça Palavras Brasileiro APK?</h2>
-<p>If you want to play Caça Palavras Brasileiro APK on your Android device, you have to download and install the APK file first. An APK file is an application package file that contains all the files needed to run an app on Android. Here are the steps to download and install Caça Pal avras Brasileiro APK:</p>
-<h3>Download the APK file from a trusted source</h3>
-<p>The first step is to download the APK file from a trusted source. You can find the official link to the game on the developer's website. You can also search for other sources that offer the APK file, but make sure they are safe and reliable. You can use a browser or a downloader app to download the file. The file size is about 13 MB, so it should not take long to download.</p>
-<h3>Enable unknown sources in your device settings</h3>
-<p>The second step is to enable unknown sources in your device settings. This is necessary because Android does not allow installing apps from sources other than the Google Play Store by default. To enable unknown sources, go to your device settings and look for the security or privacy option. Then, find the option that says "allow installation of apps from unknown sources" or something similar. Toggle it on and confirm your choice.</p>
-<h3>Install the APK file and enjoy the game</h3>
-<p>The third step is to install the APK file and enjoy the game. To install the file, locate it in your device storage and tap on it. You may see a warning message that says "this type of file can harm your device". Ignore this message and tap OK.</p>
-<h2>How to play Caça Palavras Brasileiro APK?</h2>
-<p>Playing Caça Palavras Brasileiro APK is very easy and fun. Here are the basic steps to play the game:</p>
-<h3>Choose a level of difficulty and a category</h3>
-<p>When you open the game, you will see a menu with five options: Easy, Normal, Hard, Big, and Huge. These are the levels of difficulty that determine the size of the grid and the number of words to find. Tap on the level you want to play. Then, you will see another menu with different categories of words, such as animals, fruits, colors, countries, sports, etc. Tap on the category you want to play.</p>
-<h3>Find the hidden words in the grid of letters</h3>
-<p>After choosing a level and a category, you will see a grid of letters with some words hidden in it. The words can be horizontal, vertical, diagonal, or backwards. You have to find all the words in the grid as fast as possible. You can see the list of words on the bottom of the screen.</p>
-<h3>Tap and drag your finger over the letters to select a word</h3>
-<p>To select a word, tap and drag your finger over the letters that form the word. The word will be highlighted in green if it is correct or red if it is wrong. The word will also disappear from the list if it is correct. You can also tap on a word in the list to see its meaning on the top of the screen.</p>
-<h3>Check your progress and score on the top of the screen</h3>
-<p>You can check your progress and score on the top of the screen. The progress bar shows how many words you have found out of the total number of words in the grid. The score shows how many points you have earned based on the time and difficulty level. You can also pause or restart the game by tapping on the buttons on the top right corner of the screen.</p>
-<h2>What are the benefits of playing Caça Palavras Brasileiro APK?</h2>
-<p>Playing Caça Palavras Brasileiro APK is not only fun, but also beneficial for your brain and your language skills. Here are some of the benefits of playing this game:</p>
-<h3>It improves your vocabulary and spelling skills</h3>
-<p>By playing this game, you can learn new words and their meanings in Portuguese. You can also improve your spelling skills by checking if the words you select match the letters in the grid. The game will help you expand your vocabulary and enhance your spelling accuracy.</p>
-<h3>It stimulates your brain and memory</h3>
-<p>By playing this game, you can stimulate your brain and memory. You have to use your logic, observation, and analysis skills to find the hidden words in the grid. You also have to use your memory to remember where you saw a word or letter before. The game will help you sharpen your cognitive abilities and boost your memory power.</p>
-<h3>It relaxes your mind and reduces stress</h3>
-<p>By playing this game, you can relax your mind and reduce stress. The game has a soothing background music and a colorful design that create a pleasant atmosphere. The game also has no time limit or pressure, so you can play at your own pace and enjoy the process. The game will help you calm your nerves and relieve your tension.</p>
-<h3>It entertains you and keeps you engaged</h3>
-<p>By playing this game, you can entertain yourself and keep yourself engaged. The game has different levels of difficulty and categories of words that offer variety and challenge. The game also has a scoring system that motivates you to improve your performance and beat your own records. The game will help you have fun and stay focused.</p>
-<h2>Conclusion</h2>
-<p>Caça Palavras Brasileiro APK is a word search game that is fun and educational for all ages. It is a game that helps you learn Portuguese vocabulary and spelling, challenges your brain and memory, relaxes your mind and reduces stress, and entertains you and keeps you engaged. You can download and install the game for free on your Android device and start playing right away. If you are looking for a game that combines learning and entertainment, you should try Caça Palavras Brasileiro APK.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Caça Palavras Brasileiro APK:</p>
-<h4>Q: Is Caça Palavras Brasileiro APK safe to download and install?</h4>
-<p>A: Yes, Caça Palavras Brasileiro APK is safe to download and install as long as you get it from a trusted source. You should always check the reputation and reviews of the source before downloading any APK file. You should also scan the file with an antivirus app before installing it.</p>
-<h4>Q: Is Caça Palavras Brasileiro APK compatible with all Android devices?</h4>
-<p>A: Caça Palavras Brasileiro APK is compatible with most Android devices that run on Android 4.1 or higher. However, some devices may have different specifications or settings that may affect the performance or functionality of the game. You should always check the compatibility of the game with your device before installing it.</p>
-<h4>Q: Can I play Caça Palavras Brasileiro APK offline?</h4>
-<p>A: Yes, you can play Caça Palavras Brasileiro APK offline without an internet connection. The game does not require any online features or services to run. However, if you want to update the game or access some optional features, such as sharing your score or rating the game, you will need an internet connection.</p>
-<h4>Q: Can I play Caça Palavras Brasileiro APK in other languages?</h4>
-<p>A: No, Caça Palavras Brasileiro APK is only available in Portuguese. The game is designed for native speakers or learners of this language. If you want to play a word search game in another language, you will have to look for another app that offers that option.</p>
-<h4>Q: How can I contact the developer of Caça Palavras Brasileiro APK?</h4>
-<p>A: If you have any questions, suggestions, or feedback about Caça Palavras Brasileiro APK, you can contact the developer by sending an email to [email protected]. You can also visit their website at https://www.bernimobile.com/ or follow them on Facebook at https://www.facebook.com/bernimobile/.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Clash Royale Elixir Infinito APK Cmo conseguir recursos ilimitados en el juego de estrategia ms popular.md
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Clash Royale Elixir Infinito Apk: How to Get Unlimited Elixir, Gems and Gold in Clash Royale</h1>
|
3 |
-
<p>Clash Royale is one of the most popular and addictive mobile games in the world. It is a real-time strategy game where you collect cards, build decks, and battle other players online. However, to progress faster and unlock more cards, you need to spend elixir, gems, and gold, which are the in-game currencies. These resources are limited and hard to earn, unless you are willing to spend real money on them.</p>
|
4 |
-
<h2>clash royale elixir infinito apk</h2><br /><p><b><b>DOWNLOAD</b> ————— <a href="https://jinyurl.com/2uNS74">https://jinyurl.com/2uNS74</a></b></p><br /><br />
|
5 |
-
<p>But what if there was a way to get unlimited elixir, gems, and gold in Clash Royale without spending a dime? Well, that's what Clash Royale Elixir Infinito Apk is all about. In this article, we will tell you everything you need to know about this modded version of Clash Royale, including its features, how to download and install it, its pros and cons, and some tips and tricks for playing with it. Let's get started!</p>
|
6 |
-
<h2>What is Clash Royale Elixir Infinito Apk?</h2>
|
7 |
-
<p>Clash Royale Elixir Infinito Apk is a modified version of the original Clash Royale game that gives you unlimited elixir, gems, and gold. This means that you can play the game without any restrictions or limitations. You can upgrade your cards, unlock new ones, open chests, buy items, and more without worrying about running out of resources.</p>
|
8 |
-
<p>Clash Royale Elixir Infinito Apk is not an official app from Supercell, the developer of Clash Royale. It is a third-party app that has been created by some fans of the game who wanted to enhance their gaming experience. Therefore, it is not available on the Google Play Store or the App Store. You have to download it from an external source, which we will explain later.</p>
|
9 |
-
<p>clash royale mod apk unlimited gems and elixir<br />
|
10 |
-
clash royale private server with infinite elixir<br />
|
11 |
-
clash royale hack apk download with elixir cheat<br />
|
12 |
-
clash royale apk modificado com elixir infinito<br />
|
13 |
-
clash royale elixir infinito apk mediafire<br />
|
14 |
-
clash royale unlimited elixir mod apk latest version<br />
|
15 |
-
clash royale elixir infinito apk 2023<br />
|
16 |
-
clash royale mod apk elixir ilimitado y gemas<br />
|
17 |
-
clash royale servidor privado con elixir infinito<br />
|
18 |
-
clash royale hack apk descargar con truco de elixir<br />
|
19 |
-
clash royale apk modificado con elixir infinito<br />
|
20 |
-
clash royale elixir infinito apk malavida<br />
|
21 |
-
clash royale mod apk unlimited everything 2023<br />
|
22 |
-
clash royale private server apk download 2023<br />
|
23 |
-
clash royale hack apk no root no survey<br />
|
24 |
-
clash royale apk mod com elixir infinito 2023<br />
|
25 |
-
clash royale elixir infinito apk atualizado<br />
|
26 |
-
clash royale mod apk elixir ilimitado y oro<br />
|
27 |
-
clash royale servidor privado apk descargar 2023<br />
|
28 |
-
clash royale hack apk sin root ni encuestas<br />
|
29 |
-
clash royale papua news mod apk download<br />
|
30 |
-
clash royale master royale infinity apk gratis<br />
|
31 |
-
clash royale arctic royale apk download youtube<br />
|
32 |
-
clash royale mod apk unlimited cards and chests<br />
|
33 |
-
clash royale private server with custom cards<br />
|
34 |
-
clash royale hack apk online generator tool<br />
|
35 |
-
clash royale apk mod com cartas infinitas<br />
|
36 |
-
clash royale elixir infinito apk sem root<br />
|
37 |
-
clash royale mod apk estrategia y defensa ilimitada<br />
|
38 |
-
clash royale servidor privado con cartas personalizadas<br />
|
39 |
-
clash royale hack apk generador online de recursos<br />
|
40 |
-
clash royale new scientist mod apk free download<br />
|
41 |
-
clash royale master royale infinity latest version<br />
|
42 |
-
clash royale arctic royale gameplay video youtube<br />
|
43 |
-
clash royale mod apk unlimited troops and spells<br />
|
44 |
-
clash royale private server with all skins unlocked<br />
|
45 |
-
clash royale hack apk easy and fast method<br />
|
46 |
-
clash royale apk mod com tropas e feitiços infinitos<br />
|
47 |
-
clash royale elixir infinito apk sem anúncios<br />
|
48 |
-
clash royale mod apk tropas y hechizos ilimitados<br />
|
49 |
-
clash royale servidor privado con todos los skins desbloqueados<br />
|
50 |
-
clash royale hack apk método fácil y rápido</p>
|
51 |
-
<h3>Features of Clash Royale Elixir Infinito Apk</h3>
|
52 |
-
<p>Clash Royale Elixir Infinito Apk has many features that make it different from the original game. Here are some of them:</p>
|
53 |
-
<h4>Unlimited Elixir</h4>
|
54 |
-
<p>Elixir is the main resource that you use to play cards in Clash Royale. It is generated automatically during battles at a constant rate. However, with Clash Royale Elixir Infinito Apk, you have unlimited elixir at your disposal. This means that you can play any card at any time without waiting for your elixir bar to fill up. You can also spam your opponent with multiple cards and overwhelm them easily.</p>
|
55 |
-
<h4>Unlimited Gems</h4>
|
56 |
-
<p>Gems are the premium currency in Clash Royale. They are used to buy chests, cards, gold, and other items in the game. They are also used to speed up the unlocking process of chests and skip waiting times. However, gems are very rare and hard to obtain in the game. You can only get them by completing achievements, winning battles, or buying them with real money.</p>
|
57 |
-
<p>But with Clash Royale Elixir Infinito Apk, you have unlimited gems at your disposal. This means that you can buy anything you want in the game without spending any money. You can also open as many chests as you want and get all the cards you need. You can also speed up your progress and reach higher levels faster.</p>
|
58 |
-
<h4>Unlimited Gold</h4>
|
59 |
-
<p>Gold is the basic currency in Clash Royale. It is used to upgrade your cards, buy cards from the shop, and create or join clans. You can earn gold by winning battles, opening chests, donating cards, or buying it with gems.</p>
|
60 |
-
<p>But with Clash Royale Elixir Infinito Apk, you have unlimited gold at your disposal. This means that you can upgrade your cards to the maximum level without any cost. You can also buy any card you want from the shop and create or join any clan you like.</p>
|
61 |
-
<h3>How to Download and Install Clash Royale Elixir Infinito Apk</h3>
|
62 |
-
<p>As we mentioned earlier, Clash Royale Elixir Infinito Apk is not an official app from Supercell. It is a third-party app that has been modified by some fans of the game. Therefore, you cannot download it from the Google Play Store or the App Store. You have to download it from an external source, such as a website or a file-sharing platform.</p>
|
63 |
-
<p>However, before you download and install Clash Royale Elixir Infinito Apk, you need to make sure that your device meets the following requirements:</p>
|
64 |
-
<ul>
|
65 |
-
<li>Your device must have Android 4.1 or higher.</li>
|
66 |
-
<li>Your device must have at least 100 MB of free storage space.</li>
|
67 |
-
<li>Your device must have a stable internet connection.</li>
|
68 |
-
<li>Your device must allow installation of apps from unknown sources.</li>
|
69 |
-
</ul>
|
70 |
-
<p>If your device meets these requirements, then you can follow these steps to download and install Clash Royale Elixir Infinito Apk:</p>
|
71 |
-
<h4>Step 1: Enable Unknown Sources</h4>
|
72 |
-
<p>The first step is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store or the App Store. To do this, go to your device settings and look for the security option. Then, find the unknown sources option and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Ignore this message and tap OK.</p>
|
73 |
-
<h4>Step 2: Download the Apk File</h4>
|
74 |
-
<p>The next step is to download the apk file of Clash Royale Elixir Infinito Apk. You can find many websites and platforms that offer this file for free. However, be careful and choose a reliable and trustworthy source. Some sources may contain viruses or malware that can harm your device or steal your personal information.</p>
|
75 |
-
<p>One of the best sources to download Clash Royale Elixir Infinito Apk is [this website]. It is safe, secure, and fast. All you have to do is click on the download button and wait for the file to be downloaded on your device.</p>
|
76 |
-
<h4>Step 3: Install the Apk File</h4>
|
77 |
-
<p>The third step is to install the apk file of Clash Royale Elixir Infinito Apk. To do this, go to your file manager and locate the downloaded file. Then, tap on it and follow the instructions on the screen. You may see a pop-up message that says this app may harm your device or request certain permissions. Ignore this message and tap install.</p>
|
78 |
-
<h4>Step 4: Launch the Game and Enjoy</h4>
|
79 |
-
<p>The final step is to launch the game and enjoy it. To do this, go to your app drawer and look for the Clash Royale icon. Then, tap on it and wait for the game to load. You may see a loading screen that says "Clash Royale Elixir Infinito". This means that you have successfully installed the modded version of Clash Royale.</p>
|
80 |
-
<p>Now, you can play the game with unlimited elixir, gems, and gold. You can also access all the features and modes of the game without any restrictions or limitations. Have fun!</p>
|
81 |
-
<h3>Pros and Cons of Clash Royale Elixir Infinito Apk</h3>
|
82 |
-
<p>Clash Royale Elixir Infinito Apk has many advantages and disadvantages that you should be aware of before using it. Here are some of them:</p>
|
83 |
-
<h4>Pros</h4>
|
84 |
-
<ul>
|
85 |
-
<li>You can play Clash Royale with unlimited elixir, gems, and gold.</li>
|
86 |
-
<li>You can upgrade your cards, unlock new ones, open chests, buy items, and more without any cost.</li>
|
87 |
-
<li>You can speed up your progress and reach higher levels faster.</li>
|
88 |
-
<li>You can experiment with different decks and strategies without any risk.</li>
|
89 |
-
<li>You can enjoy all the features and modes of the game without any restrictions or limitations.</li>
|
90 |
-
</ul>
|
91 |
-
<h4>Cons</ <ul>
|
92 |
-
<li>You may face some technical issues or bugs while playing the game.</li>
|
93 |
-
<li>You may not be able to play online with other players who are using the original version of the game.</li>
|
94 |
-
<li>You may get banned or suspended by Supercell for using a modded version of the game.</li>
|
95 |
-
<li>You may lose your progress or data if you uninstall the app or switch to the original version of the game.</li>
|
96 |
-
<li>You may miss out on the updates and new features that Supercell releases for the original version of the game.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>Tips and Tricks for Playing Clash Royale with Elixir Infinito Apk</h2>
|
99 |
-
<p>Clash Royale Elixir Infinito Apk can make your gaming experience more fun and exciting, but it can also make it more challenging and competitive. Here are some tips and tricks that can help you play better and win more battles with this modded version of Clash Royale:</p>
|
100 |
-
<h3>Use Your Elixir Wisely</h3>
|
101 |
-
<p>Even with unlimited elixir, you still need to play thoughtfully. Don't just spam cards randomly and hope for the best: have a plan for each battle, know when to attack and when to defend, and learn which cards work well together and which ones counter your opponent's cards. Unlimited elixir removes the cost of your moves, not the need to time and place them well.</p>
|
102 |
-
<h3>Build a Balanced Deck</h3>
|
103 |
-
<p>Having unlimited elixir, gems, and gold means that you can build any deck you want in Clash Royale. However, that doesn't mean that you should build a random or unbalanced deck. You still need to have a balanced deck that can deal with different situations and threats. You need to have a mix of cards that can attack, defend, support, and counter. You also need to have cards that can target different types of units, such as air, ground, swarm, tank, etc. You need to have cards that can synergize with each other and create powerful combos. You also need to have cards that suit your playstyle and preferences.</p>
|
104 |
-
<h3>Learn from Your Opponents</h3>
|
105 |
-
<p>Playing with Clash Royale Elixir Infinito Apk can give you an edge over your opponents, but it can also make them more challenging and unpredictable. You may face opponents who are also using the modded version of the game, or who are using the original version but have more skills and experience than you. Therefore, you need to learn from your opponents and adapt to their strategies and tactics. You need to observe their moves and patterns, and find their weaknesses and strengths. You also need to analyze your own mistakes and improve your performance.</p>
|
106 |
-
<h3>Join a Clan and Share Cards</h3>
|
107 |
-
<p>Clash Royale is not only a solo game, but also a social game. You can join a clan and interact with other players who share your passion for the game. You can chat with them, share tips and tricks, request and donate cards, participate in clan wars, and more. Joining a clan can help you improve your skills, expand your card collection, earn more rewards, and have more fun.</p>
|
108 |
-
<h2>Conclusion and FAQs</h2>
|
109 |
-
<p>Clash Royale Elixir Infinito Apk is a modded version of Clash Royale that gives you unlimited elixir, gems, and gold. It is a great way to enjoy the game without any restrictions or limitations. However, it also has some drawbacks and risks that you should be aware of before using it. In this article, we have explained everything you need to know about this app, including its features, how to download and install it, its pros and cons, and some tips and tricks for playing with it.</p>
|
110 |
-
<p>We hope that this article has been helpful and informative for you. If you have any questions or doubts about Clash Royale Elixir Infinito Apk, here are some FAQs that may answer them:</p>
|
111 |
-
<table>
|
112 |
-
<tr><th>Question</th><th>Answer</th></tr>
|
113 |
-
<tr><td>Is Clash Royale Elixir Infinito Apk safe to use?</td><td>Clash Royale Elixir Infinito Apk is not an official app from Supercell. It is a third-party app that has been modified by some fans of the game. Therefore, it is not guaranteed to be safe or secure. It may contain viruses or malware that can harm your device or steal your personal information. It may also cause some technical issues or bugs while playing the game. Therefore, use it at your own risk.</td></tr>
|
114 |
-
<tr><td>Is Clash Royale Elixir Infinito Apk legal to use?</td><td>No. Clash Royale Elixir Infinito Apk violates the terms and conditions of Supercell and Clash Royale and infringes their intellectual property rights. It may also be considered cheating or hacking by other players and authorities, so using it may result in penalties from Supercell, such as your account being banned or suspended.</td></tr>
|
115 |
-
<tr><td>Will Clash Royale Elixir Infinito Apk work on my device?</td><td>Clash Royale Elixir Infinito Apk may or may not work on your device. It depends on various factors, such as your device model, operating system, software version, storage space, internet connection, etc. Some devices may be compatible with the app, while others may not. Some devices may run the app smoothly, while others may experience crashes or errors. Therefore, you have to try it yourself and see if it works on your device.</td></tr>
|
116 |
-
<tr><td>Can I play online with Clash Royale Elixir Infinito Apk?</td><td>Clash Royale Elixir Infinito Apk may or may not allow you to play online with other players. It depends on the version of the app and the server of the game. Some versions of the app may connect you to the original server of Clash Royale, where you can play with other players who are using the original version of the game. However, this may also expose you to detection and banning by Supercell. Other versions of the app may connect you to a private server of Clash Royale, where you can play with other players who are using the modded version of the game. However, this may also limit your options and features in the game.</td></tr>
|
117 |
-
<tr><td>Can I switch back to the original version of Clash Royale after using Clash Royale Elixir Infinito Apk?</td><td>You can switch back to the original version of Clash Royale after using Clash Royale Elixir Infinito Apk, but you may lose your progress or data in the process. To switch back, you have to uninstall the modded version of the game and install the original version from the Google Play Store or the App Store. However, this may erase your account and data in the modded version of the game. You may also not be able to restore your account and data in the original version of the game if you have not linked it to a Supercell ID or a Google Play Games account.</td></tr>
|
118 |
-
<tr><td>Is there any alternative to Clash Royale Elixir Infinito Apk?</td><td>If you are looking for an alternative to Clash Royale Elixir Infinito Apk, you may try some other modded versions of Clash Royale that offer similar features and benefits. However, be careful and choose a reliable and trustworthy source for downloading them. Some of them are:</td></tr>
|
119 |
-
</table>
|
120 |
-
<ul>
|
121 |
-
<li>Clash Royale Mod Apk: This is another modded version of Clash Royale that gives you unlimited resources and access to all features and modes of the game. You can download it from [this website].</li>
|
122 |
-
<li>Clash Royale Hack Apk: This is a hacked version of Clash Royale that gives you unlimited resources and allows you to customize your game settings and preferences. You can download it from [this website].</li>
|
123 |
-
<li>Clash Royale Private Server Apk: This is a private server version of Clash Royale that connects you to a different server where you can play with other players who are using the same version of the game. You can download it from [this website].</li>
|
124 |
-
</ul>
spaces/1phancelerku/anime-remove-background/Dmod APK Download Everything You Need to Know About the New Action Game.md
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>What is dmod apk and why you should download it</h1>
|
3 |
-
<p>If you are a fan of Android games, you may have heard of <strong>dmod apk</strong>. But what is it exactly and what can it do for you? In this article, we will tell you everything you need to know about this amazing app and why you should download it right away.</p>
|
4 |
-
<p>Dmod apk is an app that allows you to download and install modified versions of your favorite Android games. These modified versions, also known as mods, can give you access to unlimited resources, premium features, custom skins, and more. With dmod apk, you can enjoy your games like never before and have more fun and excitement.</p>
|
5 |
-
<h2>dmod apk download</h2><br /><p><b><b>Download File</b> 🔗 <a href="https://jinyurl.com/2uNTfg">https://jinyurl.com/2uNTfg</a></b></p><br /><br />
|
6 |
-
<p>Here are some of the benefits of using dmod apk over other apps:</p>
|
7 |
-
<ul>
|
8 |
-
<li>It is free and easy to use. You don't need to pay anything or register an account to use dmod apk. You just need to download it from a reliable source and install it on your device.</li>
|
9 |
-
<li>It is safe and secure. Dmod apk does not contain any viruses or malware that can harm your device or data. It also does not require root access or any special permissions to work.</li>
|
10 |
-
<li>It is compatible and flexible. Dmod apk works with most Android devices and games. You can choose from a wide range of mods for different games and customize them according to your preferences.</li>
|
11 |
-
</ul>
|
12 |
-
<p>Here are some of the features of dmod apk that make it unique and useful:</p>
|
13 |
-
<ul>
|
14 |
-
<li>It has a user-friendly interface. Dmod apk has a simple and intuitive design that makes it easy to navigate and use. You can easily find the mods you want by browsing through categories or searching by keywords.</li>
|
15 |
-
<li>It has a large collection of mods. Dmod apk has thousands of mods for hundreds of games, including popular ones like PUBG, Free Fire, Clash of Clans, Subway Surfers, Candy Crush, and more. You can always find something new and interesting to try.</li>
|
16 |
-
<li>It has a fast download speed. Dmod apk downloads the mods directly from its servers, which are fast and stable. You don't have to wait for long or face any interruptions while downloading.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>How to download and install dmod apk on your Android device</h2>
|
19 |
-
<p>Now that you know what dmod apk is and what it can do for you, you may be wondering how to get it on your device. Don't worry, we will show you how in this section. Just follow these simple steps:</p>
|
20 |
-
<p>Before you start, make sure that your device meets these minimum requirements:</p>
|
21 |
-
<ul>
|
22 |
-
<li>It has Android 4.4 or a higher version installed.</li>
|
23 |
-
<li>It has at least 50 MB of free storage space available.</li>
<li>It has enabled the option to install apps from unknown sources. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
|
24 |
-
</ul>
|
25 |
-
<p>Once you have checked these requirements, you can proceed with the following steps:</p>
|
26 |
-
<ol>
|
27 |
-
<li>Download the dmod apk file from a trusted source. You can use this link to download it: <a href="">dmod apk download</a>.</li>
|
28 |
-
<li>Locate the downloaded file on your device and tap on it to open it.</li>
|
29 |
-
<li>Follow the instructions on the screen and click on Install to start the installation process.</li>
|
30 |
-
<li>Wait for a few seconds until the installation is completed and click on Open to launch the app.</li>
|
31 |
-
<li>Congratulations! You have successfully installed dmod apk on your device. You can now enjoy using it to play your favorite games.</li>
|
32 |
-
</ol>
|
33 |
-
<p>Here are some tips on how to use dmod apk safely and effectively:</p>
|
34 |
-
<p>dmod apk download for android<br />
|
35 |
-
dmod apk download latest version<br />
|
36 |
-
dmod apk download free<br />
|
37 |
-
dmod apk download 2023<br />
|
38 |
-
dmod apk download update<br />
|
39 |
-
dmod apk download modded<br />
|
40 |
-
dmod apk download offline<br />
|
41 |
-
dmod apk download no root<br />
|
42 |
-
dmod apk download unlimited money<br />
|
43 |
-
dmod apk download hack<br />
|
44 |
-
dmod apk download cracked<br />
|
45 |
-
dmod apk download full<br />
|
46 |
-
dmod apk download premium<br />
|
47 |
-
dmod apk download pro<br />
|
48 |
-
dmod apk download unlocked<br />
|
49 |
-
dmod apk download game<br />
|
50 |
-
dmod apk download app<br />
|
51 |
-
dmod apk download tool<br />
|
52 |
-
dmod apk download software<br />
|
53 |
-
dmod apk download emulator<br />
|
54 |
-
dmod apk download online<br />
|
55 |
-
dmod apk download pc<br />
|
56 |
-
dmod apk download windows 10<br />
|
57 |
-
dmod apk download mac<br />
|
58 |
-
dmod apk download linux<br />
|
59 |
-
dmod apk download chromebook<br />
|
60 |
-
dmod apk download bluestacks<br />
|
61 |
-
dmod apk download nox player<br />
|
62 |
-
dmod apk download memu play<br />
|
63 |
-
dmod apk download ld player<br />
|
64 |
-
dmod apk download smartgaga<br />
|
65 |
-
dmod apk download gameloop<br />
|
66 |
-
dmod apk download android studio<br />
|
67 |
-
dmod apk download apkpure<br />
|
68 |
-
dmod apk download uptodown<br />
|
69 |
-
dmod apk download apkmirror<br />
|
70 |
-
dmod apk download apkmody<br />
|
71 |
-
dmod apk download apknite<br />
|
72 |
-
dmod apk download apksfree<br />
|
73 |
-
dmod apk download apksfull</p>
|
74 |
-
<ul>
|
75 |
-
<li>Always download the mods from the official dmod apk app or website. Do not download them from other sources as they may contain viruses or malware.</li>
|
76 |
-
<li>Always backup your original game data before installing any mods. This way, you can restore it if anything goes wrong or if you want to switch back to the original version.</li>
|
77 |
-
<li>Always check the compatibility and reviews of the mods before installing them. Some mods may not work well with your device or game version, or they may have bugs or glitches. Read the comments and ratings of other users to see if they have any issues or complaints.</li>
|
78 |
-
</ul>
|
79 |
-
<h3>How to use dmod apk to play your favorite games</h3>
|
80 |
-
<p>Now that you have installed dmod apk on your device, you may be wondering how to use it to play your favorite games. Don't worry, we will show you how in this section. Just follow these simple steps:</p>
|
81 |
-
<p>First, you need to find the mods for the games you want to play. You can do this by using the dmod apk app or website. Here are some of the popular games that you can play with dmod apk:</p>
|
82 |
-
<table>
|
83 |
-
<tr><th>Game</th><th>Description</th></tr>
|
84 |
-
<tr><td>PUBG</td><td>A battle royale game where you fight against 99 other players in a shrinking map. You can use mods to get unlimited health, ammo, weapons, skins, and more.</td></tr>
|
85 |
-
<tr><td>Free Fire</td><td>A similar game to PUBG but with more features and modes. You can use mods to get unlimited diamonds, coins, characters, pets, and more.</td></tr>
|
86 |
-
<tr><td>Clash of Clans</td><td>A strategy game where you build your own village and army and fight against other players. You can use mods to get unlimited gems, gold, elixir, dark elixir, troops, and more.</td></tr>
|
87 |
-
<tr><td>Subway Surfers</td><td>A running game where you dodge obstacles and collect coins and power-ups. You can use mods to get unlimited keys, coins, hoverboards, characters, and more.</td></tr>
|
88 |
-
<tr><td>Candy Crush</td><td>A puzzle game where you match candies and clear levels. You can use mods to get unlimited lives, moves, boosters, and more.</td></tr>
|
89 |
-
</table>
|
90 |
-
<p>To download and install the mods for these games, you can follow these steps:</p>
|
91 |
-
<ol>
|
92 |
-
<li>Open the dmod apk app or website and search for the game you want to play.</li>
|
93 |
-
<li>Select the mod you want to download and click on Download. You will be redirected to a page where you need to complete a verification process. This is to ensure that you are not a robot and to protect the mod from being abused.</li>
|
94 |
-
<li>Complete the verification process by following the instructions on the screen. This may involve completing a survey, watching a video, or installing an app.</li>
|
95 |
-
<li>Once you have completed the verification process, you will be able to download the mod file. Locate the downloaded file on your device and tap on it to open it.</li>
|
96 |
-
<li>Follow the instructions on the screen and click on Install to start the installation process.</li>
|
97 |
-
<li>Wait for a few seconds until the installation is completed and click on Open to launch the game with the mod enabled.</li>
|
98 |
-
<li>Congratulations! You have successfully installed the mod for your game. You can now enjoy playing it with enhanced features and items.</li>
|
99 |
-
</ol>
|
100 |
-
<p>To use the mods in your games, you can follow these steps:</p>
|
101 |
-
<ol>
|
102 |
-
<li>Open the game with the mod enabled and start playing as usual.</li>
|
103 |
-
<li>To access the mod menu, you need to tap on a special button or icon on the screen. This may vary depending on the game and the mod. For example, in PUBG, you may need to tap on the floating icon on the top left corner of the screen.</li>
|
104 |
-
<li>Once you have opened the mod menu, you will see a list of options and features that you can enable or disable. You can also adjust the settings and values of some features according to your preferences.</li>
|
105 |
-
<li>Select the features that you want to use and close the mod menu. You will see the effects of the features in your game. For example, in Free Fire, you may see your health bar increase or your enemies become visible through walls.</li>
|
106 |
-
<li>Enjoy playing your game with the mod enabled and have more fun and excitement.</li>
|
107 |
-
</ol>
|
108 |
-
<h4>How to troubleshoot common issues with dmod apk</h4>
|
109 |
-
<p>While dmod apk is a great app that can enhance your gaming experience, it may also cause some issues or problems sometimes. Don't worry, we will show you how to troubleshoot some of the common issues with dmod apk in this section. Here are some of the common issues and how to fix them:</p>
|
110 |
-
<ul>
|
111 |
-
<li><strong>Error: App not installed</strong>. This error may occur when you try to install dmod apk or a mod file on your device. This may be due to several reasons, such as insufficient storage space, corrupted file, incompatible device, or security settings. To fix this error, you can try these solutions: <ul>
|
112 |
-
<li>Clear some storage space on your device by deleting unwanted files or apps.</li>
|
113 |
-
<li>Download the file again from a reliable source and make sure it is not corrupted or damaged.</li>
|
114 |
-
<li>Check if your device meets the minimum requirements for installing dmod apk or the mod file.</li>
|
115 |
-
<li>Enable the option to install apps from unknown sources by going to Settings > Security > Unknown Sources and toggling it on.</li>
|
116 |
-
</ul>
|
117 |
-
</li>
|
118 |
-
<li><strong>Error: Game not working or crashing</strong>. This error may occur when you try to play a game with a mod enabled. This may be due to several reasons, such as outdated mod, incompatible game version, conflicting features, or bugs or glitches. To fix this error, you can try these solutions: <ul>
|
119 |
-
<li>Update the mod to the latest version by using the dmod apk app or website.</li>
|
120 |
-
<li>Update the game to the latest version by using the Google Play Store or other sources.</li>
|
121 |
-
<li>Disable or adjust some features that may cause conflicts or errors in your game.</li>
|
122 |
-
<li>Report any bugs or glitches to the developers of dmod apk or the mod file.</li>
|
123 |
-
</ul>
|
124 |
-
</li>
|
125 |
-
<li><strong>Error: Mod not working or showing</strong>. This error may occur when you try to use a mod in your game but it does not work or show up. This may be due to several reasons, such as incorrect installation, wrong file format, missing files, or disabled features. To fix this error, you can try these solutions: <ul>
|
126 |
-
<li>Reinstall the mod by following the steps in section 3 of this article.</li>
|
127 |
-
<li>Make sure that the mod file is in APK format and not ZIP or RAR format.</li>
|
128 |
-
<li>Make sure that all the files and folders of the mod are present and intact on your device.</li>
|
129 |
-
<li>Enable the features that you want to use by opening the mod menu and selecting them.</li>
|
130 |
-
</ul>
|
131 |
-
</li>
|
132 |
-
</ul>
|
133 |
-
<h2>Conclusion</h2>
|
134 |
-
<p>In conclusion, dmod apk is an app that allows you to download and install modified versions of your favorite Android games. These mods can give you access to unlimited resources, premium features, custom skins, and more. With dmod apk, you can enjoy your games like never before and have more fun and excitement.</p>
|
135 |
-
<p>If you want to try dmod apk for yourself, you can download it from this link: <a href="">dmod apk download</a>. You can also visit their website for more information and updates: <a href="">dmod apk website</a>.</p>
|
136 |
-
<p>We hope that this article has helped you understand what dmod apk is and how to use it. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
|
137 |
-
<h2>FAQs</h2>
|
138 |
-
<p>Here are some of the frequently asked questions about dmod apk:</p>
|
139 |
-
<ol>
|
140 |
-
<li><strong>Is dmod apk legal?</strong></li>
|
141 |
-
<p>Dmod apk is legal as long as you use it for personal and educational purposes only. However, using it for commercial or malicious purposes may violate the terms and conditions of the original game developers and publishers. Therefore, we advise you to use dmod apk at your own risk and discretion.</p>
|
142 |
-
<li><strong>Is dmod apk safe?</strong></li>
|
143 |
-
<p>Dmod apk is safe as long as you download it from a trusted source and install it on your device. It does not contain any viruses or malware that can harm your device or data. It also does not require root access or any special permissions to work. However, some mods may contain harmful or inappropriate content that may affect your game or device. Therefore, we advise you to check the compatibility and reviews of the mods before installing them.</p>
|
144 |
-
<li><strong>Can dmod apk ban my game account?</strong></li>
|
145 |
-
<p>Dmod apk does not ban your game account directly, but it may increase the risk of getting banned by the game servers or authorities. This is because some mods may give you an unfair advantage over other players or violate the game rules and policies. Therefore, we advise you to use dmod apk with caution and moderation, and avoid using it in online or multiplayer modes.</p>
|
146 |
-
<li><strong>How can I update dmod apk?</strong></li>
|
147 |
-
<p>You can update dmod apk by using the dmod apk app or website. You can check for updates by opening the app or visiting the website and looking for the latest version. You can also enable the auto-update option in the app settings to get notified and download the updates automatically.</p>
|
148 |
-
<li><strong>How can I contact dmod apk?</strong></li>
|
149 |
-
<p>You can contact dmod apk by using their email address or social media accounts. You can find their contact information on their website or app. You can also leave a comment or feedback on their website or app. They are always happy to hear from their users and provide support and assistance.</p>
|
150 |
-
</ol>
spaces/1phancelerku/anime-remove-background/Download SuperStar JYPNATION and Collect Over 700 Cards of Your Favorite Artists.md
DELETED
@@ -1,210 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Superstar Jypnation</h1>
|
3 |
-
<p>Do you love K-pop music and rhythm games? If so, you should definitely try out <strong>superstar jypnation</strong>, a fun and exciting music game with your favorite artists from <strong>JYP Entertainment</strong>. In this article, we will show you how to download superstar jypnation on your device, whether it's Android, iOS, or PC. We will also tell you about the features of the game, such as the artists, songs, cards, rankings, and more. Plus, we will give you some tips and tricks to help you play better and score higher. So, let's get started!</p>
|
4 |
-
<h2>Features of Superstar Jypnation</h2>
|
5 |
-
<p>Superstar jypnation is a rhythm game that lets you play along with the songs from JYP Entertainment's artists. The game has many features that make it fun and addictive, such as:</p>
|
6 |
-
<h2>how to download superstar jypnation</h2><br /><p><b><b>Download File</b> ---> <a href="https://jinyurl.com/2uNP6Q">https://jinyurl.com/2uNP6Q</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li><strong>24 group artists from JYP Entertainment</strong>: You can choose from a variety of artists, such as J.Y. Park, Wonder Girls, Sunmi, 2AM, 2PM, miss A, JJ Project, Baek A Yeon, 15&, GOT7, DAY6, TWICE, Stray Kids, YUBIN, ITZY, and NiziU.</li>
|
9 |
-
<li><strong>270+ songs from JYP Entertainment's artists</strong>: You can play songs from different genres and eras, from the debut songs to the latest hits. You can also unlock more songs as you progress in the game.</li>
|
10 |
-
<li><strong>660+ cards to collect</strong>: You can collect cards that feature the images of your favorite artists. Each card has different attributes and abilities that affect your score. You can also equip and upgrade your cards to make them more powerful.</li>
|
11 |
-
<li><strong>Weekly Ranking, Best Record for each song, and many more competitions inside</strong>: You can compete with other players around the world and see how you rank in different categories. You can also check your best record for each song and try to beat it.</li>
|
12 |
-
</ul>
|
13 |
-
<h3>Participating Artists</h3>
|
14 |
-
<p>Superstar jypnation has 24 group artists from JYP Entertainment that you can choose from. Each artist has their own songs and cards that you can play with. Here is the list of the participating artists:</p>
|
15 |
-
<table>
|
16 |
-
<tr>
|
17 |
-
<th>Artist</th>
|
18 |
-
<th>Debut Year</th>
|
19 |
-
<th>Genre</th>
|
20 |
-
</tr>
|
21 |
-
<tr>
|
22 |
-
<td>J.Y. Park</td>
|
23 |
-
<td>1994</td>
|
24 |
-
<td>K-pop, R&B</td>
|
25 |
-
</tr>
|
26 |
-
<tr>
|
27 |
-
<td>Wonder Girls</td>
|
28 |
-
<td>2007</td>
|
29 |
-
<td>K-pop, Retro-pop</td>
|
30 |
-
</tr>
|
31 |
-
<tr>
|
32 |
-
<td>Sunmi</td>
|
33 |
-
<td> 2007</td>
|
34 |
-
<td>K-pop, Dance-pop</td>
|
35 |
-
</tr>
|
36 |
-
<tr>
|
37 |
-
<td>2AM</td>
|
38 |
-
<td>2008</td>
|
39 |
-
<td>K-pop, Ballad</td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td>2PM</td>
|
43 |
-
<td>2008</td>
|
44 |
-
<td>K-pop, Dance-pop</td>
|
45 |
-
</tr>
|
46 |
-
<tr>
|
47 |
-
<td>miss A</td>
|
48 |
-
<td>2010</td>
|
49 |
-
<td>K-pop, Dance-pop</td>
|
50 |
-
</tr>
|
51 |
-
<tr>
|
52 |
-
<td>JJ Project</td>
|
53 |
-
<td>2012</td>
|
54 |
-
<td>K-pop, Hip-hop</td>
|
55 |
-
</tr>
|
56 |
-
<tr>
|
57 |
-
<td>Baek A Yeon</td>
|
58 |
-
<td>2012</td>
|
59 |
-
<td>K-pop, Ballad</td>
|
60 |
-
</tr>
|
61 |
-
<tr>
|
62 |
-
<td>15&</td>
|
63 |
-
<td>2012</td>
|
64 |
-
<td>K-pop, R&B</td>
|
65 |
-
</tr>
|
66 |
-
<tr>
|
67 |
-
<td>GOT7</td>
|
68 |
-
<td>2014</td>
|
69 |
-
<td>K-pop, Hip-hop</td>
|
70 |
-
</tr>
|
71 |
-
<tr>
|
72 |
-
<td>DAY6</td>
|
73 |
-
<td>2015</td>
|
74 |
-
<td>K-rock, Pop-rock</td>
|
75 |
-
</tr>
|
76 |
-
<tr>
|
77 |
-
<td>TWICE</td>
|
78 |
-
<td>2015</td>
|
79 |
-
<td>K-pop, Bubblegum pop</td>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td>Stray Kids</td>
|
83 |
-
<td>2018</td>
|
84 |
-
<td>K-pop, Hip-hop</td>
|
85 |
-
</tr>
|
86 |
-
<tr>
|
87 |
-
<td>YUBIN </td>
|
88 |
-
<td>2018</td>
|
89 |
-
<td>K-pop, Retro-pop</td>
|
90 |
-
</tr>
|
91 |
-
<tr>
|
92 |
-
<td>ITZY</td>
|
93 |
-
<td>2019</td>
|
94 |
-
<td>K-pop, Teen pop</td>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>NiziU</td>
|
98 |
-
<td>2020</td>
|
99 |
-
<td>J-pop, K-pop</td>
|
100 |
-
</tr>
|
101 |
-
</table>
|
102 |
-
<h3>Songs and Levels</h3>
|
103 |
-
<p>Superstar jypnation has over 270 songs from JYP Entertainment's artists that you can play in the game. Each song has three levels of difficulty: Easy, Normal, and Hard. The higher the difficulty, the more notes you have to tap and the faster they move. You can choose the level that suits your skill and preference. You can also unlock more songs by completing missions and achievements in the game.</p>
|
104 |
-
<h3>Cards and Abilities</h3>
|
105 |
-
<p>Superstar jypnation has over 660 cards that you can collect in the game. Each card features an image of an artist from JYP Entertainment. The cards have different attributes, such as Vocal, Dance, Rhythm, and Center. The cards also have different abilities, such as Score Up, Perfect Lock, Combo Bonus, and more. The cards can help you improve your score and performance in the game.</p>
|
106 |
-
<p>You can equip up to five cards for each artist in your deck. The cards you equip will affect the score you get for each note you tap. You can also enhance your cards by using other cards or materials as fodder. Enhancing your cards will increase their level and stats. You can also upgrade your cards by using duplicates or special items. Upgrading your cards will increase their rank and rarity.</p>
|
107 |
-
<h3>Rankings and Competitions</h3>
|
108 |
-
<p>Superstar jypnation has various rankings and competitions that you can participate in. You can compete with other players around the world and see how you rank in different categories, such as:</p>
|
109 |
-
<ul>
|
110 |
-
<li><strong>Weekly Ranking</strong>: You can compete with other players in your league and try to get the highest score for each song. The higher your score, the higher your rank. You can also get rewards based on your rank at the end of each week.</li>
|
111 |
-
<li><strong>Best Record for each song</strong>: You can check your best record for each song and try to beat it. You can also see the best records of other players and compare them with yours.</li>
|
112 |
-
<li><strong>World Record for each song</strong>: You can check the world record for each song and see who holds it. You can also try to break the world record and make history.</li>
|
113 |
-
<li><strong>Superstar League</strong>: You can compete with other players in a special league that features a random song every day. You can get rewards based on your score and rank at the end of each week.</li>
|
114 |
-
<li><strong>Arena Mode</strong>: You can compete with other players in a real-time mode that features three songs in a row. You can get rewards based on your score and rank at the end of each match.</li>
|
115 |
-
</ul>
|
116 |
-
<h2>How to Download Superstar Jypnation on Android</h2>
|
117 |
-
<p>If you have an Android device, you can download superstar jypnation from Google Play Store. Here are the steps to do so:</p>
|
118 |
-
<ol>
|
119 |
-
<li>Open Google Play Store on your device.</li>
|
120 |
-
<li>Search for "superstar jypnation" in the search bar.</li>
|
121 |
-
<li>Select the game from the search results and tap on "Install".</li>
|
122 |
-
<li>Wait for the game to download and install on your device.</li>
|
123 |
-
<li>Open the game and enjoy playing!</li>
|
124 |
-
</ol>
|
125 |
-
<h2>How to Download Superstar Jypnation on iOS</h2>
|
126 |
-
<p>If you have an iOS device, you can download superstar jypnation from App Store. Here are the steps to do so:</p>
|
127 |
-
<p>How to download superstar jypnation on android<br />
|
128 |
-
How to download superstar jypnation on ios<br />
|
129 |
-
How to download superstar jypnation on pc<br />
|
130 |
-
How to download superstar jypnation songs<br />
|
131 |
-
How to download superstar jypnation apk<br />
|
132 |
-
How to download superstar jypnation mod<br />
|
133 |
-
How to download superstar jypnation update<br />
|
134 |
-
How to download superstar jypnation for free<br />
|
135 |
-
How to download superstar jypnation on mac<br />
|
136 |
-
How to download superstar jypnation on laptop<br />
|
137 |
-
How to download superstar jypnation on windows 10<br />
|
138 |
-
How to download superstar jypnation on chromebook<br />
|
139 |
-
How to download superstar jypnation on bluestacks<br />
|
140 |
-
How to download superstar jypnation on nox player<br />
|
141 |
-
How to download superstar jypnation on memu play<br />
|
142 |
-
How to download superstar jypnation on ldplayer<br />
|
143 |
-
How to download superstar jypnation on qooapp<br />
|
144 |
-
How to download superstar jypnation on google play store<br />
|
145 |
-
How to download superstar jypnation on app store<br />
|
146 |
-
How to download superstar jypnation on amazon appstore<br />
|
147 |
-
How to download superstar jypnation offline<br />
|
148 |
-
How to download superstar jypnation without wifi<br />
|
149 |
-
How to download superstar jypnation without vpn<br />
|
150 |
-
How to download superstar jypnation without ads<br />
|
151 |
-
How to download superstar jypnation without root<br />
|
152 |
-
How to download superstar jypnation with data transfer<br />
|
153 |
-
How to download superstar jypnation with cardbook sync<br />
|
154 |
-
How to download superstar jypnation with all songs unlocked<br />
|
155 |
-
How to download superstar jypnation with unlimited diamonds<br />
|
156 |
-
How to download superstar jypnation with cheat engine<br />
|
157 |
-
How to install superstar jypnation on android phone<br />
|
158 |
-
How to install superstar jypnation on iphone or ipad<br />
|
159 |
-
How to install superstar jypnation on windows pc or macbook<br />
|
160 |
-
How to install superstar jypnation from apk file<br />
|
161 |
-
How to install superstar jypnation from modded apk file<br />
|
162 |
-
How to install superstar jypnation from qooapp apk file<br />
|
163 |
-
How to install superstar jypnation from google play store apk file<br />
|
164 |
-
How to install superstar jypnation from app store ipa file<br />
|
165 |
-
How to install superstar jypnation from amazon appstore apk file<br />
|
166 |
-
How to install superstar jypnation using emulator software</p>
|
167 |
-
<ol>
|
168 |
-
<li>Open App Store on your device.</li>
|
169 |
-
<li>Search for "superstar jypnation" in the search bar.</li>
|
170 |
-
<li>Select the game from the search results and tap on "Get".</li>
|
171 |
-
<li>Wait for the game to download and install on your device.</li>
|
172 |
-
<li>Open the game and enjoy playing!</li>
|
173 |
-
</ol>
|
174 |
-
<h2>How to Download Superstar Jypnation on PC</h2>
|
175 |
-
<p>If you want to play superstar jypnation on your PC, you will need to use an emulator that can run Android apps on your computer. One of the best emulators for this purpose is MuMu Player, which is fast, stable, and easy to use. Here are the steps to download superstar jypnation on PC using MuMu Player:</p>
|
176 |
-
<ol>
|
177 |
-
<li>Download MuMu Player from its official website: <a href="">https://mumuplayer.com/en/</a>.</li>
|
179 |
-
<li>Open MuMu Player and click on the Google Play icon on the home screen.</li>
|
180 |
-
<li>Sign in with your Google account or create a new one.</li>
|
181 |
-
<li>Search for "superstar jypnation" in the Google Play Store and install it.</li>
|
182 |
-
<li>Open the game and enjoy playing!</li>
|
183 |
-
</ol>
|
184 |
-
<h2>Tips and Tricks for Superstar Jypnation</h2>
|
185 |
-
<p>Now that you know how to download superstar jypnation on your device, you might want to learn some tips and tricks to improve your gameplay and score higher. Here are some of them:</p>
|
186 |
-
<ul>
|
187 |
-
<li><strong>Practice makes perfect</strong>: The best way to get better at the game is to practice as much as you can. Try to play different songs and levels and learn the patterns and timings of the notes. You can also use the practice mode to replay any part of the song you want.</li>
|
188 |
-
<li><strong>Use headphones or earphones</strong>: Playing with headphones or earphones can help you hear the music better and focus on the rhythm. It can also block out any distractions or noises around you.</li>
|
189 |
-
<li><strong>Adjust the speed and sync settings</strong>: You can adjust the speed and sync settings of the game to suit your preference and device. The speed setting controls how fast the notes move on the screen. The sync setting controls how well the notes match with the music. You can find these settings in the options menu.</li>
|
190 |
-
<li><strong>Equip and upgrade your cards wisely</strong>: You can equip up to five cards for each artist in your deck. You should equip cards that have high attributes and abilities that match with the song you are playing. For example, if the song has more vocal notes, you should equip cards that have high vocal attributes and score up abilities. You should also upgrade your cards regularly to increase their level, stats, rank, and rarity.</li>
|
191 |
-
<li><strong>Use items and rewards</strong>: You can use items and rewards to help you play better and get more benefits. For example, you can use headphones to play more songs, diamonds to buy more cards or items, RP to enhance or upgrade your cards, emeralds to buy special items or rewards, etc. You can get these items and rewards by completing missions, achievements, events, or daily login bonuses.</li>
|
192 |
-
</ul>
|
193 |
-
<h2>Conclusion</h2>
|
194 |
-
<p>Superstar jypnation is a great game for K-pop fans and rhythm game lovers. It has many features that make it fun and addictive, such as 24 group artists from JYP Entertainment, 270+ songs from different genres and eras, 660+ cards to collect and enhance, and various rankings and competitions to join. You can download superstar jypnation on your Android, iOS, or PC device easily by following our step-by-step guide. You can also use our tips and tricks to improve your gameplay and score higher. So, what are you waiting for? Download superstar jypnation today and enjoy playing with your favorite artists!</p>
|
195 |
-
<h2>FAQs</h2>
|
196 |
-
<p>Here are some frequently asked questions and answers about superstar jypnation:</p>
|
197 |
-
<ol>
|
198 |
-
<li><strong>Q: How can I change my profile picture in the game?</strong></li>
|
199 |
-
<li>A: You can change your profile picture in the game by tapping on your profile icon on the top left corner of the screen. Then, tap on "Change Profile" and choose an image from your device or take a photo with your camera.</li>
|
200 |
-
<li><strong>Q: How can I change my nickname in the game?</strong></li>
|
201 |
-
<li>A: You can change your nickname in the game by tapping on your profile icon on the top left corner of the screen. Then, tap on "Change Nickname" and enter a new nickname. You can only change your nickname once for free, so choose wisely.</li>
|
202 |
-
<li><strong>Q: How can I add friends in the game?</strong></li>
|
203 |
-
<li>A: You can add friends in the game by tapping on the friends icon on the bottom right corner of the screen. Then, tap on "Add Friend" and enter their nickname or user code. You can also accept friend requests from other players or send friend requests to players you meet in the game.</li>
|
204 |
-
<li><strong>Q: How can I contact customer service in the game?</strong></li>
|
205 |
-
<li>A: You can contact customer service in the game by tapping on the settings icon on the top right corner of the screen. Then, tap on "Customer Service" and choose a topic that matches your issue or inquiry. You can also send an email to <a href="mailto:[email protected]">support.superstar.jyp @dalcomsoft.com</a> for more assistance.</li>
|
206 |
-
<li><strong>Q: How can I update the game to the latest version?</strong></li>
|
207 |
-
<li>A: You can update the game to the latest version by visiting the Google Play Store or App Store and checking for updates. You can also turn on the auto-update option in your device settings to update the game automatically. You should always update the game to enjoy the latest features and bug fixes.</li>
|
208 |
-
</ol>
spaces/A666sxr/Genshin_TTS/commons.py
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
|
8 |
-
def init_weights(m, mean=0.0, std=0.01):
|
9 |
-
classname = m.__class__.__name__
|
10 |
-
if classname.find("Conv") != -1:
|
11 |
-
m.weight.data.normal_(mean, std)
|
12 |
-
|
13 |
-
|
14 |
-
def get_padding(kernel_size, dilation=1):
|
15 |
-
return int((kernel_size*dilation - dilation)/2)
|
16 |
-
|
17 |
-
|
18 |
-
def convert_pad_shape(pad_shape):
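    # Reverse the per-dimension pad spec and flatten it into the flat list format expected by F.pad.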
|
19 |
-
l = pad_shape[::-1]
|
20 |
-
pad_shape = [item for sublist in l for item in sublist]
|
21 |
-
return pad_shape
|
22 |
-
|
23 |
-
|
24 |
-
def intersperse(lst, item):
|
25 |
-
result = [item] * (len(lst) * 2 + 1)
|
26 |
-
result[1::2] = lst
|
27 |
-
return result
|
28 |
-
|
29 |
-
|
30 |
-
def kl_divergence(m_p, logs_p, m_q, logs_q):
|
31 |
-
"""KL(P||Q)"""
|
32 |
-
kl = (logs_q - logs_p) - 0.5
|
33 |
-
kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
|
34 |
-
return kl
|
35 |
-
|
36 |
-
|
37 |
-
def rand_gumbel(shape):
|
38 |
-
"""Sample from the Gumbel distribution, protect from overflows."""
|
39 |
-
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
|
40 |
-
return -torch.log(-torch.log(uniform_samples))
|
41 |
-
|
42 |
-
|
43 |
-
def rand_gumbel_like(x):
|
44 |
-
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
|
45 |
-
return g
|
46 |
-
|
47 |
-
|
48 |
-
def slice_segments(x, ids_str, segment_size=4):
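    # Slice a fixed-length window of segment_size frames from each batch element, starting at ids_str[i].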
|
49 |
-
ret = torch.zeros_like(x[:, :, :segment_size])
|
50 |
-
for i in range(x.size(0)):
|
51 |
-
idx_str = ids_str[i]
|
52 |
-
idx_end = idx_str + segment_size
|
53 |
-
ret[i] = x[i, :, idx_str:idx_end]
|
54 |
-
return ret
|
55 |
-
|
56 |
-
|
57 |
-
def rand_slice_segments(x, x_lengths=None, segment_size=4):
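    # Pick a random valid start frame per batch element and return the sliced segments plus their start indices.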
|
58 |
-
b, d, t = x.size()
|
59 |
-
if x_lengths is None:
|
60 |
-
x_lengths = t
|
61 |
-
ids_str_max = x_lengths - segment_size + 1
|
62 |
-
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
|
63 |
-
ret = slice_segments(x, ids_str, segment_size)
|
64 |
-
return ret, ids_str
|
65 |
-
|
66 |
-
|
67 |
-
def get_timing_signal_1d(
|
68 |
-
length, channels, min_timescale=1.0, max_timescale=1.0e4):
|
69 |
-
position = torch.arange(length, dtype=torch.float)
|
70 |
-
num_timescales = channels // 2
|
71 |
-
log_timescale_increment = (
|
72 |
-
math.log(float(max_timescale) / float(min_timescale)) /
|
73 |
-
(num_timescales - 1))
|
74 |
-
inv_timescales = min_timescale * torch.exp(
|
75 |
-
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
|
76 |
-
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
|
77 |
-
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
|
78 |
-
signal = F.pad(signal, [0, 0, 0, channels % 2])
|
79 |
-
signal = signal.view(1, channels, length)
|
80 |
-
return signal
|
81 |
-
|
82 |
-
|
83 |
-
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
|
84 |
-
b, channels, length = x.size()
|
85 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
86 |
-
return x + signal.to(dtype=x.dtype, device=x.device)
|
87 |
-
|
88 |
-
|
89 |
-
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
|
90 |
-
b, channels, length = x.size()
|
91 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
92 |
-
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
|
93 |
-
|
94 |
-
|
95 |
-
def subsequent_mask(length):
|
96 |
-
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
|
97 |
-
return mask
|
98 |
-
|
99 |
-
|
100 |
-
@torch.jit.script
|
101 |
-
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
102 |
-
n_channels_int = n_channels[0]
|
103 |
-
in_act = input_a + input_b
|
104 |
-
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
105 |
-
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
106 |
-
acts = t_act * s_act
|
107 |
-
return acts
|
108 |
-
|
109 |
-
|
110 |
-
def convert_pad_shape(pad_shape):
|
111 |
-
l = pad_shape[::-1]
|
112 |
-
pad_shape = [item for sublist in l for item in sublist]
|
113 |
-
return pad_shape
|
114 |
-
|
115 |
-
|
116 |
-
def shift_1d(x):
|
117 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
118 |
-
return x
|
119 |
-
|
120 |
-
|
121 |
-
def sequence_mask(length, max_length=None):
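    # Build a [batch, max_length] boolean mask that is True at positions before each sequence's length.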
|
122 |
-
if max_length is None:
|
123 |
-
max_length = length.max()
|
124 |
-
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
125 |
-
return x.unsqueeze(0) < length.unsqueeze(1)
|
126 |
-
|
127 |
-
|
128 |
-
def generate_path(duration, mask):
|
129 |
-
"""
|
130 |
-
duration: [b, 1, t_x]
|
131 |
-
mask: [b, 1, t_y, t_x]
|
132 |
-
"""
|
133 |
-
device = duration.device
|
134 |
-
|
135 |
-
b, _, t_y, t_x = mask.shape
|
136 |
-
cum_duration = torch.cumsum(duration, -1)
|
137 |
-
|
138 |
-
cum_duration_flat = cum_duration.view(b * t_x)
|
139 |
-
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
|
140 |
-
path = path.view(b, t_x, t_y)
|
141 |
-
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
|
142 |
-
path = path.unsqueeze(1).transpose(2,3) * mask
|
143 |
-
return path
|
144 |
-
|
145 |
-
|
146 |
-
def clip_grad_value_(parameters, clip_value, norm_type=2):
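    # Clamp each gradient element-wise to [-clip_value, clip_value] (when clip_value is given) and return the overall gradient norm.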
|
147 |
-
if isinstance(parameters, torch.Tensor):
|
148 |
-
parameters = [parameters]
|
149 |
-
parameters = list(filter(lambda p: p.grad is not None, parameters))
|
150 |
-
norm_type = float(norm_type)
|
151 |
-
if clip_value is not None:
|
152 |
-
clip_value = float(clip_value)
|
153 |
-
|
154 |
-
total_norm = 0
|
155 |
-
for p in parameters:
|
156 |
-
param_norm = p.grad.data.norm(norm_type)
|
157 |
-
total_norm += param_norm.item() ** norm_type
|
158 |
-
if clip_value is not None:
|
159 |
-
p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
160 |
-
total_norm = total_norm ** (1. / norm_type)
|
161 |
-
return total_norm
spaces/AIConsultant/MusicGen/audiocraft/models/musicgen.py
DELETED
@@ -1,409 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Main model for using MusicGen. This will combine all the required components
|
9 |
-
and provide easy access to the generation API.
|
10 |
-
"""
|
11 |
-
|
12 |
-
import typing as tp
|
13 |
-
import warnings
|
14 |
-
|
15 |
-
import torch
|
16 |
-
|
17 |
-
from .encodec import CompressionModel
|
18 |
-
from .lm import LMModel
|
19 |
-
from .builders import get_debug_compression_model, get_debug_lm_model
|
20 |
-
from .loaders import load_compression_model, load_lm_model
|
21 |
-
from ..data.audio_utils import convert_audio
|
22 |
-
from ..modules.conditioners import ConditioningAttributes, WavCondition
|
23 |
-
from ..utils.autocast import TorchAutocast
|
24 |
-
|
25 |
-
|
26 |
-
MelodyList = tp.List[tp.Optional[torch.Tensor]]
|
27 |
-
MelodyType = tp.Union[torch.Tensor, MelodyList]
|
28 |
-
|
29 |
-
|
30 |
-
# backward compatible names mapping
|
31 |
-
_HF_MODEL_CHECKPOINTS_MAP = {
|
32 |
-
"small": "GrandaddyShmax/musicgen-small",
|
33 |
-
"medium": "GrandaddyShmax/musicgen-medium",
|
34 |
-
"large": "GrandaddyShmax/musicgen-large",
|
35 |
-
"melody": "GrandaddyShmax/musicgen-melody",
|
36 |
-
}
|
37 |
-
|
38 |
-
|
39 |
-
class MusicGen:
|
40 |
-
"""MusicGen main model with convenient generation API.
|
41 |
-
|
42 |
-
Args:
|
43 |
-
name (str): name of the model.
|
44 |
-
compression_model (CompressionModel): Compression model
|
45 |
-
used to map audio to invertible discrete representations.
|
46 |
-
lm (LMModel): Language model over discrete representations.
|
47 |
-
max_duration (float, optional): maximum duration the model can produce,
|
48 |
-
otherwise, inferred from the training params.
|
49 |
-
"""
|
50 |
-
def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
|
51 |
-
max_duration: tp.Optional[float] = None):
|
52 |
-
self.name = name
|
53 |
-
self.compression_model = compression_model
|
54 |
-
self.lm = lm
|
55 |
-
if max_duration is None:
|
56 |
-
if hasattr(lm, 'cfg'):
|
57 |
-
max_duration = lm.cfg.dataset.segment_duration # type: ignore
|
58 |
-
else:
|
59 |
-
raise ValueError("You must provide max_duration when building directly MusicGen")
|
60 |
-
assert max_duration is not None
|
61 |
-
self.max_duration: float = max_duration
|
62 |
-
self.device = next(iter(lm.parameters())).device
|
63 |
-
self.generation_params: dict = {}
|
64 |
-
self.set_generation_params(duration=15) # 15 seconds by default
|
65 |
-
self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
|
66 |
-
if self.device.type == 'cpu':
|
67 |
-
self.autocast = TorchAutocast(enabled=False)
|
68 |
-
else:
|
69 |
-
self.autocast = TorchAutocast(
|
70 |
-
enabled=True, device_type=self.device.type, dtype=torch.float16)
|
71 |
-
|
72 |
-
@property
|
73 |
-
def frame_rate(self) -> float:
|
74 |
-
"""Roughly the number of AR steps per seconds."""
|
75 |
-
return self.compression_model.frame_rate
|
76 |
-
|
77 |
-
@property
|
78 |
-
def sample_rate(self) -> int:
|
79 |
-
"""Sample rate of the generated audio."""
|
80 |
-
return self.compression_model.sample_rate
|
81 |
-
|
82 |
-
@property
|
83 |
-
def audio_channels(self) -> int:
|
84 |
-
"""Audio channels of the generated audio."""
|
85 |
-
return self.compression_model.channels
|
86 |
-
|
87 |
-
@staticmethod
|
88 |
-
def get_pretrained(name: str = 'GrandaddyShmax/musicgen-melody', device=None):
|
89 |
-
"""Return pretrained model, we provide four models:
|
90 |
-
- facebook/musicgen-small (300M), text to music,
|
91 |
-
# see: https://huggingface.co/facebook/musicgen-small
|
92 |
-
- facebook/musicgen-medium (1.5B), text to music,
|
93 |
-
# see: https://huggingface.co/facebook/musicgen-medium
|
94 |
-
- facebook/musicgen-melody (1.5B) text to music and text+melody to music,
|
95 |
-
# see: https://huggingface.co/facebook/musicgen-melody
|
96 |
-
- facebook/musicgen-large (3.3B), text to music,
|
97 |
-
# see: https://huggingface.co/facebook/musicgen-large
|
98 |
-
"""
|
99 |
-
if device is None:
|
100 |
-
if torch.cuda.device_count():
|
101 |
-
device = 'cuda'
|
102 |
-
else:
|
103 |
-
device = 'cpu'
|
104 |
-
|
105 |
-
if name == 'debug':
|
106 |
-
# used only for unit tests
|
107 |
-
compression_model = get_debug_compression_model(device)
|
108 |
-
lm = get_debug_lm_model(device)
|
109 |
-
return MusicGen(name, compression_model, lm, max_duration=30)
|
110 |
-
|
111 |
-
lm = load_lm_model(name, device=device)
|
112 |
-
compression_model = load_compression_model(name, device=device)
|
113 |
-
if 'self_wav' in lm.condition_provider.conditioners:
|
            lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True

        return MusicGen(name, compression_model, lm)

    def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
                              top_p: float = 0.0, temperature: float = 1.0,
                              duration: float = 30.0, cfg_coef: float = 3.0,
                              two_step_cfg: bool = False, extend_stride: float = 18):
        """Set the generation parameters for MusicGen.

        Args:
            use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
            top_k (int, optional): top_k used for sampling. Defaults to 250.
            top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
            temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
            duration (float, optional): Duration of the generated waveform. Defaults to 30.0.
            cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
            two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
                instead of batching together the two. This has some impact on how things
                are padded but seems to have little impact in practice.
            extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much
                should we extend the audio each time. Larger values will mean less context is
                preserved, and shorter value will require extra computations.
        """
        assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
        self.extend_stride = extend_stride
        self.duration = duration
        self.generation_params = {
            'use_sampling': use_sampling,
            'temp': temperature,
            'top_k': top_k,
            'top_p': top_p,
            'cfg_coef': cfg_coef,
            'two_step_cfg': two_step_cfg,
        }

    def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
        """Override the default progress callback."""
        self._progress_callback = progress_callback

    def generate_unconditional(self, num_samples: int, progress: bool = False, return_tokens: bool = False) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
        """Generate samples in an unconditional manner.

        Args:
            num_samples (int): Number of samples to be generated.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        descriptions: tp.List[tp.Optional[str]] = [None] * num_samples
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
        tokens = self._generate_tokens(attributes, prompt_tokens, progress)
        if return_tokens:
            return self.generate_audio(tokens), tokens
        return self.generate_audio(tokens)

    def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \
            -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
        """Generate samples conditioned on text.

        Args:
            descriptions (list of str): A list of strings used as text conditioning.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
        assert prompt_tokens is None
        tokens = self._generate_tokens(attributes, prompt_tokens, progress)
        if return_tokens:
            return self.generate_audio(tokens), tokens
        return self.generate_audio(tokens)

    def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, melody_sample_rate: int, progress: bool = False, return_tokens: bool = False) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
        """Generate samples conditioned on text and melody.

        Args:
            descriptions (list of str): A list of strings used as text conditioning.
            melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as
                melody conditioning. Should have shape [B, C, T] with B matching the description length,
                C=1 or 2. It can be [C, T] if there is a single description. It can also be
                a list of [C, T] tensors.
            melody_sample_rate: (int): Sample rate of the melody waveforms.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        if isinstance(melody_wavs, torch.Tensor):
            if melody_wavs.dim() == 2:
                melody_wavs = melody_wavs[None]
            if melody_wavs.dim() != 3:
                raise ValueError("Melody wavs should have a shape [B, C, T].")
            melody_wavs = list(melody_wavs)
        else:
            for melody in melody_wavs:
                if melody is not None:
                    assert melody.dim() == 2, "One melody in the list has the wrong number of dims."

        melody_wavs = [
            convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)
            if wav is not None else None
            for wav in melody_wavs]
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,
                                                                        melody_wavs=melody_wavs)
        assert prompt_tokens is None
        tokens = self._generate_tokens(attributes, prompt_tokens, progress)
        if return_tokens:
            return self.generate_audio(tokens), tokens
        return self.generate_audio(tokens)

    def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
                              descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
                              progress: bool = False, return_tokens: bool = False) \
            -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
        """Generate samples conditioned on audio prompts.

        Args:
            prompt (torch.Tensor): A batch of waveforms used for continuation.
                Prompt should be [B, C, T], or [C, T] if only one sample is generated.
            prompt_sample_rate (int): Sampling rate of the given audio waveforms.
            descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        if prompt.dim() == 2:
            prompt = prompt[None]
        if prompt.dim() != 3:
            raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
        prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
        if descriptions is None:
            descriptions = [None] * len(prompt)
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
        assert prompt_tokens is not None
        tokens = self._generate_tokens(attributes, prompt_tokens, progress)
        if return_tokens:
            return self.generate_audio(tokens), tokens
        return self.generate_audio(tokens)

    @torch.no_grad()
    def _prepare_tokens_and_attributes(
            self,
            descriptions: tp.Sequence[tp.Optional[str]],
            prompt: tp.Optional[torch.Tensor],
            melody_wavs: tp.Optional[MelodyList] = None,
    ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
        """Prepare model inputs.

        Args:
            descriptions (list of str): A list of strings used as text conditioning.
            prompt (torch.Tensor): A batch of waveforms used for continuation.
            melody_wavs (torch.Tensor, optional): A batch of waveforms
                used as melody conditioning. Defaults to None.
        """
        attributes = [
            ConditioningAttributes(text={'description': description})
            for description in descriptions]

        if melody_wavs is None:
            for attr in attributes:
                attr.wav['self_wav'] = WavCondition(
                    torch.zeros((1, 1, 1), device=self.device),
                    torch.tensor([0], device=self.device),
                    sample_rate=[self.sample_rate],
                    path=[None])
        else:
            if 'self_wav' not in self.lm.condition_provider.conditioners:
                raise RuntimeError("This model doesn't support melody conditioning. "
                                   "Use the `melody` model.")
            assert len(melody_wavs) == len(descriptions), \
                f"number of melody wavs must match number of descriptions! " \
                f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}"
            for attr, melody in zip(attributes, melody_wavs):
                if melody is None:
                    attr.wav['self_wav'] = WavCondition(
                        torch.zeros((1, 1, 1), device=self.device),
                        torch.tensor([0], device=self.device),
                        sample_rate=[self.sample_rate],
                        path=[None])
                else:
                    attr.wav['self_wav'] = WavCondition(
                        melody[None].to(device=self.device),
                        torch.tensor([melody.shape[-1]], device=self.device),
                        sample_rate=[self.sample_rate],
                        path=[None],
                    )

        if prompt is not None:
            if descriptions is not None:
                assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match"
            prompt = prompt.to(self.device)
            prompt_tokens, scale = self.compression_model.encode(prompt)
            assert scale is None
        else:
            prompt_tokens = None
        return attributes, prompt_tokens

    def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
                         prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
        """Generate discrete audio tokens given audio prompt and/or conditions.

        Args:
            attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).
            prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        Returns:
            torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
        """
        i = 0
        prompt_list = attributes[0].text['description']
        total_gen_len = int(self.duration * self.frame_rate)
        max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
        current_gen_offset: int = 0

        def _progress_callback(generated_tokens: int, tokens_to_generate: int):
            generated_tokens += current_gen_offset
            if current_gen_offset > 0:
                generated_tokens += (self.max_duration - self.extend_stride) * self.frame_rate
            if self._progress_callback is not None:
                # Note that total_gen_len might be quite wrong depending on the
                # codebook pattern used, but with delay it is almost accurate.
                self._progress_callback(generated_tokens, total_gen_len)
            else:
                print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')

        if prompt_tokens is not None:
            assert max_prompt_len >= prompt_tokens.shape[-1], \
                "Prompt is longer than audio to generate"

        callback = None
        if progress:
            callback = _progress_callback

        if self.duration <= self.max_duration:
            # generate by sampling from LM, simple case.
            with self.autocast:
                attributes[0].text['description'] = prompt_list[0]
                gen_tokens = self.lm.generate(
                    prompt_tokens, attributes,
                    callback=callback, max_gen_len=total_gen_len, **self.generation_params)

        else:
            # now this gets a bit messier, we need to handle prompts,
            # melody conditioning etc.
            ref_wavs = [attr.wav['self_wav'] for attr in attributes]
            all_tokens = []
            if prompt_tokens is None:
                prompt_length = 0
            else:
                all_tokens.append(prompt_tokens)
                prompt_length = prompt_tokens.shape[-1]

            stride_tokens = int(self.frame_rate * self.extend_stride)

            while current_gen_offset + prompt_length < total_gen_len:
                time_offset = current_gen_offset / self.frame_rate
                chunk_duration = min(self.duration - time_offset, self.max_duration)
                max_gen_len = int(chunk_duration * self.frame_rate)
                for attr, ref_wav in zip(attributes, ref_wavs):
                    wav_length = ref_wav.length.item()
                    if wav_length == 0:
                        continue
                    # We will extend the wav periodically if it not long enough.
                    # we have to do it here rather than in conditioners.py as otherwise
                    # we wouldn't have the full wav.
                    initial_position = int(time_offset * self.sample_rate)
                    wav_target_length = int(self.max_duration * self.sample_rate)
                    positions = torch.arange(initial_position,
                                             initial_position + wav_target_length, device=self.device)
                    attr.wav['self_wav'] = WavCondition(
                        ref_wav[0][..., positions % wav_length],
                        torch.full_like(ref_wav[1], wav_target_length),
                        [self.sample_rate] * ref_wav[0].size(0),
                        [None], [0.])
                with self.autocast:
                    if i >= len(prompt_list):
                        i = len(prompt_list) - 1
                    attributes[0].text['description'] = prompt_list[i]
                    gen_tokens = self.lm.generate(
                        prompt_tokens, attributes,
                        callback=callback, max_gen_len=max_gen_len, **self.generation_params)
                    i = i + 1
                if prompt_tokens is None:
                    all_tokens.append(gen_tokens)
                else:
                    all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
                prompt_tokens = gen_tokens[:, :, stride_tokens:]
                prompt_length = prompt_tokens.shape[-1]
                current_gen_offset += stride_tokens

            gen_tokens = torch.cat(all_tokens, dim=-1)
        return gen_tokens

    def generate_audio(self, gen_tokens: torch.Tensor):
        """Generate Audio from tokens"""
        assert gen_tokens.dim() == 3
        with torch.no_grad():
            gen_audio = self.compression_model.decode(gen_tokens, None)
        return gen_audio

    def to(self, device: str):
        self.compression_model.to(device)
        self.lm.to(device)
        return self
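For reference, a minimal usage sketch of the generation API removed above. The `model` handle and how it is constructed (the pretrained loader) live in the earlier, omitted part of this file, so the setup here is assumed rather than shown:

```python
# Minimal sketch, assuming `model` is a MusicGen instance built by this file's factory code.
model.set_generation_params(use_sampling=True, top_k=250, duration=10.0, cfg_coef=3.0)
wavs = model.generate(["lo-fi beat with mellow piano"], progress=True)   # [B, C, T] waveform
wavs, tokens = model.generate(["ambient pad"], return_tokens=True)       # also return discrete tokens
audio = model.generate_unconditional(num_samples=2)                      # no text conditioning
```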
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/__init__.py
DELETED
File without changes
spaces/AILab-CVC/SEED-LLaMA/scripts/seed_tokenizer_inference.py
DELETED
@@ -1,33 +0,0 @@
import hydra
from omegaconf import OmegaConf
from PIL import Image
import pyrootutils
import os

pyrootutils.setup_root(__file__, indicator='.project-root', pythonpath=True)

tokenizer_cfg_path = 'configs/tokenizer/seed_llama_tokenizer.yaml'
transform_cfg_path = 'configs/transform/clip_transform.yaml'

image_path = 'images/cat.jpg'
save_dir = './'
save_path = os.path.join(save_dir, os.path.basename(image_path))

os.makedirs(save_dir, exist_ok=True)

device = 'cuda'

tokenizer_cfg = OmegaConf.load(tokenizer_cfg_path)
tokenizer = hydra.utils.instantiate(tokenizer_cfg, device=device, load_diffusion=True)

transform_cfg = OmegaConf.load(transform_cfg_path)
transform = hydra.utils.instantiate(transform_cfg)

image = Image.open(image_path).convert('RGB')

image_tensor = transform(image).to(device)
image_ids = tokenizer.encode_image(image_torch=image_tensor)

images = tokenizer.decode_image(image_ids)

images[0].save(save_path)
spaces/AIxPha/Real-CUGAN/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Real CUGAN
emoji: 🐢
colorFrom: gray
colorTo: green
sdk: gradio
sdk_version: 3.6
app_file: app.py
pinned: false
license: gpl-3.0
duplicated_from: DianXian/Real-CUGAN
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Aaaaaaaabdualh/topic2poem/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Topic2poem
emoji: 💻
colorFrom: pink
colorTo: purple
sdk: gradio
sdk_version: 3.2
app_file: app.py
pinned: false
license: afl-3.0
duplicated_from: mareloraby/topic2poem
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/sde_team.py
DELETED
@@ -1,137 +0,0 @@
import asyncio
import logging
from typing import Any, Dict, List
import json

from agentverse.agents.simulation_agent.conversation import BaseAgent

# from agentverse.environments.simulation_env.rules.base import Rule
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
from agentverse.message import Message

from .. import env_registry as EnvironmentRegistry
from ..base import BaseEnvironment

from agentverse.initialization import load_tools


@EnvironmentRegistry.register("sde_team")
class SdeTeamEnvironment(BaseEnvironment):
    """
    A basic environment implementing the logic of conversation to craft code.

    Args:
        agents: List of agents
        rule: Rule for the environment
        max_turns: Maximum number of turns
        cnt_turn: Current turn number
        last_messages: Messages from last turn
        rule_params: Variables set by the rule
    """

    agents: List[BaseAgent]
    rule: Rule
    max_turns: int = 10
    cnt_turn: int = 0
    last_messages: List[Message] = []
    rule_params: Dict = {}
    task_name: str = "test"

    def __init__(self, rule, **kwargs):
        rule_config = rule
        order_config = rule_config.get("order", {"type": "sde_team"})
        visibility_config = rule_config.get("visibility", {"type": "base"})
        selector_config = rule_config.get("selector", {"type": "sde_team"})
        updater_config = rule_config.get("updater", {"type": "sde_team"})
        describer_config = rule_config.get("describer", {"type": "base"})
        rule = Rule(
            order_config,
            visibility_config,
            selector_config,
            updater_config,
            describer_config,
        )
        super().__init__(rule=rule, **kwargs)
        self.rule_params["first_round"] = True
        self.rule_params["end_flag"] = False

        # # Test code
        self.rule_params["name_to_tools"] = {
            tool.name: tool
            for tool in load_tools(
                [
                    {
                        "tool_name": "code_interpreter",
                        "tool_url": "http://127.0.0.1:8079/tools/code_interpreter/",
                    }
                ]
            )
        }
        tool = self.rule_params["name_to_tools"]["execute_unit_tests"]
        # print(type(tool))

        # d = {
        #     "func_impl": "def f(x):\n\treturn x + 1",
        #     "tests": ["assert f(1) == 2"]
        # }
        # # input_str = json.dumps(d)
        # json.loads(input_str)
        # tool.run(input_str, verbose=True)
        # exit()

    async def step(self) -> List[Message]:
        """Run one step of the environment"""

        # Get the next agent index
        agent_ids = self.rule.get_next_agent_idx(self)  # order

        # Generate current environment description
        # env_descriptions = self.rule.get_env_description(self)  # describer

        # # Generate the next message
        # messages = await asyncio.gather(
        #     *[self.agents[i].astep(env_descriptions[i]) for i in agent_ids]
        # )  # call chatgpt api

        messages = await asyncio.gather(*[self.agents[i].astep("") for i in agent_ids])

        # Track the messages to get the role of the sender
        self.last_messages = messages

        # Some rules will select certain messages from all the messages
        selected_messages = self.rule.select_message(self, messages)  # selector
        self.last_messages = selected_messages
        self.print_messages(selected_messages)

        # Update the memory of the agents
        self.rule.update_memory(self)  # updater: update memory

        # Update the set of visible agents for each agent
        self.rule.update_visible_agents(self)  # change receiver

        self.cnt_turn += 1

        return selected_messages

    def print_messages(self, messages: List[Message]) -> None:
        for message in messages:
            if message is not None:
                logging.info(f"{message.sender}: {message.content}")

    def reset(self) -> None:
        """Reset the environment"""
        self.cnt_turn = 0
        self.rule.reset()
        for agent in self.agents:
            agent.reset()

    def is_done(self) -> bool:
        """Check if the environment is done"""
        if self.cnt_turn >= self.max_turns or self.rule_params["end_flag"]:
            # with open("record_human_eval.txt", "a") as f:
            #     wd = dict()
            #     wd['task_id'] = self.task_name
            #     wd['code'] = self.rule_params['code']
            #     f.write(json.dumps(wd))
            return True
        return False
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/dropdown-plugin.js
DELETED
@@ -1,18 +0,0 @@
import DropDown from './behaviors/dropdown/DropDown.js';

class DropDownPlugin extends Phaser.Plugins.BasePlugin {
    constructor(pluginManager) {
        super(pluginManager);
    }

    start() {
        var eventEmitter = this.game.events;
        eventEmitter.on('destroy', this.destroy, this);
    }

    add(gameObject, config) {
        return new DropDown(gameObject, config);
    }
}

export default DropDownPlugin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/CircleMaskImage.d.ts
DELETED
@@ -1,2 +0,0 @@
import CircleMaskImage from '../../../plugins/circlemaskimage';
export default CircleMaskImage;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/Factory.js
DELETED
@@ -1,13 +0,0 @@
import ColorInputBase from './ColorInputBase.js';
import ObjectFactory from '../../ObjectFactory.js';
import SetValue from '../../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('colorInputLite', function (config) {
    var gameObject = new ColorInputBase(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.UI.ColorInputBase', ColorInputBase);

export default ColorInputBase;
spaces/Amrrs/DragGan-Inversion/stylegan_human/alignment.py
DELETED
@@ -1,233 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.


import os
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from utils.ImagesDataset import ImagesDataset

import cv2
import time
import copy
import imutils

# for openpose body keypoint detector : # (src:https://github.com/Hzzone/pytorch-openpose)
from openpose.src import util
from openpose.src.body import Body

# for paddlepaddle human segmentation : #(src: https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/contrib/PP-HumanSeg/)
from PP_HumanSeg.deploy.infer import Predictor as PP_HumenSeg_Predictor

import math


def angle_between_points(p0, p1, p2):
    if p0[1] == -1 or p1[1] == -1 or p2[1] == -1:
        return -1
    a = (p1[0]-p0[0])**2 + (p1[1]-p0[1])**2
    b = (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2
    c = (p2[0]-p0[0])**2 + (p2[1]-p0[1])**2
    if a * b == 0:
        return -1
    return math.acos((a+b-c) / math.sqrt(4*a*b)) * 180 / math.pi


def crop_img_with_padding(img, keypoints, rect):
    person_xmin, person_xmax, ymin, ymax = rect
    img_h, img_w, _ = img.shape  # find body center using keypoints
    middle_shoulder_x = keypoints[1][0]
    middle_hip_x = (keypoints[8][0] + keypoints[11][0]) // 2
    mid_x = (middle_hip_x + middle_shoulder_x) // 2
    mid_y = (ymin + ymax) // 2
    # find which side (l or r) is further than center x, use the further side
    if abs(mid_x-person_xmin) > abs(person_xmax-mid_x):  # left further
        xmin = person_xmin
        xmax = mid_x + (mid_x-person_xmin)
    else:
        # may be negtive
        # in this case, the script won't output any image, leave the case like this
        # since we don't want to pad human body
        xmin = mid_x - (person_xmax-mid_x)
        xmax = person_xmax

    w = xmax - xmin
    h = ymax - ymin
    # pad rectangle to w:h = 1:2 ## calculate desired border length
    if h / w >= 2:  # pad horizontally
        target_w = h // 2
        xmin_prime = int(mid_x - target_w / 2)
        xmax_prime = int(mid_x + target_w / 2)
        if xmin_prime < 0:
            pad_left = abs(xmin_prime)  # - xmin
            xmin = 0
        else:
            pad_left = 0
            xmin = xmin_prime
        if xmax_prime > img_w:
            pad_right = xmax_prime - img_w
            xmax = img_w
        else:
            pad_right = 0
            xmax = xmax_prime

        cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
        im_pad = cv2.copyMakeBorder(cropped_img, 0, 0, int(
            pad_left), int(pad_right), cv2.BORDER_REPLICATE)
    else:  # pad vertically
        target_h = w * 2
        ymin_prime = mid_y - (target_h / 2)
        ymax_prime = mid_y + (target_h / 2)
        if ymin_prime < 0:
            pad_up = abs(ymin_prime)  # - ymin
            ymin = 0
        else:
            pad_up = 0
            ymin = ymin_prime
        if ymax_prime > img_h:
            pad_down = ymax_prime - img_h
            ymax = img_h
        else:
            pad_down = 0
            ymax = ymax_prime
        print(ymin, ymax, xmin, xmax, img.shape)

        cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
        im_pad = cv2.copyMakeBorder(cropped_img, int(pad_up), int(pad_down), 0,
                                    0, cv2.BORDER_REPLICATE)
    result = cv2.resize(im_pad, (512, 1024), interpolation=cv2.INTER_AREA)
    return result


def run(args):
    os.makedirs(args.output_folder, exist_ok=True)
    dataset = ImagesDataset(
        args.image_folder, transforms.Compose([transforms.ToTensor()]))
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)

    body_estimation = Body('openpose/model/body_pose_model.pth')

    total = len(dataloader)
    print('Num of dataloader : ', total)
    os.makedirs(f'{args.output_folder}', exist_ok=True)
    # os.makedirs(f'{args.output_folder}/middle_result', exist_ok=True)

    # initialzide HumenSeg
    human_seg_args = {}
    human_seg_args['cfg'] = 'PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax/deploy.yaml'
    human_seg_args['input_shape'] = [1024, 512]
    human_seg_args['save_dir'] = args.output_folder
    human_seg_args['soft_predict'] = False
    human_seg_args['use_gpu'] = True
    human_seg_args['test_speed'] = False
    human_seg_args['use_optic_flow'] = False
    human_seg_args['add_argmax'] = True
    human_seg_args = argparse.Namespace(**human_seg_args)
    human_seg = PP_HumenSeg_Predictor(human_seg_args)

    from tqdm import tqdm
    for fname, image in tqdm(dataloader):
        # try:
        # tensor to numpy image
        fname = fname[0]
        print(f'Processing \'{fname}\'.')

        image = (image.permute(0, 2, 3, 1) * 255).clamp(0, 255)
        image = image.squeeze(0).numpy()  # --> tensor to numpy, (H,W,C)
        # avoid super high res img
        if image.shape[0] >= 2000:  # height ### for shein image
            ratio = image.shape[0]/1200  # height
            dim = (int(image.shape[1]/ratio), 1200)  # (width, height)
            image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        # create segmentation
        # mybg = cv2.imread('mybg.png')
        comb, segmentation, bg, ori_img = human_seg.run(image, None)  # mybg)
        # cv2.imwrite('comb.png',comb) # [0,255]
        # cv2.imwrite('alpha.png',segmentation*255) # segmentation [0,1] --> [0.255]
        # cv2.imwrite('bg.png',bg) #[0,255]
        # cv2.imwrite('ori_img.png',ori_img) # [0,255]

        masks_np = (segmentation * 255)  # .byte().cpu().numpy() #1024,512,1
        mask0_np = masks_np[:, :, 0].astype(np.uint8)  # [0, :, :]
        contours = cv2.findContours(
            mask0_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(contours)
        c = max(cnts, key=cv2.contourArea)
        extTop = tuple(c[c[:, :, 1].argmin()][0])
        extBot = tuple(c[c[:, :, 1].argmax()][0])
        extBot = list(extBot)
        extTop = list(extTop)
        pad_range = int((extBot[1]-extTop[1])*0.05)
        # seg mask already reaches to the edge
        if (int(extTop[1]) <= 5 and int(extTop[1]) > 0) and (comb.shape[0] > int(extBot[1]) and int(extBot[1]) >= comb.shape[0]-5):
            # pad with pure white, top 100 px, bottom 100 px
            comb = cv2.copyMakeBorder(
                comb, pad_range+5, pad_range+5, 0, 0, cv2.BORDER_CONSTANT, value=[255, 255, 255])
        elif int(extTop[1]) <= 0 or int(extBot[1]) >= comb.shape[0]:
            print('PAD: body out of boundary', fname)  # should not happened
            return {}
        else:
            # 105 instead of 100: give some extra space
            comb = cv2.copyMakeBorder(
                comb, pad_range+5, pad_range+5, 0, 0, cv2.BORDER_REPLICATE)
            extBot[1] = extBot[1] + pad_range+5
            extTop[1] = extTop[1] + pad_range+5

        extLeft = tuple(c[c[:, :, 0].argmin()][0])
        extRight = tuple(c[c[:, :, 0].argmax()][0])
        extLeft = list(extLeft)
        extRight = list(extRight)
        person_ymin = int(extTop[1])-pad_range  # 100
        person_ymax = int(extBot[1])+pad_range  # 100 #height
        if person_ymin < 0 or person_ymax > comb.shape[0]:  # out of range
            return {}
        person_xmin = int(extLeft[0])
        person_xmax = int(extRight[0])
        rect = [person_xmin, person_xmax, person_ymin, person_ymax]
        # recimg = copy.deepcopy(comb)
        # cv2.rectangle(recimg,(person_xmin,person_ymin),(person_xmax,person_ymax),(0,255,0),2)
        # cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_rec.png',recimg)

        # detect keypoints
        keypoints, subset = body_estimation(comb)
        # print(keypoints, subset, len(subset))
        if len(subset) != 1 or (len(subset) == 1 and subset[0][-1] < 15):
            print(
                f'Processing \'{fname}\'. Please import image contains one person only. Also can check segmentation mask. ')
            continue

        # canvas = copy.deepcopy(comb)
        # canvas = util.draw_bodypose(canvas, keypoints, subset, show_number=True)
        # cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_keypoints.png',canvas)

        comb = crop_img_with_padding(comb, keypoints, rect)

        cv2.imwrite(f'{args.output_folder}/{fname}.png', comb)
        print(f' -- Finished processing \'{fname}\'. --')
        # except:
        #     print(f'Processing \'{fname}\'. Not satisfied the alignment strategy.')


if __name__ == '__main__':
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False

    t1 = time.time()
    arg_formatter = argparse.ArgumentDefaultsHelpFormatter
    description = 'StyleGAN-Human data process'
    parser = argparse.ArgumentParser(formatter_class=arg_formatter,
                                     description=description)
    parser.add_argument('--image-folder', type=str, dest='image_folder')
    parser.add_argument('--output-folder',
                        dest='output_folder', default='results', type=str)
    # parser.add_argument('--cfg', dest='cfg for segmentation', default='PP_HumanSeg/export_model/ppseg_lite_portrait_398x224_with_softmax/deploy.yaml', type=str)

    print('parsing arguments')
    cmd_args = parser.parse_args()
    run(cmd_args)

    print('total time elapsed: ', str(time.time() - t1))
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/__init__.py
DELETED
@@ -1,3 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# empty
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/commands/__init__.py
DELETED
@@ -1,27 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseDiffusersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
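As a rough illustration of how this abstract base is meant to be subclassed (the `EnvCommand` name and the sub-parser wiring below are hypothetical, not part of the deleted file):

```python
# Hypothetical sketch of a concrete command built on BaseDiffusersCLICommand.
class EnvCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is assumed here to be the sub-parsers object of the CLI entry point.
        env_parser = parser.add_parser("env", help="Print environment information")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info goes here")
```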
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_ema.py
DELETED
@@ -1,159 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import torch

from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device


enable_full_determinism()


class EMAModelTests(unittest.TestCase):
    model_id = "hf-internal-testing/tiny-stable-diffusion-pipe"
    batch_size = 1
    prompt_length = 77
    text_encoder_hidden_dim = 32
    num_in_channels = 4
    latent_height = latent_width = 64
    generator = torch.manual_seed(0)

    def get_models(self, decay=0.9999):
        unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet")
        unet = unet.to(torch_device)
        ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config)
        return unet, ema_unet

    def get_dummy_inputs(self):
        noisy_latents = torch.randn(
            self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator
        ).to(torch_device)
        timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device)
        encoder_hidden_states = torch.randn(
            self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator
        ).to(torch_device)
        return noisy_latents, timesteps, encoder_hidden_states

    def simulate_backprop(self, unet):
        updated_state_dict = {}
        for k, param in unet.state_dict().items():
            updated_param = torch.randn_like(param) + (param * torch.randn_like(param))
            updated_state_dict.update({k: updated_param})
        unet.load_state_dict(updated_state_dict)
        return unet

    def test_optimization_steps_updated(self):
        unet, ema_unet = self.get_models()
        # Take the first (hypothetical) EMA step.
        ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 1

        # Take two more.
        for _ in range(2):
            ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 3

    def test_shadow_params_not_updated(self):
        unet, ema_unet = self.get_models()
        # Since the `unet` is not being updated (i.e., backprop'd)
        # there won't be any difference between the `params` of `unet`
        # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`.
        ema_unet.step(unet.parameters())
        orig_params = list(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)

        # The above holds true even if we call `ema.step()` multiple times since
        # `unet` params are still not being updated.
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)

    def test_shadow_params_updated(self):
        unet, ema_unet = self.get_models()
        # Here we simulate the parameter updates for `unet`. Since there might
        # be some parameters which are initialized to zero we take extra care to
        # initialize their values to something non-zero before the multiplication.
        unet_pseudo_updated_step_one = self.simulate_backprop(unet)

        # Take the EMA step.
        ema_unet.step(unet_pseudo_updated_step_one.parameters())

        # Now the EMA'd parameters won't be equal to the original model parameters.
        orig_params = list(unet_pseudo_updated_step_one.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert ~torch.allclose(s_param, param)

        # Ensure this is the case when we take multiple EMA steps.
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert ~torch.allclose(s_param, param)

    def test_consecutive_shadow_params_updated(self):
        # If we call EMA step after a backpropagation consecutively for two times,
        # the shadow params from those two steps should be different.
        unet, ema_unet = self.get_models()

        # First backprop + EMA
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        step_one_shadow_params = ema_unet.shadow_params

        # Second backprop + EMA
        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params

        for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params):
            assert ~torch.allclose(step_one, step_two)

    def test_zero_decay(self):
        # If there's no decay even if there are backprops, EMA steps
        # won't take any effect i.e., the shadow params would remain the
        # same.
        unet, ema_unet = self.get_models(decay=0.0)
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        step_one_shadow_params = ema_unet.shadow_params

        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params

        for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params):
            assert torch.allclose(step_one, step_two)

    @skip_mps
    def test_serialization(self):
        unet, ema_unet = self.get_models()
        noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs()

        with tempfile.TemporaryDirectory() as tmpdir:
            ema_unet.save_pretrained(tmpdir)
            loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel)
            loaded_unet = loaded_unet.to(unet.device)

            # Since no EMA step has been performed the outputs should match.
            output = unet(noisy_latents, timesteps, encoder_hidden_states).sample
            output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample

            assert torch.allclose(output, output_loaded, atol=1e-4)
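The tests above also document the intended `EMAModel` workflow; a condensed sketch of that training-loop usage, with the optimizer step itself elided:

```python
import torch
from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel

unet = UNet2DConditionModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", subfolder="unet")
ema_unet = EMAModel(unet.parameters(), decay=0.9999, model_cls=UNet2DConditionModel, model_config=unet.config)

# after each optimizer step on `unet`, fold the new weights into the moving average
ema_unet.step(unet.parameters())

# persist the EMA weights alongside the model checkpoint
ema_unet.save_pretrained("./ema_checkpoint")
```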
spaces/AndyCer/TheBloke-stable-vicuna-13B-HF/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/TheBloke/stable-vicuna-13B-HF").launch()
spaces/Anthony7906/MengHuiMXD_GPT/readme/README_en.md
DELETED
@@ -1,127 +0,0 @@
<div align="right">
  <!-- Language: -->
  <a title="Chinese" href="../README.md">简体中文</a> | English | <a title="Japanese" href="README_ja.md">日本語</a>
</div>

<h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
<div align="center">
  <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
    <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
  </a>

  <p align="center">
    <h3>Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA</h3>
    <p align="center">
      <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE">
        <img alt="Tests Passing" src="https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT" />
      </a>
      <a href="https://gradio.app/">
        <img alt="GitHub Contributors" src="https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat" />
      </a>
      <a href="https://t.me/tkdifferent">
        <img alt="GitHub pull requests" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
      </a>
      <p>
        Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search <br />
        LaTeX rendering / Table rendering / Code highlighting <br />
        Auto dark mode / Adaptive web interface / WeChat-like theme <br />
        Multi-parameters tuning / Multi-API-Key support / Multi-user support <br />
        Compatible with GPT-4 / Local deployment for LLMs
      </p>
      <a href="https://www.youtube.com/watch?v=MtxS4XZWbJE"><strong>Video Tutorial</strong></a>
        ·
      <a href="https://www.youtube.com/watch?v=77nw7iimYDE"><strong>2.0 Introduction</strong></a>
        ·
      <a href="https://www.youtube.com/watch?v=x-O1jjBqgu4"><strong>3.0 Introduction & Tutorial</strong></a>
        ||
      <a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT"><strong>Online trial</strong></a>
        ·
      <a href="https://huggingface.co/login?next=%2Fspaces%2FJohnSmith9982%2FChuanhuChatGPT%3Fduplicate%3Dtrue"><strong>One-Click deployment</strong></a>
    </p>
    <p align="center">
      <img alt="Animation Demo" src="https://user-images.githubusercontent.com/51039745/226255695-6b17ff1f-ea8d-464f-b69b-a7b6b68fffe8.gif" />
    </p>
  </p>
</div>

## Usage Tips

- To better control the ChatGPT, use System Prompt.
- To use a Prompt Template, select the Prompt Template Collection file first, and then choose certain prompt from the drop-down menu.
- To try again if the response is unsatisfactory, use `🔄 Regenerate` button.
- To start a new line in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd> keys.
- To quickly switch between input history, press <kbd>↑</kbd> and <kbd>↓</kbd> key in the input box.
- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`.
- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please be noted that the program must be running in order to be accessed via a public link.
- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.

## Installation

```shell
git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
cd ChuanhuChatGPT
pip install -r requirements.txt
```

Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file.

```shell
python ChuanhuChatbot.py
```

A browser window will open and you will be able to chat with ChatGPT.

> **Note**
>
> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.

## Troubleshooting

When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows:

1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
   ```shell
   git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
   ```
2. Try installing the dependencies again (as this project may have introduced new dependencies)
   ```
   pip install -r requirements.txt
   ```
3. Update Gradio
   ```
   pip install gradio --upgrade --force-reinstall
   ```

Generally, you can solve most problems by following these steps.

If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)

This page lists almost all the possible problems and solutions. Please read it carefully.

## More Information

More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):

- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)

## Starchart

[](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)

## Contributors

<a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
</a>

## Sponsor

🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~

<a href="https://www.buymeacoffee.com/ChuanhuChat" ><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=219d53&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>

<img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
spaces/AriaMei/TTSdemo/losses.py
DELETED
@@ -1,61 +0,0 @@
import torch
from torch.nn import functional as F

import commons


def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            rl = rl.float().detach()
            gl = gl.float()
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1-dr)**2)
        g_loss = torch.mean(dg**2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses


def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        dg = dg.float()
        l = torch.mean((1-dg)**2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """
    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()

    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
    kl = torch.sum(kl * z_mask)
    l = kl / torch.sum(z_mask)
    return l
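A small shape sketch for the KL term above, following the `[b, h, t_t]` convention in its docstring (the mask broadcasts over the channel axis; the sizes below are illustrative):

```python
import torch

# dummy latents and a frame-validity mask, assuming kl_loss as defined above
b, h, t = 2, 192, 100
z_p, logs_q, m_p, logs_p = (torch.randn(b, h, t) for _ in range(4))
z_mask = torch.ones(b, 1, t)   # 1 on valid frames, 0 on padding
loss = kl_loss(z_p, logs_q, m_p, logs_p, z_mask)
```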
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/backbone/utils.py
DELETED
@@ -1,186 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# This code is from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/utils.py
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-__all__ = [
-    "window_partition",
-    "window_unpartition",
-    "add_decomposed_rel_pos",
-    "get_abs_pos",
-    "PatchEmbed",
-]
-
-def window_partition(x, window_size):
-    """
-    Partition into non-overlapping windows with padding if needed.
-    Args:
-        x (tensor): input tokens with [B, H, W, C].
-        window_size (int): window size.
-
-    Returns:
-        windows: windows after partition with [B * num_windows, window_size, window_size, C].
-        (Hp, Wp): padded height and width before partition
-    """
-    B, H, W, C = x.shape
-
-    pad_h = (window_size - H % window_size) % window_size
-    pad_w = (window_size - W % window_size) % window_size
-    if pad_h > 0 or pad_w > 0:
-        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
-    Hp, Wp = H + pad_h, W + pad_w
-
-    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
-    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
-    return windows, (Hp, Wp)
-
-
-def window_unpartition(windows, window_size, pad_hw, hw):
-    """
-    Window unpartition into original sequences and removing padding.
-    Args:
-        x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
-        window_size (int): window size.
-        pad_hw (Tuple): padded height and width (Hp, Wp).
-        hw (Tuple): original height and width (H, W) before padding.
-
-    Returns:
-        x: unpartitioned sequences with [B, H, W, C].
-    """
-    Hp, Wp = pad_hw
-    H, W = hw
-    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
-    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
-    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
-
-    if Hp > H or Wp > W:
-        x = x[:, :H, :W, :].contiguous()
-    return x
-
-
-def get_rel_pos(q_size, k_size, rel_pos):
-    """
-    Get relative positional embeddings according to the relative positions of
-    query and key sizes.
-    Args:
-        q_size (int): size of query q.
-        k_size (int): size of key k.
-        rel_pos (Tensor): relative position embeddings (L, C).
-
-    Returns:
-        Extracted positional embeddings according to relative positions.
-    """
-    max_rel_dist = int(2 * max(q_size, k_size) - 1)
-    # Interpolate rel pos if needed.
-    if rel_pos.shape[0] != max_rel_dist:
-        # Interpolate rel pos.
-        rel_pos_resized = F.interpolate(
-            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
-            size=max_rel_dist,
-            mode="linear",
-        )
-        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
-    else:
-        rel_pos_resized = rel_pos
-
-    # Scale the coords with short length if shapes for q and k are different.
-    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
-    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
-    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
-
-    return rel_pos_resized[relative_coords.long()]
-
-
-def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):
-    """
-    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
-    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py   # noqa B950
-    Args:
-        attn (Tensor): attention map.
-        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
-        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
-        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
-        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
-        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
-
-    Returns:
-        attn (Tensor): attention map with added relative positional embeddings.
-    """
-    q_h, q_w = q_size
-    k_h, k_w = k_size
-    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
-    Rw = get_rel_pos(q_w, k_w, rel_pos_w)
-
-    B, _, dim = q.shape
-    r_q = q.reshape(B, q_h, q_w, dim)
-    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
-    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
-
-    attn = (
-        attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
-    ).view(B, q_h * q_w, k_h * k_w)
-
-    return attn
-
-
-def get_abs_pos(abs_pos, has_cls_token, hw):
-    """
-    Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token
-    dimension for the original embeddings.
-    Args:
-        abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).
-        has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.
-        hw (Tuple): size of input image tokens.
-
-    Returns:
-        Absolute positional embeddings after processing with shape (1, H, W, C)
-    """
-    h, w = hw
-    if has_cls_token:
-        abs_pos = abs_pos[:, 1:]
-    xy_num = abs_pos.shape[1]
-    size = int(math.sqrt(xy_num))
-    assert size * size == xy_num
-
-    if size != h or size != w:
-        new_abs_pos = F.interpolate(
-            abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),
-            size=(h, w),
-            mode="bicubic",
-            align_corners=False,
-        )
-
-        return new_abs_pos.permute(0, 2, 3, 1)
-    else:
-        return abs_pos.reshape(1, h, w, -1)
-
-
-class PatchEmbed(nn.Module):
-    """
-    Image to Patch Embedding.
-    """
-
-    def __init__(
-        self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768
-    ):
-        """
-        Args:
-            kernel_size (Tuple): kernel size of the projection layer.
-            stride (Tuple): stride of the projection layer.
-            padding (Tuple): padding size of the projection layer.
-            in_chans (int): Number of input image channels.
-            embed_dim (int): embed_dim (int): Patch embedding dimension.
-        """
-        super().__init__()
-
-        self.proj = nn.Conv2d(
-            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
-        )
-
-    def forward(self, x):
-        x = self.proj(x)
-        # B C H W -> B H W C
-        x = x.permute(0, 2, 3, 1)
-        return x

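For orientation, the sketch below (not part of the diff above) reproduces the padding-and-reshape round trip that window_partition/window_unpartition implement, using plain PyTorch only; the shapes and variable names are illustrative.

import torch
import torch.nn.functional as F

B, H, W, C, ws = 2, 14, 14, 8, 16      # H and W deliberately smaller than the window

x = torch.randn(B, H, W, C)
pad_h = (ws - H % ws) % ws
pad_w = (ws - W % ws) % ws
x_pad = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))   # pad the W and H dims on the right/bottom
Hp, Wp = H + pad_h, W + pad_w

# partition: [B, Hp, Wp, C] -> [B * num_windows, ws, ws, C]
win = x_pad.view(B, Hp // ws, ws, Wp // ws, ws, C)
win = win.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws, ws, C)

# unpartition: regroup the windows, undo the permute, crop the padding away
back = win.view(B, Hp // ws, Wp // ws, ws, ws, C)
back = back.permute(0, 1, 3, 2, 4, 5).reshape(B, Hp, Wp, C)[:, :H, :W, :]

assert torch.equal(back, x)   # the round trip recovers the input exactly
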
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Detection/fcos_R_50_FPN_1x.py
DELETED
@@ -1,11 +0,0 @@
-from ..common.optim import SGD as optimizer
-from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
-from ..common.data.coco import dataloader
-from ..common.models.fcos import model
-from ..common.train import train
-
-dataloader.train.mapper.use_instance_mask = False
-optimizer.lr = 0.01
-
-model.backbone.bottom_up.freeze_at = 2
-train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"

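A config file in this "lazy config" style is consumed as Python rather than YAML. A minimal, hedged sketch (assumes detectron2 is installed; the path and override are illustrative only):

from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/COCO-Detection/fcos_R_50_FPN_1x.py")
cfg.optimizer.lr = 0.005          # overrides are plain attribute access on the config node
model = instantiate(cfg.model)    # recursively build the FCOS model described by the config
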
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/build.py
DELETED
@@ -1,542 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import itertools
-import logging
-import numpy as np
-import operator
-import pickle
-from typing import Any, Callable, Dict, List, Optional, Union
-import torch
-import torch.utils.data as torchdata
-from tabulate import tabulate
-from termcolor import colored
-
-from detectron2.config import configurable
-from detectron2.structures import BoxMode
-from detectron2.utils.comm import get_world_size
-from detectron2.utils.env import seed_all_rng
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import _log_api_usage, log_first_n
-
-from .catalog import DatasetCatalog, MetadataCatalog
-from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
-from .dataset_mapper import DatasetMapper
-from .detection_utils import check_metadata_consistency
-from .samplers import (
-    InferenceSampler,
-    RandomSubsetTrainingSampler,
-    RepeatFactorTrainingSampler,
-    TrainingSampler,
-)
-
-"""
-This file contains the default logic to build a dataloader for training or testing.
-"""
-
-__all__ = [
-    "build_batch_data_loader",
-    "build_detection_train_loader",
-    "build_detection_test_loader",
-    "get_detection_dataset_dicts",
-    "load_proposals_into_dataset",
-    "print_instances_class_histogram",
-]
-
-
-def filter_images_with_only_crowd_annotations(dataset_dicts):
-    """
-    Filter out images with none annotations or only crowd annotations
-    (i.e., images without non-crowd annotations).
-    A common training-time preprocessing on COCO dataset.
-
-    Args:
-        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
-
-    Returns:
-        list[dict]: the same format, but filtered.
-    """
-    num_before = len(dataset_dicts)
-
-    def valid(anns):
-        for ann in anns:
-            if ann.get("iscrowd", 0) == 0:
-                return True
-        return False
-
-    dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
-    num_after = len(dataset_dicts)
-    logger = logging.getLogger(__name__)
-    logger.info(
-        "Removed {} images with no usable annotations. {} images left.".format(
-            num_before - num_after, num_after
-        )
-    )
-    return dataset_dicts
-
-
-def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
-    """
-    Filter out images with too few number of keypoints.
-
-    Args:
-        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
-
-    Returns:
-        list[dict]: the same format as dataset_dicts, but filtered.
-    """
-    num_before = len(dataset_dicts)
-
-    def visible_keypoints_in_image(dic):
-        # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
-        annotations = dic["annotations"]
-        return sum(
-            (np.array(ann["keypoints"][2::3]) > 0).sum()
-            for ann in annotations
-            if "keypoints" in ann
-        )
-
-    dataset_dicts = [
-        x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
-    ]
-    num_after = len(dataset_dicts)
-    logger = logging.getLogger(__name__)
-    logger.info(
-        "Removed {} images with fewer than {} keypoints.".format(
-            num_before - num_after, min_keypoints_per_image
-        )
-    )
-    return dataset_dicts
-
-
-def load_proposals_into_dataset(dataset_dicts, proposal_file):
-    """
-    Load precomputed object proposals into the dataset.
-
-    The proposal file should be a pickled dict with the following keys:
-
-    - "ids": list[int] or list[str], the image ids
-    - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
-    - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
-      corresponding to the boxes.
-    - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
-
-    Args:
-        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
-        proposal_file (str): file path of pre-computed proposals, in pkl format.
-
-    Returns:
-        list[dict]: the same format as dataset_dicts, but added proposal field.
-    """
-    logger = logging.getLogger(__name__)
-    logger.info("Loading proposals from: {}".format(proposal_file))
-
-    with PathManager.open(proposal_file, "rb") as f:
-        proposals = pickle.load(f, encoding="latin1")
-
-    # Rename the key names in D1 proposal files
-    rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
-    for key in rename_keys:
-        if key in proposals:
-            proposals[rename_keys[key]] = proposals.pop(key)
-
-    # Fetch the indexes of all proposals that are in the dataset
-    # Convert image_id to str since they could be int.
-    img_ids = set({str(record["image_id"]) for record in dataset_dicts})
-    id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
-
-    # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
-    bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
-
-    for record in dataset_dicts:
-        # Get the index of the proposal
-        i = id_to_index[str(record["image_id"])]
-
-        boxes = proposals["boxes"][i]
-        objectness_logits = proposals["objectness_logits"][i]
-        # Sort the proposals in descending order of the scores
-        inds = objectness_logits.argsort()[::-1]
-        record["proposal_boxes"] = boxes[inds]
-        record["proposal_objectness_logits"] = objectness_logits[inds]
-        record["proposal_bbox_mode"] = bbox_mode
-
-    return dataset_dicts
-
-
-def print_instances_class_histogram(dataset_dicts, class_names):
-    """
-    Args:
-        dataset_dicts (list[dict]): list of dataset dicts.
-        class_names (list[str]): list of class names (zero-indexed).
-    """
-    num_classes = len(class_names)
-    hist_bins = np.arange(num_classes + 1)
-    histogram = np.zeros((num_classes,), dtype=np.int)
-    for entry in dataset_dicts:
-        annos = entry["annotations"]
-        classes = np.asarray(
-            [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
-        )
-        if len(classes):
-            assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
-            assert (
-                classes.max() < num_classes
-            ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
-        histogram += np.histogram(classes, bins=hist_bins)[0]
-
-    N_COLS = min(6, len(class_names) * 2)
-
-    def short_name(x):
-        # make long class names shorter. useful for lvis
-        if len(x) > 13:
-            return x[:11] + ".."
-        return x
-
-    data = list(
-        itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
-    )
-    total_num_instances = sum(data[1::2])
-    data.extend([None] * (N_COLS - (len(data) % N_COLS)))
-    if num_classes > 1:
-        data.extend(["total", total_num_instances])
-    data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
-    table = tabulate(
-        data,
-        headers=["category", "#instances"] * (N_COLS // 2),
-        tablefmt="pipe",
-        numalign="left",
-        stralign="center",
-    )
-    log_first_n(
-        logging.INFO,
-        "Distribution of instances among all {} categories:\n".format(num_classes)
-        + colored(table, "cyan"),
-        key="message",
-    )
-
-
-def get_detection_dataset_dicts(
-    names,
-    filter_empty=True,
-    min_keypoints=0,
-    proposal_files=None,
-    check_consistency=True,
-):
-    """
-    Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
-
-    Args:
-        names (str or list[str]): a dataset name or a list of dataset names
-        filter_empty (bool): whether to filter out images without instance annotations
-        min_keypoints (int): filter out images with fewer keypoints than
-            `min_keypoints`. Set to 0 to do nothing.
-        proposal_files (list[str]): if given, a list of object proposal files
-            that match each dataset in `names`.
-        check_consistency (bool): whether to check if datasets have consistent metadata.
-
-    Returns:
-        list[dict]: a list of dicts following the standard dataset dict format.
-    """
-    if isinstance(names, str):
-        names = [names]
-    assert len(names), names
-    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
-    for dataset_name, dicts in zip(names, dataset_dicts):
-        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
-
-    if proposal_files is not None:
-        assert len(names) == len(proposal_files)
-        # load precomputed proposals from proposal files
-        dataset_dicts = [
-            load_proposals_into_dataset(dataset_i_dicts, proposal_file)
-            for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
-        ]
-
-    if isinstance(dataset_dicts[0], torchdata.Dataset):
-        return torchdata.ConcatDataset(dataset_dicts)
-
-    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
-
-    has_instances = "annotations" in dataset_dicts[0]
-    if filter_empty and has_instances:
-        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
-    if min_keypoints > 0 and has_instances:
-        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
-
-    if check_consistency and has_instances:
-        try:
-            class_names = MetadataCatalog.get(names[0]).thing_classes
-            check_metadata_consistency("thing_classes", names)
-            print_instances_class_histogram(dataset_dicts, class_names)
-        except AttributeError:  # class names are not available for this dataset
-            pass
-
-    assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
-    return dataset_dicts
-
-
-def build_batch_data_loader(
-    dataset,
-    sampler,
-    total_batch_size,
-    *,
-    aspect_ratio_grouping=False,
-    num_workers=0,
-    collate_fn=None,
-):
-    """
-    Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
-    1. support aspect ratio grouping options
-    2. use no "batch collation", because this is common for detection training
-
-    Args:
-        dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
-        sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
-            Must be provided iff. ``dataset`` is a map-style dataset.
-        total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
-            :func:`build_detection_train_loader`.
-
-    Returns:
-        iterable[list]. Length of each list is the batch size of the current
-        GPU. Each element in the list comes from the dataset.
-    """
-    world_size = get_world_size()
-    assert (
-        total_batch_size > 0 and total_batch_size % world_size == 0
-    ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
-        total_batch_size, world_size
-    )
-    batch_size = total_batch_size // world_size
-
-    if isinstance(dataset, torchdata.IterableDataset):
-        assert sampler is None, "sampler must be None if dataset is IterableDataset"
-    else:
-        dataset = ToIterableDataset(dataset, sampler)
-
-    if aspect_ratio_grouping:
-        data_loader = torchdata.DataLoader(
-            dataset,
-            num_workers=num_workers,
-            collate_fn=operator.itemgetter(0),  # don't batch, but yield individual elements
-            worker_init_fn=worker_init_reset_seed,
-        )  # yield individual mapped dict
-        data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
-        if collate_fn is None:
-            return data_loader
-        return MapDataset(data_loader, collate_fn)
-    else:
-        return torchdata.DataLoader(
-            dataset,
-            batch_size=batch_size,
-            drop_last=True,
-            num_workers=num_workers,
-            collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
-            worker_init_fn=worker_init_reset_seed,
-        )
-
-
-def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
-    if dataset is None:
-        dataset = get_detection_dataset_dicts(
-            cfg.DATASETS.TRAIN,
-            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
-            min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
-            if cfg.MODEL.KEYPOINT_ON
-            else 0,
-            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
-        )
-        _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
-
-    if mapper is None:
-        mapper = DatasetMapper(cfg, True)
-
-    if sampler is None:
-        sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
-        logger = logging.getLogger(__name__)
-        logger.info("Using training sampler {}".format(sampler_name))
-        if sampler_name == "TrainingSampler":
-            sampler = TrainingSampler(len(dataset))
-        elif sampler_name == "RepeatFactorTrainingSampler":
-            repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
-                dataset, cfg.DATALOADER.REPEAT_THRESHOLD
-            )
-            sampler = RepeatFactorTrainingSampler(repeat_factors)
-        elif sampler_name == "RandomSubsetTrainingSampler":
-            sampler = RandomSubsetTrainingSampler(len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO)
-        else:
-            raise ValueError("Unknown training sampler: {}".format(sampler_name))
-
-    return {
-        "dataset": dataset,
-        "sampler": sampler,
-        "mapper": mapper,
-        "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
-        "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
-        "num_workers": cfg.DATALOADER.NUM_WORKERS,
-    }
-
-
-@configurable(from_config=_train_loader_from_config)
-def build_detection_train_loader(
-    dataset,
-    *,
-    mapper,
-    sampler=None,
-    total_batch_size,
-    aspect_ratio_grouping=True,
-    num_workers=0,
-    collate_fn=None,
-):
-    """
-    Build a dataloader for object detection with some default features.
-
-    Args:
-        dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
-            or a pytorch dataset (either map-style or iterable). It can be obtained
-            by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
-        mapper (callable): a callable which takes a sample (dict) from dataset and
-            returns the format to be consumed by the model.
-            When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
-        sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
-            indices to be applied on ``dataset``.
-            If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
-            which coordinates an infinite random shuffle sequence across all workers.
-            Sampler must be None if ``dataset`` is iterable.
-        total_batch_size (int): total batch size across all workers.
-        aspect_ratio_grouping (bool): whether to group images with similar
-            aspect ratio for efficiency. When enabled, it requires each
-            element in dataset be a dict with keys "width" and "height".
-        num_workers (int): number of parallel data loading workers
-        collate_fn: a function that determines how to do batching, same as the argument of
-            `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
-            data. No collation is OK for small batch size and simple data structures.
-            If your batch size is large and each sample contains too many small tensors,
-            it's more efficient to collate them in data loader.
-
-    Returns:
-        torch.utils.data.DataLoader:
-            a dataloader. Each output from it is a ``list[mapped_element]`` of length
-            ``total_batch_size / num_workers``, where ``mapped_element`` is produced
-            by the ``mapper``.
-    """
-    if isinstance(dataset, list):
-        dataset = DatasetFromList(dataset, copy=False)
-    if mapper is not None:
-        dataset = MapDataset(dataset, mapper)
-
-    if isinstance(dataset, torchdata.IterableDataset):
-        assert sampler is None, "sampler must be None if dataset is IterableDataset"
-    else:
-        if sampler is None:
-            sampler = TrainingSampler(len(dataset))
-        assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
-    return build_batch_data_loader(
-        dataset,
-        sampler,
-        total_batch_size,
-        aspect_ratio_grouping=aspect_ratio_grouping,
-        num_workers=num_workers,
-        collate_fn=collate_fn,
-    )
-
-
-def _test_loader_from_config(cfg, dataset_name, mapper=None):
-    """
-    Uses the given `dataset_name` argument (instead of the names in cfg), because the
-    standard practice is to evaluate each test set individually (not combining them).
-    """
-    if isinstance(dataset_name, str):
-        dataset_name = [dataset_name]
-
-    dataset = get_detection_dataset_dicts(
-        dataset_name,
-        filter_empty=False,
-        proposal_files=[
-            cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
-        ]
-        if cfg.MODEL.LOAD_PROPOSALS
-        else None,
-    )
-    if mapper is None:
-        mapper = DatasetMapper(cfg, False)
-    return {
-        "dataset": dataset,
-        "mapper": mapper,
-        "num_workers": cfg.DATALOADER.NUM_WORKERS,
-        "sampler": InferenceSampler(len(dataset)),
-    }
-
-
-@configurable(from_config=_test_loader_from_config)
-def build_detection_test_loader(
-    dataset: Union[List[Any], torchdata.Dataset],
-    *,
-    mapper: Callable[[Dict[str, Any]], Any],
-    sampler: Optional[torchdata.Sampler] = None,
-    batch_size: int = 1,
-    num_workers: int = 0,
-    collate_fn: Optional[Callable[[List[Any]], Any]] = None,
-) -> torchdata.DataLoader:
-    """
-    Similar to `build_detection_train_loader`, with default batch size = 1,
-    and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
-    to produce the exact set of all samples.
-
-    Args:
-        dataset: a list of dataset dicts,
-            or a pytorch dataset (either map-style or iterable). They can be obtained
-            by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
-        mapper: a callable which takes a sample (dict) from dataset
-            and returns the format to be consumed by the model.
-            When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
-        sampler: a sampler that produces
-            indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
-            which splits the dataset across all workers. Sampler must be None
-            if `dataset` is iterable.
-        batch_size: the batch size of the data loader to be created.
-            Default to 1 image per worker since this is the standard when reporting
-            inference time in papers.
-        num_workers: number of parallel data loading workers
-        collate_fn: same as the argument of `torch.utils.data.DataLoader`.
-            Defaults to do no collation and return a list of data.
-
-    Returns:
-        DataLoader: a torch DataLoader, that loads the given detection
-        dataset, with test-time transformation and batching.
-
-    Examples:
-    ::
-        data_loader = build_detection_test_loader(
-            DatasetRegistry.get("my_test"),
-            mapper=DatasetMapper(...))
-
-        # or, instantiate with a CfgNode:
-        data_loader = build_detection_test_loader(cfg, "my_test")
-    """
-    if isinstance(dataset, list):
-        dataset = DatasetFromList(dataset, copy=False)
-    if mapper is not None:
-        dataset = MapDataset(dataset, mapper)
-    if isinstance(dataset, torchdata.IterableDataset):
-        assert sampler is None, "sampler must be None if dataset is IterableDataset"
-    else:
-        if sampler is None:
-            sampler = InferenceSampler(len(dataset))
-    return torchdata.DataLoader(
-        dataset,
-        batch_size=batch_size,
-        sampler=sampler,
-        drop_last=False,
-        num_workers=num_workers,
-        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
-    )
-
-
-def trivial_batch_collator(batch):
-    """
-    A batch collator that does nothing.
-    """
-    return batch
-
-
-def worker_init_reset_seed(worker_id):
-    initial_seed = torch.initial_seed() % 2 ** 31
-    seed_all_rng(initial_seed + worker_id)

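A hedged usage sketch of the loaders defined above (not part of the diff): it assumes detectron2 is installed and that a dataset called "my_dataset_train" has been registered; both the dataset name and the batch size are placeholders.

from detectron2.config import get_cfg
from detectron2.data import (
    DatasetMapper,
    build_detection_test_loader,
    build_detection_train_loader,
    get_detection_dataset_dicts,
)

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("my_dataset_train",)

# Explicit-argument form: pass dataset dicts, a mapper, and the global batch size.
dicts = get_detection_dataset_dicts("my_dataset_train", filter_empty=True)
train_loader = build_detection_train_loader(
    dataset=dicts,
    mapper=DatasetMapper(cfg, is_train=True),
    total_batch_size=8,      # must be divisible by the number of GPUs
    num_workers=2,
)

# Config-driven form for evaluation, as documented in the docstring above.
test_loader = build_detection_test_loader(cfg, "my_dataset_train")

for batch in train_loader:   # each batch is a list[dict] of length 8 / world_size
    break
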
spaces/Benson/text-generation/Dockerfile
DELETED
@@ -1,28 +0,0 @@
-# Use the official Python 3.9 image
-FROM python:3.9
-
-# Set the working directory to /code
-WORKDIR /code
-
-# Copy the current directory contents into the container at /code
-COPY ./requirements.txt /code/requirements.txt
-
-# Install requirements.txt
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-# Switch to the "user" user
-USER user
-# Set home to the user's home directory
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-# Start the FastAPI app on port 7860, the default port expected by Spaces
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]

spaces/Benson/text-generation/Examples/Baloncesto Estrellas Multijugador Mod Apk Dinero Ilimitado Y Oro.md
DELETED
@@ -1,50 +0,0 @@
-
-<h1>Basketball Stars Multiplayer Mod APK: Unlimited Money and Gold</h1>
-<p>Do you like playing basketball on your mobile device? Do you want to experience the thrill of 1v1 online matches against real players from around the world? Do you want unlimited resources to customize your character and unlock new basketballs? If you answered yes to any of these questions, you should try the Basketball Stars multiplayer mod APK. This is a modified version of Miniclip's popular basketball game that gives you unlimited money and gold, as well as other features that improve the gameplay experience. In this article, we explain what Basketball Stars is, what a mod APK is, why you might use the Basketball Stars multiplayer mod APK, what features it offers, and how to download and install it on your device.</p>
-<h2>Introduction</h2>
-<h3>What is Basketball Stars?</h3>
-<p>Basketball Stars is a free basketball game from Miniclip that lets you play 1v1 online matches against real players from around the world. You can choose between different game modes, such as Attacker-Defender, Shooting Race, or Dunk Contest. You can also customize your character with different outfits, accessories, hairstyles, tattoos, and more, and collect and upgrade different basketballs with unique effects and abilities. Basketball Stars is a fun and addictive game that will test your skills and reflexes on the court.</p>
-<h2>basketball stars multiplayer mod apk unlimited money and gold</h2><br /><p><b><b>Download</b> ☑ <a href="https://bltlly.com/2v6IWG">https://bltlly.com/2v6IWG</a></b></p><br /><br />
-<h3>What is a mod APK?</h3>
-
-<h3>Why use the Basketball Stars multiplayer mod APK?</h3>
-<p>The Basketball Stars multiplayer mod APK is one of the better-known mod APKs for Basketball Stars available online. It gives you unlimited money and gold to spend on anything you want in the game, premium gameplay and controls that make you feel like a pro on the court, and realistic 3D graphics that immerse you in the game world. You can choose between two different online multiplayer game modes that challenge you in different ways, pick the game up and play anytime, anywhere, customize your character with hundreds of options, and unlock new basketballs with special effects. The mod APK is aimed at basketball fans who want more fun and excitement from the game.</p>
-<h2>Features of the Basketball Stars multiplayer mod APK</h2>
-<h3>Unlimited money and gold</h3>
-<p>One of the main features of the Basketball Stars multiplayer mod APK is that it gives you unlimited money and gold that you can use to buy anything you want in the game. Money and gold are the main currencies in Basketball Stars, needed to unlock new items, upgrade your basketballs, and enter the game <h3>Premium gameplay and controls</h3>
-<p>Another feature of the Basketball Stars multiplayer mod APK is premium gameplay and controls that make you feel like a professional on the court. Smooth, responsive controls let you dribble, shoot, block, steal, and dunk with ease, and you can use different moves and tricks to outplay your opponent and score more points. You can also adjust the sensitivity and camera angle to suit your preferences. The mod APK aims to give you the best possible gameplay experience.</p>
-<h3>Realistic 3D graphics</h3>
-
-<h3>Two different online multiplayer game modes</h3>
-<p>The Basketball Stars multiplayer mod APK also offers two different online multiplayer game modes that challenge you in different ways: Attacker-Defender and Shooting Race. In Attacker-Defender, you have to score as many points as you can while defending your basket from your opponent. In Shooting Race, you have to sink as many baskets as you can before time runs out. Both modes are fast-paced and competitive and require skill and strategy to win, and you can play with friends or with random players from around the world. The mod APK will test your basketball skills and reflexes.</p>
-<h3>Easy to pick up, hard to master</h3>
-<p>The Basketball Stars multiplayer mod APK is also easy to pick up but hard to master. You can learn the basics of the game in a few minutes, but you will need hours of practice and dedication to become a basketball star. You can improve your skills by playing against opponents with different styles and abilities, and earn rewards and achievements by completing various challenges and missions. It is a game that will keep you hooked for a long time.</p>
-<h3>Extensive customization options</h3>
-<p>The Basketball Stars multiplayer mod APK also gives you extensive customization options that let you create your own unique character. You can choose from hundreds of outfits, accessories, hairstyles, tattoos, and more, mix and match different items to create your own style and personality, and change your character's appearance whenever you want. It lets you express yourself on the court.</p>
-<h3>Unlockable basketball collection</h3>
-
-<h2>How to download and install the Basketball Stars multiplayer mod APK</h2>
-<h3>Step 1: Download the mod APK file from a trusted source</h3>
-<p>The first step is to find a trusted source that offers the latest version of the mod APK file. You can search online for websites that provide mod APK files for different games, but watch out for fake or malicious links that can damage your device or steal your data. You can also use this link to download the mod APK file for Basketball Stars directly.</p>
-<h3>Step 2: Enable unknown sources in your device settings</h3>
-<p>The second step is to enable unknown sources in your device settings. This allows you to install apps from sources other than the Google Play Store or App Store. To do this, go to your device settings, then security or privacy, then toggle on unknown sources or allow from this source. The exact steps can vary depending on the device model and operating system.</p>
-<h3>Step 3: Install the mod APK file and launch the game</h3>
-<p>The third step is to install the mod APK file and launch the game. Locate the downloaded mod APK file in your device storage, then tap it to start the installation process. Follow the on-screen instructions until the installation is complete, then launch the game from the app drawer or home screen. Enjoy playing Basketball Stars with unlimited money and gold <h2>Conclusion</h2>
-
-<h2>Frequently asked questions</h2>
-<p>Here are some frequently asked questions about the Basketball Stars multiplayer mod APK:</p>
-<p></p>
-<ul>
-<li><b>Q: Is the Basketball Stars multiplayer mod free?</b></li>
-<li>A: Yes, the Basketball Stars multiplayer mod APK is free to download and use. You do not need to pay anything to enjoy the game with unlimited money and gold.</li>
-<li><b>Q: Is the Basketball Stars multiplayer mod APK safe?</b></li>
-<li>A: Yes, the Basketball Stars multiplayer mod APK is safe to use. It does not contain viruses, malware, spyware, or other harmful components that could damage your device or compromise your privacy. However, you should always download it from a reliable source and scan it with antivirus software before installing it.</li>
-<li><b>Q: Is the Basketball Stars multiplayer mod APK compatible with my device?</b></li>
-<li>A: The Basketball Stars multiplayer mod APK is compatible with most Android devices running Android 4.1 or higher. However, some devices may not support the game or the mod APK due to hardware or software limitations.</li>
-<li><b>Q: Will I get banned for using the Basketball Stars multiplayer mod APK?</b></li>
-<li>A: No, you will not get banned for using the Basketball Stars multiplayer mod APK. The mod APK does not interfere with the game servers or the online matchmaking system, so you can play the game normally without any risk of being banned.</li>
-<li><b>Q: How do I update the Basketball Stars multiplayer mod APK?</b></li>
-<li>A: To update the Basketball Stars multiplayer mod APK, you need to download the latest version of the mod APK file from the same source you downloaded it from before, then uninstall the old version and install the new one. You do not need to worry about losing your progress or data, since they are stored on your device and not in the mod APK file.</li>
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />

spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/method.py
DELETED
@@ -1,78 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# https://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-from botocore.docs.method import document_model_driven_method
-
-
-def document_model_driven_resource_method(
-    section,
-    method_name,
-    operation_model,
-    event_emitter,
-    method_description=None,
-    example_prefix=None,
-    include_input=None,
-    include_output=None,
-    exclude_input=None,
-    exclude_output=None,
-    document_output=True,
-    resource_action_model=None,
-    include_signature=True,
-):
-
-    document_model_driven_method(
-        section=section,
-        method_name=method_name,
-        operation_model=operation_model,
-        event_emitter=event_emitter,
-        method_description=method_description,
-        example_prefix=example_prefix,
-        include_input=include_input,
-        include_output=include_output,
-        exclude_input=exclude_input,
-        exclude_output=exclude_output,
-        document_output=document_output,
-        include_signature=include_signature,
-    )
-
-    # If this action returns a resource modify the return example to
-    # appropriately reflect that.
-    if resource_action_model.resource:
-        if 'return' in section.available_sections:
-            section.delete_section('return')
-        resource_type = resource_action_model.resource.type
-
-        new_return_section = section.add_new_section('return')
-        return_resource_type = '{}.{}'.format(
-            operation_model.service_model.service_name, resource_type
-        )
-
-        return_type = f':py:class:`{return_resource_type}`'
-        return_description = f'{resource_type} resource'
-
-        if _method_returns_resource_list(resource_action_model.resource):
-            return_type = f'list({return_type})'
-            return_description = f'A list of {resource_type} resources'
-
-        new_return_section.style.new_line()
-        new_return_section.write(f':rtype: {return_type}')
-        new_return_section.style.new_line()
-        new_return_section.write(f':returns: {return_description}')
-        new_return_section.style.new_line()
-
-
-def _method_returns_resource_list(resource):
-    for identifier in resource.identifiers:
-        if identifier.path and '[]' in identifier.path:
-            return True
-
-    return False

spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/fields.py
DELETED
@@ -1,274 +0,0 @@
-from __future__ import absolute_import
-
-import email.utils
-import mimetypes
-import re
-
-from .packages import six
-
-
-def guess_content_type(filename, default="application/octet-stream"):
-    """
-    Guess the "Content-Type" of a file.
-
-    :param filename:
-        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
-    :param default:
-        If no "Content-Type" can be guessed, default to `default`.
-    """
-    if filename:
-        return mimetypes.guess_type(filename)[0] or default
-    return default
-
-
-def format_header_param_rfc2231(name, value):
-    """
-    Helper function to format and quote a single header parameter using the
-    strategy defined in RFC 2231.
-
-    Particularly useful for header parameters which might contain
-    non-ASCII values, like file names. This follows
-    `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.
-
-    :param name:
-        The name of the parameter, a string expected to be ASCII only.
-    :param value:
-        The value of the parameter, provided as ``bytes`` or `str``.
-    :ret:
-        An RFC-2231-formatted unicode string.
-    """
-    if isinstance(value, six.binary_type):
-        value = value.decode("utf-8")
-
-    if not any(ch in value for ch in '"\\\r\n'):
-        result = u'%s="%s"' % (name, value)
-        try:
-            result.encode("ascii")
-        except (UnicodeEncodeError, UnicodeDecodeError):
-            pass
-        else:
-            return result
-
-    if six.PY2:  # Python 2:
-        value = value.encode("utf-8")
-
-    # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
-    # string in Python 2 but accepts and returns unicode strings in Python 3
-    value = email.utils.encode_rfc2231(value, "utf-8")
-    value = "%s*=%s" % (name, value)
-
-    if six.PY2:  # Python 2:
-        value = value.decode("utf-8")
-
-    return value
-
-
-_HTML5_REPLACEMENTS = {
-    u"\u0022": u"%22",
-    # Replace "\" with "\\".
-    u"\u005C": u"\u005C\u005C",
-}
-
-# All control characters from 0x00 to 0x1F *except* 0x1B.
-_HTML5_REPLACEMENTS.update(
-    {
-        six.unichr(cc): u"%{:02X}".format(cc)
-        for cc in range(0x00, 0x1F + 1)
-        if cc not in (0x1B,)
-    }
-)
-
-
-def _replace_multiple(value, needles_and_replacements):
-    def replacer(match):
-        return needles_and_replacements[match.group(0)]
-
-    pattern = re.compile(
-        r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
-    )
-
-    result = pattern.sub(replacer, value)
-
-    return result
-
-
-def format_header_param_html5(name, value):
-    """
-    Helper function to format and quote a single header parameter using the
-    HTML5 strategy.
-
-    Particularly useful for header parameters which might contain
-    non-ASCII values, like file names. This follows the `HTML5 Working Draft
-    Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
-
-    .. _HTML5 Working Draft Section 4.10.22.7:
-        https://w3c.github.io/html/sec-forms.html#multipart-form-data
-
-    :param name:
-        The name of the parameter, a string expected to be ASCII only.
-    :param value:
-        The value of the parameter, provided as ``bytes`` or `str``.
-    :ret:
-        A unicode string, stripped of troublesome characters.
-    """
-    if isinstance(value, six.binary_type):
-        value = value.decode("utf-8")
-
-    value = _replace_multiple(value, _HTML5_REPLACEMENTS)
-
-    return u'%s="%s"' % (name, value)
-
-
-# For backwards-compatibility.
-format_header_param = format_header_param_html5
-
-
-class RequestField(object):
-    """
-    A data container for request body parameters.
-
-    :param name:
-        The name of this request field. Must be unicode.
-    :param data:
-        The data/value body.
-    :param filename:
-        An optional filename of the request field. Must be unicode.
-    :param headers:
-        An optional dict-like object of headers to initially use for the field.
-    :param header_formatter:
-        An optional callable that is used to encode and format the headers. By
-        default, this is :func:`format_header_param_html5`.
-    """
-
-    def __init__(
-        self,
-        name,
-        data,
-        filename=None,
-        headers=None,
-        header_formatter=format_header_param_html5,
-    ):
-        self._name = name
-        self._filename = filename
-        self.data = data
-        self.headers = {}
-        if headers:
-            self.headers = dict(headers)
-        self.header_formatter = header_formatter
-
-    @classmethod
-    def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
-        """
-        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
-
-        Supports constructing :class:`~urllib3.fields.RequestField` from
-        parameter of key/value strings AND key/filetuple. A filetuple is a
-        (filename, data, MIME type) tuple where the MIME type is optional.
-        For example::
-
-            'foo': 'bar',
-            'fakefile': ('foofile.txt', 'contents of foofile'),
-            'realfile': ('barfile.txt', open('realfile').read()),
-            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
-            'nonamefile': 'contents of nonamefile field',
-
-        Field names and filenames must be unicode.
-        """
-        if isinstance(value, tuple):
-            if len(value) == 3:
-                filename, data, content_type = value
-            else:
-                filename, data = value
-                content_type = guess_content_type(filename)
-        else:
-            filename = None
-            content_type = None
-            data = value
-
-        request_param = cls(
-            fieldname, data, filename=filename, header_formatter=header_formatter
-        )
-        request_param.make_multipart(content_type=content_type)
-
-        return request_param
-
-    def _render_part(self, name, value):
-        """
-        Overridable helper function to format a single header parameter. By
-        default, this calls ``self.header_formatter``.
-
-        :param name:
-            The name of the parameter, a string expected to be ASCII only.
-        :param value:
-            The value of the parameter, provided as a unicode string.
-        """
-
-        return self.header_formatter(name, value)
-
-    def _render_parts(self, header_parts):
-        """
-        Helper function to format and quote a single header.
-
-        Useful for single headers that are composed of multiple items. E.g.,
-        'Content-Disposition' fields.
-
-        :param header_parts:
-            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
-            as `k1="v1"; k2="v2"; ...`.
-        """
-        parts = []
-        iterable = header_parts
-        if isinstance(header_parts, dict):
-            iterable = header_parts.items()
-
-        for name, value in iterable:
-            if value is not None:
-                parts.append(self._render_part(name, value))
-
-        return u"; ".join(parts)
-
-    def render_headers(self):
-        """
-        Renders the headers for this request field.
-        """
-        lines = []
-
-        sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
-        for sort_key in sort_keys:
-            if self.headers.get(sort_key, False):
-                lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
-
-        for header_name, header_value in self.headers.items():
-            if header_name not in sort_keys:
-                if header_value:
-                    lines.append(u"%s: %s" % (header_name, header_value))
-
-        lines.append(u"\r\n")
-        return u"\r\n".join(lines)
-
-    def make_multipart(
-        self, content_disposition=None, content_type=None, content_location=None
-    ):
-        """
-        Makes this request field into a multipart request field.
-
-        This method overrides "Content-Disposition", "Content-Type" and
-        "Content-Location" headers to the request parameter.
-
-        :param content_type:
-            The 'Content-Type' of the request body.
-        :param content_location:
-            The 'Content-Location' of the request body.
-
-        """
-        self.headers["Content-Disposition"] = content_disposition or u"form-data"
-        self.headers["Content-Disposition"] += u"; ".join(
-            [
-                u"",
-                self._render_parts(
-                    ((u"name", self._name), (u"filename", self._filename))
-                ),
-            ]
-        )
-        self.headers["Content-Type"] = content_type
-        self.headers["Content-Location"] = content_location

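A small illustration of the RequestField API defined above (urllib3 1.26-era module); the field name and file contents are made up for the example.

from urllib3.fields import RequestField

# Build a multipart field from a (filename, data, MIME type) tuple and render its headers.
field = RequestField.from_tuples("attachment", ("notes.txt", "hello world", "text/plain"))
print(field.render_headers())
# Content-Disposition: form-data; name="attachment"; filename="notes.txt"
# Content-Type: text/plain
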
spaces/CVPR/LIVE/thrust/thrust/limits.h
DELETED
@@ -1,19 +0,0 @@
-// Copyright (c) 2018 NVIDIA Corporation
-// Author: Bryce Adelstein Lelbach <[email protected]>
-//
-// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
-
-#pragma once
-
-#include <limits>
-
-#include <thrust/detail/type_traits.h>
-
-namespace thrust
-{
-
-template <typename T>
-struct numeric_limits : std::numeric_limits<T> {};
-
-} // end namespace thrust
-

spaces/CVPR/WALT/mmdet/models/losses/utils.py
DELETED
@@ -1,100 +0,0 @@
import functools

import mmcv
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight

    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # get element-wise loss
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss

    return wrapper
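A minimal sketch of the weighting and `avg_factor` arithmetic that `weight_reduce_loss` performs, written in plain PyTorch so it runs without mmcv or mmdet; the tensor values mirror the docstring example above:

# Element-wise weighting followed by sum / avg_factor (the 'mean' + avg_factor path).
import torch

loss = torch.tensor([1.0, 1.0, 2.0])     # element-wise loss, e.g. |pred - target|
weight = torch.tensor([1.0, 0.0, 1.0])   # per-element weights
avg_factor = 2                           # e.g. number of positive samples

weighted = loss * weight                 # -> tensor([1., 0., 2.])
reduced = weighted.sum() / avg_factor    # -> tensor(1.5000), matching the docstring
print(reduced)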
spaces/CVPR/lama-example/fetch_data/places_standard_evaluation_prepare_data.sh
DELETED
@@ -1,52 +0,0 @@
# 0. folder preparation
mkdir -p places_standard_dataset/evaluation/hires/
mkdir -p places_standard_dataset/evaluation/random_thick_512/
mkdir -p places_standard_dataset/evaluation/random_thin_512/
mkdir -p places_standard_dataset/evaluation/random_medium_512/
mkdir -p places_standard_dataset/evaluation/random_thick_256/
mkdir -p places_standard_dataset/evaluation/random_thin_256/
mkdir -p places_standard_dataset/evaluation/random_medium_256/

# 1. sample 2000 new images
OUT=$(python3 fetch_data/eval_sampler.py)
echo ${OUT}

FILELIST=$(cat places_standard_dataset/original/eval_random_files.txt)
for i in $FILELIST
do
    $(cp ${i} places_standard_dataset/evaluation/hires/)
done


# 2. generate all kinds of masks

# all 512
python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_thick_512.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_thick_512/

python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_thin_512.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_thin_512/

python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_medium_512.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_medium_512/

python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_thick_256.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_thick_256/

python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_thin_256.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_thin_256/

python3 bin/gen_mask_dataset.py \
    $(pwd)/configs/data_gen/random_medium_256.yaml \
    places_standard_dataset/evaluation/hires \
    places_standard_dataset/evaluation/random_medium_256/
spaces/CVPR/lama-example/saicinpainting/training/losses/perceptual.py
DELETED
@@ -1,113 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

from models.ade20k import ModelBuilder
from saicinpainting.utils import check_and_warn_input_range


IMAGENET_MEAN = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None]
IMAGENET_STD = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None]


class PerceptualLoss(nn.Module):
    def __init__(self, normalize_inputs=True):
        super(PerceptualLoss, self).__init__()

        self.normalize_inputs = normalize_inputs
        self.mean_ = IMAGENET_MEAN
        self.std_ = IMAGENET_STD

        vgg = torchvision.models.vgg19(pretrained=True).features
        vgg_avg_pooling = []

        for weights in vgg.parameters():
            weights.requires_grad = False

        for module in vgg.modules():
            if module.__class__.__name__ == 'Sequential':
                continue
            elif module.__class__.__name__ == 'MaxPool2d':
                vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
            else:
                vgg_avg_pooling.append(module)

        self.vgg = nn.Sequential(*vgg_avg_pooling)

    def do_normalize_inputs(self, x):
        return (x - self.mean_.to(x.device)) / self.std_.to(x.device)

    def partial_losses(self, input, target, mask=None):
        check_and_warn_input_range(target, 0, 1, 'PerceptualLoss target in partial_losses')

        # we expect input and target to be in [0, 1] range
        losses = []

        if self.normalize_inputs:
            features_input = self.do_normalize_inputs(input)
            features_target = self.do_normalize_inputs(target)
        else:
            features_input = input
            features_target = target

        for layer in self.vgg[:30]:

            features_input = layer(features_input)
            features_target = layer(features_target)

            if layer.__class__.__name__ == 'ReLU':
                loss = F.mse_loss(features_input, features_target, reduction='none')

                if mask is not None:
                    cur_mask = F.interpolate(mask, size=features_input.shape[-2:],
                                             mode='bilinear', align_corners=False)
                    loss = loss * (1 - cur_mask)

                loss = loss.mean(dim=tuple(range(1, len(loss.shape))))
                losses.append(loss)

        return losses

    def forward(self, input, target, mask=None):
        losses = self.partial_losses(input, target, mask=mask)
        return torch.stack(losses).sum(dim=0)

    def get_global_features(self, input):
        check_and_warn_input_range(input, 0, 1, 'PerceptualLoss input in get_global_features')

        if self.normalize_inputs:
            features_input = self.do_normalize_inputs(input)
        else:
            features_input = input

        features_input = self.vgg(features_input)
        return features_input


class ResNetPL(nn.Module):
    def __init__(self, weight=1,
                 weights_path=None, arch_encoder='resnet50dilated', segmentation=True):
        super().__init__()
        self.impl = ModelBuilder.get_encoder(weights_path=weights_path,
                                             arch_encoder=arch_encoder,
                                             arch_decoder='ppm_deepsup',
                                             fc_dim=2048,
                                             segmentation=segmentation)
        self.impl.eval()
        for w in self.impl.parameters():
            w.requires_grad_(False)

        self.weight = weight

    def forward(self, pred, target):
        pred = (pred - IMAGENET_MEAN.to(pred)) / IMAGENET_STD.to(pred)
        target = (target - IMAGENET_MEAN.to(target)) / IMAGENET_STD.to(target)

        pred_feats = self.impl(pred, return_feature_maps=True)
        target_feats = self.impl(target, return_feature_maps=True)

        result = torch.stack([F.mse_loss(cur_pred, cur_target)
                              for cur_pred, cur_target
                              in zip(pred_feats, target_feats)]).sum() * self.weight
        return result
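An illustrative sketch of the core idea behind the PerceptualLoss above (not the project's class itself): VGG19 features with MaxPool swapped for AvgPool, and MSE accumulated at ReLU activations. It only assumes torchvision; weights are left uninitialized and the inputs are random tensors, purely for illustration:

# Minimal, self-contained sketch of a VGG-feature perceptual loss.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

vgg = torchvision.models.vgg19().features.eval()   # no pretrained weights in this sketch
layers = [nn.AvgPool2d(2, 2) if isinstance(m, nn.MaxPool2d) else m
          for m in vgg.children()]                  # swap MaxPool -> AvgPool, as above

x = torch.rand(1, 3, 64, 64)   # "prediction" in [0, 1]
y = torch.rand(1, 3, 64, 64)   # "target" in [0, 1]

total = 0.0
for layer in layers[:30]:
    x, y = layer(x), layer(y)
    if isinstance(layer, nn.ReLU):
        total = total + F.mse_loss(x, y)   # accumulate feature-space MSE at each ReLU
print(total)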
spaces/CVPR/regionclip-demo/detectron2/solver/__init__.py
DELETED
@@ -1,5 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params
from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, LRMultiplier, WarmupParamScheduler

__all__ = [k for k in globals().keys() if not k.startswith("_")]
spaces/ChrisPreston/diff-svc_minato_aqua/run.py
DELETED
@@ -1,17 +0,0 @@
import importlib

from utils.hparams import set_hparams, hparams

set_hparams(print_hparams=False)


def run_task():
    assert hparams['task_cls'] != ''
    pkg = ".".join(hparams["task_cls"].split(".")[:-1])
    cls_name = hparams["task_cls"].split(".")[-1]
    task_cls = getattr(importlib.import_module(pkg), cls_name)
    task_cls.start()


if __name__ == '__main__':
    run_task()
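A minimal sketch of the dotted-path dynamic import that `run_task()` performs: split "pkg.module.ClassName", import the module, then `getattr` the class. A standard-library path stands in for `hparams["task_cls"]`, purely for illustration:

import importlib

task_cls_path = "collections.OrderedDict"   # stand-in for hparams["task_cls"]
pkg = ".".join(task_cls_path.split(".")[:-1])
cls_name = task_cls_path.split(".")[-1]
task_cls = getattr(importlib.import_module(pkg), cls_name)
print(task_cls())   # -> OrderedDict()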
spaces/CofAI/chat/client/css/theme-toggler.css
DELETED
@@ -1,33 +0,0 @@
.theme-toggler-container {
    margin: 24px 0px 8px 0px;
    justify-content: center;
}

.theme-toggler-container.checkbox input + label,
.theme-toggler-container.checkbox input:checked + label:after {
    background: var(--colour-1);
}

.theme-toggler-container.checkbox input + label:after,
.theme-toggler-container.checkbox input:checked + label {
    background: var(--colour-3);
}

.theme-toggler-container.checkbox span {
    font-size: 0.75rem;
}

.theme-toggler-container.checkbox label {
    width: 24px;
    height: 16px;
}

.theme-toggler-container.checkbox label:after {
    left: 2px;
    width: 10px;
    height: 10px;
}

.theme-toggler-container.checkbox input:checked + label:after {
    left: calc(100% - 2px - 10px);
}
spaces/CristianGonzalez281098/Cheto/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Cheto
emoji: 📚
colorFrom: pink
colorTo: blue
sdk: gradio
sdk_version: 2.9.0
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/DHEIVER/analise_imagem_mama/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Analise Imagem Mama
emoji: 🚀
colorFrom: red
colorTo: red
sdk: gradio
sdk_version: 3.38.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/pipelines.py
DELETED
@@ -1,225 +0,0 @@
"""This module should not be used directly as its API is subject to change. Instead,
please use the `gr.Interface.from_pipeline()` function."""

from __future__ import annotations

from typing import TYPE_CHECKING

from gradio import components

if TYPE_CHECKING:  # Only import for type checking (is False at runtime).
    from transformers import pipelines


def load_from_pipeline(pipeline: pipelines.base.Pipeline) -> dict:
    """
    Gets the appropriate Interface kwargs for a given Hugging Face transformers.Pipeline.
    pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface
    Returns:
    (dict): a dictionary of kwargs that can be used to construct an Interface object
    """
    try:
        import transformers
        from transformers import pipelines
    except ImportError as ie:
        raise ImportError(
            "transformers not installed. Please try `pip install transformers`"
        ) from ie
    if not isinstance(pipeline, pipelines.base.Pipeline):
        raise ValueError("pipeline must be a transformers.Pipeline")

    # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the
    # version of the transformers library that the user has installed.
    if hasattr(transformers, "AudioClassificationPipeline") and isinstance(
        pipeline, pipelines.audio_classification.AudioClassificationPipeline
    ):
        pipeline_info = {
            "inputs": components.Audio(
                source="microphone", type="filepath", label="Input"
            ),
            "outputs": components.Label(label="Class"),
            "preprocess": lambda i: {"inputs": i},
            "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r},
        }
    elif hasattr(transformers, "AutomaticSpeechRecognitionPipeline") and isinstance(
        pipeline,
        pipelines.automatic_speech_recognition.AutomaticSpeechRecognitionPipeline,
    ):
        pipeline_info = {
            "inputs": components.Audio(
                source="microphone", type="filepath", label="Input"
            ),
            "outputs": components.Textbox(label="Output"),
            "preprocess": lambda i: {"inputs": i},
            "postprocess": lambda r: r["text"],
        }
    elif hasattr(transformers, "FeatureExtractionPipeline") and isinstance(
        pipeline, pipelines.feature_extraction.FeatureExtractionPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Dataframe(label="Output"),
            "preprocess": lambda x: {"inputs": x},
            "postprocess": lambda r: r[0],
        }
    elif hasattr(transformers, "FillMaskPipeline") and isinstance(
        pipeline, pipelines.fill_mask.FillMaskPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Label(label="Classification"),
            "preprocess": lambda x: {"inputs": x},
            "postprocess": lambda r: {i["token_str"]: i["score"] for i in r},
        }
    elif hasattr(transformers, "ImageClassificationPipeline") and isinstance(
        pipeline, pipelines.image_classification.ImageClassificationPipeline
    ):
        pipeline_info = {
            "inputs": components.Image(type="filepath", label="Input Image"),
            "outputs": components.Label(type="confidences", label="Classification"),
            "preprocess": lambda i: {"images": i},
            "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r},
        }
    elif hasattr(transformers, "QuestionAnsweringPipeline") and isinstance(
        pipeline, pipelines.question_answering.QuestionAnsweringPipeline
    ):
        pipeline_info = {
            "inputs": [
                components.Textbox(lines=7, label="Context"),
                components.Textbox(label="Question"),
            ],
            "outputs": [
                components.Textbox(label="Answer"),
                components.Label(label="Score"),
            ],
            "preprocess": lambda c, q: {"context": c, "question": q},
            "postprocess": lambda r: (r["answer"], r["score"]),
        }
    elif hasattr(transformers, "SummarizationPipeline") and isinstance(
        pipeline, pipelines.text2text_generation.SummarizationPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(lines=7, label="Input"),
            "outputs": components.Textbox(label="Summary"),
            "preprocess": lambda x: {"inputs": x},
            "postprocess": lambda r: r[0]["summary_text"],
        }
    elif hasattr(transformers, "TextClassificationPipeline") and isinstance(
        pipeline, pipelines.text_classification.TextClassificationPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Label(label="Classification"),
            "preprocess": lambda x: [x],
            "postprocess": lambda r: {i["label"].split(", ")[0]: i["score"] for i in r},
        }
    elif hasattr(transformers, "TextGenerationPipeline") and isinstance(
        pipeline, pipelines.text_generation.TextGenerationPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Textbox(label="Output"),
            "preprocess": lambda x: {"text_inputs": x},
            "postprocess": lambda r: r[0]["generated_text"],
        }
    elif hasattr(transformers, "TranslationPipeline") and isinstance(
        pipeline, pipelines.text2text_generation.TranslationPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Textbox(label="Translation"),
            "preprocess": lambda x: [x],
            "postprocess": lambda r: r[0]["translation_text"],
        }
    elif hasattr(transformers, "Text2TextGenerationPipeline") and isinstance(
        pipeline, pipelines.text2text_generation.Text2TextGenerationPipeline
    ):
        pipeline_info = {
            "inputs": components.Textbox(label="Input"),
            "outputs": components.Textbox(label="Generated Text"),
            "preprocess": lambda x: [x],
            "postprocess": lambda r: r[0]["generated_text"],
        }
    elif hasattr(transformers, "ZeroShotClassificationPipeline") and isinstance(
        pipeline, pipelines.zero_shot_classification.ZeroShotClassificationPipeline
    ):
        pipeline_info = {
            "inputs": [
                components.Textbox(label="Input"),
                components.Textbox(label="Possible class names (" "comma-separated)"),
                components.Checkbox(label="Allow multiple true classes"),
            ],
            "outputs": components.Label(label="Classification"),
            "preprocess": lambda i, c, m: {
                "sequences": i,
                "candidate_labels": c,
                "multi_label": m,
            },
            "postprocess": lambda r: {
                r["labels"][i]: r["scores"][i] for i in range(len(r["labels"]))
            },
        }
    elif hasattr(transformers, "DocumentQuestionAnsweringPipeline") and isinstance(
        pipeline,
        pipelines.document_question_answering.DocumentQuestionAnsweringPipeline,  # type: ignore
    ):
        pipeline_info = {
            "inputs": [
                components.Image(type="filepath", label="Input Document"),
                components.Textbox(label="Question"),
            ],
            "outputs": components.Label(label="Label"),
            "preprocess": lambda img, q: {"image": img, "question": q},
            "postprocess": lambda r: {i["answer"]: i["score"] for i in r},
        }
    elif hasattr(transformers, "VisualQuestionAnsweringPipeline") and isinstance(
        pipeline, pipelines.visual_question_answering.VisualQuestionAnsweringPipeline
    ):
        pipeline_info = {
            "inputs": [
                components.Image(type="filepath", label="Input Image"),
                components.Textbox(label="Question"),
            ],
            "outputs": components.Label(label="Score"),
            "preprocess": lambda img, q: {"image": img, "question": q},
            "postprocess": lambda r: {i["answer"]: i["score"] for i in r},
        }
    elif hasattr(transformers, "ImageToTextPipeline") and isinstance(
        pipeline, pipelines.image_to_text.ImageToTextPipeline  # type: ignore
    ):
        pipeline_info = {
            "inputs": components.Image(type="filepath", label="Input Image"),
            "outputs": components.Textbox(label="Text"),
            "preprocess": lambda i: {"images": i},
            "postprocess": lambda r: r[0]["generated_text"],
        }
    else:
        raise ValueError(f"Unsupported pipeline type: {type(pipeline)}")

    # define the function that will be called by the Interface
    def fn(*params):
        data = pipeline_info["preprocess"](*params)
        # special cases that needs to be handled differently
        if isinstance(
            pipeline,
            (
                pipelines.text_classification.TextClassificationPipeline,
                pipelines.text2text_generation.Text2TextGenerationPipeline,
                pipelines.text2text_generation.TranslationPipeline,
            ),
        ):
            data = pipeline(*data)
        else:
            data = pipeline(**data)
        output = pipeline_info["postprocess"](data)
        return output

    interface_info = pipeline_info.copy()
    interface_info["fn"] = fn
    del interface_info["preprocess"]
    del interface_info["postprocess"]

    # define the title/description of the Interface
    interface_info["title"] = pipeline.model.__class__.__name__

    return interface_info
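In practice this module is consumed indirectly through `gr.Interface.from_pipeline()`. A minimal usage sketch, assuming gradio and transformers are installed and network access is available to fetch whatever default checkpoint the text-classification pipeline resolves to:

import gradio as gr
from transformers import pipeline

clf = pipeline("text-classification")      # default model, downloaded on first use
demo = gr.Interface.from_pipeline(clf)     # builds inputs/outputs/fn from the pipeline type
demo.launch()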
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_state.py
DELETED
@@ -1,367 +0,0 @@
################################################################
# The core state machine
################################################################
#
# Rule 1: everything that affects the state machine and state transitions must
# live here in this file. As much as possible goes into the table-based
# representation, but for the bits that don't quite fit, the actual code and
# state must nonetheless live here.
#
# Rule 2: this file does not know about what role we're playing; it only knows
# about HTTP request/response cycles in the abstract. This ensures that we
# don't cheat and apply different rules to local and remote parties.
#
#
# Theory of operation
# ===================
#
# Possibly the simplest way to think about this is that we actually have 5
# different state machines here. Yes, 5. These are:
#
# 1) The client state, with its complicated automaton (see the docs)
# 2) The server state, with its complicated automaton (see the docs)
# 3) The keep-alive state, with possible states {True, False}
# 4) The SWITCH_CONNECT state, with possible states {False, True}
# 5) The SWITCH_UPGRADE state, with possible states {False, True}
#
# For (3)-(5), the first state listed is the initial state.
#
# (1)-(3) are stored explicitly in member variables. The last
# two are stored implicitly in the pending_switch_proposals set as:
#   (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)
#   (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)
#
# And each of these machines has two different kinds of transitions:
#
# a) Event-triggered
# b) State-triggered
#
# Event triggered is the obvious thing that you'd think it is: some event
# happens, and if it's the right event at the right time then a transition
# happens. But there are somewhat complicated rules for which machines can
# "see" which events. (As a rule of thumb, if a machine "sees" an event, this
# means two things: the event can affect the machine, and if the machine is
# not in a state where it expects that event then it's an error.) These rules
# are:
#
# 1) The client machine sees all h11.events objects emitted by the client.
#
# 2) The server machine sees all h11.events objects emitted by the server.
#
#    It also sees the client's Request event.
#
#    And sometimes, server events are annotated with a _SWITCH_* event. For
#    example, we can have a (Response, _SWITCH_CONNECT) event, which is
#    different from a regular Response event.
#
# 3) The keep-alive machine sees the process_keep_alive_disabled() event
#    (which is derived from Request/Response events), and this event
#    transitions it from True -> False, or from False -> False. There's no way
#    to transition back.
#
# 4&5) The _SWITCH_* machines transition from False->True when we get a
#    Request that proposes the relevant type of switch (via
#    process_client_switch_proposals), and they go from True->False when we
#    get a Response that has no _SWITCH_* annotation.
#
# So that's event-triggered transitions.
#
# State-triggered transitions are less standard. What they do here is couple
# the machines together. The way this works is, when certain *joint*
# configurations of states are achieved, then we automatically transition to a
# new *joint* state. So, for example, if we're ever in a joint state with
#
#   client: DONE
#   keep-alive: False
#
# then the client state immediately transitions to:
#
#   client: MUST_CLOSE
#
# This is fundamentally different from an event-based transition, because it
# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state
# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive
# transitioned True -> False. Either way, once this precondition is satisfied,
# this transition is immediately triggered.
#
# What if two conflicting state-based transitions get enabled at the same
# time? In practice there's only one case where this arises (client DONE ->
# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by
# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.
#
# Implementation
# --------------
#
# The event-triggered transitions for the server and client machines are all
# stored explicitly in a table. Ditto for the state-triggered transitions that
# involve just the server and client state.
#
# The transitions for the other machines, and the state-triggered transitions
# that involve the other machines, are written out as explicit Python code.
#
# It'd be nice if there were some cleaner way to do all this. This isn't
# *too* terrible, but I feel like it could probably be better.
#
# WARNING
# -------
#
# The script that generates the state machine diagrams for the docs knows how
# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
# tables. But it can't automatically read the transitions that are written
# directly in Python code. So if you touch those, you need to also update the
# script to keep it in sync!
from typing import cast, Dict, Optional, Set, Tuple, Type, Union

from ._events import *
from ._util import LocalProtocolError, Sentinel

# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
    "CLIENT",
    "SERVER",
    "IDLE",
    "SEND_RESPONSE",
    "SEND_BODY",
    "DONE",
    "MUST_CLOSE",
    "CLOSED",
    "MIGHT_SWITCH_PROTOCOL",
    "SWITCHED_PROTOCOL",
    "ERROR",
]


class CLIENT(Sentinel, metaclass=Sentinel):
    pass


class SERVER(Sentinel, metaclass=Sentinel):
    pass


# States
class IDLE(Sentinel, metaclass=Sentinel):
    pass


class SEND_RESPONSE(Sentinel, metaclass=Sentinel):
    pass


class SEND_BODY(Sentinel, metaclass=Sentinel):
    pass


class DONE(Sentinel, metaclass=Sentinel):
    pass


class MUST_CLOSE(Sentinel, metaclass=Sentinel):
    pass


class CLOSED(Sentinel, metaclass=Sentinel):
    pass


class ERROR(Sentinel, metaclass=Sentinel):
    pass


# Switch types
class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):
    pass


class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):
    pass


class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):
    pass


class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):
    pass


EventTransitionType = Dict[
    Type[Sentinel],
    Dict[
        Type[Sentinel],
        Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]],
    ],
]

EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = {
    CLIENT: {
        IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},
        SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
        DONE: {ConnectionClosed: CLOSED},
        MUST_CLOSE: {ConnectionClosed: CLOSED},
        CLOSED: {ConnectionClosed: CLOSED},
        MIGHT_SWITCH_PROTOCOL: {},
        SWITCHED_PROTOCOL: {},
        ERROR: {},
    },
    SERVER: {
        IDLE: {
            ConnectionClosed: CLOSED,
            Response: SEND_BODY,
            # Special case: server sees client Request events, in this form
            (Request, CLIENT): SEND_RESPONSE,
        },
        SEND_RESPONSE: {
            InformationalResponse: SEND_RESPONSE,
            Response: SEND_BODY,
            (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,
            (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,
        },
        SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
        DONE: {ConnectionClosed: CLOSED},
        MUST_CLOSE: {ConnectionClosed: CLOSED},
        CLOSED: {ConnectionClosed: CLOSED},
        SWITCHED_PROTOCOL: {},
        ERROR: {},
    },
}

StateTransitionType = Dict[
    Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]]
]

# NB: there are also some special-case state-triggered transitions hard-coded
# into _fire_state_triggered_transitions below.
STATE_TRIGGERED_TRANSITIONS: StateTransitionType = {
    # (Client state, Server state) -> new states
    # Protocol negotiation
    (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},
    # Socket shutdown
    (CLOSED, DONE): {SERVER: MUST_CLOSE},
    (CLOSED, IDLE): {SERVER: MUST_CLOSE},
    (ERROR, DONE): {SERVER: MUST_CLOSE},
    (DONE, CLOSED): {CLIENT: MUST_CLOSE},
    (IDLE, CLOSED): {CLIENT: MUST_CLOSE},
    (DONE, ERROR): {CLIENT: MUST_CLOSE},
}


class ConnectionState:
    def __init__(self) -> None:
        # Extra bits of state that don't quite fit into the state model.

        # If this is False then it enables the automatic DONE -> MUST_CLOSE
        # transition. Don't set this directly; call .keep_alive_disabled()
        self.keep_alive = True

        # This is a subset of {UPGRADE, CONNECT}, containing the proposals
        # made by the client for switching protocols.
        self.pending_switch_proposals: Set[Type[Sentinel]] = set()

        self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}

    def process_error(self, role: Type[Sentinel]) -> None:
        self.states[role] = ERROR
        self._fire_state_triggered_transitions()

    def process_keep_alive_disabled(self) -> None:
        self.keep_alive = False
        self._fire_state_triggered_transitions()

    def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:
        self.pending_switch_proposals.add(switch_event)
        self._fire_state_triggered_transitions()

    def process_event(
        self,
        role: Type[Sentinel],
        event_type: Type[Event],
        server_switch_event: Optional[Type[Sentinel]] = None,
    ) -> None:
        _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type
        if server_switch_event is not None:
            assert role is SERVER
            if server_switch_event not in self.pending_switch_proposals:
                raise LocalProtocolError(
                    "Received server {} event without a pending proposal".format(
                        server_switch_event
                    )
                )
            _event_type = (event_type, server_switch_event)
        if server_switch_event is None and _event_type is Response:
            self.pending_switch_proposals = set()
        self._fire_event_triggered_transitions(role, _event_type)
        # Special case: the server state does get to see Request
        # events.
        if _event_type is Request:
            assert role is CLIENT
            self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))
        self._fire_state_triggered_transitions()

    def _fire_event_triggered_transitions(
        self,
        role: Type[Sentinel],
        event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],
    ) -> None:
        state = self.states[role]
        try:
            new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]
        except KeyError:
            event_type = cast(Type[Event], event_type)
            raise LocalProtocolError(
                "can't handle event type {} when role={} and state={}".format(
                    event_type.__name__, role, self.states[role]
                )
            ) from None
        self.states[role] = new_state

    def _fire_state_triggered_transitions(self) -> None:
        # We apply these rules repeatedly until converging on a fixed point
        while True:
            start_states = dict(self.states)

            # It could happen that both these special-case transitions are
            # enabled at the same time:
            #
            #    DONE -> MIGHT_SWITCH_PROTOCOL
            #    DONE -> MUST_CLOSE
            #
            # For example, this will always be true of a HTTP/1.0 client
            # requesting CONNECT. If this happens, the protocol switch takes
            # priority. From there the client will either go to
            # SWITCHED_PROTOCOL, in which case it's none of our business when
            # they close the connection, or else the server will deny the
            # request, in which case the client will go back to DONE and then
            # from there to MUST_CLOSE.
            if self.pending_switch_proposals:
                if self.states[CLIENT] is DONE:
                    self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL

            if not self.pending_switch_proposals:
                if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:
                    self.states[CLIENT] = DONE

            if not self.keep_alive:
                for role in (CLIENT, SERVER):
                    if self.states[role] is DONE:
                        self.states[role] = MUST_CLOSE

            # Tabular state-triggered transitions
            joint_state = (self.states[CLIENT], self.states[SERVER])
            changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})
            self.states.update(changes)

            if self.states == start_states:
                # Fixed point reached
                return

    def start_next_cycle(self) -> None:
        if self.states != {CLIENT: DONE, SERVER: DONE}:
            raise LocalProtocolError(
                "not in a reusable state. self.states={}".format(self.states)
            )
        # Can't reach DONE/DONE with any of these active, but still, let's be
        # sure.
        assert self.keep_alive
        assert not self.pending_switch_proposals
        self.states = {CLIENT: IDLE, SERVER: IDLE}
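A minimal sketch of how this internal machine is driven. In normal use `h11.Connection` does this for you; here the internal `ConnectionState` is stepped by hand purely to illustrate the table-driven transitions above (importing from the private `h11._state` module is an assumption of the sketch):

from h11 import CLIENT, SERVER, IDLE, DONE, Request, Response, EndOfMessage
from h11._state import ConnectionState

cs = ConnectionState()
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}

cs.process_event(CLIENT, Request)        # client IDLE -> SEND_BODY; server sees (Request, CLIENT)
cs.process_event(CLIENT, EndOfMessage)   # client SEND_BODY -> DONE
cs.process_event(SERVER, Response)       # server SEND_RESPONSE -> SEND_BODY
cs.process_event(SERVER, EndOfMessage)   # server SEND_BODY -> DONE
assert cs.states == {CLIENT: DONE, SERVER: DONE}

cs.start_next_cycle()                    # both roles back to IDLE for keep-alive reuse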
spaces/DaleChen/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md
DELETED
@@ -1,40 +0,0 @@
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->

<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:

Focus on a single, specific change.
Do not include any unrelated or "extra" modifications.
Provide clear documentation and explanations of the changes made.
Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg

By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->

### Background
<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->

### Changes
<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->

### Documentation
<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->

### Test Plan
<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->

### PR Quality Checklist
- [ ] My pull request is atomic and focuses on a single change.
- [ ] I have thoroughly tested my changes with multiple different prompts.
- [ ] I have considered potential risks and mitigations for my changes.
- [ ] I have documented my changes clearly and comprehensively.
- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->

<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->

<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
spaces/Datasculptor/MusicGen/audiocraft/data/audio_utils.py
DELETED
@@ -1,174 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import sys
import typing as tp

import julius
import torch
import torchaudio


def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
    """Convert audio to the given number of channels.

    Args:
        wav (torch.Tensor): Audio wave of shape [B, C, T].
        channels (int): Expected number of channels as output.
    Returns:
        torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
    """
    *shape, src_channels, length = wav.shape
    if src_channels == channels:
        pass
    elif channels == 1:
        # Case 1:
        # The caller asked 1-channel audio, and the stream has multiple
        # channels, downmix all channels.
        wav = wav.mean(dim=-2, keepdim=True)
    elif src_channels == 1:
        # Case 2:
        # The caller asked for multiple channels, but the input file has
        # a single channel, replicate the audio over all channels.
        wav = wav.expand(*shape, channels, length)
    elif src_channels >= channels:
        # Case 3:
        # The caller asked for multiple channels, and the input file has
        # more channels than requested. In that case return the first channels.
        wav = wav[..., :channels, :]
    else:
        # Case 4: What is a reasonable choice here?
        raise ValueError('The audio file has less channels than requested but is not mono.')
    return wav


def convert_audio(wav: torch.Tensor, from_rate: float,
                  to_rate: float, to_channels: int) -> torch.Tensor:
    """Convert audio to new sample rate and number of audio channels.
    """
    wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
    wav = convert_audio_channels(wav, to_channels)
    return wav


def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
                       loudness_compressor: bool = False, energy_floor: float = 2e-3):
    """Normalize an input signal to a user loudness in dB LKFS.
    Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.

    Args:
        wav (torch.Tensor): Input multichannel audio data.
        sample_rate (int): Sample rate.
        loudness_headroom_db (float): Target loudness of the output in dB LUFS.
        loudness_compressor (bool): Uses tanh for soft clipping.
        energy_floor (float): anything below that RMS level will not be rescaled.
    Returns:
        output (torch.Tensor): Loudness normalized output data.
    """
    energy = wav.pow(2).mean().sqrt().item()
    if energy < energy_floor:
        return wav
    transform = torchaudio.transforms.Loudness(sample_rate)
    input_loudness_db = transform(wav).item()
    # calculate the gain needed to scale to the desired loudness level
    delta_loudness = -loudness_headroom_db - input_loudness_db
    gain = 10.0 ** (delta_loudness / 20.0)
    output = gain * wav
    if loudness_compressor:
        output = torch.tanh(output)
    assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
    return output


def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
    """Utility function to clip the audio with logging if specified."""
    max_scale = wav.abs().max()
    if log_clipping and max_scale > 1:
        clamp_prob = (wav.abs() > 1).float().mean().item()
        print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
              clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
    wav.clamp_(-1, 1)


def normalize_audio(wav: torch.Tensor, normalize: bool = True,
                    strategy: str = 'peak', peak_clip_headroom_db: float = 1,
                    rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
                    loudness_compressor: bool = False, log_clipping: bool = False,
                    sample_rate: tp.Optional[int] = None,
                    stem_name: tp.Optional[str] = None) -> torch.Tensor:
    """Normalize the audio according to the prescribed strategy (see after).

    Args:
        wav (torch.Tensor): Audio data.
        normalize (bool): if `True` (default), normalizes according to the prescribed
            strategy (see after). If `False`, the strategy is only used in case clipping
            would happen.
        strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
            i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
            with extra headroom to avoid clipping. 'clip' just clips.
        peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
        rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
            than the `peak_clip` one to avoid further clipping.
        loudness_headroom_db (float): Target loudness for loudness normalization.
        loudness_compressor (bool): If True, uses tanh based soft clipping.
        log_clipping (bool): If True, basic logging on stderr when clipping still
            occurs despite strategy (only for 'rms').
        sample_rate (int): Sample rate for the audio data (required for loudness).
        stem_name (Optional[str]): Stem name for clipping logging.
    Returns:
        torch.Tensor: Normalized audio.
    """
    scale_peak = 10 ** (-peak_clip_headroom_db / 20)
    scale_rms = 10 ** (-rms_headroom_db / 20)
    if strategy == 'peak':
        rescaling = (scale_peak / wav.abs().max())
        if normalize or rescaling < 1:
            wav = wav * rescaling
    elif strategy == 'clip':
        wav = wav.clamp(-scale_peak, scale_peak)
    elif strategy == 'rms':
        mono = wav.mean(dim=0)
        rescaling = scale_rms / mono.pow(2).mean().sqrt()
        if normalize or rescaling < 1:
            wav = wav * rescaling
        _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
    elif strategy == 'loudness':
        assert sample_rate is not None, "Loudness normalization requires sample rate."
        wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
        _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
    else:
        assert wav.abs().max() < 1
        assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
    return wav


def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
    """Convert audio to float 32 bits PCM format.
    """
    if wav.dtype.is_floating_point:
        return wav
    else:
        assert wav.dtype == torch.int16
        return wav.float() / 2**15


def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
    """Convert audio to int 16 bits PCM format.

    ..Warning:: There exist many formulas for doing this conversion. None are perfect
    due to the asymmetry of the int16 range. One either has possible clipping, DC offset,
    or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
    it is possible that `i16_pcm(f32_pcm)) != Identity`.
    """
    if wav.dtype.is_floating_point:
        assert wav.abs().max() <= 1
        candidate = (wav * 2 ** 15).round()
        if candidate.max() >= 2 ** 15:  # clipping would occur
            candidate = (wav * (2 ** 15 - 1)).round()
        return candidate.short()
    else:
        assert wav.dtype == torch.int16
        return wav
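A minimal usage sketch of the helpers above, assuming the audiocraft package is installed so the module is importable as `audiocraft.data.audio_utils`; the waveform is random noise purely for illustration:

import torch
from audiocraft.data.audio_utils import convert_audio_channels, normalize_audio

wav = torch.rand(1, 2, 16000) * 2 - 1            # [B, C, T]: 1 item, stereo, ~1 s at 16 kHz

mono = convert_audio_channels(wav, channels=1)   # downmix to mono -> shape [1, 1, 16000]
safe = normalize_audio(mono[0], strategy="rms", rms_headroom_db=18)  # per-item [C, T] input
print(mono.shape, safe.abs().max())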
spaces/Datasculptor/sd-prism/share_btn.py
DELETED
@@ -1,100 +0,0 @@
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
    <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
    <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
</svg>"""

loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
   style="color: #ffffff;
"
   xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""

share_js = """async () => {
    async function uploadFile(file){
        const UPLOAD_URL = 'https://huggingface.co/uploads';
        const response = await fetch(UPLOAD_URL, {
            method: 'POST',
            headers: {
                'Content-Type': file.type,
                'X-Requested-With': 'XMLHttpRequest',
            },
            body: file, /// <- File inherits from Blob
        });
        const url = await response.text();
        return url;
    }

    async function getInputImgFile(imgEl){
        const res = await fetch(imgEl.src);
        const blob = await res.blob();
        const imgId = Date.now() % 200;
        const isPng = imgEl.src.startsWith(`data:image/png`);
        if(isPng){
            const fileName = `sd-perception-${{imgId}}.png`;
            return new File([blob], fileName, { type: 'image/png' });
        }else{
            const fileName = `sd-perception-${{imgId}}.jpg`;
            return new File([blob], fileName, { type: 'image/jpeg' });
        }
    }

    const gradioEl = document.querySelector('body > gradio-app');
    // const gradioEl = document.querySelector("gradio-app").shadowRoot;
    const inputImgEl = gradioEl.querySelector('#input-img img');
    const imgEls = gradioEl.querySelectorAll('#generated-gallery img');
    const promptTxt = gradioEl.querySelector('#translated textarea').value;
    let titleTxt = promptTxt;
    if(titleTxt.length > 100){
        titleTxt = titleTxt.slice(0, 100) + ' ...';
    }
    const shareBtnEl = gradioEl.querySelector('#share-btn');
    const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
    const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');

    if(!imgEls.length){
        return;
    };

    shareBtnEl.style.pointerEvents = 'none';
    shareIconEl.style.display = 'none';
    loadingIconEl.style.removeProperty('display');
|
60 |
-
|
61 |
-
const files = await Promise.all(
|
62 |
-
[...imgEls].map(async (imgEl) => {
|
63 |
-
const res = await fetch(imgEl.src);
|
64 |
-
const blob = await res.blob();
|
65 |
-
const imgId = Date.now() % 200;
|
66 |
-
const fileName = `sd-perception-${{imgId}}.jpg`;
|
67 |
-
return new File([blob], fileName, { type: 'image/jpeg' });
|
68 |
-
})
|
69 |
-
);
|
70 |
-
const inputFile = await getInputImgFile(inputImgEl);
|
71 |
-
files.push(inputFile);
|
72 |
-
|
73 |
-
const urls = await Promise.all(files.map((f) => uploadFile(f)));
|
74 |
-
const urlInputImg = urls.pop();
|
75 |
-
const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
|
76 |
-
const htmlImgsMd = htmlImgs.join(`\n`);
|
77 |
-
|
78 |
-
const descriptionMd = `#### Input img:
|
79 |
-
<img src='${urlInputImg}' style='max-height: 350px;'>
|
80 |
-
|
81 |
-
#### Caption:
|
82 |
-
${promptTxt}
|
83 |
-
|
84 |
-
#### Generations:
|
85 |
-
<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
|
86 |
-
${htmlImgsMd}
|
87 |
-
</div>`;
|
88 |
-
|
89 |
-
const params = new URLSearchParams({
|
90 |
-
title: titleTxt,
|
91 |
-
description: descriptionMd,
|
92 |
-
});
|
93 |
-
|
94 |
-
const paramsStr = params.toString();
|
95 |
-
window.open(`https://huggingface.co/spaces/pharma/sd-prism/discussions/new?${paramsStr}`, '_blank');
|
96 |
-
|
97 |
-
shareBtnEl.style.removeProperty('pointer-events');
|
98 |
-
shareIconEl.style.removeProperty('display');
|
99 |
-
loadingIconEl.style.display = 'none';
|
100 |
-
}"""
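
For context on how a module like this is consumed: in Gradio-3-era Spaces, the two icon snippets are dropped into `gr.HTML` blocks and `share_js` is attached to a button as a pure client-side callback. The sketch below is an assumption about the surrounding app (this commit only records `share_btn.py` itself); the element IDs mirror the selectors hard-coded in `share_js` above, and `_js=` is the Gradio 3.x argument for JavaScript-only handlers.

import gradio as gr
from share_btn import community_icon_html, loading_icon_html, share_js  # hypothetical wiring

with gr.Blocks() as demo:
    # elem_id values must match the selectors used inside share_js.
    input_img = gr.Image(elem_id="input-img")
    translated = gr.Textbox(elem_id="translated")
    gallery = gr.Gallery(elem_id="generated-gallery")

    community_icon = gr.HTML(community_icon_html)
    loading_icon = gr.HTML(loading_icon_html)
    share_button = gr.Button("Share to community", elem_id="share-btn")

    # No Python callback: the upload and discussion-post flow runs in the browser.
    share_button.click(None, [], [], _js=share_js)

demo.launch()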