Commit 4d8d127 · Parent(s): 05b7341
Update parquet files (step 48 of 249)

This view is limited to 50 files because the commit contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Blackberry App Download The Secret to Boosting Your Productivity and Entertainment.md +0 -31
- spaces/1gistliPinn/ChatGPT4/Examples/Comprendre Les Femmes Pierre Daco.pdf VERIFIED.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Descargar Photoscore Ultimate 7 Crack 67.md +0 -102
- spaces/1phancelerku/anime-remove-background/Experience the Thrill of Beyblade Burst with MOD APK Version 11.0.4.md +0 -162
- spaces/2hack2furious/anonymizer/modules.py +0 -160
- spaces/52Hz/SRMNet_thesis/model_arch/SRMNet_SWFF.py +0 -265
- spaces/AE-NV/sentiment-productreview/README.md +0 -12
- spaces/AIConsultant/MusicGen/tests/modules/test_transformer.py +0 -253
- spaces/AIZ2H/06-Streamlit-NLP-Image-Semantic-Search-Images/app.py +0 -185
- spaces/AIZero2Hero4Health/7-ClinicalTerminologyUIUX-GR/app.py +0 -327
- spaces/AbandonedMuse/UnlimitedMusicGen/README.md +0 -210
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/sum.ts +0 -3
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/DeepAi.py +0 -77
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatFree.py +0 -48
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ezcht.py +0 -35
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/los/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/orbit/Factory.js +0 -13
- spaces/AlexWortega/ruImageCaptionong/app.py +0 -198
- spaces/Alpaca233/SadTalker/src/utils/croper.py +0 -144
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/waiter.h +0 -83
- spaces/Amrrs/DragGan-Inversion/stylegan_human/training/networks_stylegan2.py +0 -974
- spaces/Amrrs/DragGan-Inversion/training/augment.py +0 -562
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/vq_diffusion.md +0 -20
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_multi.py +0 -273
- spaces/AnimalEquality/chatbot/lv_recipe_chatbot/edamam_api.py +0 -8
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/embeddings.py +0 -80
- spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/position.py +0 -46
- spaces/Anthony7906/MengHuiMXD_GPT/modules/pdf_func.py +0 -180
- spaces/Apex-X/GODROOP/roop/core.py +0 -215
- spaces/Artrajz/vits-simple-api/gunicorn_config.py +0 -19
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py +0 -36
- spaces/Awesimo/jojogan/op/conv2d_gradfix.py +0 -227
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py +0 -187
- spaces/BIASLab/sars-cov-2-classification-fcgr/src/models/resnet50_9mers.py +0 -103
- spaces/Bart92/RVC_HF/i18n.py +0 -43
- spaces/Benson/text-generation/Examples/Apkaward Chess.md +0 -55
- spaces/Benson/text-generation/Examples/Descargar El Juego Talking Tom Hero Dash Para PC.md +0 -85
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py +0 -170
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/wait.py +0 -152
- spaces/CForGETaass/vits-uma-genshin-honkai/utils.py +0 -225
- spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/transforms.py +0 -102
- spaces/Cat125/text-generator-v3/classes.py +0 -28
- spaces/Cat125/text-generator-v3/train.py +0 -65
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/server.py +0 -148
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_sockets.py +0 -160
- spaces/Dagfinn1962/prodia2/flipper.py +0 -31
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/other/init_env.py +0 -37
- spaces/DollieHell/pisa/Dockerfile +0 -21
- spaces/DuckyPolice/DeciDiffusion-v1-0/README.md +0 -14
- spaces/EDGAhab/Paimon-Talking/app.py +0 -97
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Blackberry App Download The Secret to Boosting Your Productivity and Entertainment.md
DELETED
@@ -1,31 +0,0 @@
-<br />
-<h1>How to Download Blackberry Apps for Your Smartphone</h1>
-<p>Blackberry is one of the most popular smartphone brands in the world, with millions of loyal users who enjoy its features and security. However, if you want to make the most of your Blackberry device, you need to download some apps that can enhance your experience and productivity. In this article, we will show you how to download Blackberry apps for your smartphone in a few easy steps.</p>
-<h2>Step 1: Find the Blackberry App World</h2>
-<p>The Blackberry App World is the official app store for Blackberry devices, where you can find thousands of apps for various categories, such as games, social media, business, entertainment, and more. To access the Blackberry App World, you need to have a Blackberry ID and a data plan or Wi-Fi connection. You can either visit the website <a href="https://appworld.blackberry.com/webstore/">https://appworld.blackberry.com/webstore/</a> on your browser or download the app from <a href="https://www.blackberry.com/us/en/services/app-world/download">https://www.blackberry.com/us/en/services/app-world/download</a> on your computer and transfer it to your device via USB cable.</p>
-<h2>blackberry app download</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://byltly.com/2uKyQB">https://byltly.com/2uKyQB</a></b></p><br /><br />
-<h2>Step 2: Browse or Search for Apps</h2>
-<p>Once you have the Blackberry App World on your device, you can start browsing or searching for apps that suit your needs and preferences. You can use the categories or the featured sections to discover new and popular apps, or you can use the search bar to type in keywords or app names. You can also filter your results by price, rating, or compatibility.</p>
-<h2>Step 3: Download and Install Apps</h2>
-<p>When you find an app that you like, you can tap on it to see more details, such as description, screenshots, reviews, and permissions. If you decide to download it, you can tap on the "Download" or "Buy" button, depending on whether the app is free or paid. You may need to enter your Blackberry ID and password or your payment information if required. After that, the app will start downloading and installing on your device. You can see the progress on the notification bar or on the app page. Once the app is installed, you can launch it from your home screen or from the app list.</p>
-<h2>Conclusion</h2>
-<p>Downloading Blackberry apps for your smartphone is a simple and fun process that can open up a world of possibilities for your device. Whether you want to play games, chat with friends, work on documents, or watch videos, you can find an app for that on the Blackberry App World. Just follow the steps above and enjoy your new apps!</p>
-
-<h2>How to Manage Your Blackberry Apps</h2>
-<p>After downloading and installing your Blackberry apps, you may want to manage them to keep your device organized and optimized. You can do this by using the Blackberry App World or the options menu on your device. Here are some tips on how to manage your Blackberry apps:</p>
-<ul>
-<li>To delete an app, you can either go to the Blackberry App World, tap on "My World", find the app you want to delete, and tap on the trash icon, or you can go to the options menu on your device, tap on "Applications", find the app you want to delete, and tap on "Delete".</li>
-<li>To update an app, you can either go to the Blackberry App World, tap on "My World", find the app you want to update, and tap on the update icon, or you can go to the options menu on your device, tap on "Applications", find the app you want to update, and tap on "Check for Updates".</li>
-<li>To move an app to a different folder or screen, you can go to the home screen or the app list, press and hold the app icon until it starts to wiggle, and then drag it to the desired location. You can also create new folders by tapping on the menu button and selecting "Add Folder".</li>
-<li>To change the permissions of an app, you can go to the options menu on your device, tap on "Applications", find the app you want to change, and tap on "Edit Permissions". You can then adjust the settings for each permission category, such as contacts, location, camera, etc.</li>
-</ul>
-<h2>How to Troubleshoot Your Blackberry Apps</h2>
-<p>Sometimes, your Blackberry apps may not work properly or cause some issues on your device. This can be due to various reasons, such as compatibility problems, bugs, corrupted files, low memory, etc. If you encounter any problems with your Blackberry apps, here are some steps you can take to troubleshoot them:</p>
-<ul>
-<li>Restart your device. This can help clear any temporary glitches or conflicts that may affect your apps. To restart your device, you can either press and hold the power button and select "Restart", or you can remove and reinsert the battery.</li>
-<li>Update your device software. This can help fix any bugs or compatibility issues that may affect your apps. To update your device software, you can either go to the options menu on your device, tap on "Device", tap on "Software Updates", and follow the instructions, or you can connect your device to your computer and use the Blackberry Desktop Software.</li>
-<li>Reinstall the app. This can help fix any corrupted or missing files that may affect your app. To reinstall an app, you can either go to the Blackberry App World, tap on "My World", find the app you want to reinstall, and tap on the reinstall icon, or you can delete the app and download it again from the Blackberry App World.</li>
-<li>Contact the app developer. If none of the above steps work, you may need to contact the app developer for further assistance. You can find their contact information on the app page in the Blackberry App World or on their website.</li>
-</ul></p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Comprendre Les Femmes Pierre Daco.pdf VERIFIED.md
DELETED
@@ -1,9 +0,0 @@
-<br />
-<p>comprendre les femmes pierre daco a pr6sent6 linstrument a 'enregistrement, a savoir que pour autant qu'il s'agit de. pierre daco. comprendre les femmes et leur psychologie. geostatistics, geostatistics pdf, geostatistics course, geostatistics jobs, geostatistics modeling. comprendre les femmes pierre daco pdf download </p>
-<h2>Comprendre Les Femmes Pierre Daco.pdf</h2><br /><p><b><b>Download</b> ► <a href="https://imgfil.com/2uxY1f">https://imgfil.com/2uxY1f</a></b></p><br /><br />
-<p>le groupe franais a cr, en outre, un rseau virtuel international ddi aux femmes, dont l'objectif dclar est de contribuer faire voluer les. l'histoire de la presse, tome iv, pages 288-313. le groupe franais a cr, en outre, un rseau virtuel international ddi aux femmes, dont l'objectif dclar est de contribuer faire voluer les. (top) get comprendre les femmes pierre daco.pdf (pdf). get comprendre les femmes pierre daco.pdf comprendre les femmes pierre daco emil au-delat. erwin k. lerchner. dalla schlesinger-borgen. a pr6sent6 linstrument a 'enregistrement, a savoir que pour autant qu'il s'agit de. </p>
-<p>une femme. pour lui-mme. il faut comprendre qu'il est difficile d'associer des attitudes qui sont. actuellement. 90, dolo. dehors du champ de. l'inceste, l'homosexualité et. les troubles sexuels, ils ont. sens si dangereux. de l'homosexualité. la pornographie, la pornographie. il est. nous donnent. mais nous voil. douloureux. de comprendre le sens de la sexualité des femmes. de comprendre leur. et de leur. et de leur entretien. l'homosexualité nous aide a. comprendre les femmes. et il est. dangereux de la voir. pour cela. nous. pour servir d'exemple.. 3. pierre daco - psychothrapeute belge n en 1936 et dcd coxyde, belgique, en. etre. a. </p>
-<p>femininity, the invention of the female. the many styles of expression of this creation is not a simple affair.. pierre daco. 9). comprendre. 9. comprendre les femmes. que les femmes. un des. e. nous ne pouvons. </p>
-<p></p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Descargar Photoscore Ultimate 7 Crack 67.md
DELETED
@@ -1,102 +0,0 @@
-
-<h1>Descargar Photoscore Ultimate 7 Crack 67: A Powerful Software for Music Scanning and Notation</h1>
-
-<p>If you are looking for a software that can scan and edit music scores, whether they are printed or handwritten, you might want to check out Descargar Photoscore Ultimate 7 Crack 67. This software is a comprehensive solution for music scanning and notation. It has many features and benefits that make it a reliable and efficient tool for musicians, composers and teachers.</p>
-
-<h2>What is Descargar Photoscore Ultimate 7 Crack 67?</h2>
-
-<p>Descargar Photoscore Ultimate 7 Crack 67 is a software package that was designed by Neuratron, a company that specializes in music software and solutions. It is a desktop application that can run on Windows and Mac OS platforms. It can scan and edit music scores, whether they are printed or handwritten. It can also support the latest scanners and formats, such as PDF, JPEG, TIFF and more.</p>
-<h2>Descargar Photoscore Ultimate 7 Crack 67</h2><br /><p><b><b>Download File</b> ► <a href="https://imgfil.com/2uxXZ8">https://imgfil.com/2uxXZ8</a></b></p><br /><br />
-
-<p>Descargar Photoscore Ultimate 7 Crack 67 has many features and functions that allow users to perform various tasks with music scores, such as:</p>
-
-<ul>
-<li>Music scanning: Users can scan printed or handwritten music scores using a scanner or a camera. The software can recognize virtually every detail, such as notes, rests, clefs, key signatures, time signatures, accidentals, articulations, dynamics, lyrics and more.</li>
-<li>Music editing: Users can edit scanned music scores using various tools and options. They can correct errors, transpose, resize, reformat, add or delete elements, change fonts and colors and more.</li>
-<li>Music playback: Users can play back scanned music scores using various instruments and sounds. They can also adjust the tempo, volume, balance and expression.</li>
-<li>Music export: Users can export scanned music scores in various formats and ways. They can export them as images, PDFs, MIDI files or MusicXML files. They can also export them to other music software, such as Sibelius, Finale, Dorico and more.</li>
-</ul>
-
-<h2>What are the benefits of Descargar Photoscore Ultimate 7 Crack 67?</h2>
-
-<p>Descargar Photoscore Ultimate 7 Crack 67 has many benefits that make it a valuable software for music scanning and notation. Some of these benefits are:</p>
-
-<ul>
-<li>It is a comprehensive solution that can scan and edit printed or handwritten music scores.</li>
-<li>It is a user-friendly software that has a intuitive interface and easy-to-use tools.</li>
-<li>It is a flexible software that can be customized and integrated with other applications using MusicXML files.</li>
-<li>It is a fast software that can scan and process large volumes of music scores efficiently.</li>
-<li>It is a reliable software that has been tested and validated by many users and organizations around the world.</li>
-</ul>
-
-<h2>How to download and install Descargar Photoscore Ultimate 7 Crack 67?</h2>
-
-<p>If you want to download and install Descargar Photoscore Ultimate 7 Crack 67 on your computer, you can follow these steps:</p>
-
-<ol>
-<li>Go to the official website of Neuratron and register for a free trial account.</li>
-<li>Download the installation file for Descargar Photoscore Ultimate 7 Crack 67 from the website or from the link provided in the email confirmation.</li>
-<li>Run the installation file and follow the instructions on the screen to install the software on your computer.</li>
-<li>Download the crack file for Descargar Photoscore Ultimate 7 Crack 67 from the link provided in the email confirmation or from another source.</li>
-<li>Copy the crack file and paste it into the installation folder of Descargar Photoscore Ultimate 7 Crack 67 on your computer.</li>
-<li>Run the crack file and activate the software using the serial number provided in the email confirmation or from another source.</li>
-</ol>
-
-<p>Congratulations! You have successfully downloaded and installed Descargar Photoscore Ultimate 7 Crack 67 on your computer. You can now start using the software to scan and edit your music scores.</p>
-
-<h2>Conclusion</h2>
-
-<p>Descargar Photoscore Ultimate 7 Crack 67 is a powerful software for music scanning and notation. It can scan and edit printed or handwritten music scores. It has many features and benefits that make it a reliable and efficient tool for musicians, composers and teachers. If you want to try out this software, you can download and install it on your computer using the steps provided above. We hope you found this article helpful and informative. Thank you for reading!</p>
-<h2>Conclusion</h2>
-
-<p>Descargar Photoscore Ultimate 7 Crack 67 is a powerful software for music scanning and notation. It can scan and edit printed or handwritten music scores. It has many features and benefits that make it a reliable and efficient tool for musicians, composers and teachers. If you want to try out this software, you can download and install it on your computer using the steps provided above. We hope you found this article helpful and informative. Thank you for reading!</p> 3cee63e6c2<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Experience the Thrill of Beyblade Burst with MOD APK Version 11.0.4.md
DELETED
@@ -1,162 +0,0 @@
-<br />
-<h1>Beyblade Burst Mod Apk: A Guide for Fans of Spinning Tops</h1>
-<p>If you are a fan of spinning tops, you might have heard of Beyblade, a popular toy and anime franchise that has been around since the late 1990s. Beyblade is a game where players launch their customized tops, called Beys, into a stadium and try to knock out their opponents' Beys. The game has evolved over the years, with new generations of Beys, characters, and anime series. One of the latest iterations is Beyblade Burst, which has its own app that lets you create, customize, and battle your Beys online.</p>
-<h2>beyblade burst mod apk</h2><br /><p><b><b>Download</b> ○○○ <a href="https://jinyurl.com/2uNMem">https://jinyurl.com/2uNMem</a></b></p><br /><br />
-<p>However, if you want to enjoy the full features and benefits of the game, you might want to try Beyblade Burst Mod Apk, a modified version of the app that gives you unlimited money, access to all Beys, and more. In this article, we will explain what Beyblade Burst is, what Beyblade Burst Mod Apk is, how to download and install it, and some tips and tricks to master the game. We will also share some reviews and ratings of the game, as well as a comparison table of Beyblade Burst and other similar games. Finally, we will answer some frequently asked questions about Beyblade Burst.</p>
-<h2>What is Beyblade Burst?</h2>
-<h3>The origin and popularity of Beyblade</h3>
-<p>Beyblade is a toy line created by Takara Tomy in Japan in 1999. It was inspired by traditional spinning tops called beigoma, which were popular in Japan in the early 20th century. The name Beyblade comes from combining the words "beigoma" and "blade". The original toy line consisted of plastic or metal tops that had interchangeable parts, such as an energy layer, a forge disc, a performance tip, and an optional driver. Each part had different attributes that affected the performance of the top in battle.</p>
-<p>Beyblade also spawned an anime series that followed the adventures of a group of young Bladers who competed in tournaments using their Beys. The anime series was adapted into various languages and aired in many countries around the world. The franchise also expanded into manga, video games, movies, merchandise, and more. As of 2020, Beyblade has sold over 500 million toys worldwide.</p>
-<h3>The features and gameplay of Beyblade Burst</h3>
-<p>Beyblade Burst is the third generation of the Beyblade franchise, which started in 2015. It introduced new features such as burst finishes, where a top can explode into pieces during battle; avatar attacks, where a top can unleash a powerful attack based on its energy layer; slingshock, where a top can ride on rails in the stadium; hypersphere, where a top can jump high in the air; speedstorm, where a top can create powerful wind currents; and dynamite battle, where a top can change its height during battle.</p>
-<p>The gameplay of Beyblade Burst is similar to previous generations. Players launch their Beys into a stadium using a launcher device and try to knock out or burst their opponents' Beys. The winner is determined by how many points they score based on the outcome of the battle. For example, a ring out finish is worth one point, a burst finish is worth two points, and a survivor finish is worth one point if the opponent's top stops spinning first.</p>
-<p>Beyblade Burst also has an app that allows players to scan their physical Beys and use them in the virtual world. The app has various modes, such as story mode, where players can follow the plot of the anime series; battle mode, where players can challenge other players online or offline; customization mode, where players can create and modify their Beys; and collection mode, where players can view and manage their Beys. The app also has a ranking system, where players can earn points and badges based on their performance.</p>
-<p>beyblade burst mod apk unlimited money<br />
-beyblade burst mod apk download latest version<br />
-beyblade burst mod apk android 1<br />
-beyblade burst mod apk hack<br />
-beyblade burst mod apk revdl<br />
-beyblade burst mod apk offline<br />
-beyblade burst mod apk 10.2<br />
-beyblade burst mod apk no ads<br />
-beyblade burst mod apk all parts unlocked<br />
-beyblade burst mod apk free shopping<br />
-beyblade burst mod apk unlimited everything<br />
-beyblade burst mod apk rexdl<br />
-beyblade burst mod apk happymod<br />
-beyblade burst mod apk 2023<br />
-beyblade burst mod apk all beys unlocked<br />
-beyblade burst mod apk unlimited coins and gems<br />
-beyblade burst mod apk latest update<br />
-beyblade burst mod apk online multiplayer<br />
-beyblade burst mod apk obb<br />
-beyblade burst mod apk pure<br />
-beyblade burst mod apk unlimited spins<br />
-beyblade burst mod apk an1<br />
-beyblade burst mod apk all characters unlocked<br />
-beyblade burst mod apk unlimited energy<br />
-beyblade burst mod apk vip unlocked<br />
-beyblade burst mod apk new version<br />
-beyblade burst mod apk unlimited tickets<br />
-beyblade burst mod apk apkpure<br />
-beyblade burst mod apk all stadiums unlocked<br />
-beyblade burst mod apk unlimited diamonds<br />
-beyblade burst mod apk god mode<br />
-beyblade burst mod apk 10.1.1<br />
-beyblade burst mod apk for pc<br />
-beyblade burst mod apk all codes unlocked<br />
-beyblade burst mod apk unlimited qr codes<br />
-beyblade burst mod apk 10.0.3<br />
-beyblade burst mod apk mega.nz<br />
-beyblade burst mod apk all levels unlocked<br />
-beyblade burst mod apk unlimited scan codes<br />
-beyblade burst mod apk mediafıre link</p>
-<h2>What is Beyblade Burst Mod Apk?</h2>
-<h3>The benefits and drawbacks of using a modded version of the game</h3>
-<p>Beyblade Burst Mod Apk is a modified version of the official Beyblade Burst app that gives players some advantages and disadvantages. Some of the benefits of using Beyblade Burst Mod Apk are:</p>
-<ul>
-<li>Unlimited money: Players can get unlimited coins and gems, which are the in-game currencies used to buy new Beys, parts, launchers, stadiums, and more.</li>
-<li>Access to all Beys: Players can unlock and use all the Beys available in the game, including rare and exclusive ones.</li>
-<li>No ads: Players can enjoy the game without any interruptions from ads.</li>
-</ul>
-<p>However, there are also some drawbacks of using Beyblade Burst Mod Apk, such as:</p>
-<ul>
-<li>Risk of malware: Players might download a corrupted or infected file that could harm their device or steal their personal information.</li>
-<li>Risk of ban: Players might get banned from the official game server if they are detected using a modded version of the game.</li>
-<li>Lack of updates: Players might miss out on the latest features and bug fixes that are released by the developers.</li>
-</ul>
-<h3>The steps to download and install Beyblade Burst Mod Apk</h3>
-<p>If you want to try Beyblade Burst Mod Apk, you need to follow these steps:</p>
-<ol>
-<li>Find a reliable source: You need to find a trustworthy website that offers Beyblade Burst Mod Apk for download. You can search online or ask for recommendations from other players.</li>
-<li>Download the file: You need to download the apk file from the website. Make sure you have enough storage space on your device and a stable internet connection.</li>
-<li>Enable unknown sources: You need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store.</li>
-<li>Install the file: You need to locate the apk file on your device and tap on it to install it. Follow the instructions on the screen and wait for the installation to complete.</li>
-<li>Launch the game: You need to open the game and enjoy playing with unlimited money and access to all Beys.</li>
-</ol> <h2>What are some tips and tricks to master Beyblade Burst?</h2>
-<h3>How to choose the best Bey for your battle style</h3>
-<p>One of the most important aspects of Beyblade Burst is choosing the right Bey for your battle style. There are four types of Beys: attack, defense, stamina, and balance. Each type has its own strengths and weaknesses, and can perform better or worse depending on the opponent and the stadium. Here are some general guidelines for choosing the best Bey for your battle style:</p>
-<ul>
-<li>Attack: Attack Beys are fast, aggressive, and powerful. They can deal a lot of damage and burst their opponents easily. However, they also have low stamina and stability, and can be knocked out or burst themselves if they miss their target. Attack Beys are best suited for players who like to take risks and finish battles quickly. They work well against stamina Beys, but struggle against defense Beys.</li>
-<li>Defense: Defense Beys are sturdy, stable, and resilient. They can withstand a lot of hits and resist being burst or knocked out. However, they also have low speed and mobility, and can be outspun by stamina Beys. Defense Beys are best suited for players who like to play safe and endure battles. They work well against attack Beys, but struggle against stamina Beys.</li>
-<li>Stamina: Stamina Beys are smooth, efficient, and durable. They can spin for a long time and conserve their energy. However, they also have low attack and defense, and can be burst or knocked out by powerful hits. Stamina Beys are best suited for players who like to play smart and outlast battles. They work well against defense Beys, but struggle against attack Beys.</li>
-<li>Balance: Balance Beys are versatile, adaptable, and unpredictable. They can perform well in any situation and have a mix of attack, defense, and stamina. However, they also have no clear advantage or disadvantage over any other type, and can be outperformed by specialized Beys. Balance Beys are best suited for players who like to play flexible and surprise battles. They work well against any type of Bey, but have no clear edge over them.</li>
-</ul>
-<h3>How to use special tiles, skills, and avatar attacks</h3>
-<p>Beyblade Burst has some special features that can enhance your gameplay and give you an edge over your opponents. Some of these features are:</p>
-<ul>
-<li>Special tiles: These are colored tiles that appear on the stadium floor during battle. They can boost your speed, power, or stamina depending on their color. You can activate them by landing on them with your Bey or by tapping on them on the screen.</li>
-<li>Skills: These are abilities that you can use during battle to improve your performance or hinder your opponent's performance. They can be activated by filling up a gauge on the screen or by performing certain actions with your Bey. Some examples of skills are speed boost, power boost, stamina boost, shield, stun, slow down, etc.</li>
-<li>Avatar attacks: These are special attacks that you can unleash when your burst gauge is full. They are based on the energy layer of your Bey and can deal massive damage to your opponent's Bey or even burst it instantly. You can activate them by tapping on the screen or by shouting "Let it rip!" into your device's microphone.</li>
-</ul>
-<h3>How to participate in tournaments and leagues</h3>
-<p>Beyblade Burst also has some competitive modes that allow you to test your skills against other players from around the world. Some of these modes are:</p>
-<ul>
-<li>Tournaments: These are events that you can join or create where you compete with other players in a series of battles until there is only one winner left. You can choose the rules, the stadium, the number of participants, and the prizes for the tournament.</li>
-<li>Leagues: These are seasons that you can join where you compete with other players in a ranking system based on your points and badges. You can earn points by winning battles and badges by completing challenges. You can also get rewards based on your rank at the end of the season.</li>
-</ul> <h2>What are some reviews and ratings of Beyblade Burst?</h2>
-<h3>The positive and negative feedback from users and critics</h3>
-<p>Beyblade Burst has received mixed reviews and ratings from users and critics. Some of the positive feedback are:</p>
-<ul>
-<li>The game is fun, addictive, and challenging. It captures the excitement and thrill of the anime series and the toy line.</li>
-<li>The game has great graphics, sound effects, and animations. It makes the battles look realistic and immersive.</li>
-<li>The game has a lot of content, variety, and customization options. It offers many modes, Beys, parts, stadiums, and features to choose from and explore.</li>
-</ul>
-<p>Some of the negative feedback are:</p>
-<ul>
-<li>The game is buggy, glitchy, and laggy. It crashes frequently, freezes randomly, or disconnects from the server.</li>
-<li>The game is unfair, unbalanced, and pay-to-win. It favors players who spend real money to buy better Beys, parts, or skills.</li>
-<li>The game is repetitive, boring, and easy. It lacks depth, strategy, and challenge. It becomes monotonous and predictable after a while.</li>
-</ul>
-<h3>A comparison table of Beyblade Burst and other similar games</h3>
-<p>Beyblade Burst is not the only game that involves spinning tops and battles. There are other similar games that you might want to check out. Here is a comparison table of Beyblade Burst and some of its competitors:</p>
-<table>
-<tr>
-<th>Game</th>
-<th>Developer</th>
-<th>Platform</th>
-<th>Features</th>
-<th>Ratings</th>
-</tr>
-<tr>
-<td>Beyblade Burst</td>
-<td>Hasbro Inc.</td>
-<td>Android, iOS</td>
-<td>Create, customize, and battle your Beys online or offline; scan your physical Beys and use them in the virtual world; follow the story of the anime series; participate in tournaments and leagues; use special tiles, skills, and avatar attacks.</td>
-<td>4.1 out of 5 stars on Google Play; 4.6 out of 5 stars on App Store; 7.5 out of 10 on IGN.</td>
-</tr>
-<tr>
-<td>Battle of Spin Blade</td>
-<td>BeyBlade Battle Games</td>
-<td>Android</td>
-<td>Choose from over 100 Beys and battle against other players online or offline; customize your Beys with different parts and colors; use power-ups and special moves to win battles; collect coins and gems to unlock new Beys and items.</td>
-<td>4.0 out of 5 stars on Google Play.</td>
-</tr>
-<tr>
-<td>Takara Tomy Beyblade Burst Superking B-173 Random Booster Vol.22 (Japan Import)</td>
-<td>Takara Tomy</td>
-<td>Nintendo Switch</td>
-<td>Play as your favorite characters from the anime series and use their Beys in battle; enjoy the realistic graphics and physics of the game; experience the new dynamite battle system that allows you to change your Bey's height during battle; compete with other players online or locally.</td>
-<td>4.7 out of 5 stars on Amazon Japan; 8 out of 10 on Nintendo Life.</td>
-</tr>
-<tr><td>BeyWarriors: BeyRaiderz</td><td>Nelvana Digital Inc.</td><td>iOS</td><td>Race your BeyRaiderz vehicles through different tracks and collect tokens; use your tokens to unleash powerful attacks on your opponents; customize your vehicles with different colors and decals; challenge your friends in multiplayer mode.</td><td>3.9 out of 5 stars on App Store.</td></tr></table>
-<h2>Conclusion and FAQs</h2>
-<h3>A summary of the main points and a call to action</h3>
-<p>In conclusion, Beyblade Burst is a game that lets you create, customize, and battle your Beys online or offline. It is based on the popular toy and anime franchise that has been around since 1999. It has many features, such as burst finishes, avatar attacks, slingshock, hypersphere, speedstorm, and dynamite battle. However, if you want to enjoy the full benefits of the game, you might want to try Beyblade Burst Mod Apk, a modified version of the app that gives you unlimited money, access to all Beys, and more. However, you also need to be aware of the risks of using a modded version of the game, such as malware, ban, or lack of updates. You also need to follow some steps to download and install Beyblade Burst Mod Apk safely. Moreover, you can improve your skills by following some tips and tricks to choose the best Bey for your battle style, use special tiles, skills, and avatar attacks, and participate in tournaments and leagues. You can also compare Beyblade Burst with other similar games and read some reviews and ratings of the game. If you have any questions about Beyblade Burst, you can check out the FAQs section below.</p>
-<p>If you are a fan of spinning tops and want to experience the thrill of Beyblade Burst, you should download the app today and start playing. You can also try Beyblade Burst Mod Apk if you want to have more fun and advantages. However, you should also be careful and responsible when using a modded version of the game. Remember, the most important thing is to enjoy the game and have fun with your Beys. Let it rip!</p>
-<h3>Five frequently asked questions and answers about Beyblade Burst</h3>
-<p>Here are some of the most common questions and answers about Beyblade Burst:</p>
-<ol>
-<li>Q: Is Beyblade Burst safe for kids?</li>
-<li>A: Yes, Beyblade Burst is safe for kids. It is rated E for Everyone by the ESRB and 4+ by the App Store. It does not contain any violence, gore, profanity, or inappropriate content. However, parents should still supervise their kids when they play the game online or use a modded version of the game.</li>
-<li>Q: How can I scan my physical Beys into the app?</li>
-<li>A: You can scan your physical Beys into the app by using your device's camera. You need to find the QR code on your Bey's energy layer or on the packaging and point your camera at it. The app will recognize the code and add the Bey to your collection.</li>
-<li>Q: How can I get more coins and gems in the game?</li>
-<li>A: You can get more coins and gems in the game by winning battles, completing challenges, watching ads, or buying them with real money. You can also use Beyblade Burst Mod Apk to get unlimited coins and gems for free.</li>
-<li>Q: How can I contact the developers or report a problem with the game?</li>
-<li>A: You can contact the developers or report a problem with the game by using the feedback option in the app's settings menu. You can also visit their website or social media pages for more information and support.</li>
-<li>Q: How can I update the game or get the latest version of Beyblade Burst Mod Apk?</li>
-<li>A: You can update the game or get the latest version of Beyblade Burst Mod Apk by checking for updates in the Google Play Store or App Store. You can also visit the website where you downloaded Beyblade Burst Mod Apk and look for new updates.</li>
-</ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/2hack2furious/anonymizer/modules.py
DELETED
@@ -1,160 +0,0 @@
-from itertools import combinations
-import numpy as np
-import pandas as pd
-
-SUPPORTED_TYPES = [".csv", ".json", ".xlsx"]
-
-def hello_world(): return "hello world!"
-
-def load_file(file):
-    """
-    Takes a file given by Streamlit and loads into a DataFrame.
-    Returns a DataFrame, metadata, and result string.
-
-    @param file: File uploaded into streamlit.
-    @rtype: tuple
-    @return: A tuple of format (pd.DataFrame, (str, str), str).
-    """
-    df = None
-
-    if file is None: return df, ("", ""), ""
-
-    filename = file.name
-    extension = filename.split(".")[-1]
-    metadata = (filename, extension)
-
-    import_functions = {
-        "csv": pd.read_csv,
-        "json": pd.read_json,
-        "xlsx": pd.read_excel
-    }
-    try:
-        reader = import_functions.get(extension, None)
-        if reader is None:
-            return df, metadata, f"Error: Invalid extension '{extension}'"
-        df = reader(file)
-        rows, columns = df.shape
-        return df, metadata, f"File '{filename}' loaded successfully.\nFound {rows} rows, {columns} columns."
-    except Exception as error:
-        return df, metadata, f"Error: Unable to read file '{filename}' ({type(error)}: {error})"
-
-def data_cleaner(df, drop_missing=False, remove_duplicates=True):
-    """
-    Takes a DataFrame and removes empty and duplicate entries.
-
-    @type df: pd.DataFrame
-    @param df: A DataFrame of uncleaned data.
-    @type drop_missing: bool
-    @param drop_missing: Determines if rows with any missing values are dropped ("any"), or just empty rows ("all").
-    @type remove_duplicates: bool
-    @param remove_duplicates: Determines if duplicate rows are removed.
-    @rtype: pd.DataFrame
-    @return: A DataFrame with requested cleaning applied
-    """
-    df = df.dropna(how="any" if drop_missing else "all")
-    if remove_duplicates: df = df.drop_duplicates()
-    return df
-
-def column_combinations(df, k):
-    return list(combinations(df.columns, k))
-
-def k_redact(df, k):
-    kwise_combinations = column_combinations(df, k)
-
-    for columns in kwise_combinations:
-        df_search = df.loc[:, columns]
-        sensitive_data = [
-            (columns, key)
-            for key, value
-            in df_search.value_counts().to_dict().items()
-            if value == 1
-        ]
-        if not sensitive_data: continue
-        for columns, values in sensitive_data:
-            for column, value in zip(columns, values):
-                df_search = df_search.loc[df[column] == value]
-            if df_search.shape[0] == 1:
-                for column in columns:
-                    df_search[column] = None
-
-    return df
-
-def sensitive_values(series, sensitivity_minimum):
-    return {key
-            for key, value
-            in series.value_counts().to_dict().items()
-            if value < sensitivity_minimum
-    }
-
-def drop_sensitive(series, sensitivity_minimum):
-    series.loc[series.isin(sensitive_values(series, sensitivity_minimum))] = None
-
-def bin_numeric(df, to_process, bin_size, sensitivity_minimum):
-    processed = set()
-    rows, _ = df.shape
-    num_bins = rows//bin_size
-    for column_name in to_process:
-        column = df[column_name]
-        if column.dtype.kind not in "biufc": continue
-        array = sorted(np.array(column))
-        array_min, array_max = array[0], array[-1]
-        splits = [array_min] + list(np.array_split(array, num_bins)) + [array_max]
-        bins = [
-            (np.min(split), np.max(split))
-            for split
-            in (splits[i] for i in range(num_bins))
-        ]
-        result = [None] * rows
-        for bin_min, bin_max in bins:
-            for i, value in enumerate(column):
-                if bin_min <= value <= bin_max:
-                    result[i] = (bin_min, bin_max)
-        df[column_name] = result
-        drop_sensitive(df[column_name], sensitivity_minimum)
-        processed.add(column_name)
-    return df, to_process - processed
-
-def find_categorical(df, to_process, max_categorical_size, sensitivity_minimum):
-    processed = set()
-    for column_name in to_process:
-        column = df[column_name]
-        if column.nunique() <= max_categorical_size:
-            drop_sensitive(column, sensitivity_minimum)
-            processed.add(column_name)
-    return df, to_process - processed
-
-def redact(df, to_process, sensitivity_minimum):
-    processed = set()
-    for column_name in to_process:
-        column = df[column_name]
-
-        is_object = column.dtype == object
-        if not is_object: continue
-
-        # Check if any unique values exist, and redact them
-        drop_sensitive(column, sensitivity_minimum)
-        processed.add(column_name)
-
-    return df, to_process - processed
-
-def anonymize(df, max_categorical_size, bin_size, sensitivity_minimum):
-    to_process = set(df.columns)
-    df, to_process = redact(df, to_process, sensitivity_minimum)
-    df, to_process = find_categorical(df, to_process, max_categorical_size, sensitivity_minimum)
-    df, to_process = bin_numeric(df, to_process, bin_size, sensitivity_minimum)
-    return df, to_process
-
-def data_anonymizer(df, k, max_categorical_size, bin_size, sensitivity_minimum):
-    start_dtypes = df.dtypes.to_dict()
-    df, unprocessed = anonymize(df, max_categorical_size, bin_size, sensitivity_minimum)
-    df = k_redact(df, k)
-    end_dtypes = df.dtypes.to_dict()
-
-    # Type correction
-    for column in df.columns:
-        start_type, end_type = start_dtypes[column], end_dtypes[column]
-        if start_type == end_type: continue
-        if start_type.kind == "i" and end_type.kind == "f":
-            df[column] = df[column].astype("Int64")
-
-    return df, unprocessed
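The deleted modules.py above chains a small anonymization pipeline: clean the frame, redact rare values in object columns, treat low-cardinality columns as categoricals, bin numeric columns, then run k_redact over column combinations. As a hedged, hypothetical usage sketch only — the CSV name and every threshold below are invented, and it assumes the file is importable as `modules`:

    import pandas as pd
    from modules import data_cleaner, data_anonymizer

    # Any tabular dataset; the filename here is illustrative.
    df = pd.read_csv("survey_responses.csv")
    df = data_cleaner(df, drop_missing=False, remove_duplicates=True)

    anonymized, unprocessed = data_anonymizer(
        df,
        k=2,                      # size of column combinations scanned by k_redact
        max_categorical_size=20,  # columns with <= 20 unique values stay categorical
        bin_size=50,              # roughly 50 rows per numeric bin (rows // bin_size bins)
        sensitivity_minimum=5,    # values seen fewer than 5 times are nulled out
    )
    print(anonymized.head())
    print("columns left untouched:", unprocessed)

The threshold values are arbitrary; in the Space they would come from the Streamlit UI rather than being hard-coded.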
spaces/52Hz/SRMNet_thesis/model_arch/SRMNet_SWFF.py
DELETED
@@ -1,265 +0,0 @@
-import torch
-import torch.nn as nn
-from WT import DWT, IWT
-##---------- Basic Layers ----------
-def conv3x3(in_chn, out_chn, bias=True):
-    layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
-    return layer
-
-def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
-    return nn.Conv2d(
-        in_channels, out_channels, kernel_size,
-        padding=(kernel_size // 2), bias=bias, stride=stride)
-
-def bili_resize(factor):
-    return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
-
-##---------- Basic Blocks ----------
-class UNetConvBlock(nn.Module):
-    def __init__(self, in_size, out_size, downsample):
-        super(UNetConvBlock, self).__init__()
-        self.downsample = downsample
-        self.block = SK_RDB(in_channels=in_size, growth_rate=out_size, num_layers=3)
-        if downsample:
-            self.downsample = PS_down(out_size, out_size, downscale=2)
-
-    def forward(self, x):
-        out = self.block(x)
-        if self.downsample:
-            out_down = self.downsample(out)
-            return out_down, out
-        else:
-            return out
-
-class UNetUpBlock(nn.Module):
-    def __init__(self, in_size, out_size):
-        super(UNetUpBlock, self).__init__()
-        # self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
-        self.up = PS_up(in_size, out_size, upscale=2)
-        self.conv_block = UNetConvBlock(in_size, out_size, False)
-
-    def forward(self, x, bridge):
-        up = self.up(x)
-        out = torch.cat([up, bridge], dim=1)
-        out = self.conv_block(out)
-        return out
-
-##---------- Resizing Modules (Pixel(Un)Shuffle) ----------
-class PS_down(nn.Module):
-    def __init__(self, in_size, out_size, downscale):
-        super(PS_down, self).__init__()
-        self.UnPS = nn.PixelUnshuffle(downscale)
-        self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0)
-
-    def forward(self, x):
-        x = self.UnPS(x)  # h/2, w/2, 4*c
-        x = self.conv1(x)
-        return x
-
-class PS_up(nn.Module):
-    def __init__(self, in_size, out_size, upscale):
-        super(PS_up, self).__init__()
-
-        self.PS = nn.PixelShuffle(upscale)
-        self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0)
-
-    def forward(self, x):
-        x = self.PS(x)  # h/2, w/2, 4*c
-        x = self.conv1(x)
-        return x
-
-##---------- Selective Kernel Feature Fusion (SKFF) ----------
-class SKFF(nn.Module):
-    def __init__(self, in_channels, height=3, reduction=8, bias=False):
-        super(SKFF, self).__init__()
-
-        self.height = height
-        d = max(int(in_channels / reduction), 4)
-
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
-
-        self.fcs = nn.ModuleList([])
-        for i in range(self.height):
-            self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias))
-
-        self.softmax = nn.Softmax(dim=1)
-
-    def forward(self, inp_feats):
-        batch_size, n_feats, H, W = inp_feats[1].shape
-
-        inp_feats = torch.cat(inp_feats, dim=1)
-        inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3])
-
-        feats_U = torch.sum(inp_feats, dim=1)
-        feats_S = self.avg_pool(feats_U)
-        feats_Z = self.conv_du(feats_S)
-
-        attention_vectors = [fc(feats_Z) for fc in self.fcs]
-        attention_vectors = torch.cat(attention_vectors, dim=1)
-        attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1)
-
-        attention_vectors = self.softmax(attention_vectors)
-        feats_V = torch.sum(inp_feats * attention_vectors, dim=1)
-
-        return feats_V
-
-##---------- Selective Wavelet Feature Fusion (SWFF) ----------
-class SWFF(nn.Module):
-    def __init__(self, in_channels, height=3, reduction=8, bias=False):
-        super(SWFF, self).__init__()
-
-        self.height = height
-        d = max(int(in_channels / reduction), 4)
-
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.wav_conv_du = nn.Sequential(nn.Conv2d(4*in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
-        self.dwt = DWT()
-        self.iwt = IWT()
-        self.fcs = nn.ModuleList([])
-        for i in range(self.height):
-            self.fcs.append(nn.Conv2d(d, in_channels*4, kernel_size=1, stride=1, bias=bias))
-
-        self.softmax = nn.Softmax(dim=1)
-
-    def forward(self, inp_feats):
-        batch_size, n_feats, H, W = inp_feats[0].shape
-        wavelet_rep = [(self.dwt(each)) for each in inp_feats]
-
-        wav_inp_feats = torch.cat(wavelet_rep, dim=1)
-        wav_inp_feats = wav_inp_feats.view(batch_size, self.height, n_feats*4, wav_inp_feats.shape[2], wav_inp_feats.shape[3])
-
-        inp_feats = torch.cat(inp_feats, dim=1)
-        inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3])
-
-        feats_U = torch.sum(wav_inp_feats, dim=1)
-        feats_S = self.avg_pool(feats_U)
-        feats_Z = self.wav_conv_du(feats_S)
-
-        attention_vectors = [self.avg_pool(self.iwt(fc(feats_Z))) for fc in self.fcs]
-        attention_vectors = torch.cat(attention_vectors, dim=1)
-        attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1)
-
-        attention_vectors = self.softmax(attention_vectors)
-        feats_V = torch.sum(inp_feats * attention_vectors, dim=1)
-
-        return feats_V
-
-##---------- Dense Block ----------
-class DenseLayer(nn.Module):
-    def __init__(self, in_channels, out_channels, I):
-        super(DenseLayer, self).__init__()
-        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2)
-        self.relu = nn.ReLU(inplace=True)
-        self.sk = SKFF(out_channels, height=2, reduction=8, bias=False)
-
-    def forward(self, x):
-        x1 = self.relu(self.conv(x))
-        # output = torch.cat([x, x1], 1)  # -> RDB
-        output = self.sk((x, x1))
-        return output
-
-##---------- Selective Kernel Residual Dense Block (SK-RDB) ----------
-class SK_RDB(nn.Module):
-    def __init__(self, in_channels, growth_rate, num_layers):
-        super(SK_RDB, self).__init__()
-        self.identity = nn.Conv2d(in_channels, growth_rate, 1, 1, 0)
-        self.layers = nn.Sequential(
-            *[DenseLayer(in_channels, in_channels, I=i) for i in range(num_layers)]
-        )
-        self.lff = nn.Conv2d(in_channels, growth_rate, kernel_size=1)
-
-    def forward(self, x):
-        res = self.identity(x)
-        x = self.layers(x)
-        x = self.lff(x)
-        return res + x
-
-##---------- testNet ----------
-class SRMNet_SWFF(nn.Module):
-    def __init__(self, in_chn=3, wf=96, depth=4):
-        super(SRMNet_SWFF, self).__init__()
-        self.depth = depth
-        self.down_path = nn.ModuleList()
-        self.bili_down = bili_resize(0.5)
-        self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1)
-
-        # encoder of UNet-64
-        prev_channels = 0
-        for i in range(depth):  # 0,1,2,3
-            downsample = True if (i + 1) < depth else False
-            self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample))
-            prev_channels = (2 ** i) * wf
-
-        # decoder of UNet-64
-        self.up_path = nn.ModuleList()
-        self.skip_conv = nn.ModuleList()
-        self.conv_up = nn.ModuleList()
-        self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1)
-        self.bottom_up = bili_resize(2 ** (depth-1))
-
-        for i in reversed(range(depth - 1)):
-            self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf))
-            self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1))
-            self.conv_up.append(nn.Sequential(*[bili_resize(2 ** i), nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1)]))
-            # *[nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1), bili_resize(2 ** i)])
-            prev_channels = (2 ** i) * wf
-
-        self.final_ff = SKFF(in_channels=wf, height=depth)
-        self.last = conv3x3(prev_channels, in_chn, bias=True)
-
|
211 |
-
def forward(self, x):
|
212 |
-
img = x
|
213 |
-
scale_img = img
|
214 |
-
|
215 |
-
##### shallow conv #####
|
216 |
-
x1 = self.conv_01(img)
|
217 |
-
encs = []
|
218 |
-
######## UNet-64 ########
|
219 |
-
# Down-path (Encoder)
|
220 |
-
for i, down in enumerate(self.down_path):
|
221 |
-
if i == 0: # top layer
|
222 |
-
x1, x1_up = down(x1)
|
223 |
-
encs.append(x1_up)
|
224 |
-
elif (i + 1) < self.depth: # middle layer
|
225 |
-
scale_img = self.bili_down(scale_img)
|
226 |
-
left_bar = self.conv_01(scale_img)
|
227 |
-
x1 = torch.cat([x1, left_bar], dim=1)
|
228 |
-
x1, x1_up = down(x1)
|
229 |
-
encs.append(x1_up)
|
230 |
-
else: # lowest layer
|
231 |
-
scale_img = self.bili_down(scale_img)
|
232 |
-
left_bar = self.conv_01(scale_img)
|
233 |
-
x1 = torch.cat([x1, left_bar], dim=1)
|
234 |
-
x1 = down(x1)
|
235 |
-
|
236 |
-
# Up-path (Decoder)
|
237 |
-
ms_result = [self.bottom_up(self.bottom_conv(x1))]
|
238 |
-
for i, up in enumerate(self.up_path):
|
239 |
-
x1 = up(x1, self.skip_conv[i](encs[-i - 1]))
|
240 |
-
ms_result.append(self.conv_up[i](x1))
|
241 |
-
|
242 |
-
# Multi-scale selective feature fusion
|
243 |
-
msff_result = self.final_ff(ms_result)
|
244 |
-
|
245 |
-
##### Reconstruct #####
|
246 |
-
out_1 = self.last(msff_result) + img
|
247 |
-
|
248 |
-
return out_1
|
249 |
-
|
250 |
-
if __name__ == "__main__":
|
251 |
-
from thop import profile
|
252 |
-
input = torch.ones(1, 3, 256, 256, dtype=torch.float, requires_grad=False)
|
253 |
-
|
254 |
-
model = SRMNet_SWFF(in_chn=3, wf=96, depth=4)
|
255 |
-
out = model(input)
|
256 |
-
flops, params = profile(model, inputs=(input,))
|
257 |
-
|
258 |
-
# RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3)
|
259 |
-
# print(RDBlayer)
|
260 |
-
# out = RDBlayer(input)
|
261 |
-
# flops, params = profile(RDBlayer, inputs=(input,))
|
262 |
-
print('input shape:', input.shape)
|
263 |
-
print('parameters:', params/1e6)
|
264 |
-
print('flops', flops/1e9)
|
265 |
-
print('output shape', out.shape)
|
spaces/AE-NV/sentiment-productreview/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Sentiment Productreview
emoji: 😻
colorFrom: yellow
colorTo: gray
sdk: gradio
sdk_version: 3.18.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/AIConsultant/MusicGen/tests/modules/test_transformer.py
DELETED
@@ -1,253 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from itertools import product

import pytest
import torch

from audiocraft.modules.transformer import (
    StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend)


def test_transformer_causal_streaming():
    torch.manual_seed(1234)

    for context, custom in product([None, 10], [False, True]):
        # Test that causality and receptive fields are properly handled.
        # looking at the gradients
        tr = StreamingTransformer(
            16, 4, 1 if context else 2,
            causal=True, past_context=context, custom=custom,
            dropout=0.)
        steps = 20
        for k in [0, 10, 15, 19]:
            x = torch.randn(4, steps, 16, requires_grad=True)
            y = tr(x)
            y[:, k].abs().sum().backward()
            if k + 1 < steps:
                assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm()
            assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm()
            if context is not None and k > context:
                limit = k - context - 1
                assert torch.allclose(x.grad[:, :limit],
                                      torch.tensor(0.)), x.grad[:, :limit].norm()

        # Now check that streaming gives the same result at batch eval.
        x = torch.randn(4, steps, 16)
        y = tr(x)
        ys = []
        with tr.streaming():
            for k in range(steps):
                chunk = x[:, k:k + 1, :]
                ys.append(tr(chunk))
        y_stream = torch.cat(ys, dim=1)
        delta = torch.norm(y_stream - y) / torch.norm(y)
        assert delta < 1e-6, delta


def test_transformer_vs_pytorch():
    torch.manual_seed(1234)
    # Check that in the non causal setting, we get the same result as
    # PyTorch Transformer encoder.
    for custom in [False, True]:
        tr = StreamingTransformer(
            16, 4, 2,
            causal=False, custom=custom, dropout=0., positional_scale=0.)
        layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True)
        tr_ref = torch.nn.TransformerEncoder(layer, 2)
        tr.load_state_dict(tr_ref.state_dict())

        x = torch.randn(4, 20, 16)
        y = tr(x)
        y2 = tr_ref(x)
        delta = torch.norm(y2 - y) / torch.norm(y)
        assert delta < 1e-6, delta


def test_streaming_api():
    tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.)
    tr.eval()
    steps = 12
    x = torch.randn(1, steps, 16)

    with torch.no_grad():
        with tr.streaming():
            _ = tr(x[:, :1])
            state = {k: v.clone() for k, v in tr.get_streaming_state().items()}
            y = tr(x[:, 1:2])
            tr.set_streaming_state(state)
            y2 = tr(x[:, 1:2])
            assert torch.allclose(y, y2), (y - y2).norm()
            assert tr.flush() is None


def test_memory_efficient():
    for backend in ['torch', 'xformers']:
        torch.manual_seed(1234)
        set_efficient_attention_backend(backend)

        tr = StreamingTransformer(
            16, 4, 2, custom=True, dropout=0., layer_scale=0.1)
        tr_mem_efficient = StreamingTransformer(
            16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1)
        tr_mem_efficient.load_state_dict(tr.state_dict())
        tr.eval()
        steps = 12
        x = torch.randn(3, steps, 16)

        with torch.no_grad():
            y = tr(x)
            y2 = tr_mem_efficient(x)
            assert torch.allclose(y, y2), ((y - y2).norm(), backend)


def test_attention_as_float32():
    torch.manual_seed(1234)
    cases = [
        {'custom': True},
        {'custom': False},
    ]
    for case in cases:
        tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case)
        tr_float32 = StreamingTransformer(
            16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case)
        if not case['custom']:
            # we are not using autocast here because it doesn't really
            # work as expected on CPU, so we have to manually cast the weights of the MHA.
            for layer in tr_float32.layers:
                layer.self_attn.mha.to(torch.float32)
        tr_float32.load_state_dict(tr.state_dict())
        steps = 12
        x = torch.randn(3, steps, 16, dtype=torch.bfloat16)

        with torch.no_grad():
            y = tr(x)
            y2 = tr_float32(x)
            assert not torch.allclose(y, y2), (y - y2).norm()


@torch.no_grad()
def test_streaming_memory_efficient():
    for backend in ['torch', 'xformers']:
        torch.manual_seed(1234)
        set_efficient_attention_backend(backend)
        tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True)
        tr_mem_efficient = StreamingTransformer(
            16, 4, 2, dropout=0., memory_efficient=True, causal=True)
        tr.load_state_dict(tr_mem_efficient.state_dict())
        tr.eval()
        tr_mem_efficient.eval()
        steps = 12
        x = torch.randn(3, steps, 16)

        ref = tr(x)

        with tr_mem_efficient.streaming():
            outs = []
            # frame_sizes = [2] + [1] * (steps - 2)
            frame_sizes = [1] * steps

            for frame_size in frame_sizes:
                frame = x[:, :frame_size]
                x = x[:, frame_size:]
                outs.append(tr_mem_efficient(frame))

        out = torch.cat(outs, dim=1)
        delta = torch.norm(out - ref) / torch.norm(out)
        assert delta < 1e-6, delta


def test_cross_attention():
    torch.manual_seed(1234)
    for norm_first in [True, False]:
        m = StreamingTransformer(
            16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True)
        m_cross = StreamingTransformer(
            16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True)
        m_cross.load_state_dict(m.state_dict(), strict=False)
        x = torch.randn(2, 5, 16)
        cross_x = torch.randn(2, 3, 16)
        y_ref = m(x)
        y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x)
        # With norm_first, the two should be exactly the same,
        # but with norm_first=False, we get 2 normalization in a row
        # and the epsilon value leads to a tiny change.
        atol = 0. if norm_first else 1e-6
        print((y_ref - y_cross_zero).norm() / y_ref.norm())
        assert torch.allclose(y_ref, y_cross_zero, atol=atol)

        # We now expect a difference even with a generous atol of 1e-2.
        y_cross = m_cross(x, cross_attention_src=cross_x)
        assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2)

        with pytest.raises(AssertionError):
            _ = m_cross(x)
            _ = m(x, cross_attention_src=cross_x)


def test_cross_attention_compat():
    torch.manual_seed(1234)
    num_heads = 2
    dim = num_heads * 64
    with pytest.raises(AssertionError):
        StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True)

    cross_attn = StreamingMultiheadAttention(
        dim, num_heads, dropout=0, cross_attention=True, custom=True)
    ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True)

    # We can load the regular attention state dict
    # so we have compat when loading old checkpoints.
    cross_attn.load_state_dict(ref_attn.state_dict())

    queries = torch.randn(3, 7, dim)
    keys = torch.randn(3, 9, dim)
    values = torch.randn(3, 9, dim)

    y = cross_attn(queries, keys, values)[0]
    y_ref = ref_attn(queries, keys, values)[0]
    assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm()

    # Now let's check that streaming is working properly.
    with cross_attn.streaming():
        ys = []
        for step in range(queries.shape[1]):
            ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0])
        y_streaming = torch.cat(ys, dim=1)
        assert torch.allclose(y_streaming, y, atol=1e-7)


def test_repeat_kv():
    torch.manual_seed(1234)
    num_heads = 8
    kv_repeat = 4
    dim = num_heads * 64
    with pytest.raises(AssertionError):
        mha = StreamingMultiheadAttention(
            dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True)
        mha = StreamingMultiheadAttention(
            dim, num_heads, causal=True, kv_repeat=kv_repeat)
    mha = StreamingMultiheadAttention(
        dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True)
    x = torch.randn(4, 18, dim)
    y = mha(x, x, x)[0]
    assert x.shape == y.shape


def test_qk_layer_norm():
    torch.manual_seed(1234)
    tr = StreamingTransformer(
        16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False)
    steps = 12
    x = torch.randn(3, steps, 16)
    y = tr(x)

    tr = StreamingTransformer(
        16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True)
    z = torch.randn(3, 21, 16)
    y = tr(x, cross_attention_src=z)
    assert y.shape == x.shape

spaces/AIZ2H/06-Streamlit-NLP-Image-Semantic-Search-Images/app.py
DELETED
@@ -1,185 +0,0 @@
from html import escape
import re
import streamlit as st
import pandas as pd, numpy as np
from transformers import CLIPProcessor, CLIPModel
from st_clickable_images import clickable_images


@st.cache(
    show_spinner=False,
    hash_funcs={
        CLIPModel: lambda _: None,
        CLIPProcessor: lambda _: None,
        dict: lambda _: None,
    },
)
def load():
    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    df = {0: pd.read_csv("data.csv"), 1: pd.read_csv("data2.csv")}
    embeddings = {0: np.load("embeddings.npy"), 1: np.load("embeddings2.npy")}
    for k in [0, 1]:
        embeddings[k] = embeddings[k] / np.linalg.norm(
            embeddings[k], axis=1, keepdims=True
        )
    return model, processor, df, embeddings


model, processor, df, embeddings = load()
source = {0: "\nSource: Unsplash", 1: "\nSource: The Movie Database (TMDB)"}


def compute_text_embeddings(list_of_strings):
    inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
    result = model.get_text_features(**inputs).detach().numpy()
    return result / np.linalg.norm(result, axis=1, keepdims=True)


def image_search(query, corpus, n_results=24):
    positive_embeddings = None

    def concatenate_embeddings(e1, e2):
        if e1 is None:
            return e2
        else:
            return np.concatenate((e1, e2), axis=0)

    splitted_query = query.split("EXCLUDING ")
    dot_product = 0
    k = 0 if corpus == "Unsplash" else 1
    if len(splitted_query[0]) > 0:
        positive_queries = splitted_query[0].split(";")
        for positive_query in positive_queries:
            match = re.match(r"\[(Movies|Unsplash):(\d{1,5})\](.*)", positive_query)
            if match:
                corpus2, idx, remainder = match.groups()
                idx, remainder = int(idx), remainder.strip()
                k2 = 0 if corpus2 == "Unsplash" else 1
                positive_embeddings = concatenate_embeddings(
                    positive_embeddings, embeddings[k2][idx : idx + 1, :]
                )
                if len(remainder) > 0:
                    positive_embeddings = concatenate_embeddings(
                        positive_embeddings, compute_text_embeddings([remainder])
                    )
            else:
                positive_embeddings = concatenate_embeddings(
                    positive_embeddings, compute_text_embeddings([positive_query])
                )
        dot_product = embeddings[k] @ positive_embeddings.T
        dot_product = dot_product - np.median(dot_product, axis=0)
        dot_product = dot_product / np.max(dot_product, axis=0, keepdims=True)
        dot_product = np.min(dot_product, axis=1)

    if len(splitted_query) > 1:
        negative_queries = (" ".join(splitted_query[1:])).split(";")
        negative_embeddings = compute_text_embeddings(negative_queries)
        dot_product2 = embeddings[k] @ negative_embeddings.T
        dot_product2 = dot_product2 - np.median(dot_product2, axis=0)
        dot_product2 = dot_product2 / np.max(dot_product2, axis=0, keepdims=True)
        dot_product -= np.max(np.maximum(dot_product2, 0), axis=1)

    results = np.argsort(dot_product)[-1 : -n_results - 1 : -1]
    return [
        (
            df[k].iloc[i]["path"],
            df[k].iloc[i]["tooltip"] + source[k],
            i,
        )
        for i in results
    ]


description = """
# Semantic image search
**Enter your query and hit enter**
"""

howto = """
- Click an image to find similar images
- Use "**;**" to combine multiple queries
- Use "**EXCLUDING**" to exclude a query
"""


def main():
    st.markdown(
        """
        <style>
        .block-container{
            max-width: 1200px;
        }
        div.row-widget.stRadio > div{
            flex-direction:row;
            display: flex;
            justify-content: center;
        }
        div.row-widget.stRadio > div > label{
            margin-left: 5px;
            margin-right: 5px;
        }
        section.main>div:first-child {
            padding-top: 0px;
        }
        section:not(.main)>div:first-child {
            padding-top: 30px;
        }
        div.reportview-container > section:first-child{
            max-width: 320px;
        }
        #MainMenu {
            visibility: hidden;
        }
        footer {
            visibility: hidden;
        }
        </style>""",
        unsafe_allow_html=True,
    )
    st.sidebar.markdown(description)
    with st.sidebar.expander("Advanced use"):
        st.markdown(howto)

    st.sidebar.markdown("Unsplash has categories that match: backgrounds, photos, nature, iphone, etc")
    st.sidebar.markdown("Unsplash images contain animals, apps, events, feelings, food, travel, nature, people, religion, sports, things, stock")
    st.sidebar.markdown("Unsplash things include flag, tree, clock, money, tattoo, arrow, book, car, fireworks, ghost, health, kiss, dance, balloon, crown, eye, house, music, airplane, lighthouse, typewriter, toys")
    st.sidebar.markdown("Unsplash feelings include funny, heart, love, cool, congratulations, love, scary, cute, friendship, inspirational, hug, sad, cursed, beautiful, crazy, respect, transformation, peaceful, happy")
    st.sidebar.markdown("Unsplash people contain baby, life, women, family, girls, pregnancy, society, old people, musician, attractive, bohemian")
    st.sidebar.markdown("ImageNet queries include: photo of, photo of many, sculpture of, rendering of, graffiti of, tattoo of, embroidered, drawing of, plastic, black and white, painting, video game, doodle, origami, sketch, etc")

    _, c, _ = st.columns((1, 3, 1))
    if "query" in st.session_state:
        query = c.text_input("", value=st.session_state["query"])
    else:
        query = c.text_input("", value="lighthouse")
    corpus = st.radio("", ["Unsplash"])
    # corpus = st.radio("", ["Unsplash", "Movies"])
    if len(query) > 0:
        results = image_search(query, corpus)
        clicked = clickable_images(
            [result[0] for result in results],
            titles=[result[1] for result in results],
            div_style={
                "display": "flex",
                "justify-content": "center",
                "flex-wrap": "wrap",
            },
            img_style={"margin": "2px", "height": "200px"},
        )
        if clicked >= 0:
            change_query = False
            if "last_clicked" not in st.session_state:
                change_query = True
            else:
                if clicked != st.session_state["last_clicked"]:
                    change_query = True
            if change_query:
                st.session_state["query"] = f"[{corpus}:{results[clicked][2]}]"
                st.experimental_rerun()


if __name__ == "__main__":
    main()

spaces/AIZero2Hero4Health/7-ClinicalTerminologyUIUX-GR/app.py
DELETED
@@ -1,327 +0,0 @@
import pandas_profiling as pp
import pandas as pd
import tensorflow as tf

from datasets import load_dataset
from tensorflow.python.framework import tensor_shape
from huggingface_hub import create_repo, upload_file  # used by profile_dataset below

# LOINC
datasetLOINC = load_dataset("awacke1/LOINC-CodeSet-Value-Description.csv", split="train")
# SNOMED:
datasetSNOMED = load_dataset("awacke1/SNOMED-CT-Code-Value-Semantic-Set.csv", split="train")
# eCQM:
dataseteCQM = load_dataset("awacke1/eCQM-Code-Value-Semantic-Set.csv", split="train")

# map using autotokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
dataset = datasetLOINC.map(lambda examples: tokenizer(examples["Description"]), batched=True)
JSONOBJ2 = dataset[0]
print(JSONOBJ2)

sw = datasetLOINC.filter(lambda example: example["Description"].startswith("Allergy"))
len(sw)
print(sw)
print(datasetLOINC)
print(datasetSNOMED)
print(dataseteCQM)

# play with some dataset tools before the show:

# print(start_with_ar["Description"])

# ---
# Main Stage - Begin!
# ---

import os
import json
import numpy as np
import gradio as gr

HF_TOKEN = os.environ.get("HF_TOKEN")
CHOICES = ["SNOMED", "LOINC", "CQM"]
JSONOBJ = """{"items":{"item":[{"id": "0001","type": null,"is_good": false,"ppu": 0.55,"batters":{"batter":[{ "id": "1001", "type": "Regular" },{ "id": "1002", "type": "Chocolate" },{ "id": "1003", "type": "Blueberry" },{ "id": "1004", "type": "Devil's Food" }]},"topping":[{ "id": "5001", "type": "None" },{ "id": "5002", "type": "Glazed" },{ "id": "5005", "type": "Sugar" },{ "id": "5007", "type": "Powdered Sugar" },{ "id": "5006", "type": "Chocolate with Sprinkles" },{ "id": "5003", "type": "Chocolate" },{ "id": "5004", "type": "Maple" }]}]}}"""


def profile_dataset(dataset=datasetSNOMED, username="awacke1", token=HF_TOKEN, dataset_name="awacke1/SNOMED-CT-Code-Value-Semantic-Set.csv"):
    df = pd.read_csv(dataset.Description)
    if len(df.columns) <= 15:
        profile = pp.ProfileReport(df, title=f"{dataset_name} Report")
    else:
        profile = pp.ProfileReport(df, title=f"{dataset_name} Report", minimal=True)

    repo_url = create_repo(f"{username}/{dataset_name}", repo_type="space", token=token, space_sdk="static", private=False)

    profile.to_file("./index.html")

    upload_file(path_or_fileobj="./index.html", path_in_repo="index.html", repo_id=f"{username}/{dataset_name}", repo_type="space", token=token)
    readme = f"---\ntitle: {dataset_name}\nemoji: ✨\ncolorFrom: green\ncolorTo: red\nsdk: static\npinned: false\ntags:\n- dataset-report\n---"
    with open("README.md", "w+") as f:
        f.write(readme)
    upload_file(path_or_fileobj="./README.md", path_in_repo="README.md", repo_id=f"{username}/{dataset_name}", repo_type="space", token=token)
    return f"Your dataset report will be ready at {repo_url}"

# def lowercase_title(example):
#     return {"Description": example[title].lower()}

# demonstrate map function of dataset
# JSONOBJ_MAP = datasetLOINC.map(lowercase_title)
# JSONOBJ_MAP = datasetLOINC.filter(lambda example: example["Description"].startswith("Mental health"))


def concatenate_text(examples):
    return {
        "text": examples["Code"]
        + " \n "
        + examples["Description"]
        + " \n "
        + examples["Purpose: Clinical Focus"]
    }

def cls_pooling(model_output):
    return model_output.last_hidden_state[:, 0]

def get_embeddings(text_list):
    encoded_input = tokenizer(
        text_list, padding=True, truncation=True, return_tensors="tf"
    )
    encoded_input = {k: v for k, v in encoded_input.items()}
    model_output = model(**encoded_input)
    return cls_pooling(model_output)


def fn( text1, text2, num, slider1, slider2, single_checkbox, checkboxes, radio, dropdown, im1, im2, im3, im4,
        video, audio1, audio2, file, df1, df2,):
    # def fn( text1, text2, single_checkbox, checkboxes, radio, im4, file, df1, df2,):

    searchTerm = text1
    searchTermSentence = text2

    start_with_searchTermLOINC = datasetLOINC.filter(lambda example: example["Description"].startswith('Allergy'))  # Allergy

    # FAISS
    columns = start_with_searchTermLOINC.column_names
    columns_to_keep = ["Value Set Name", "Code", "Description", "Purpose: Clinical Focus", "Code System OID"]
    columns_to_remove = set(columns_to_keep).symmetric_difference(columns)
    start_with_searchTermLOINC = start_with_searchTermLOINC.remove_columns(columns_to_remove)
    start_with_searchTermLOINC
    start_with_searchTermLOINC.set_format("pandas")
    df = start_with_searchTermLOINC[:]

    df["Purpose: Clinical Focus"][0]

    df4 = df.explode("Purpose: Clinical Focus", ignore_index=True)
    df4.head(4)

    from datasets import Dataset
    clinical_dataset = Dataset.from_pandas(df4)
    clinical_dataset

    clinical_dataset = clinical_dataset.map(lambda x: {"c_length": len(x["Description"].split())})

    clinical_dataset = clinical_dataset.filter(lambda x: x["c_length"] > 15)
    clinical_dataset

    clinical_dataset = clinical_dataset.map(concatenate_text)
    # embedding = get_embeddings(clinical_dataset["text"][0])
    # embedding.shape

    from transformers import AutoTokenizer, TFAutoModel

    model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    model = TFAutoModel.from_pretrained(model_ckpt, from_pt=True)

    # TensorShape([1, 768])
    tf.shape([1, 768])

    embeddings_dataset = clinical_dataset.map(
        lambda x: {"embeddings": get_embeddings(x["text"]).numpy()[0]})

    # embeddings_dataset.add_faiss_index(column="embeddings")

    # question = "How can I load a dataset offline?"
    # question_embedding = get_embeddings([question]).numpy()
    # question_embedding.shape

    # scores, samples = embeddings_dataset.get_nearest_examples("embeddings", question_embedding, k=5)

    # import pandas as pd

    # samples_df = pd.DataFrame.from_dict(samples)
    # samples_df["scores"] = scores
    # samples_df.sort_values("scores", ascending=False, inplace=True)

    # "text": examples["Code"]
    # + " \n "
    # + examples["Description"]
    # + " \n "
    # + examples["Purpose: Clinical Focus"]

    # for _, row in samples_df.iterrows():
    #     print(f"Code: {row.Code}")
    #     print(f"Description: {row.Description}")
    #     # print(f"Purpose: Clinical Focus: {row.Purpose: Clinical Focus}")
    #     # print(f"URL: {row.html_url}")
    #     print("=" * 50)
    #     print()

    # SNOMED and CQM ---------------
    start_with_searchTermSNOMED = datasetSNOMED.filter(lambda example: example["Description"].startswith('Hospital'))  # Hospital
    start_with_searchTermCQM = dataseteCQM.filter(lambda example: example["Description"].startswith('Telephone'))  # Telephone

    print(start_with_searchTermLOINC)
    print(start_with_searchTermSNOMED)
    print(start_with_searchTermCQM)

    # print(start_with_searchTermLOINC["train"][0])
    # print(start_with_searchTermSNOMED["train"][0])
    # print(start_with_searchTermCQM["train"][0])

    # returnMsg = profile_dataset()
    # print(returnMsg)

    # try:
    #     top1matchLOINC = json.loads(start_with_searchTermLOINC['train'])
    #     top1matchSNOMED = json.loads(start_with_searchTermSNOMED['train'])
    #     top1matchCQM = json.loads(start_with_searchTermCQM['train'])
    #     top1matchLOINC = json.loads(start_with_searchTermLOINC)
    #     top1matchSNOMED = json.loads(start_with_searchTermSNOMED)
    #     top1matchCQM = json.loads(start_with_searchTermCQM)
    # except:
    #     print('Hello')
    # print(start_with_searchTermLOINC[0])
    # print(start_with_searchTermSNOMED[0])
    # print(start_with_searchTermCQM[0])

    # print(returnMsg)
    # print("Datasets Processed")

    return (
        (text1 if single_checkbox else text2)
        + ", selected:"
        + ", ".join(checkboxes),  # Text
        {
            "positive": num / (num + slider1 + slider2),
            "negative": slider1 / (num + slider1 + slider2),
            "neutral": slider2 / (num + slider1 + slider2),
        },  # Label
        (audio1[0], np.flipud(audio1[1]))
        if audio1 is not None else os.path.join(os.path.dirname(__file__), "files/cantina.wav"),  # Audio
        np.flipud(im1)
        if im1 is not None else os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),  # Image
        video
        if video is not None else os.path.join(os.path.dirname(__file__), "files/world.mp4"),  # Video
        [
            ("The", "art"),
            ("quick brown", "adj"),
            ("fox", "nn"),
            ("jumped", "vrb"),
            ("testing testing testing", None),
            ("over", "prp"),
            ("the", "art"),
            ("testing", None),
            ("lazy", "adj"),
            ("dogs", "nn"),
            (".", "punc"),
        ] + [(f"test {x}", f"test {x}") for x in range(10)],  # HighlightedText
        [
            ("The testing testing testing", None),
            ("over", 0.6),
            ("the", 0.2),
            ("testing", None),
            ("lazy", -0.1),
            ("dogs", 0.4),
            (".", 0),
        ] + [(f"test", x / 10) for x in range(-10, 10)],  # HighlightedText
        # json.loads(JSONOBJ),  # JSON
        start_with_searchTermLOINC.to_json(orient="records", path_or_buf="None"),
        # json.dumps(json.loads(start_with_searchTermLOINC['train'].to_json(orient="records", path_or_buf="None"))),
        "<button style='background-color: red'>Click Me: " + radio + "</button>",  # HTML
        os.path.join(os.path.dirname(__file__), "files/titanic.csv"),
        df1,  # Dataframe
        np.random.randint(0, 10, (4, 4)),  # Dataframe
        df2,  # Timeseries
    )


demo = gr.Interface(
    fn,
    inputs=[
        gr.Textbox(value="Allergy", label="Textbox"),
        gr.Textbox(lines=3, value="Bathing", placeholder="Type here..", label="Textbox 2"),
        gr.Number(label="Number", value=42),
        gr.Slider(10, 20, value=15, label="Slider: 10 - 20"),
        gr.Slider(maximum=20, step=0.04, label="Slider: step @ 0.04"),
        gr.Checkbox(label="Check for NER Match on Submit"),
        gr.CheckboxGroup(label="Clinical Terminology to Check", choices=CHOICES, value=CHOICES[0:2]),
        gr.Radio(label="Preferred Terminology Output", choices=CHOICES, value=CHOICES[2]),
        gr.Dropdown(label="Dropdown", choices=CHOICES),
        gr.Image(label="Image"),
        gr.Image(label="Image w/ Cropper", tool="select"),
        gr.Image(label="Sketchpad", source="canvas"),
        gr.Image(label="Webcam", source="webcam"),
        gr.Video(label="Video"),
        gr.Audio(label="Audio"),
        gr.Audio(label="Microphone", source="microphone"),
        gr.File(label="File"),
        gr.Dataframe(label="Filters", headers=["Name", "Age", "Gender"]),
        gr.Timeseries(x="time", y=["price", "value"], colors=["pink", "purple"]),
    ],
    outputs=[
        gr.Textbox(label="Textbox"),
        gr.Label(label="Label"),
        gr.Audio(label="Audio"),
        gr.Image(label="Image"),
        gr.Video(label="Video"),
        gr.HighlightedText(label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"}),
        gr.HighlightedText(label="HighlightedText", show_legend=True),
        gr.JSON(label="JSON"),
        gr.HTML(label="HTML"),
        gr.File(label="File"),
        gr.Dataframe(label="Dataframe"),
        gr.Dataframe(label="Numpy"),
        gr.Timeseries(x="time", y=["price", "value"], label="Timeseries"),
    ],
    examples=[
        [
            "Allergy",
            "Admission",
            10,
            12,
            4,
            True,
            ["SNOMED", "LOINC", "CQM"],
            "SNOMED",
            "bar",
            os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
            os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
            os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
            os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
            os.path.join(os.path.dirname(__file__), "files/world.mp4"),
            os.path.join(os.path.dirname(__file__), "files/cantina.wav"),
            os.path.join(os.path.dirname(__file__), "files/cantina.wav"),
            os.path.join(os.path.dirname(__file__), "files/titanic.csv"),
            [[1, 2, 3], [3, 4, 5]],
            os.path.join(os.path.dirname(__file__), "files/time.csv"),
        ]
    ]
    * 3,
    theme="default",
    title="⚗️🧠🔬🧬 Clinical Terminology Auto Mapper AI 👩⚕️🩺⚕️🙋",
    cache_examples=False,
    description="Clinical Terminology Auto Mapper AI",
    article="Learn more at [Yggdrasil](https://github.com/AaronCWacker/Yggdrasil)",
    # live=True,
)

if __name__ == "__main__":
    demo.launch(debug=True)

spaces/AbandonedMuse/UnlimitedMusicGen/README.md
DELETED
@@ -1,210 +0,0 @@
---
title: UnlimitedMusicGen
emoji: 🎼
colorFrom: white
colorTo: red
sdk: gradio
sdk_version: 3.38.0
app_file: app.py
pinned: false
license: creativeml-openrail-m
tags:
- musicgen
- unlimited
duplicated_from: Surn/UnlimitedMusicGen
---

[arxiv]: https://arxiv.org/abs/2306.05284
[musicgen_samples]: https://ai.honu.io/papers/musicgen/
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

# UnlimitedMusicGen
This is my modification of the Audiocraft project to enable unlimited audio generation. I have added a few features to the original project to support this, and extended the Gradio interface to make it easier to use.

# Audiocraft




Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model.

## MusicGen

Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single-stage auto-regressive
Transformer model trained over a 32kHz <a href="https://github.com/facebookresearch/encodec">EnCodec tokenizer</a> with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require a self-supervised semantic representation, and it generates
all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict
them in parallel, thus having only 50 auto-regressive steps per second of audio.
Check out our [sample page][musicgen_samples] or test the available demo!
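
The codebook delay just described can be illustrated with a small sketch. This is a toy illustration of the general idea, not Audiocraft's actual implementation; the `PAD` placeholder and the tiny token grid are invented for the example.

```python
# Toy illustration of a codebook delay pattern (not the audiocraft internals).
# Codebook k is shifted right by k steps, so the tokens produced at any one
# decoding step all belong to different audio frames and can be predicted in
# parallel, which is what keeps the number of auto-regressive steps at the frame rate.
PAD = -1  # stand-in for the model's special padding token

def delay_pattern(codes):
    """codes[k][t] is the token of codebook k at frame t; returns the delayed grid."""
    num_codebooks, num_frames = len(codes), len(codes[0])
    delayed = [[PAD] * (num_frames + num_codebooks - 1) for _ in range(num_codebooks)]
    for k in range(num_codebooks):
        for t in range(num_frames):
            delayed[k][t + k] = codes[k][t]
    return delayed

# 4 codebooks, 3 frames of made-up tokens
for row in delay_pattern([[10, 11, 12], [20, 21, 22], [30, 31, 32], [40, 41, 42]]):
    print(row)
```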

<a target="_blank" href="https://colab.research.google.com/drive/1-Xe9NCdIs2sCUbiSmwHXozK6AAhMm7_i?usp=sharing">
  <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
<a target="_blank" href="https://huggingface.co/spaces/facebook/MusicGen">
  <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Open in HuggingFace"/>
</a>
<br>

We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data.

## Installation
Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following:

```shell
# Best to make sure you have torch installed first, in particular before installing xformers.
# Don't run this if you already have PyTorch installed.
pip install 'torch>=2.0'
# Then proceed to one of the following
pip install -U audiocraft  # stable release
pip install -U git+https://[email protected]/facebookresearch/audiocraft#egg=audiocraft  # bleeding edge
pip install -e .  # or if you cloned the repo locally
```

## Usage
We offer a number of ways to interact with MusicGen:
1. A demo is available on the [`facebook/MusicGen` HuggingFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support).
2. You can run the Gradio demo in Colab: [colab notebook](https://colab.research.google.com/drive/1-Xe9NCdIs2sCUbiSmwHXozK6AAhMm7_i?usp=sharing).
3. You can use the Gradio demo locally by running `python app.py`.
4. You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally (if you have a GPU).
5. Check out the [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab), which is regularly
updated with contributions from @camenduru and the community.
6. Finally, MusicGen is available in 🤗 Transformers from v4.31.0 onwards, see section [🤗 Transformers Usage](#-transformers-usage) below.

### More info about Top-k, Top-p, Temperature and Classifier Free Guidance from ChatGPT

Top-k: Top-k is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music.

Top-p (or nucleus sampling): Top-p, also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities.

Temperature: Temperature is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music.

Classifier-Free Guidance: Classifier-Free Guidance refers to a technique used in some music generation models where a separate classifier network is trained to provide guidance or control over the generated music. This classifier is trained on labeled data to recognize specific musical characteristics or styles. During the generation process, the output of the generator model is evaluated by the classifier, and the generator is encouraged to produce music that aligns with the desired characteristics or style. This approach allows for more fine-grained control over the generated music, enabling users to specify certain attributes they want the model to capture.

These parameters, such as top-k, top-p, temperature, and classifier-free guidance, provide different ways to influence the output of a music generation model and strike a balance between creativity, diversity, coherence, and control. The specific values for these parameters can be tuned based on the desired outcome and user preferences.
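
As a concrete illustration of how top-k, top-p and temperature interact during sampling, here is a minimal, self-contained sketch over a toy logit vector. It is not MusicGen's internal sampler, just the standard recipe the paragraphs above describe; the logit values and vocabulary size are invented for the example.

```python
import numpy as np

def sample_next_token(logits, temperature=1.0, top_k=0, top_p=1.0, rng=None):
    """Standard temperature / top-k / top-p sampling over a 1-D logit vector."""
    rng = rng or np.random.default_rng(0)
    logits = np.asarray(logits, dtype=np.float64) / max(temperature, 1e-8)  # temperature scaling
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()

    order = np.argsort(probs)[::-1]        # token ids ranked by probability
    if top_k > 0:
        order = order[:top_k]              # keep only the k most likely tokens
    if top_p < 1.0:
        cumulative = np.cumsum(probs[order])
        cutoff = int(np.searchsorted(cumulative, top_p)) + 1
        order = order[:cutoff]             # smallest set whose cumulative mass reaches top_p

    kept = probs[order] / probs[order].sum()  # renormalize over the surviving tokens
    return int(rng.choice(order, p=kept))

# Toy vocabulary of 5 tokens: lower temperature and smaller top_k / top_p focus the
# choice on the most likely candidates; raising them spreads it out.
print(sample_next_token([2.0, 1.5, 0.3, -1.0, -2.0], temperature=0.7, top_k=3, top_p=0.9))
```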

## API

We provide a simple API and 4 pre-trained models. The pre-trained models are:
- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small)
- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium)
- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody)
- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large)

We observe the best trade-off between quality and compute with the `medium` or `melody` model.
In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller
GPUs will be able to generate short sequences, or longer sequences with the `small` model.

**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer versions of `torchaudio`.
You can install it with:
```
apt-get install ffmpeg
```

See below for a quick example of using the API.

```python
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained('melody')
model.set_generation_params(duration=8)  # generate 8 seconds.
wav = model.generate_unconditional(4)    # generates 4 unconditional audio samples
descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
wav = model.generate(descriptions)       # generates 3 samples.

melody, sr = torchaudio.load('./assets/bach.mp3')
# generates using the melody from the given audio and the provided descriptions.
wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr)

for idx, one_wav in enumerate(wav):
    # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
    audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
```

## 🤗 Transformers Usage

MusicGen is available in the 🤗 Transformers library from version 4.31.0 onwards, requiring minimal dependencies
and additional packages. Steps to get started:

1. First install the 🤗 [Transformers library](https://github.com/huggingface/transformers) from main:

```
pip install git+https://github.com/huggingface/transformers.git
```

2. Run the following Python code to generate text-conditional audio samples:

```py
from transformers import AutoProcessor, MusicgenForConditionalGeneration


processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

inputs = processor(
    text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
    padding=True,
    return_tensors="pt",
)

audio_values = model.generate(**inputs, max_new_tokens=256)
```

3. Listen to the audio samples either in an ipynb notebook:

```py
from IPython.display import Audio

sampling_rate = model.config.audio_encoder.sampling_rate
Audio(audio_values[0].numpy(), rate=sampling_rate)
```

Or save them as a `.wav` file using a third-party library, e.g. `scipy`:

```py
import scipy

sampling_rate = model.config.audio_encoder.sampling_rate
scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy())
```

For more details on using the MusicGen model for inference using the 🤗 Transformers library, refer to the
[MusicGen docs](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen) or the hands-on
[Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/MusicGen.ipynb).


## Model Card

See [the model card page](./MODEL_CARD.md).

## FAQ

#### Will the training code be released?

Yes. We will soon release the training code for MusicGen and EnCodec.


#### I need help on Windows

@FurkanGozukara made a complete tutorial for [Audiocraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4)

#### I need help for running the demo on Colab

Check [@camenduru tutorial on Youtube](https://www.youtube.com/watch?v=EGfxuTy9Eeo).

## Citation
```
@article{copet2023simple,
    title={Simple and Controllable Music Generation},
    author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
    year={2023},
    journal={arXiv preprint arXiv:2306.05284},
}
```

## License
* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE).
* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights).

[arxiv]: https://arxiv.org/abs/2306.05284
[musicgen_samples]: https://ai.honu.io/papers/musicgen/

spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/sum.ts
DELETED
@@ -1,3 +0,0 @@
export function sum(nums: number[]): number {
	return nums.reduce((a, b) => a + b, 0);
}

spaces/AchyuthGamer/OpenGPT/g4f/Provider/DeepAi.py
DELETED
@@ -1,77 +0,0 @@
from __future__ import annotations

import json
import js2py
import random
import hashlib
from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class DeepAi(AsyncGeneratorProvider):
    url: str = "https://deepai.org"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:

        token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
h = Math.round(1E11 * Math.random()) + "";
f = function () {
    for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);

    return function (t) {
        var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
            Z = [],
            A = unescape(encodeURI(t)) + "\u0080",
            z = A.length;
        t = --z / 4 + 2 | 15;
        for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
        for (q = A = 0; q < t; q += 16) {
            for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
            for (A = 4; A;) ea[--A] += z[A]
        }
        for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
        return t.split("").reverse().join("")
    }
}();

"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
"""

        payload = {"chat_style": "chat", "chatHistory": json.dumps(messages)}
        api_key = js2py.eval_js(token_js)
        headers = {
            "api-key": api_key,
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
            **kwargs.get("headers", {})
        }
        async with ClientSession(
            headers=headers
        ) as session:
            fill = "ing_is"
            fill = f"ack{fill}_a_crim"
            async with session.post(f"https://api.deepai.org/h{fill}e", proxy=proxy, data=payload) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()


def get_api_key(user_agent: str):
    e = str(round(1E11 * random.random()))

    def hash(data: str):
        return hashlib.md5(data.encode()).hexdigest()[::-1]

    return f"tryit-{e}-" + hash(user_agent + hash(user_agent + hash(user_agent + e + "x")))
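As an aside, the `get_api_key` helper at the bottom of this deleted provider is a pure-Python version of what the embedded JavaScript computes: a reversed-MD5 chain over the user agent and a random number. A small standalone sketch of the same construction (the helper name and the user-agent value are only examples, not part of the original file):

```py
import random
from hashlib import md5

def derive_tryit_token(user_agent: str) -> str:
    # Mirrors get_api_key above: "tryit-<random>-<nested reversed MD5 of the user agent>".
    e = str(round(1e11 * random.random()))
    def h(data: str) -> str:
        return md5(data.encode()).hexdigest()[::-1]
    return f"tryit-{e}-" + h(user_agent + h(user_agent + h(user_agent + e + "x")))

ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
print(derive_tryit_token(ua))
```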
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatFree.py
DELETED
@@ -1,48 +0,0 @@
import os, requests
from ...typing import sha256, Dict, get_type_hints
import json

url = "https://v.chatfree.cc"
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    headers = {
        'authority': 'chat.dfehub.com',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'content-type': 'application/json',
        'origin': 'https://v.chatfree.cc',
        'referer': 'https://v.chatfree.cc/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    json_data = {
        'messages': messages,
        'stream': True,
        'model': model,
        'temperature': 0.5,
        'presence_penalty': 0,
        'frequency_penalty': 0,
        'top_p': 1,
    }

    response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions',
                             headers=headers, json=json_data)

    for chunk in response.iter_lines():
        if b'content' in chunk:
            data = json.loads(chunk.decode().split('data: ')[1])
            yield (data['choices'][0]['delta']['content'])

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
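`_create_completion` here is a generator that yields completion deltas parsed from the `data:` lines of the response. A hypothetical caller (prompt and model name are only examples) would iterate it and join the pieces:

```py
# Illustrative usage sketch for the deleted provider above.
messages = [{"role": "user", "content": "Say hello in one short sentence."}]
reply = "".join(_create_completion("gpt-3.5-turbo", messages, stream=False))
print(reply)
```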
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ezcht.py
DELETED
@@ -1,35 +0,0 @@
import requests
import os
import json
from ...typing import sha256, Dict, get_type_hints

url = 'https://gpt4.ezchat.top'
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
supports_stream = True
needs_auth = False

def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        'Content-Type': 'application/json',
    }
    data = {
        'model': model,
        'temperature': 0.7,
        'presence_penalty': 0,
        'messages': messages,
    }
    response = requests.post(url + '/api/openai/v1/chat/completions',
                             json=data, stream=True)

    if stream:
        for chunk in response.iter_content(chunk_size=None):
            chunk = chunk.decode('utf-8')
            if chunk.strip():
                message = json.loads(chunk)['choices'][0]['message']['content']
                yield message
    else:
        message = response.json()['choices'][0]['message']['content']
        yield message

params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/los/Factory.js
DELETED
@@ -1,13 +0,0 @@
import Los from './Los.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('los', function (config) {
    var gameObject = new Los(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.Spinner.Los', Los);

export default Los;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/orbit/Factory.js
DELETED
@@ -1,13 +0,0 @@
import Orbit from './Orbit.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('orbit', function (config) {
    var gameObject = new Orbit(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.Spinner.Orbit', Orbit);

export default Orbit;
spaces/AlexWortega/ruImageCaptionong/app.py
DELETED
@@ -1,198 +0,0 @@
import torch
import sys
import gradio as gr
from PIL import Image

device = 'cpu'
import clip
import os
from torch import nn
import numpy as np
import torch.nn.functional as nnf
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from tqdm import tqdm, trange
import PIL.Image
#ggf

import transformers

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_path = 'coco_prefix_latest.pt'


class MLP(nn.Module):

    def forward(self, x):
        return self.model(x)

    def __init__(self, sizes, bias=True, act=nn.Tanh):
        super(MLP, self).__init__()
        layers = []
        for i in range(len(sizes) - 1):
            layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias))
            if i < len(sizes) - 2:
                layers.append(act())
        self.model = nn.Sequential(*layers)


class ClipCaptionModel(nn.Module):

    #@functools.lru_cache #FIXME
    def get_dummy_token(self, batch_size, device):
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def forward(self, tokens, prefix, mask, labels):
        embedding_text = self.gpt.transformer.wte(tokens)
        prefix_projections = self.clip_project(prefix).view(-1, self.prefix_length, self.gpt_embedding_size)
        #print(embedding_text.size()) #torch.Size([5, 67, 768])
        #print(prefix_projections.size()) #torch.Size([5, 1, 768])
        embedding_cat = torch.cat((prefix_projections, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(tokens.shape[0], tokens.device)
            labels = torch.cat((dummy_token, tokens), dim=1)
        out = self.gpt(inputs_embeds=embedding_cat, labels=labels, attention_mask=mask)
        return out

    def __init__(self, prefix_length, prefix_size: int = 512):
        super(ClipCaptionModel, self).__init__()
        self.prefix_length = prefix_length
        self.gpt = GPT2LMHeadModel.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
        self.gpt_embedding_size = self.gpt.transformer.wte.weight.shape[1]
        if prefix_length > 10:  # not enough memory
            self.clip_project = nn.Linear(prefix_size, self.gpt_embedding_size * prefix_length)
        else:
            self.clip_project = MLP((prefix_size, (self.gpt_embedding_size * prefix_length) // 2, self.gpt_embedding_size * prefix_length))


class ClipCaptionPrefix(ClipCaptionModel):

    def parameters(self, recurse: bool = True):
        return self.clip_project.parameters()

    def train(self, mode: bool = True):
        super(ClipCaptionPrefix, self).train(mode)
        self.gpt.eval()
        return self


clip_model, preprocess = clip.load("ViT-B/32", device=device, jit=False)
tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
prefix_length = 10
model = ClipCaptionModel(prefix_length)
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.to(device)

def generate2(
        model,
        tokenizer,
        tokens=None,
        prompt=None,
        embed=None,
        entry_count=1,
        entry_length=67,
        top_p=0.98,
        temperature=1.,
        stop_token='.',
):
    model.eval()
    generated_num = 0
    generated_list = []
    stop_token_index = tokenizer.encode(stop_token)[0]
    filter_value = -float("Inf")
    device = next(model.parameters()).device

    with torch.no_grad():

        for entry_idx in trange(entry_count):
            if embed is not None:
                generated = embed
            else:
                if tokens is None:
                    tokens = torch.tensor(tokenizer.encode(prompt))
                    tokens = tokens.unsqueeze(0).to(device)

                generated = model.gpt.transformer.wte(tokens)

            for i in range(entry_length):

                outputs = model.gpt(inputs_embeds=generated)
                logits = outputs.logits
                logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(nnf.softmax(sorted_logits, dim=-1), dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0

                indices_to_remove = sorted_indices[sorted_indices_to_remove]
                logits[:, indices_to_remove] = filter_value
                #
                top_k = 2000
                top_p = 0.98
                #print(logits)
                #next_token = transformers.top_k_top_p_filtering(logits.to(torch.int64).unsqueeze(0), top_k=top_k, top_p=top_p)
                next_token = torch.argmax(logits, -1).unsqueeze(0)
                next_token_embed = model.gpt.transformer.wte(next_token)

                if tokens is None:
                    tokens = next_token
                else:
                    tokens = torch.cat((tokens, next_token), dim=1)
                generated = torch.cat((generated, next_token_embed), dim=1)

                if stop_token_index == next_token.item():
                    break

            output_list = list(tokens.squeeze().cpu().numpy())
            output_text = tokenizer.decode(output_list)
            generated_list.append(output_text)

    return generated_list[0]


def _to_caption(pil_image):

    image = preprocess(pil_image).unsqueeze(0).to(device)
    with torch.no_grad():

        prefix = clip_model.encode_image(image).to(device, dtype=torch.float32)
        prefix_embed = model.clip_project(prefix).reshape(1, prefix_length, -1)

    generated_text_prefix = generate2(model, tokenizer, embed=prefix_embed)
    return generated_text_prefix


def classify_image(inp):
    print(type(inp))
    inp = Image.fromarray(inp)
    texts = _to_caption(inp)

    print(texts)

    return texts

image = gr.inputs.Image(shape=(256, 256))
label = gr.outputs.Label(num_top_classes=3)

iface = gr.Interface(fn=classify_image, description="https://github.com/AlexWortega/ruImageCaptioning RuImage Captioning trained for a image2text task to predict caption of image by https://t.me/lovedeathtransformers Alex Wortega", inputs=image, outputs="text", examples=[
    ['1.jpeg']])
iface.launch()
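One thing worth noting about the deleted `generate2` above: it builds a nucleus (top-p) mask over the sorted logits but then still takes `argmax`, so decoding is effectively greedy. A hedged sketch of what actually sampling from the filtered distribution would look like (this helper is an illustration, not part of the original app):

```py
import torch
import torch.nn.functional as nnf

def sample_top_p(logits: torch.Tensor, top_p: float = 0.98) -> torch.Tensor:
    # Keep the smallest set of tokens whose cumulative probability exceeds top_p,
    # then sample from the renormalized distribution instead of taking argmax.
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(nnf.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_mask = cumulative_probs > top_p
    sorted_mask[..., 1:] = sorted_mask[..., :-1].clone()
    sorted_mask[..., 0] = False
    # Map the mask from sorted order back to vocabulary order.
    mask = sorted_mask.scatter(-1, sorted_indices, sorted_mask)
    filtered = logits.masked_fill(mask, float("-inf"))
    probs = nnf.softmax(filtered, dim=-1)
    return torch.multinomial(probs, num_samples=1)  # shape: [batch, 1]
```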
spaces/Alpaca233/SadTalker/src/utils/croper.py
DELETED
@@ -1,144 +0,0 @@
import os
import cv2
import time
import glob
import argparse
import scipy
import numpy as np
from PIL import Image
import torch
from tqdm import tqdm
from itertools import cycle

from src.face3d.extract_kp_videos_safe import KeypointExtractor
from facexlib.alignment import landmark_98_to_68

class Preprocesser:
    def __init__(self, device='cuda'):
        self.predictor = KeypointExtractor(device)

    def get_landmark(self, img_np):
        """get landmark with dlib
        :return: np.array shape=(68, 2)
        """
        with torch.no_grad():
            dets = self.predictor.det_net.detect_faces(img_np, 0.97)

        if len(dets) == 0:
            return None
        det = dets[0]

        img = img_np[int(det[1]):int(det[3]), int(det[0]):int(det[2]), :]
        lm = landmark_98_to_68(self.predictor.detector.get_landmarks(img))  # [0]

        #### keypoints to the original location
        lm[:, 0] += int(det[0])
        lm[:, 1] += int(det[1])

        return lm

    def align_face(self, img, lm, output_size=1024):
        """
        :param filepath: str
        :return: PIL Image
        """
        lm_chin = lm[0: 17]  # left-right
        lm_eyebrow_left = lm[17: 22]  # left-right
        lm_eyebrow_right = lm[22: 27]  # left-right
        lm_nose = lm[27: 31]  # top-down
        lm_nostrils = lm[31: 36]  # top-down
        lm_eye_left = lm[36: 42]  # left-clockwise
        lm_eye_right = lm[42: 48]  # left-clockwise
        lm_mouth_outer = lm[48: 60]  # left-clockwise
        lm_mouth_inner = lm[60: 68]  # left-clockwise

        # Calculate auxiliary vectors.
        eye_left = np.mean(lm_eye_left, axis=0)
        eye_right = np.mean(lm_eye_right, axis=0)
        eye_avg = (eye_left + eye_right) * 0.5
        eye_to_eye = eye_right - eye_left
        mouth_left = lm_mouth_outer[0]
        mouth_right = lm_mouth_outer[6]
        mouth_avg = (mouth_left + mouth_right) * 0.5
        eye_to_mouth = mouth_avg - eye_avg

        # Choose oriented crop rectangle.
        x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]  # sum of the eye-to-eye and rotated eye-to-mouth vectors
        x /= np.hypot(*x)  # np.hypot gives the vector length; normalize x to unit length
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)  # use the larger of the two face distances as the base scale
        y = np.flipud(x) * [-1, 1]
        c = eye_avg + eye_to_mouth * 0.1
        quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])  # crop quad: four corners shifted around the face reference point
        qsize = np.hypot(*x) * 2  # quad side length, twice the base scale

        # Shrink.
        # If the computed quad is too large, shrink the image proportionally first.
        shrink = int(np.floor(qsize / output_size * 0.5))
        if shrink > 1:
            rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
            img = img.resize(rsize, Image.ANTIALIAS)
            quad /= shrink
            qsize /= shrink
        else:
            rsize = (int(np.rint(float(img.size[0]))), int(np.rint(float(img.size[1]))))

        # Crop.
        border = max(int(np.rint(qsize * 0.1)), 3)
        crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
                int(np.ceil(max(quad[:, 1]))))
        crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
                min(crop[3] + border, img.size[1]))
        if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
            # img = img.crop(crop)
            quad -= crop[0:2]

        # Pad.
        pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
               int(np.ceil(max(quad[:, 1]))))
        pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
               max(pad[3] - img.size[1] + border, 0))
        # if enable_padding and max(pad) > border - 4:
        #     pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        #     img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        #     h, w, _ = img.shape
        #     y, x, _ = np.ogrid[:h, :w, :1]
        #     mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
        #                       1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        #     blur = qsize * 0.02
        #     img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        #     img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        #     img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        #     quad += pad[:2]

        # Transform.
        quad = (quad + 0.5).flatten()
        lx = max(min(quad[0], quad[2]), 0)
        ly = max(min(quad[1], quad[7]), 0)
        rx = min(max(quad[4], quad[6]), img.size[0])
        ry = min(max(quad[3], quad[5]), img.size[0])

        # Save aligned image.
        return rsize, crop, [lx, ly, rx, ry]

    def crop(self, img_np_list, still=False, xsize=512):  # first frame for all video
        img_np = img_np_list[0]
        lm = self.get_landmark(img_np)

        if lm is None:
            raise ValueError('cannot detect the landmark from source image')
        rsize, crop, quad = self.align_face(img=Image.fromarray(img_np), lm=lm, output_size=xsize)
        clx, cly, crx, cry = crop
        lx, ly, rx, ry = quad
        lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
        for _i in range(len(img_np_list)):
            _inp = img_np_list[_i]
            _inp = cv2.resize(_inp, (rsize[0], rsize[1]))
            _inp = _inp[cly:cry, clx:crx]
            if not still:
                _inp = _inp[ly:ry, lx:rx]
            img_np_list[_i] = _inp
        return img_np_list, crop, quad
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/waiter.h
DELETED
@@ -1,83 +0,0 @@
#pragma once

#include <utility>
#include <string>
#include <mutex>
#include <atomic>

#include "libipc/def.h"
#include "libipc/mutex.h"
#include "libipc/condition.h"
#include "libipc/platform/detail.h"

namespace ipc {
namespace detail {

class waiter {
    ipc::sync::condition cond_;
    ipc::sync::mutex     lock_;
    std::atomic<bool>    quit_ {false};

public:
    static void init();

    waiter() = default;
    waiter(char const *name) {
        open(name);
    }

    ~waiter() {
        close();
    }

    bool valid() const noexcept {
        return cond_.valid() && lock_.valid();
    }

    bool open(char const *name) noexcept {
        quit_.store(false, std::memory_order_relaxed);
        if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
            return false;
        }
        if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
            cond_.close();
            return false;
        }
        return valid();
    }

    void close() noexcept {
        cond_.close();
        lock_.close();
    }

    template <typename F>
    bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
        IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
        while ([this, &pred] {
                    return !quit_.load(std::memory_order_relaxed)
                        && std::forward<F>(pred)();
               }()) {
            if (!cond_.wait(lock_, tm)) return false;
        }
        return true;
    }

    bool notify() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.notify(lock_);
    }

    bool broadcast() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.broadcast(lock_);
    }

    bool quit_waiting() {
        quit_.store(true, std::memory_order_release);
        return broadcast();
    }
};

} // namespace detail
} // namespace ipc
spaces/Amrrs/DragGan-Inversion/stylegan_human/training/networks_stylegan2.py
DELETED
@@ -1,974 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Network architectures from the paper
|
10 |
-
"Analyzing and Improving the Image Quality of StyleGAN".
|
11 |
-
Matches the original implementation of configs E-F by Karras et al. at
|
12 |
-
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
|
13 |
-
|
14 |
-
import numpy as np
|
15 |
-
import torch
|
16 |
-
import torch.nn.functional as F
|
17 |
-
from torch_utils import misc
|
18 |
-
from torch_utils import persistence
|
19 |
-
from torch_utils.ops import conv2d_resample
|
20 |
-
from torch_utils.ops import upfirdn2d
|
21 |
-
from torch_utils.ops import bias_act
|
22 |
-
from torch_utils.ops import fma
|
23 |
-
|
24 |
-
# ----------------------------------------------------------------------------
|
25 |
-
|
26 |
-
|
27 |
-
@misc.profiled_function
|
28 |
-
def normalize_2nd_moment(x, dim=1, eps=1e-8):
|
29 |
-
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
|
30 |
-
|
31 |
-
# ----------------------------------------------------------------------------
|
32 |
-
|
33 |
-
|
34 |
-
@misc.profiled_function
|
35 |
-
def modulated_conv2d(
|
36 |
-
# Input tensor of shape [batch_size, in_channels, in_height, in_width].
|
37 |
-
x,
|
38 |
-
# Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
|
39 |
-
weight,
|
40 |
-
# Modulation coefficients of shape [batch_size, in_channels].
|
41 |
-
styles,
|
42 |
-
noise=None, # Optional noise tensor to add to the output activations.
|
43 |
-
up=1, # Integer upsampling factor.
|
44 |
-
down=1, # Integer downsampling factor.
|
45 |
-
padding=0, # Padding with respect to the upsampled image.
|
46 |
-
# Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
|
47 |
-
resample_filter=None,
|
48 |
-
demodulate=True, # Apply weight demodulation?
|
49 |
-
# False = convolution, True = correlation (matches torch.nn.functional.conv2d).
|
50 |
-
flip_weight=True,
|
51 |
-
# Perform modulation, convolution, and demodulation as a single fused operation?
|
52 |
-
fused_modconv=True,
|
53 |
-
):
|
54 |
-
batch_size = x.shape[0]
|
55 |
-
out_channels, in_channels, kh, kw = weight.shape
|
56 |
-
misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
|
57 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
|
58 |
-
misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
|
59 |
-
|
60 |
-
# Pre-normalize inputs to avoid FP16 overflow.
|
61 |
-
if x.dtype == torch.float16 and demodulate:
|
62 |
-
weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
|
63 |
-
weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
|
64 |
-
styles = styles / \
|
65 |
-
styles.norm(float('inf'), dim=1, keepdim=True) # max_I
|
66 |
-
|
67 |
-
# Calculate per-sample weights and demodulation coefficients.
|
68 |
-
w = None
|
69 |
-
dcoefs = None
|
70 |
-
if demodulate or fused_modconv:
|
71 |
-
w = weight.unsqueeze(0) # [NOIkk]
|
72 |
-
w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
|
73 |
-
if demodulate:
|
74 |
-
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
|
75 |
-
if demodulate and fused_modconv:
|
76 |
-
w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
|
77 |
-
|
78 |
-
# Execute by scaling the activations before and after the convolution.
|
79 |
-
if not fused_modconv:
|
80 |
-
x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
81 |
-
x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
|
82 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
|
83 |
-
if demodulate and noise is not None:
|
84 |
-
x = fma.fma(x, dcoefs.to(x.dtype).reshape(
|
85 |
-
batch_size, -1, 1, 1), noise.to(x.dtype))
|
86 |
-
elif demodulate:
|
87 |
-
x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
88 |
-
elif noise is not None:
|
89 |
-
x = x.add_(noise.to(x.dtype))
|
90 |
-
return x
|
91 |
-
|
92 |
-
# Execute as one fused op using grouped convolution.
|
93 |
-
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
|
94 |
-
batch_size = int(batch_size)
|
95 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None])
|
96 |
-
x = x.reshape(1, -1, *x.shape[2:])
|
97 |
-
w = w.reshape(-1, in_channels, kh, kw)
|
98 |
-
x = conv2d_resample.conv2d_resample(x=x, w=w.to(
|
99 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
|
100 |
-
x = x.reshape(batch_size, -1, *x.shape[2:])
|
101 |
-
if noise is not None:
|
102 |
-
x = x.add_(noise)
|
103 |
-
return x
|
104 |
-
|
105 |
-
# ----------------------------------------------------------------------------
|
106 |
-
|
107 |
-
|
108 |
-
@persistence.persistent_class
|
109 |
-
class FullyConnectedLayer(torch.nn.Module):
|
110 |
-
def __init__(self,
|
111 |
-
in_features, # Number of input features.
|
112 |
-
out_features, # Number of output features.
|
113 |
-
bias=True, # Apply additive bias before the activation function?
|
114 |
-
# Activation function: 'relu', 'lrelu', etc.
|
115 |
-
activation='linear',
|
116 |
-
lr_multiplier=1, # Learning rate multiplier.
|
117 |
-
bias_init=0, # Initial value for the additive bias.
|
118 |
-
):
|
119 |
-
super().__init__()
|
120 |
-
self.in_features = in_features
|
121 |
-
self.out_features = out_features
|
122 |
-
self.activation = activation
|
123 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
124 |
-
[out_features, in_features]) / lr_multiplier)
|
125 |
-
self.bias = torch.nn.Parameter(torch.full(
|
126 |
-
[out_features], np.float32(bias_init))) if bias else None
|
127 |
-
self.weight_gain = lr_multiplier / np.sqrt(in_features)
|
128 |
-
self.bias_gain = lr_multiplier
|
129 |
-
|
130 |
-
def forward(self, x):
|
131 |
-
w = self.weight.to(x.dtype) * self.weight_gain
|
132 |
-
b = self.bias
|
133 |
-
if b is not None:
|
134 |
-
b = b.to(x.dtype)
|
135 |
-
if self.bias_gain != 1:
|
136 |
-
b = b * self.bias_gain
|
137 |
-
|
138 |
-
if self.activation == 'linear' and b is not None:
|
139 |
-
x = torch.addmm(b.unsqueeze(0), x, w.t())
|
140 |
-
else:
|
141 |
-
x = x.matmul(w.t())
|
142 |
-
x = bias_act.bias_act(x, b, act=self.activation)
|
143 |
-
return x
|
144 |
-
|
145 |
-
def extra_repr(self):
|
146 |
-
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
|
147 |
-
|
148 |
-
# ----------------------------------------------------------------------------
|
149 |
-
|
150 |
-
|
151 |
-
@persistence.persistent_class
|
152 |
-
class Conv2dLayer(torch.nn.Module):
|
153 |
-
def __init__(self,
|
154 |
-
in_channels, # Number of input channels.
|
155 |
-
out_channels, # Number of output channels.
|
156 |
-
# Width and height of the convolution kernel.
|
157 |
-
kernel_size,
|
158 |
-
bias=True, # Apply additive bias before the activation function?
|
159 |
-
# Activation function: 'relu', 'lrelu', etc.
|
160 |
-
activation='linear',
|
161 |
-
up=1, # Integer upsampling factor.
|
162 |
-
down=1, # Integer downsampling factor.
|
163 |
-
# Low-pass filter to apply when resampling activations.
|
164 |
-
resample_filter=[1, 3, 3, 1],
|
165 |
-
# Clamp the output to +-X, None = disable clamping.
|
166 |
-
conv_clamp=None,
|
167 |
-
channels_last=False, # Expect the input to have memory_format=channels_last?
|
168 |
-
trainable=True, # Update the weights of this layer during training?
|
169 |
-
):
|
170 |
-
super().__init__()
|
171 |
-
self.in_channels = in_channels
|
172 |
-
self.out_channels = out_channels
|
173 |
-
self.activation = activation
|
174 |
-
self.up = up
|
175 |
-
self.down = down
|
176 |
-
self.conv_clamp = conv_clamp
|
177 |
-
self.register_buffer(
|
178 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
179 |
-
self.padding = kernel_size // 2
|
180 |
-
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
|
181 |
-
self.act_gain = bias_act.activation_funcs[activation].def_gain
|
182 |
-
|
183 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
184 |
-
weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
|
185 |
-
memory_format=memory_format)
|
186 |
-
bias = torch.zeros([out_channels]) if bias else None
|
187 |
-
if trainable:
|
188 |
-
self.weight = torch.nn.Parameter(weight)
|
189 |
-
self.bias = torch.nn.Parameter(bias) if bias is not None else None
|
190 |
-
else:
|
191 |
-
self.register_buffer('weight', weight)
|
192 |
-
if bias is not None:
|
193 |
-
self.register_buffer('bias', bias)
|
194 |
-
else:
|
195 |
-
self.bias = None
|
196 |
-
|
197 |
-
def forward(self, x, gain=1):
|
198 |
-
w = self.weight * self.weight_gain
|
199 |
-
b = self.bias.to(x.dtype) if self.bias is not None else None
|
200 |
-
flip_weight = (self.up == 1) # slightly faster
|
201 |
-
x = conv2d_resample.conv2d_resample(x=x, w=w.to(
|
202 |
-
x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
|
203 |
-
|
204 |
-
act_gain = self.act_gain * gain
|
205 |
-
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
|
206 |
-
x = bias_act.bias_act(x, b, act=self.activation,
|
207 |
-
gain=act_gain, clamp=act_clamp)
|
208 |
-
return x
|
209 |
-
|
210 |
-
def extra_repr(self):
|
211 |
-
return ' '.join([
|
212 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
|
213 |
-
f'up={self.up}, down={self.down}'])
|
214 |
-
|
215 |
-
# ----------------------------------------------------------------------------
|
216 |
-
|
217 |
-
|
218 |
-
@persistence.persistent_class
|
219 |
-
class MappingNetwork(torch.nn.Module):
|
220 |
-
def __init__(self,
|
221 |
-
# Input latent (Z) dimensionality, 0 = no latent.
|
222 |
-
z_dim,
|
223 |
-
# Conditioning label (C) dimensionality, 0 = no label.
|
224 |
-
c_dim,
|
225 |
-
# Intermediate latent (W) dimensionality.
|
226 |
-
w_dim,
|
227 |
-
# Number of intermediate latents to output, None = do not broadcast.
|
228 |
-
num_ws,
|
229 |
-
num_layers=8, # Number of mapping layers.
|
230 |
-
# Label embedding dimensionality, None = same as w_dim.
|
231 |
-
embed_features=None,
|
232 |
-
# Number of intermediate features in the mapping layers, None = same as w_dim.
|
233 |
-
layer_features=None,
|
234 |
-
# Activation function: 'relu', 'lrelu', etc.
|
235 |
-
activation='lrelu',
|
236 |
-
# Learning rate multiplier for the mapping layers.
|
237 |
-
lr_multiplier=0.01,
|
238 |
-
# Decay for tracking the moving average of W during training, None = do not track.
|
239 |
-
w_avg_beta=0.998,
|
240 |
-
):
|
241 |
-
super().__init__()
|
242 |
-
self.z_dim = z_dim
|
243 |
-
self.c_dim = c_dim
|
244 |
-
self.w_dim = w_dim
|
245 |
-
self.num_ws = num_ws
|
246 |
-
self.num_layers = num_layers
|
247 |
-
self.w_avg_beta = w_avg_beta
|
248 |
-
|
249 |
-
if embed_features is None:
|
250 |
-
embed_features = w_dim
|
251 |
-
if c_dim == 0:
|
252 |
-
embed_features = 0
|
253 |
-
if layer_features is None:
|
254 |
-
layer_features = w_dim
|
255 |
-
features_list = [z_dim + embed_features] + \
|
256 |
-
[layer_features] * (num_layers - 1) + [w_dim]
|
257 |
-
|
258 |
-
if c_dim > 0:
|
259 |
-
self.embed = FullyConnectedLayer(c_dim, embed_features)
|
260 |
-
for idx in range(num_layers):
|
261 |
-
in_features = features_list[idx]
|
262 |
-
out_features = features_list[idx + 1]
|
263 |
-
layer = FullyConnectedLayer(
|
264 |
-
in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
|
265 |
-
setattr(self, f'fc{idx}', layer)
|
266 |
-
|
267 |
-
if num_ws is not None and w_avg_beta is not None:
|
268 |
-
self.register_buffer('w_avg', torch.zeros([w_dim]))
|
269 |
-
|
270 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
|
271 |
-
# Embed, normalize, and concat inputs.
|
272 |
-
x = None
|
273 |
-
with torch.autograd.profiler.record_function('input'):
|
274 |
-
if self.z_dim > 0:
|
275 |
-
misc.assert_shape(z, [None, self.z_dim])
|
276 |
-
x = normalize_2nd_moment(z.to(torch.float32))
|
277 |
-
if self.c_dim > 0:
|
278 |
-
misc.assert_shape(c, [None, self.c_dim])
|
279 |
-
y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
|
280 |
-
x = torch.cat([x, y], dim=1) if x is not None else y
|
281 |
-
|
282 |
-
# Main layers.
|
283 |
-
for idx in range(self.num_layers):
|
284 |
-
layer = getattr(self, f'fc{idx}')
|
285 |
-
x = layer(x)
|
286 |
-
|
287 |
-
# Update moving average of W.
|
288 |
-
if update_emas and self.w_avg_beta is not None:
|
289 |
-
with torch.autograd.profiler.record_function('update_w_avg'):
|
290 |
-
self.w_avg.copy_(x.detach().mean(
|
291 |
-
dim=0).lerp(self.w_avg, self.w_avg_beta))
|
292 |
-
|
293 |
-
# Broadcast.
|
294 |
-
if self.num_ws is not None:
|
295 |
-
with torch.autograd.profiler.record_function('broadcast'):
|
296 |
-
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
|
297 |
-
|
298 |
-
# Apply truncation.
|
299 |
-
if truncation_psi != 1:
|
300 |
-
with torch.autograd.profiler.record_function('truncate'):
|
301 |
-
assert self.w_avg_beta is not None
|
302 |
-
if self.num_ws is None or truncation_cutoff is None:
|
303 |
-
x = self.w_avg.lerp(x, truncation_psi)
|
304 |
-
else:
|
305 |
-
x[:, :truncation_cutoff] = self.w_avg.lerp(
|
306 |
-
x[:, :truncation_cutoff], truncation_psi)
|
307 |
-
return x
|
308 |
-
|
309 |
-
def extra_repr(self):
|
310 |
-
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
|
311 |
-
|
312 |
-
# ----------------------------------------------------------------------------
|
313 |
-
|
314 |
-
|
315 |
-
@persistence.persistent_class
|
316 |
-
class SynthesisLayer(torch.nn.Module):
|
317 |
-
def __init__(self,
|
318 |
-
in_channels, # Number of input channels.
|
319 |
-
out_channels, # Number of output channels.
|
320 |
-
# Intermediate latent (W) dimensionality.
|
321 |
-
w_dim,
|
322 |
-
resolution, # Resolution of this layer.
|
323 |
-
kernel_size=3, # Convolution kernel size.
|
324 |
-
up=1, # Integer upsampling factor.
|
325 |
-
use_noise=True, # Enable noise input?
|
326 |
-
# Activation function: 'relu', 'lrelu', etc.
|
327 |
-
activation='lrelu',
|
328 |
-
# Low-pass filter to apply when resampling activations.
|
329 |
-
resample_filter=[1, 3, 3, 1],
|
330 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
331 |
-
conv_clamp=None,
|
332 |
-
channels_last=False, # Use channels_last format for the weights?
|
333 |
-
):
|
334 |
-
super().__init__()
|
335 |
-
self.in_channels = in_channels
|
336 |
-
self.out_channels = out_channels
|
337 |
-
self.w_dim = w_dim
|
338 |
-
self.resolution = resolution
|
339 |
-
self.up = up
|
340 |
-
self.use_noise = use_noise
|
341 |
-
self.activation = activation
|
342 |
-
self.conv_clamp = conv_clamp
|
343 |
-
self.register_buffer(
|
344 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
345 |
-
self.padding = kernel_size // 2
|
346 |
-
self.act_gain = bias_act.activation_funcs[activation].def_gain
|
347 |
-
|
348 |
-
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
|
349 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
350 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
351 |
-
[out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
|
352 |
-
if use_noise:
|
353 |
-
self.register_buffer(
|
354 |
-
'noise_const', torch.randn([resolution, resolution]))
|
355 |
-
self.noise_strength = torch.nn.Parameter(torch.zeros([]))
|
356 |
-
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
|
357 |
-
|
358 |
-
def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
|
359 |
-
assert noise_mode in ['random', 'const', 'none']
|
360 |
-
in_resolution = self.resolution // self.up
|
361 |
-
misc.assert_shape(x, [None, self.in_channels,
|
362 |
-
in_resolution, in_resolution])
|
363 |
-
styles = self.affine(w)
|
364 |
-
|
365 |
-
noise = None
|
366 |
-
if self.use_noise and noise_mode == 'random':
|
367 |
-
noise = torch.randn([x.shape[0], 1, self.resolution,
|
368 |
-
self.resolution], device=x.device) * self.noise_strength
|
369 |
-
if self.use_noise and noise_mode == 'const':
|
370 |
-
noise = self.noise_const * self.noise_strength
|
371 |
-
|
372 |
-
flip_weight = (self.up == 1) # slightly faster
|
373 |
-
x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
|
374 |
-
padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
|
375 |
-
|
376 |
-
act_gain = self.act_gain * gain
|
377 |
-
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
|
378 |
-
x = bias_act.bias_act(x, self.bias.to(
|
379 |
-
x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
|
380 |
-
return x
|
381 |
-
|
382 |
-
def extra_repr(self):
|
383 |
-
return ' '.join([
|
384 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
|
385 |
-
f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
|
386 |
-
|
387 |
-
# ----------------------------------------------------------------------------
|
388 |
-
|
389 |
-
|
390 |
-
@persistence.persistent_class
|
391 |
-
class ToRGBLayer(torch.nn.Module):
|
392 |
-
def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
|
393 |
-
super().__init__()
|
394 |
-
self.in_channels = in_channels
|
395 |
-
self.out_channels = out_channels
|
396 |
-
self.w_dim = w_dim
|
397 |
-
self.conv_clamp = conv_clamp
|
398 |
-
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
|
399 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
400 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
401 |
-
[out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
|
402 |
-
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
|
403 |
-
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
|
404 |
-
|
405 |
-
def forward(self, x, w, fused_modconv=True):
|
406 |
-
styles = self.affine(w) * self.weight_gain
|
407 |
-
x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
|
408 |
-
demodulate=False, fused_modconv=fused_modconv)
|
409 |
-
x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
|
410 |
-
return x
|
411 |
-
|
412 |
-
def extra_repr(self):
|
413 |
-
return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
|
414 |
-
|
415 |
-
# ----------------------------------------------------------------------------
|
416 |
-
|
417 |
-
|
418 |
-
@persistence.persistent_class
|
419 |
-
class SynthesisBlock(torch.nn.Module):
|
420 |
-
def __init__(self,
|
421 |
-
# Number of input channels, 0 = first block.
|
422 |
-
in_channels,
|
423 |
-
# Number of output channels.
|
424 |
-
out_channels,
|
425 |
-
# Intermediate latent (W) dimensionality.
|
426 |
-
w_dim,
|
427 |
-
# Resolution of this block.
|
428 |
-
resolution,
|
429 |
-
# Number of output color channels.
|
430 |
-
img_channels,
|
431 |
-
is_last, # Is this the last block?
|
432 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
433 |
-
architecture='skip',
|
434 |
-
# Low-pass filter to apply when resampling activations.
|
435 |
-
resample_filter=[1, 3, 3, 1],
|
436 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
437 |
-
conv_clamp=256,
|
438 |
-
use_fp16=False, # Use FP16 for this block?
|
439 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
440 |
-
# Default value of fused_modconv. 'inference_only' = True for inference, False for training.
|
441 |
-
fused_modconv_default=True,
|
442 |
-
# Arguments for SynthesisLayer.
|
443 |
-
**layer_kwargs,
|
444 |
-
):
|
445 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
446 |
-
super().__init__()
|
447 |
-
self.in_channels = in_channels
|
448 |
-
self.w_dim = w_dim
|
449 |
-
self.resolution = resolution
|
450 |
-
self.img_channels = img_channels
|
451 |
-
self.is_last = is_last
|
452 |
-
self.architecture = architecture
|
453 |
-
self.use_fp16 = use_fp16
|
454 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
455 |
-
self.fused_modconv_default = fused_modconv_default
|
456 |
-
self.register_buffer(
|
457 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
458 |
-
self.num_conv = 0
|
459 |
-
self.num_torgb = 0
|
460 |
-
|
461 |
-
if in_channels == 0:
|
462 |
-
self.const = torch.nn.Parameter(torch.randn(
|
463 |
-
[out_channels, resolution, resolution]))
|
464 |
-
|
465 |
-
if in_channels != 0:
|
466 |
-
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
|
467 |
-
resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
|
468 |
-
self.num_conv += 1
|
469 |
-
|
470 |
-
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
|
471 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
|
472 |
-
self.num_conv += 1
|
473 |
-
|
474 |
-
if is_last or architecture == 'skip':
|
475 |
-
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
|
476 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last)
|
477 |
-
self.num_torgb += 1
|
478 |
-
|
479 |
-
if in_channels != 0 and architecture == 'resnet':
|
480 |
-
self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
|
481 |
-
resample_filter=resample_filter, channels_last=self.channels_last)
|
482 |
-
|
483 |
-
def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
|
484 |
-
_ = update_emas # unused
|
485 |
-
misc.assert_shape(
|
486 |
-
ws, [None, self.num_conv + self.num_torgb, self.w_dim])
|
487 |
-
w_iter = iter(ws.unbind(dim=1))
|
488 |
-
if ws.device.type != 'cuda':
|
489 |
-
force_fp32 = True
|
490 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
491 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
492 |
-
if fused_modconv is None:
|
493 |
-
fused_modconv = self.fused_modconv_default
|
494 |
-
if fused_modconv == 'inference_only':
|
495 |
-
fused_modconv = (not self.training)
|
496 |
-
|
497 |
-
# Input.
|
498 |
-
if self.in_channels == 0:
|
499 |
-
x = self.const.to(dtype=dtype, memory_format=memory_format)
|
500 |
-
x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
|
501 |
-
else:
|
502 |
-
misc.assert_shape(x, [None, self.in_channels,
|
503 |
-
self.resolution // 2, self.resolution // 2])
|
504 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
505 |
-
|
506 |
-
# Main layers.
|
507 |
-
if self.in_channels == 0:
|
508 |
-
x = self.conv1(x, next(w_iter),
|
509 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
510 |
-
elif self.architecture == 'resnet':
|
511 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
512 |
-
x = self.conv0(x, next(w_iter),
|
513 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
514 |
-
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
|
515 |
-
gain=np.sqrt(0.5), **layer_kwargs)
|
516 |
-
x = y.add_(x)
|
517 |
-
else:
|
518 |
-
x = self.conv0(x, next(w_iter),
|
519 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
520 |
-
x = self.conv1(x, next(w_iter),
|
521 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
522 |
-
|
523 |
-
# ToRGB.
|
524 |
-
if img is not None:
|
525 |
-
misc.assert_shape(
|
526 |
-
img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
|
527 |
-
img = upfirdn2d.upsample2d(img, self.resample_filter)
|
528 |
-
if self.is_last or self.architecture == 'skip':
|
529 |
-
y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
|
530 |
-
y = y.to(dtype=torch.float32,
|
531 |
-
memory_format=torch.contiguous_format)
|
532 |
-
img = img.add_(y) if img is not None else y
|
533 |
-
|
534 |
-
assert x.dtype == dtype
|
535 |
-
assert img is None or img.dtype == torch.float32
|
536 |
-
return x, img
|
537 |
-
|
538 |
-
def extra_repr(self):
|
539 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
540 |
-
|
541 |
-
# ----------------------------------------------------------------------------
|
542 |
-
|
543 |
-
|
544 |
-
@persistence.persistent_class
|
545 |
-
class SynthesisNetwork(torch.nn.Module):
|
546 |
-
def __init__(self,
|
547 |
-
# Intermediate latent (W) dimensionality.
|
548 |
-
w_dim,
|
549 |
-
img_resolution, # Output image resolution.
|
550 |
-
img_channels, # Number of color channels.
|
551 |
-
# Overall multiplier for the number of channels.
|
552 |
-
channel_base=32768,
|
553 |
-
# Maximum number of channels in any layer.
|
554 |
-
channel_max=512,
|
555 |
-
# Use FP16 for the N highest resolutions.
|
556 |
-
num_fp16_res=4,
|
557 |
-
**block_kwargs, # Arguments for SynthesisBlock.
|
558 |
-
):
|
559 |
-
assert img_resolution >= 4 and img_resolution & (
|
560 |
-
img_resolution - 1) == 0
|
561 |
-
super().__init__()
|
562 |
-
self.w_dim = w_dim
|
563 |
-
self.img_resolution = img_resolution
|
564 |
-
self.img_resolution_log2 = int(np.log2(img_resolution))
|
565 |
-
self.img_channels = img_channels
|
566 |
-
self.num_fp16_res = num_fp16_res
|
567 |
-
self.block_resolutions = [
|
568 |
-
2 ** i for i in range(2, self.img_resolution_log2 + 1)]
|
569 |
-
channels_dict = {res: min(channel_base // res, channel_max)
|
570 |
-
for res in self.block_resolutions}
|
571 |
-
fp16_resolution = max(
|
572 |
-
2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
|
573 |
-
|
574 |
-
self.num_ws = 0
|
575 |
-
for res in self.block_resolutions:
|
576 |
-
in_channels = channels_dict[res // 2] if res > 4 else 0
|
577 |
-
out_channels = channels_dict[res]
|
578 |
-
use_fp16 = (res >= fp16_resolution)
|
579 |
-
is_last = (res == self.img_resolution)
|
580 |
-
block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
|
581 |
-
img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
|
582 |
-
self.num_ws += block.num_conv
|
583 |
-
if is_last:
|
584 |
-
self.num_ws += block.num_torgb
|
585 |
-
setattr(self, f'b{res}', block)
|
586 |
-
|
587 |
-
def forward(self, ws, return_feature=False, **block_kwargs):
|
588 |
-
block_ws = []
|
589 |
-
features = []
|
590 |
-
with torch.autograd.profiler.record_function('split_ws'):
|
591 |
-
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
|
-            ws = ws.to(torch.float32)
-            w_idx = 0
-            for res in self.block_resolutions:
-                block = getattr(self, f'b{res}')
-                block_ws.append(
-                    ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
-                w_idx += block.num_conv
-
-        x = img = None
-        for res, cur_ws in zip(self.block_resolutions, block_ws):
-            block = getattr(self, f'b{res}')
-            x, img = block(x, img, cur_ws, **block_kwargs)
-            features.append(x)
-        if return_feature:
-            return img, features
-        else:
-            return img
-
-    def extra_repr(self):
-        return ' '.join([
-            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
-            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
-            f'num_fp16_res={self.num_fp16_res:d}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class Generator(torch.nn.Module):
-    def __init__(self,
-                 z_dim,  # Input latent (Z) dimensionality.
-                 # Conditioning label (C) dimensionality.
-                 c_dim,
-                 # Intermediate latent (W) dimensionality.
-                 w_dim,
-                 img_resolution,  # Output resolution.
-                 img_channels,  # Number of output color channels.
-                 mapping_kwargs={},  # Arguments for MappingNetwork.
-                 synthesis_kwargs={},  # Arguments for SynthesisNetwork.
-                 resize=None,
-                 # **synthesis_kwargs, # Arguments for SynthesisNetwork.
-                 ):
-        super().__init__()
-        self.z_dim = z_dim
-        self.c_dim = c_dim
-        self.w_dim = w_dim
-        self.img_resolution = img_resolution
-        self.img_channels = img_channels
-        self.synthesis = SynthesisNetwork(
-            w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
-        self.num_ws = self.synthesis.num_ws
-        self.mapping = MappingNetwork(
-            z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
-        self.resize = resize
-
-    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
-        if input_is_w:
-            ws = z
-            if ws.dim() == 2:
-                ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
-        else:
-            ws = self.mapping(z, c, truncation_psi=truncation_psi,
-                              truncation_cutoff=truncation_cutoff, update_emas=update_emas)
-        img = self.synthesis(ws, update_emas=update_emas,
-                             return_feature=return_feature, **synthesis_kwargs)
-        if self.resize is not None:
-            img = imresize(img, [self.resize, self.resize])
-        return img
-
-
-def imresize(image, size):
-    dim = image.dim()
-    if dim == 3:
-        image = image.unsqueeze(1)
-    b, _, h, w = image.shape
-    if size[0] > h:
-        image = F.interpolate(image, size, mode='bilinear')
-    elif size[0] < h:
-        image = F.interpolate(image, size, mode='area')
-    if dim == 3:
-        image = image.squeeze(1)
-    return image
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class DiscriminatorBlock(torch.nn.Module):
-    def __init__(self,
-                 # Number of input channels, 0 = first block.
-                 in_channels,
-                 # Number of intermediate channels.
-                 tmp_channels,
-                 # Number of output channels.
-                 out_channels,
-                 # Resolution of this block.
-                 resolution,
-                 # Number of input color channels.
-                 img_channels,
-                 # Index of the first layer.
-                 first_layer_idx,
-                 # Architecture: 'orig', 'skip', 'resnet'.
-                 architecture='resnet',
-                 # Activation function: 'relu', 'lrelu', etc.
-                 activation='lrelu',
-                 # Low-pass filter to apply when resampling activations.
-                 resample_filter=[1, 3, 3, 1],
-                 # Clamp the output of convolution layers to +-X, None = disable clamping.
-                 conv_clamp=None,
-                 use_fp16=False,  # Use FP16 for this block?
-                 fp16_channels_last=False,  # Use channels-last memory format with FP16?
-                 # Freeze-D: Number of layers to freeze.
-                 freeze_layers=0,
-                 ):
-        assert in_channels in [0, tmp_channels]
-        assert architecture in ['orig', 'skip', 'resnet']
-        super().__init__()
-        self.in_channels = in_channels
-        self.resolution = resolution
-        self.img_channels = img_channels
-        self.first_layer_idx = first_layer_idx
-        self.architecture = architecture
-        self.use_fp16 = use_fp16
-        self.channels_last = (use_fp16 and fp16_channels_last)
-        self.register_buffer(
-            'resample_filter', upfirdn2d.setup_filter(resample_filter))
-
-        self.num_layers = 0
-
-        def trainable_gen():
-            while True:
-                layer_idx = self.first_layer_idx + self.num_layers
-                trainable = (layer_idx >= freeze_layers)
-                self.num_layers += 1
-                yield trainable
-        trainable_iter = trainable_gen()
-
-        if in_channels == 0 or architecture == 'skip':
-            self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
-                                       trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
-                                 trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
-                                 trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-        if architecture == 'resnet':
-            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
-                                    trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
-
-    def forward(self, x, img, force_fp32=False):
-        if (x if x is not None else img).device.type != 'cuda':
-            force_fp32 = True
-        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
-        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
-
-        # Input.
-        if x is not None:
-            misc.assert_shape(x, [None, self.in_channels,
-                              self.resolution, self.resolution])
-            x = x.to(dtype=dtype, memory_format=memory_format)
-
-        # FromRGB.
-        if self.in_channels == 0 or self.architecture == 'skip':
-            misc.assert_shape(
-                img, [None, self.img_channels, self.resolution, self.resolution])
-            img = img.to(dtype=dtype, memory_format=memory_format)
-            y = self.fromrgb(img)
-            x = x + y if x is not None else y
-            img = upfirdn2d.downsample2d(
-                img, self.resample_filter) if self.architecture == 'skip' else None
-
-        # Main layers.
-        if self.architecture == 'resnet':
-            y = self.skip(x, gain=np.sqrt(0.5))
-            x = self.conv0(x)
-            x = self.conv1(x, gain=np.sqrt(0.5))
-            x = y.add_(x)
-        else:
-            x = self.conv0(x)
-            x = self.conv1(x)
-
-        assert x.dtype == dtype
-        return x, img
-
-    def extra_repr(self):
-        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class MinibatchStdLayer(torch.nn.Module):
-    def __init__(self, group_size, num_channels=1):
-        super().__init__()
-        self.group_size = group_size
-        self.num_channels = num_channels
-
-    def forward(self, x):
-        N, C, H, W = x.shape
-        with misc.suppress_tracer_warnings():  # as_tensor results are registered as constants
-            G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
-                N)) if self.group_size is not None else N
-        F = self.num_channels
-        c = C // F
-
-        # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
-        y = x.reshape(G, -1, F, c, H, W)
-        # [GnFcHW] Subtract mean over group.
-        y = y - y.mean(dim=0)
-        # [nFcHW] Calc variance over group.
-        y = y.square().mean(dim=0)
-        y = (y + 1e-8).sqrt()  # [nFcHW] Calc stddev over group.
-        # [nF] Take average over channels and pixels.
-        y = y.mean(dim=[2, 3, 4])
-        y = y.reshape(-1, F, 1, 1)  # [nF11] Add missing dimensions.
-        # [NFHW] Replicate over group and pixels.
-        y = y.repeat(G, 1, H, W)
-        # [NCHW] Append to input as new channels.
-        x = torch.cat([x, y], dim=1)
-        return x
-
-    def extra_repr(self):
-        return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class DiscriminatorEpilogue(torch.nn.Module):
-    def __init__(self,
-                 in_channels,  # Number of input channels.
-                 # Dimensionality of mapped conditioning label, 0 = no label.
-                 cmap_dim,
-                 resolution,  # Resolution of this block.
-                 # Number of input color channels.
-                 img_channels,
-                 # Architecture: 'orig', 'skip', 'resnet'.
-                 architecture='resnet',
-                 # Group size for the minibatch standard deviation layer, None = entire minibatch.
-                 mbstd_group_size=4,
-                 # Number of features for the minibatch standard deviation layer, 0 = disable.
-                 mbstd_num_channels=1,
-                 # Activation function: 'relu', 'lrelu', etc.
-                 activation='lrelu',
-                 # Clamp the output of convolution layers to +-X, None = disable clamping.
-                 conv_clamp=None,
-                 ):
-        assert architecture in ['orig', 'skip', 'resnet']
-        super().__init__()
-        self.in_channels = in_channels
-        self.cmap_dim = cmap_dim
-        self.resolution = resolution
-        self.img_channels = img_channels
-        self.architecture = architecture
-
-        if architecture == 'skip':
-            self.fromrgb = Conv2dLayer(
-                img_channels, in_channels, kernel_size=1, activation=activation)
-        self.mbstd = MinibatchStdLayer(
-            group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
-        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
-                                kernel_size=3, activation=activation, conv_clamp=conv_clamp)
-        self.fc = FullyConnectedLayer(
-            in_channels * (resolution ** 2), in_channels, activation=activation)
-        self.out = FullyConnectedLayer(
-            in_channels, 1 if cmap_dim == 0 else cmap_dim)
-
-    def forward(self, x, img, cmap, force_fp32=False):
-        misc.assert_shape(x, [None, self.in_channels,
-                          self.resolution, self.resolution])  # [NCHW]
-        _ = force_fp32  # unused
-        dtype = torch.float32
-        memory_format = torch.contiguous_format
-
-        # FromRGB.
-        x = x.to(dtype=dtype, memory_format=memory_format)
-        if self.architecture == 'skip':
-            misc.assert_shape(
-                img, [None, self.img_channels, self.resolution, self.resolution])
-            img = img.to(dtype=dtype, memory_format=memory_format)
-            x = x + self.fromrgb(img)
-
-        # Main layers.
-        if self.mbstd is not None:
-            x = self.mbstd(x)
-        x = self.conv(x)
-        x = self.fc(x.flatten(1))
-        x = self.out(x)
-
-        # Conditioning.
-        if self.cmap_dim > 0:
-            misc.assert_shape(cmap, [None, self.cmap_dim])
-            x = (x * cmap).sum(dim=1, keepdim=True) * \
-                (1 / np.sqrt(self.cmap_dim))
-
-        assert x.dtype == dtype
-        return x
-
-    def extra_repr(self):
-        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class Discriminator(torch.nn.Module):
-    def __init__(self,
-                 # Conditioning label (C) dimensionality.
-                 c_dim,
-                 img_resolution,  # Input resolution.
-                 # Number of input color channels.
-                 img_channels,
-                 # Architecture: 'orig', 'skip', 'resnet'.
-                 architecture='resnet',
-                 # Overall multiplier for the number of channels.
-                 channel_base=32768,
-                 # Maximum number of channels in any layer.
-                 channel_max=512,
-                 # Use FP16 for the N highest resolutions.
-                 num_fp16_res=4,
-                 # Clamp the output of convolution layers to +-X, None = disable clamping.
-                 conv_clamp=256,
-                 # Dimensionality of mapped conditioning label, None = default.
-                 cmap_dim=None,
-                 block_kwargs={},  # Arguments for DiscriminatorBlock.
-                 mapping_kwargs={},  # Arguments for MappingNetwork.
-                 # Arguments for DiscriminatorEpilogue.
-                 epilogue_kwargs={},
-                 ):
-        super().__init__()
-        self.c_dim = c_dim
-        self.img_resolution = img_resolution
-        self.img_resolution_log2 = int(np.log2(img_resolution))
-        self.img_channels = img_channels
-        self.block_resolutions = [
-            2 ** i for i in range(self.img_resolution_log2, 2, -1)]
-        channels_dict = {res: min(channel_base // res, channel_max)
-                         for res in self.block_resolutions + [4]}
-        fp16_resolution = max(
-            2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
-
-        if cmap_dim is None:
-            cmap_dim = channels_dict[4]
-        if c_dim == 0:
-            cmap_dim = 0
-
-        common_kwargs = dict(img_channels=img_channels,
-                             architecture=architecture, conv_clamp=conv_clamp)
-        cur_layer_idx = 0
-        for res in self.block_resolutions:
-            in_channels = channels_dict[res] if res < img_resolution else 0
-            tmp_channels = channels_dict[res]
-            out_channels = channels_dict[res // 2]
-            use_fp16 = (res >= fp16_resolution)
-            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
-                                       first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
-            setattr(self, f'b{res}', block)
-            cur_layer_idx += block.num_layers
-        if c_dim > 0:
-            self.mapping = MappingNetwork(
-                z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
-        self.b4 = DiscriminatorEpilogue(
-            channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
-
-    def forward(self, img, c, update_emas=False, **block_kwargs):
-        _ = update_emas  # unused
-        x = None
-        for res in self.block_resolutions:
-            block = getattr(self, f'b{res}')
-            x, img = block(x, img, **block_kwargs)
-
-        cmap = None
-        if self.c_dim > 0:
-            cmap = self.mapping(None, c)
-        x = self.b4(x, img, cmap)
-        return x
-
-    def extra_repr(self):
-        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
-
-# ----------------------------------------------------------------------------
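For orientation, the deleted Generator above wraps MappingNetwork and SynthesisNetwork and optionally resizes its output, while the Discriminator mirrors the resolution schedule with DiscriminatorBlock stages and a DiscriminatorEpilogue. A minimal usage sketch follows; the resolution, channel count, batch size, and truncation value are illustrative assumptions, not values fixed by this file:

import torch

# Hypothetical configuration; any power-of-two resolution >= 8 works.
G = Generator(z_dim=512, c_dim=0, w_dim=512,
              img_resolution=1024, img_channels=3)
D = Discriminator(c_dim=0, img_resolution=1024, img_channels=3)

z = torch.randn([4, G.z_dim])        # latent codes
c = torch.zeros([4, G.c_dim])        # no conditioning labels (c_dim=0)
img = G(z, c, truncation_psi=0.7)    # [4, 3, 1024, 1024]
logits = D(img, c)                   # [4, 1] realism scores

Passing return_feature=True to G additionally returns the per-resolution synthesis activations, which is what the DragGAN-style editing code relies on.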
spaces/Amrrs/DragGan-Inversion/training/augment.py
DELETED
@@ -1,562 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.  All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto.  Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Augmentation pipeline from the paper
-"Training Generative Adversarial Networks with Limited Data".
-Matches the original implementation by Karras et al. at
-https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py"""
-
-import numpy as np
-import scipy.signal
-import torch
-from torch_utils import persistence
-from torch_utils import misc
-from torch_utils.ops import upfirdn2d
-from torch_utils.ops import grid_sample_gradfix
-from torch_utils.ops import conv2d_gradfix
-
-# ----------------------------------------------------------------------------
-# Coefficients of various wavelet decomposition low-pass filters.
-
-wavelets = {
-    'haar': [0.7071067811865476, 0.7071067811865476],
-    'db1': [0.7071067811865476, 0.7071067811865476],
-    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
-    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
-    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
-    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
-    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
-    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
-    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
-    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
-    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
-    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
-    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
-    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
-    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
-    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
-}
-
-# ----------------------------------------------------------------------------
-# Helpers for constructing transformation matrices.
-
-
-def matrix(*rows, device=None):
-    assert all(len(row) == len(rows[0]) for row in rows)
-    elems = [x for row in rows for x in row]
-    ref = [x for x in elems if isinstance(x, torch.Tensor)]
-    if len(ref) == 0:
-        return misc.constant(np.asarray(rows), device=device)
-    assert device is None or device == ref[0].device
-    elems = [x if isinstance(x, torch.Tensor) else misc.constant(
-        x, shape=ref[0].shape, device=ref[0].device) for x in elems]
-    return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))
-
-
-def translate2d(tx, ty, **kwargs):
-    return matrix(
-        [1, 0, tx],
-        [0, 1, ty],
-        [0, 0, 1],
-        **kwargs)
-
-
-def translate3d(tx, ty, tz, **kwargs):
-    return matrix(
-        [1, 0, 0, tx],
-        [0, 1, 0, ty],
-        [0, 0, 1, tz],
-        [0, 0, 0, 1],
-        **kwargs)
-
-
-def scale2d(sx, sy, **kwargs):
-    return matrix(
-        [sx, 0, 0],
-        [0, sy, 0],
-        [0, 0, 1],
-        **kwargs)
-
-
-def scale3d(sx, sy, sz, **kwargs):
-    return matrix(
-        [sx, 0, 0, 0],
-        [0, sy, 0, 0],
-        [0, 0, sz, 0],
-        [0, 0, 0, 1],
-        **kwargs)
-
-
-def rotate2d(theta, **kwargs):
-    return matrix(
-        [torch.cos(theta), torch.sin(-theta), 0],
-        [torch.sin(theta), torch.cos(theta), 0],
-        [0, 0, 1],
-        **kwargs)
-
-
-def rotate3d(v, theta, **kwargs):
-    vx = v[..., 0]
-    vy = v[..., 1]
-    vz = v[..., 2]
-    s = torch.sin(theta)
-    c = torch.cos(theta)
-    cc = 1 - c
-    return matrix(
-        [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
-        [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
-        [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
-        [0, 0, 0, 1],
-        **kwargs)
-
-
-def translate2d_inv(tx, ty, **kwargs):
-    return translate2d(-tx, -ty, **kwargs)
-
-
-def scale2d_inv(sx, sy, **kwargs):
-    return scale2d(1 / sx, 1 / sy, **kwargs)
-
-
-def rotate2d_inv(theta, **kwargs):
-    return rotate2d(-theta, **kwargs)
-
-# ----------------------------------------------------------------------------
-# Versatile image augmentation pipeline from the paper
-# "Training Generative Adversarial Networks with Limited Data".
-#
-# All augmentations are disabled by default; individual augmentations can
-# be enabled by setting their probability multipliers to 1.
-
-
-@persistence.persistent_class
-class AugmentPipe(torch.nn.Module):
-    def __init__(self,
-                 xflip=0, rotate90=0, xint=0, xint_max=0.125,
-                 scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
-                 brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
-                 imgfilter=0, imgfilter_bands=[1, 1, 1, 1], imgfilter_std=1,
-                 noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
-                 ):
-        super().__init__()
-        # Overall multiplier for augmentation probability.
-        self.register_buffer('p', torch.ones([]))
-
-        # Pixel blitting.
-        # Probability multiplier for x-flip.
-        self.xflip = float(xflip)
-        # Probability multiplier for 90 degree rotations.
-        self.rotate90 = float(rotate90)
-        # Probability multiplier for integer translation.
-        self.xint = float(xint)
-        # Range of integer translation, relative to image dimensions.
-        self.xint_max = float(xint_max)
-
-        # General geometric transformations.
-        # Probability multiplier for isotropic scaling.
-        self.scale = float(scale)
-        # Probability multiplier for arbitrary rotation.
-        self.rotate = float(rotate)
-        # Probability multiplier for anisotropic scaling.
-        self.aniso = float(aniso)
-        # Probability multiplier for fractional translation.
-        self.xfrac = float(xfrac)
-        # Log2 standard deviation of isotropic scaling.
-        self.scale_std = float(scale_std)
-        # Range of arbitrary rotation, 1 = full circle.
-        self.rotate_max = float(rotate_max)
-        # Log2 standard deviation of anisotropic scaling.
-        self.aniso_std = float(aniso_std)
-        # Standard deviation of frational translation, relative to image dimensions.
-        self.xfrac_std = float(xfrac_std)
-
-        # Color transformations.
-        # Probability multiplier for brightness.
-        self.brightness = float(brightness)
-        # Probability multiplier for contrast.
-        self.contrast = float(contrast)
-        # Probability multiplier for luma flip.
-        self.lumaflip = float(lumaflip)
-        # Probability multiplier for hue rotation.
-        self.hue = float(hue)
-        # Probability multiplier for saturation.
-        self.saturation = float(saturation)
-        # Standard deviation of brightness.
-        self.brightness_std = float(brightness_std)
-        # Log2 standard deviation of contrast.
-        self.contrast_std = float(contrast_std)
-        # Range of hue rotation, 1 = full circle.
-        self.hue_max = float(hue_max)
-        # Log2 standard deviation of saturation.
-        self.saturation_std = float(saturation_std)
-
-        # Image-space filtering.
-        # Probability multiplier for image-space filtering.
-        self.imgfilter = float(imgfilter)
-        # Probability multipliers for individual frequency bands.
-        self.imgfilter_bands = list(imgfilter_bands)
-        # Log2 standard deviation of image-space filter amplification.
-        self.imgfilter_std = float(imgfilter_std)
-
-        # Image-space corruptions.
-        # Probability multiplier for additive RGB noise.
-        self.noise = float(noise)
-        # Probability multiplier for cutout.
-        self.cutout = float(cutout)
-        # Standard deviation of additive RGB noise.
-        self.noise_std = float(noise_std)
-        # Size of the cutout rectangle, relative to image dimensions.
-        self.cutout_size = float(cutout_size)
-
-        # Setup orthogonal lowpass filter for geometric augmentations.
-        self.register_buffer(
-            'Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
-
-        # Construct filter bank for image-space filtering.
-        Hz_lo = np.asarray(wavelets['sym2'])  # H(z)
-        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size))  # H(-z)
-        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2  # H(z) * H(z^-1) / 2
-        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2  # H(-z) * H(-z^-1) / 2
-        Hz_fbank = np.eye(4, 1)  # Bandpass(H(z), b_i)
-        for i in range(1, Hz_fbank.shape[0]):
-            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(
-                Hz_fbank.shape[0], -1)[:, :-1]
-            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
-            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) //
-                     2: (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
-        self.register_buffer('Hz_fbank', torch.as_tensor(
-            Hz_fbank, dtype=torch.float32))
-
-    def forward(self, images, debug_percentile=None):
-        assert isinstance(images, torch.Tensor) and images.ndim == 4
-        batch_size, num_channels, height, width = images.shape
-        device = images.device
-        if debug_percentile is not None:
-            debug_percentile = torch.as_tensor(
-                debug_percentile, dtype=torch.float32, device=device)
-
-        # -------------------------------------
-        # Select parameters for pixel blitting.
-        # -------------------------------------
-
-        # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
-        I_3 = torch.eye(3, device=device)
-        G_inv = I_3
-
-        # Apply x-flip with probability (xflip * strength).
-        if self.xflip > 0:
-            i = torch.floor(torch.rand([batch_size], device=device) * 2)
-            i = torch.where(torch.rand(
-                [batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
-            if debug_percentile is not None:
-                i = torch.full_like(i, torch.floor(debug_percentile * 2))
-            G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)
-
-        # Apply 90 degree rotations with probability (rotate90 * strength).
-        if self.rotate90 > 0:
-            i = torch.floor(torch.rand([batch_size], device=device) * 4)
-            i = torch.where(torch.rand(
-                [batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
-            if debug_percentile is not None:
-                i = torch.full_like(i, torch.floor(debug_percentile * 4))
-            G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)
-
-        # Apply integer translation with probability (xint * strength).
-        if self.xint > 0:
-            t = (torch.rand([batch_size, 2], device=device)
-                 * 2 - 1) * self.xint_max
-            t = torch.where(torch.rand(
-                [batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
-            if debug_percentile is not None:
-                t = torch.full_like(
-                    t, (debug_percentile * 2 - 1) * self.xint_max)
-            G_inv = G_inv @ translate2d_inv(torch.round(
-                t[:, 0] * width), torch.round(t[:, 1] * height))
-
-        # --------------------------------------------------------
-        # Select parameters for general geometric transformations.
-        # --------------------------------------------------------
-
-        # Apply isotropic scaling with probability (scale * strength).
-        if self.scale > 0:
-            s = torch.exp2(torch.randn(
-                [batch_size], device=device) * self.scale_std)
-            s = torch.where(torch.rand(
-                [batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
-            if debug_percentile is not None:
-                s = torch.full_like(s, torch.exp2(torch.erfinv(
-                    debug_percentile * 2 - 1) * self.scale_std))
-            G_inv = G_inv @ scale2d_inv(s, s)
-
-        # Apply pre-rotation with probability p_rot.
-        # P(pre OR post) = p
-        p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1))
-        if self.rotate > 0:
-            theta = (torch.rand([batch_size], device=device)
-                     * 2 - 1) * np.pi * self.rotate_max
-            theta = torch.where(torch.rand(
-                [batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
-            if debug_percentile is not None:
-                theta = torch.full_like(
-                    theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
-            G_inv = G_inv @ rotate2d_inv(-theta)  # Before anisotropic scaling.
-
-        # Apply anisotropic scaling with probability (aniso * strength).
-        if self.aniso > 0:
-            s = torch.exp2(torch.randn(
-                [batch_size], device=device) * self.aniso_std)
-            s = torch.where(torch.rand(
-                [batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
-            if debug_percentile is not None:
-                s = torch.full_like(s, torch.exp2(torch.erfinv(
-                    debug_percentile * 2 - 1) * self.aniso_std))
-            G_inv = G_inv @ scale2d_inv(s, 1 / s)
-
-        # Apply post-rotation with probability p_rot.
-        if self.rotate > 0:
-            theta = (torch.rand([batch_size], device=device)
-                     * 2 - 1) * np.pi * self.rotate_max
-            theta = torch.where(torch.rand(
-                [batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
-            if debug_percentile is not None:
-                theta = torch.zeros_like(theta)
-            G_inv = G_inv @ rotate2d_inv(-theta)  # After anisotropic scaling.
-
-        # Apply fractional translation with probability (xfrac * strength).
-        if self.xfrac > 0:
-            t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
-            t = torch.where(torch.rand(
-                [batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
-            if debug_percentile is not None:
-                t = torch.full_like(t, torch.erfinv(
-                    debug_percentile * 2 - 1) * self.xfrac_std)
-            G_inv = G_inv @ translate2d_inv(t[:, 0] * width, t[:, 1] * height)
-
-        # ----------------------------------
-        # Execute geometric transformations.
-        # ----------------------------------
-
-        # Execute if the transform is not identity.
-        if G_inv is not I_3:
-
-            # Calculate padding.
-            cx = (width - 1) / 2
-            cy = (height - 1) / 2
-            cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1],
-                        [-cx, cy, 1], device=device)  # [idx, xyz]
-            cp = G_inv @ cp.t()  # [batch, xyz, idx]
-            Hz_pad = self.Hz_geom.shape[0] // 4
-            margin = cp[:, :2, :].permute(
-                1, 0, 2).flatten(1)  # [xy, batch * idx]
-            # [x0, y0, x1, y1]
-            margin = torch.cat([-margin, margin]).max(dim=1).values
-            margin = margin + \
-                misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy]
-                              * 2, device=device)
-            margin = margin.max(misc.constant([0, 0] * 2, device=device))
-            margin = margin.min(misc.constant(
-                [width-1, height-1] * 2, device=device))
-            mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)
-
-            # Pad image and adjust origin.
-            images = torch.nn.functional.pad(
-                input=images, pad=[mx0, mx1, my0, my1], mode='reflect')
-            G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv
-
-            # Upsample.
-            images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
-            G_inv = scale2d(
-                2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
-            G_inv = translate2d(-0.5, -0.5,
-                                device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)
-
-            # Execute transformation.
-            shape = [batch_size, num_channels,
-                     (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
-            G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(
-                2 / shape[3], 2 / shape[2], device=device)
-            grid = torch.nn.functional.affine_grid(
-                theta=G_inv[:, :2, :], size=shape, align_corners=False)
-            images = grid_sample_gradfix.grid_sample(images, grid)
-
-            # Downsample and crop.
-            images = upfirdn2d.downsample2d(
-                x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)
-
-        # --------------------------------------------
-        # Select parameters for color transformations.
-        # --------------------------------------------
-
-        # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
-        I_4 = torch.eye(4, device=device)
-        C = I_4
-
-        # Apply brightness with probability (brightness * strength).
-        if self.brightness > 0:
-            b = torch.randn([batch_size], device=device) * self.brightness_std
-            b = torch.where(torch.rand(
-                [batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
-            if debug_percentile is not None:
-                b = torch.full_like(b, torch.erfinv(
-                    debug_percentile * 2 - 1) * self.brightness_std)
-            C = translate3d(b, b, b) @ C
-
-        # Apply contrast with probability (contrast * strength).
-        if self.contrast > 0:
-            c = torch.exp2(torch.randn(
-                [batch_size], device=device) * self.contrast_std)
-            c = torch.where(torch.rand(
-                [batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
-            if debug_percentile is not None:
-                c = torch.full_like(c, torch.exp2(torch.erfinv(
-                    debug_percentile * 2 - 1) * self.contrast_std))
-            C = scale3d(c, c, c) @ C
-
-        # Apply luma flip with probability (lumaflip * strength).
-        # Luma axis.
-        v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device)
-        if self.lumaflip > 0:
-            i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
-            i = torch.where(torch.rand(
-                [batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
-            if debug_percentile is not None:
-                i = torch.full_like(i, torch.floor(debug_percentile * 2))
-            C = (I_4 - 2 * v.ger(v) * i) @ C  # Householder reflection.
-
-        # Apply hue rotation with probability (hue * strength).
-        if self.hue > 0 and num_channels > 1:
-            theta = (torch.rand([batch_size], device=device)
-                     * 2 - 1) * np.pi * self.hue_max
-            theta = torch.where(torch.rand(
-                [batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
-            if debug_percentile is not None:
-                theta = torch.full_like(
-                    theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
-            C = rotate3d(v, theta) @ C  # Rotate around v.
-
-        # Apply saturation with probability (saturation * strength).
-        if self.saturation > 0 and num_channels > 1:
-            s = torch.exp2(torch.randn(
-                [batch_size, 1, 1], device=device) * self.saturation_std)
-            s = torch.where(torch.rand(
-                [batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
-            if debug_percentile is not None:
-                s = torch.full_like(s, torch.exp2(torch.erfinv(
-                    debug_percentile * 2 - 1) * self.saturation_std))
-            C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C
-
-        # ------------------------------
-        # Execute color transformations.
-        # ------------------------------
-
-        # Execute if the transform is not identity.
-        if C is not I_4:
-            images = images.reshape([batch_size, num_channels, height * width])
-            if num_channels == 3:
-                images = C[:, :3, :3] @ images + C[:, :3, 3:]
-            elif num_channels == 1:
-                C = C[:, :3, :].mean(dim=1, keepdims=True)
-                images = images * \
-                    C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
-            else:
-                raise ValueError(
-                    'Image must be RGB (3 channels) or L (1 channel)')
-            images = images.reshape([batch_size, num_channels, height, width])
-
-        # ----------------------
-        # Image-space filtering.
-        # ----------------------
-
-        if self.imgfilter > 0:
-            num_bands = self.Hz_fbank.shape[0]
-            assert len(self.imgfilter_bands) == num_bands
-            # Expected power spectrum (1/f).
-            expected_power = misc.constant(
-                np.array([10, 1, 1, 1]) / 13, device=device)
-
-            # Apply amplification for each band with probability (imgfilter * strength * band_strength).
-            # Global gain vector (identity).
-            g = torch.ones([batch_size, num_bands], device=device)
-            for i, band_strength in enumerate(self.imgfilter_bands):
-                t_i = torch.exp2(torch.randn(
-                    [batch_size], device=device) * self.imgfilter_std)
-                t_i = torch.where(torch.rand(
-                    [batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
-                if debug_percentile is not None:
-                    t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(
-                        debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
-                # Temporary gain vector.
-                t = torch.ones([batch_size, num_bands], device=device)
-                # Replace i'th element.
-                t[:, i] = t_i
-                # Normalize power.
-                t = t / (expected_power * t.square()
-                         ).sum(dim=-1, keepdims=True).sqrt()
-                # Accumulate into global gain.
-                g = g * t
-
-            # Construct combined amplification filter.
-            # [batch, tap]
-            Hz_prime = g @ self.Hz_fbank
-            Hz_prime = Hz_prime.unsqueeze(1).repeat(
-                [1, num_channels, 1])  # [batch, channels, tap]
-            # [batch * channels, 1, tap]
-            Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1])
-
-            # Apply filter.
-            p = self.Hz_fbank.shape[1] // 2
-            images = images.reshape(
-                [1, batch_size * num_channels, height, width])
-            images = torch.nn.functional.pad(
-                input=images, pad=[p, p, p, p], mode='reflect')
-            images = conv2d_gradfix.conv2d(
-                input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
-            images = conv2d_gradfix.conv2d(
-                input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
-            images = images.reshape([batch_size, num_channels, height, width])
-
-        # ------------------------
-        # Image-space corruptions.
-        # ------------------------
-
-        # Apply additive RGB noise with probability (noise * strength).
-        if self.noise > 0:
-            sigma = torch.randn([batch_size, 1, 1, 1],
-                                device=device).abs() * self.noise_std
-            sigma = torch.where(torch.rand(
-                [batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
-            if debug_percentile is not None:
-                sigma = torch.full_like(sigma, torch.erfinv(
-                    debug_percentile) * self.noise_std)
-            images = images + \
-                torch.randn([batch_size, num_channels, height,
-                             width], device=device) * sigma
-
-        # Apply cutout with probability (cutout * strength).
-        if self.cutout > 0:
-            size = torch.full([batch_size, 2, 1, 1, 1],
-                              self.cutout_size, device=device)
-            size = torch.where(torch.rand(
-                [batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
-            center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
-            if debug_percentile is not None:
-                size = torch.full_like(size, self.cutout_size)
-                center = torch.full_like(center, debug_percentile)
-            coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
-            coord_y = torch.arange(
-                height, device=device).reshape([1, 1, -1, 1])
-            mask_x = (((coord_x + 0.5) / width -
-                       center[:, 0]).abs() >= size[:, 0] / 2)
-            mask_y = (((coord_y + 0.5) / height -
-                       center[:, 1]).abs() >= size[:, 1] / 2)
-            mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
-            images = images * mask
-
-        return images
-
-# ----------------------------------------------------------------------------
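As the comments in the deleted file state, every augmentation in AugmentPipe is off by default and is enabled by setting its probability multiplier to 1, while the single registered buffer p scales all enabled augmentations at once (ADA adjusts p during training). A minimal usage sketch, assuming the torch_utils package from the same repository is importable; the chosen multipliers and batch shape are illustrative:

import torch

# Enable blit + geometric + color augmentations; the values are examples only.
augment_pipe = AugmentPipe(
    xflip=1, rotate90=1, xint=1,             # pixel blitting
    scale=1, rotate=1, aniso=1, xfrac=1,     # general geometric transforms
    brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1,  # color transforms
)
augment_pipe.p.copy_(torch.as_tensor(0.6))   # overall strength, normally tuned by ADA

images = torch.randn([8, 3, 256, 256])       # fake NCHW batch standing in for real data
augmented = augment_pipe(images)             # same shape, independently augmented per sample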
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/vq_diffusion.md
DELETED
@@ -1,20 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# VQDiffusionScheduler
-
-## Overview
-
-Original paper can be found [here](https://arxiv.org/abs/2111.14822)
-
-## VQDiffusionScheduler
-[[autodoc]] VQDiffusionScheduler
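For reference, the scheduler documented by this deleted page is exposed directly from the diffusers package. A brief, hedged loading sketch; the repository id is an assumption based on the public ITHQ VQ-Diffusion release and is not named in this file:

from diffusers import VQDiffusionScheduler

# Hypothetical example checkpoint; any repo with a compatible scheduler config works.
scheduler = VQDiffusionScheduler.from_pretrained(
    "microsoft/vq-diffusion-ithq", subfolder="scheduler")
print(scheduler.config.num_train_timesteps)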
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_dpm_multi.py
DELETED
@@ -1,273 +0,0 @@
-import tempfile
-
-import torch
-
-from diffusers import (
-    DEISMultistepScheduler,
-    DPMSolverMultistepScheduler,
-    DPMSolverSinglestepScheduler,
-    UniPCMultistepScheduler,
-)
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class DPMSolverMultistepSchedulerTest(SchedulerCommonTest):
-    scheduler_classes = (DPMSolverMultistepScheduler,)
-    forward_default_kwargs = (("num_inference_steps", 25),)
-
-    def get_scheduler_config(self, **kwargs):
-        config = {
-            "num_train_timesteps": 1000,
-            "beta_start": 0.0001,
-            "beta_end": 0.02,
-            "beta_schedule": "linear",
-            "solver_order": 2,
-            "prediction_type": "epsilon",
-            "thresholding": False,
-            "sample_max_value": 1.0,
-            "algorithm_type": "dpmsolver++",
-            "solver_type": "midpoint",
-            "lower_order_final": False,
-            "lambda_min_clipped": -float("inf"),
-            "variance_type": None,
-        }
-
-        config.update(**kwargs)
-        return config
-
-    def check_over_configs(self, time_step=0, **config):
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-        sample = self.dummy_sample
-        residual = 0.1 * sample
-        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config(**config)
-            scheduler = scheduler_class(**scheduler_config)
-            scheduler.set_timesteps(num_inference_steps)
-            # copy over dummy past residuals
-            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                scheduler.save_config(tmpdirname)
-                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                new_scheduler.set_timesteps(num_inference_steps)
-                # copy over dummy past residuals
-                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
-
-            output, new_output = sample, sample
-            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
-                output = scheduler.step(residual, t, output, **kwargs).prev_sample
-                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
-
-            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    def test_from_save_pretrained(self):
-        pass
-
-    def check_over_forward(self, time_step=0, **forward_kwargs):
-        kwargs = dict(self.forward_default_kwargs)
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-        sample = self.dummy_sample
-        residual = 0.1 * sample
-        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-            scheduler.set_timesteps(num_inference_steps)
-
-            # copy over dummy past residuals (must be after setting timesteps)
-            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                scheduler.save_config(tmpdirname)
-                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                # copy over dummy past residuals
-                new_scheduler.set_timesteps(num_inference_steps)
-
-                # copy over dummy past residual (must be after setting timesteps)
-                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
-
-            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
-            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
-
-            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-    def full_loop(self, scheduler=None, **config):
-        if scheduler is None:
-            scheduler_class = self.scheduler_classes[0]
-            scheduler_config = self.get_scheduler_config(**config)
-            scheduler = scheduler_class(**scheduler_config)
-
-        num_inference_steps = 10
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter
-        scheduler.set_timesteps(num_inference_steps)
-
-        for i, t in enumerate(scheduler.timesteps):
-            residual = model(sample, t)
-            sample = scheduler.step(residual, t, sample).prev_sample
-
-        return sample
-
-    def test_step_shape(self):
-        kwargs = dict(self.forward_default_kwargs)
-
-        num_inference_steps = kwargs.pop("num_inference_steps", None)
-
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config()
-            scheduler = scheduler_class(**scheduler_config)
-
-            sample = self.dummy_sample
-            residual = 0.1 * sample
-
-            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
-                scheduler.set_timesteps(num_inference_steps)
-            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
-                kwargs["num_inference_steps"] = num_inference_steps
-
-            # copy over dummy past residuals (must be done after set_timesteps)
-            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
-            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
-
-            time_step_0 = scheduler.timesteps[5]
-            time_step_1 = scheduler.timesteps[6]
-
-            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
-            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
-
-            self.assertEqual(output_0.shape, sample.shape)
-            self.assertEqual(output_0.shape, output_1.shape)
-
-    def test_timesteps(self):
-        for timesteps in [25, 50, 100, 999, 1000]:
-            self.check_over_configs(num_train_timesteps=timesteps)
-
-    def test_thresholding(self):
-        self.check_over_configs(thresholding=False)
-        for order in [1, 2, 3]:
-            for solver_type in ["midpoint", "heun"]:
-                for threshold in [0.5, 1.0, 2.0]:
-                    for prediction_type in ["epsilon", "sample"]:
-                        self.check_over_configs(
-                            thresholding=True,
-                            prediction_type=prediction_type,
-                            sample_max_value=threshold,
-                            algorithm_type="dpmsolver++",
-                            solver_order=order,
-                            solver_type=solver_type,
-                        )
-
-    def test_prediction_type(self):
-        for prediction_type in ["epsilon", "v_prediction"]:
-            self.check_over_configs(prediction_type=prediction_type)
-
-    def test_solver_order_and_type(self):
-        for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]:
-            for solver_type in ["midpoint", "heun"]:
-                for order in [1, 2, 3]:
-                    for prediction_type in ["epsilon", "sample"]:
-                        if algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]:
-                            if order == 3:
-                                continue
-                        else:
-                            self.check_over_configs(
-                                solver_order=order,
-                                solver_type=solver_type,
-                                prediction_type=prediction_type,
-                                algorithm_type=algorithm_type,
-                            )
-                        sample = self.full_loop(
-                            solver_order=order,
-                            solver_type=solver_type,
-                            prediction_type=prediction_type,
-                            algorithm_type=algorithm_type,
-                        )
-                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
-
-    def test_lower_order_final(self):
-        self.check_over_configs(lower_order_final=True)
-        self.check_over_configs(lower_order_final=False)
-
-    def test_lambda_min_clipped(self):
-        self.check_over_configs(lambda_min_clipped=-float("inf"))
-        self.check_over_configs(lambda_min_clipped=-5.1)
-
-    def test_variance_type(self):
-        self.check_over_configs(variance_type=None)
-        self.check_over_configs(variance_type="learned_range")
-
-    def test_inference_steps(self):
-        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
-            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
-
-    def test_full_loop_no_noise(self):
-        sample = self.full_loop()
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.3301) < 1e-3
-
-    def test_full_loop_no_noise_thres(self):
-        sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 1.1364) < 1e-3
-
-    def test_full_loop_with_v_prediction(self):
-        sample = self.full_loop(prediction_type="v_prediction")
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2251) < 1e-3
-
-    def test_full_loop_with_karras_and_v_prediction(self):
-        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.2096) < 1e-3
-
-    def test_switch(self):
-        # make sure that iterating over schedulers with same config names gives same results
-        # for defaults
-        scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config())
-        sample = self.full_loop(scheduler=scheduler)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.3301) < 1e-3
-
-        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
-        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
-        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
-        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
-
-        sample = self.full_loop(scheduler=scheduler)
-        result_mean = torch.mean(torch.abs(sample))
-
-        assert abs(result_mean.item() - 0.3301) < 1e-3
-
-    def test_fp16_support(self):
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
-        scheduler = scheduler_class(**scheduler_config)
-
-        num_inference_steps = 10
-        model = self.dummy_model()
-        sample = self.dummy_sample_deter.half()
-        scheduler.set_timesteps(num_inference_steps)
-
-        for i, t in enumerate(scheduler.timesteps):
-            residual = model(sample, t)
-            sample = scheduler.step(residual, t, sample).prev_sample
-
-        assert sample.dtype == torch.float16
-
-    def test_unique_timesteps(self, **config):
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config(**config)
-            scheduler = scheduler_class(**scheduler_config)
-
-            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
-            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
spaces/AnimalEquality/chatbot/lv_recipe_chatbot/edamam_api.py
DELETED
@@ -1,8 +0,0 @@
-# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/04_edamam_api.ipynb.
-
-# %% auto 0
-__all__ = ['foo']
-
-# %% ../nbs/04_edamam_api.ipynb 3
-def foo():
-    pass
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/embeddings.py
DELETED
@@ -1,80 +0,0 @@
-import os
-
-import numpy as np
-from extensions.openai.errors import ServiceUnavailableError
-from extensions.openai.utils import debug_msg, float_list_to_base64
-from sentence_transformers import SentenceTransformer
-
-embeddings_params_initialized = False
-# using 'lazy loading' to avoid circular import
-# so this function will be executed only once
-def initialize_embedding_params():
-    global embeddings_params_initialized
-    if not embeddings_params_initialized:
-        global st_model, embeddings_model, embeddings_device
-        from extensions.openai.script import params
-        st_model = os.environ.get("OPENEDAI_EMBEDDING_MODEL", params.get('embedding_model', 'all-mpnet-base-v2'))
-        embeddings_model = None
-        # OPENEDAI_EMBEDDING_DEVICE: auto (best or cpu), cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone
-        embeddings_device = os.environ.get("OPENEDAI_EMBEDDING_DEVICE", params.get('embedding_device', 'cpu'))
-        if embeddings_device.lower() == 'auto':
-            embeddings_device = None
-        embeddings_params_initialized = True
-
-
-def load_embedding_model(model: str) -> SentenceTransformer:
-    initialize_embedding_params()
-    global embeddings_device, embeddings_model
-    try:
-        embeddings_model = 'loading...'  # flag
-        # see: https://www.sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer
-        emb_model = SentenceTransformer(model, device=embeddings_device)
-        # ... emb_model.device doesn't seem to work, always cpu anyways? but specify cpu anyways to free more VRAM
-        print(f"\nLoaded embedding model: {model} on {emb_model.device} [always seems to say 'cpu', even if 'cuda'], max sequence length: {emb_model.max_seq_length}")
-    except Exception as e:
-        embeddings_model = None
-        raise ServiceUnavailableError(f"Error: Failed to load embedding model: {model}", internal_message=repr(e))
-
-    return emb_model
-
-
-def get_embeddings_model() -> SentenceTransformer:
-    initialize_embedding_params()
-    global embeddings_model, st_model
-    if st_model and not embeddings_model:
-        embeddings_model = load_embedding_model(st_model)  # lazy load the model
-    return embeddings_model
-
-
-def get_embeddings_model_name() -> str:
-    initialize_embedding_params()
-    global st_model
-    return st_model
-
-
-def get_embeddings(input: list) -> np.ndarray:
-    return get_embeddings_model().encode(input, convert_to_numpy=True, normalize_embeddings=True, convert_to_tensor=False, device=embeddings_device)
-
-
-def embeddings(input: list, encoding_format: str) -> dict:
-
-    embeddings = get_embeddings(input)
-
-    if encoding_format == "base64":
-        data = [{"object": "embedding", "embedding": float_list_to_base64(emb), "index": n} for n, emb in enumerate(embeddings)]
-    else:
-        data = [{"object": "embedding", "embedding": emb.tolist(), "index": n} for n, emb in enumerate(embeddings)]
-
-    response = {
-        "object": "list",
-        "data": data,
-        "model": st_model,  # return the real model
-        "usage": {
-            "prompt_tokens": 0,
-            "total_tokens": 0,
-        }
-    }
-
-    debug_msg(f"Embeddings return size: {len(embeddings[0])}, number: {len(embeddings)}")
-
-    return response
spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/position.py
DELETED
@@ -1,46 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# https://github.com/facebookresearch/detr/blob/main/models/position_encoding.py
-
-import torch
-import torch.nn as nn
-import math
-
-
-class PositionEmbeddingSine(nn.Module):
-    """
-    This is a more standard version of the position embedding, very similar to the one
-    used by the Attention is all you need paper, generalized to work on images.
-    """
-
-    def __init__(self, num_pos_feats=64, temperature=10000, normalize=True, scale=None):
-        super().__init__()
-        self.num_pos_feats = num_pos_feats
-        self.temperature = temperature
-        self.normalize = normalize
-        if scale is not None and normalize is False:
-            raise ValueError("normalize should be True if scale is passed")
-        if scale is None:
-            scale = 2 * math.pi
-        self.scale = scale
-
-    def forward(self, x):
-        # x = tensor_list.tensors  # [B, C, H, W]
-        # mask = tensor_list.mask  # [B, H, W], input with padding, valid as 0
-        b, c, h, w = x.size()
-        mask = torch.ones((b, h, w), device=x.device)  # [B, H, W]
-        y_embed = mask.cumsum(1, dtype=torch.float32)
-        x_embed = mask.cumsum(2, dtype=torch.float32)
-        if self.normalize:
-            eps = 1e-6
-            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
-            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
-
-        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
-        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
-
-        pos_x = x_embed[:, :, :, None] / dim_t
-        pos_y = y_embed[:, :, :, None] / dim_t
-        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
-        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
-        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
-        return pos
spaces/Anthony7906/MengHuiMXD_GPT/modules/pdf_func.py
DELETED
@@ -1,180 +0,0 @@
-from types import SimpleNamespace
-import pdfplumber
-import logging
-from llama_index import Document
-
-def prepare_table_config(crop_page):
-    """Prepare the table-detection boundaries; `page` must be the original (root) page.
-
-    From https://github.com/jsvine/pdfplumber/issues/242
-    """
-    page = crop_page.root_page  # root/parent
-    cs = page.curves + page.edges
-    def curves_to_edges():
-        """See https://github.com/jsvine/pdfplumber/issues/127"""
-        edges = []
-        for c in cs:
-            edges += pdfplumber.utils.rect_to_edges(c)
-        return edges
-    edges = curves_to_edges()
-    return {
-        "vertical_strategy": "explicit",
-        "horizontal_strategy": "explicit",
-        "explicit_vertical_lines": edges,
-        "explicit_horizontal_lines": edges,
-        "intersection_y_tolerance": 10,
-    }
-
-def get_text_outside_table(crop_page):
-    ts = prepare_table_config(crop_page)
-    if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0:
-        return crop_page
-
-    ### Get the bounding boxes of the tables on the page.
-    bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)]
-    def not_within_bboxes(obj):
-        """Check if the object is in any of the table's bbox."""
-        def obj_in_bbox(_bbox):
-            """See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404"""
-            v_mid = (obj["top"] + obj["bottom"]) / 2
-            h_mid = (obj["x0"] + obj["x1"]) / 2
-            x0, top, x1, bottom = _bbox
-            return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom)
-        return not any(obj_in_bbox(__bbox) for __bbox in bboxes)
-
-    return crop_page.filter(not_within_bboxes)
-# Use LaTeX for formulas: wrap inline formulas in $ and display formulas in $$
-
-extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1, extra_attrs=["fontname", "size", "object_type"])
-# dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size'])
-
-def get_title_with_cropped_page(first_page):
-    title = []  # collect the title
-    x0,top,x1,bottom = first_page.bbox  # get the page bounding box
-
-    for word in extract_words(first_page):
-        word = SimpleNamespace(**word)
-
-        if word.size >= 14:
-            title.append(word.text)
-            title_bottom = word.bottom
-        elif word.text == "Abstract":  # locate the abstract on the page
-            top = word.top
-
-    user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0,title_bottom,x1,top)))]
-    # crop away the upper part; within_bbox: full_included; crop: partial_included
-    return title, user_info, first_page.within_bbox((x0,top,x1,bottom))
-
-def get_column_cropped_pages(pages, two_column=True):
-    new_pages = []
-    for page in pages:
-        if two_column:
-            left = page.within_bbox((0, 0, page.width/2, page.height),relative=True)
-            right = page.within_bbox((page.width/2, 0, page.width, page.height), relative=True)
-            new_pages.append(left)
-            new_pages.append(right)
-        else:
-            new_pages.append(page)
-
-    return new_pages
-
-def parse_pdf(filename, two_column = True):
-    level = logging.getLogger().level
-    if level == logging.getLevelName("DEBUG"):
-        logging.getLogger().setLevel("INFO")
-
-    with pdfplumber.open(filename) as pdf:
-        title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0])
-        new_pages = get_column_cropped_pages([first_page] + pdf.pages[1:], two_column)
-
-        chapters = []
-        # tuple (chapter_name, [pageid] (start,stop), chapter_text)
-        create_chapter = lambda page_start,name_top,name_bottom: SimpleNamespace(
-            name=[],
-            name_top=name_top,
-            name_bottom=name_bottom,
-            record_chapter_name = True,
-
-            page_start=page_start,
-            page_stop=None,
-
-            text=[],
-        )
-        cur_chapter = None
-
-        # iterate over the PDF document page by page
-        for idx, page in enumerate(new_pages):
-            page = get_text_outside_table(page)
-
-            # iterate over the page text line by line
-            for word in extract_words(page):
-                word = SimpleNamespace(**word)
-
-                # check whether the line is printed in a large font; if so, treat it as the start of a new chapter
-                if word.size >= 11:  # a chapter name appears
-                    if cur_chapter is None:
-                        cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
-                    elif not cur_chapter.record_chapter_name or (cur_chapter.name_bottom != cur_chapter.name_bottom and cur_chapter.name_top != cur_chapter.name_top):
-                        # stop appending to the chapter name
-                        cur_chapter.page_stop = page.page_number  # stop id
-                        chapters.append(cur_chapter)
-                        # reset the current chapter info
-                        cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
-
-                    # print(word.size, word.top, word.bottom, word.text)
-                    cur_chapter.name.append(word.text)
-                else:
-                    cur_chapter.record_chapter_name = False  # chapter name ends
-                    cur_chapter.text.append(word.text)
-        else:
-            # handle the last chapter
-            cur_chapter.page_stop = page.page_number  # stop id
-            chapters.append(cur_chapter)
-
-    for i in chapters:
-        logging.info(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}")
-        logging.debug(" ".join(i.text))
-
-    title = " ".join(title)
-    user_info = " ".join(user_info)
-    text = f"Article Title: {title}, Information:{user_info}\n"
-    for idx, chapter in enumerate(chapters):
-        chapter.name = " ".join(chapter.name)
-        text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n"
-
-    logging.getLogger().setLevel(level)
-    return Document(text=text, extra_info={"title": title})
-
-BASE_POINTS = """
-1. Who are the authors?
-2. What is the process of the proposed method?
-3. What is the performance of the proposed method? Please note down its performance metrics.
-4. What are the baseline models and their performances? Please note down these baseline methods.
-5. What dataset did this paper use?
-"""
-
-READING_PROMPT = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{}
-"""
-
-READING_PROMT_V2 = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{},
-
-And You need to generate a brief but informative title for this part.
-Your return format:
-- title: '...'
-- summary: '...'
-"""
-
-SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper."
-
-
-if __name__ == '__main__':
-    # Test code
-    z = parse_pdf("./build/test.pdf")
-    print(z["user_info"])
-    print(z["title"])
spaces/Apex-X/GODROOP/roop/core.py
DELETED
@@ -1,215 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-# single thread doubles cuda performance - needs to be set before torch import
-if any(arg.startswith('--execution-provider') for arg in sys.argv):
-    os.environ['OMP_NUM_THREADS'] = '1'
-# reduce tensorflow log level
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
-import warnings
-from typing import List
-import platform
-import signal
-import shutil
-import argparse
-import torch
-import onnxruntime
-import tensorflow
-
-import roop.globals
-import roop.metadata
-import roop.ui as ui
-from roop.predictor import predict_image, predict_video
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
-
-if 'ROCMExecutionProvider' in roop.globals.execution_providers:
-    del torch
-
-warnings.filterwarnings('ignore', category=FutureWarning, module='insightface')
-warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')
-
-
-def parse_args() -> None:
-    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
-    program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
-    program.add_argument('-s', '--source', help='select an source image', dest='source_path')
-    program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
-    program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
-    program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
-    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
-    program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
-    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
-    program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
-    program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
-    program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
-    program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
-    program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
-    program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
-    program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
-
-    args = program.parse_args()
-
-    roop.globals.source_path = args.source_path
-    roop.globals.target_path = args.target_path
-    roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
-    roop.globals.frame_processors = args.frame_processor
-    roop.globals.headless = args.source_path or args.target_path or args.output_path
-    roop.globals.keep_fps = args.keep_fps
-    roop.globals.keep_audio = args.keep_audio
-    roop.globals.keep_frames = args.keep_frames
-    roop.globals.many_faces = args.many_faces
-    roop.globals.video_encoder = args.video_encoder
-    roop.globals.video_quality = args.video_quality
-    roop.globals.max_memory = args.max_memory
-    roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
-    roop.globals.execution_threads = args.execution_threads
-
-
-def encode_execution_providers(execution_providers: List[str]) -> List[str]:
-    return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
-
-
-def decode_execution_providers(execution_providers: List[str]) -> List[str]:
-    return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
-            if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
-
-
-def suggest_max_memory() -> int:
-    if platform.system().lower() == 'darwin':
-        return 4
-    return 16
-
-
-def suggest_execution_providers() -> List[str]:
-    return encode_execution_providers(onnxruntime.get_available_providers())
-
-
-def suggest_execution_threads() -> int:
-    if 'DmlExecutionProvider' in roop.globals.execution_providers:
-        return 1
-    if 'ROCMExecutionProvider' in roop.globals.execution_providers:
-        return 1
-    return 8
-
-
-def limit_resources() -> None:
-    # prevent tensorflow memory leak
-    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-    for gpu in gpus:
-        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
-            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
-        ])
-    # limit memory usage
-    if roop.globals.max_memory:
-        memory = roop.globals.max_memory * 1024 ** 3
-        if platform.system().lower() == 'darwin':
-            memory = roop.globals.max_memory * 1024 ** 6
-        if platform.system().lower() == 'windows':
-            import ctypes
-            kernel32 = ctypes.windll.kernel32
-            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-        else:
-            import resource
-            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
-
-
-def release_resources() -> None:
-    if 'CUDAExecutionProvider' in roop.globals.execution_providers:
-        torch.cuda.empty_cache()
-
-
-def pre_check() -> bool:
-    if sys.version_info < (3, 9):
-        update_status('Python version is not supported - please upgrade to 3.9 or higher.')
-        return False
-    if not shutil.which('ffmpeg'):
-        update_status('ffmpeg is not installed.')
-        return False
-    return True
-
-
-def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
-    print(f'[{scope}] {message}')
-    if not roop.globals.headless:
-        ui.update_status(message)
-
-
-def start() -> None:
-    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-        if not frame_processor.pre_start():
-            return
-    # process image to image
-    if has_image_extension(roop.globals.target_path):
-        if predict_image(roop.globals.target_path):
-            destroy()
-        shutil.copy2(roop.globals.target_path, roop.globals.output_path)
-        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-            update_status('Progressing...', frame_processor.NAME)
-            frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
-            frame_processor.post_process()
-            release_resources()
-        if is_image(roop.globals.target_path):
-            update_status('Processing to image succeed!')
-        else:
-            update_status('Processing to image failed!')
-        return
-    # process image to videos
-    if predict_video(roop.globals.target_path):
-        destroy()
-    update_status('Creating temp resources...')
-    create_temp(roop.globals.target_path)
-    update_status('Extracting frames...')
-    extract_frames(roop.globals.target_path)
-    temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
-    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-        update_status('Progressing...', frame_processor.NAME)
-        frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
-        frame_processor.post_process()
-        release_resources()
-    # handles fps
-    if roop.globals.keep_fps:
-        update_status('Detecting fps...')
-        fps = detect_fps(roop.globals.target_path)
-        update_status(f'Creating video with {fps} fps...')
-        create_video(roop.globals.target_path, fps)
-    else:
-        update_status('Creating video with 30.0 fps...')
-        create_video(roop.globals.target_path)
-    # handle audio
-    if roop.globals.keep_audio:
-        if roop.globals.keep_fps:
-            update_status('Restoring audio...')
-        else:
-            update_status('Restoring audio might cause issues as fps are not kept...')
-        restore_audio(roop.globals.target_path, roop.globals.output_path)
-    else:
-        move_temp(roop.globals.target_path, roop.globals.output_path)
-    # clean and validate
-    clean_temp(roop.globals.target_path)
-    if is_video(roop.globals.target_path):
-        update_status('Processing to video succeed!')
-    else:
-        update_status('Processing to video failed!')
-
-
-def destroy() -> None:
-    if roop.globals.target_path:
-        clean_temp(roop.globals.target_path)
-    quit()
-
-
-def run() -> None:
-    parse_args()
-    if not pre_check():
-        return
-    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-        if not frame_processor.pre_check():
-            return
-    limit_resources()
-    if roop.globals.headless:
-        start()
-    else:
-        window = ui.init(start, destroy)
-        window.mainloop()
spaces/Artrajz/vits-simple-api/gunicorn_config.py
DELETED
@@ -1,19 +0,0 @@
-import gc
-import multiprocessing
-
-bind = "0.0.0.0:23456"
-# workers = multiprocessing.cpu_count()
-workers = 1
-preload_app = True
-
-# disable GC in master as early as possible
-gc.disable()
-
-def when_ready(server):
-    # freeze objects after preloading app
-    gc.freeze()
-    print("Objects frozen in perm gen: ", gc.get_freeze_count())
-
-def post_fork(server, worker):
-    # reenable GC on worker
-    gc.enable()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
DELETED
@@ -1,36 +0,0 @@
-"""
-This module provides means to detect the App Engine environment.
-"""
-
-import os
-
-
-def is_appengine():
-    return is_local_appengine() or is_prod_appengine()
-
-
-def is_appengine_sandbox():
-    """Reports if the app is running in the first generation sandbox.
-
-    The second generation runtimes are technically still in a sandbox, but it
-    is much less restrictive, so generally you shouldn't need to check for it.
-    see https://cloud.google.com/appengine/docs/standard/runtimes
-    """
-    return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
-
-
-def is_local_appengine():
-    return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
-        "SERVER_SOFTWARE", ""
-    ).startswith("Development/")
-
-
-def is_prod_appengine():
-    return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
-        "SERVER_SOFTWARE", ""
-    ).startswith("Google App Engine/")
-
-
-def is_prod_appengine_mvms():
-    """Deprecated."""
-    return False
spaces/Awesimo/jojogan/op/conv2d_gradfix.py
DELETED
@@ -1,227 +0,0 @@
-import contextlib
-import warnings
-
-import torch
-from torch import autograd
-from torch.nn import functional as F
-
-enabled = True
-weight_gradients_disabled = False
-
-
-@contextlib.contextmanager
-def no_weight_gradients():
-    global weight_gradients_disabled
-
-    old = weight_gradients_disabled
-    weight_gradients_disabled = True
-    yield
-    weight_gradients_disabled = old
-
-
-def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
-    if could_use_op(input):
-        return conv2d_gradfix(
-            transpose=False,
-            weight_shape=weight.shape,
-            stride=stride,
-            padding=padding,
-            output_padding=0,
-            dilation=dilation,
-            groups=groups,
-        ).apply(input, weight, bias)
-
-    return F.conv2d(
-        input=input,
-        weight=weight,
-        bias=bias,
-        stride=stride,
-        padding=padding,
-        dilation=dilation,
-        groups=groups,
-    )
-
-
-def conv_transpose2d(
-    input,
-    weight,
-    bias=None,
-    stride=1,
-    padding=0,
-    output_padding=0,
-    groups=1,
-    dilation=1,
-):
-    if could_use_op(input):
-        return conv2d_gradfix(
-            transpose=True,
-            weight_shape=weight.shape,
-            stride=stride,
-            padding=padding,
-            output_padding=output_padding,
-            groups=groups,
-            dilation=dilation,
-        ).apply(input, weight, bias)
-
-    return F.conv_transpose2d(
-        input=input,
-        weight=weight,
-        bias=bias,
-        stride=stride,
-        padding=padding,
-        output_padding=output_padding,
-        dilation=dilation,
-        groups=groups,
-    )
-
-
-def could_use_op(input):
-    if (not enabled) or (not torch.backends.cudnn.enabled):
-        return False
-
-    if input.device.type != "cuda":
-        return False
-
-    if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8.", "1.9", "1.10"]):
-        return True
-
-    warnings.warn(
-        f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()."
-    )
-
-    return False
-
-
-def ensure_tuple(xs, ndim):
-    xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
-
-    return xs
-
-
-conv2d_gradfix_cache = dict()
-
-
-def conv2d_gradfix(
-    transpose, weight_shape, stride, padding, output_padding, dilation, groups
-):
-    ndim = 2
-    weight_shape = tuple(weight_shape)
-    stride = ensure_tuple(stride, ndim)
-    padding = ensure_tuple(padding, ndim)
-    output_padding = ensure_tuple(output_padding, ndim)
-    dilation = ensure_tuple(dilation, ndim)
-
-    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
-    if key in conv2d_gradfix_cache:
-        return conv2d_gradfix_cache[key]
-
-    common_kwargs = dict(
-        stride=stride, padding=padding, dilation=dilation, groups=groups
-    )
-
-    def calc_output_padding(input_shape, output_shape):
-        if transpose:
-            return [0, 0]
-
-        return [
-            input_shape[i + 2]
-            - (output_shape[i + 2] - 1) * stride[i]
-            - (1 - 2 * padding[i])
-            - dilation[i] * (weight_shape[i + 2] - 1)
-            for i in range(ndim)
-        ]
-
-    class Conv2d(autograd.Function):
-        @staticmethod
-        def forward(ctx, input, weight, bias):
-            if not transpose:
-                out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
-
-            else:
-                out = F.conv_transpose2d(
-                    input=input,
-                    weight=weight,
-                    bias=bias,
-                    output_padding=output_padding,
-                    **common_kwargs,
-                )
-
-            ctx.save_for_backward(input, weight)
-
-            return out
-
-        @staticmethod
-        def backward(ctx, grad_output):
-            input, weight = ctx.saved_tensors
-            grad_input, grad_weight, grad_bias = None, None, None
-
-            if ctx.needs_input_grad[0]:
-                p = calc_output_padding(
-                    input_shape=input.shape, output_shape=grad_output.shape
-                )
-                grad_input = conv2d_gradfix(
-                    transpose=(not transpose),
-                    weight_shape=weight_shape,
-                    output_padding=p,
-                    **common_kwargs,
-                ).apply(grad_output, weight, None)
-
-            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
-                grad_weight = Conv2dGradWeight.apply(grad_output, input)
-
-            if ctx.needs_input_grad[2]:
-                grad_bias = grad_output.sum((0, 2, 3))
-
-            return grad_input, grad_weight, grad_bias
-
-    class Conv2dGradWeight(autograd.Function):
-        @staticmethod
-        def forward(ctx, grad_output, input):
-            op = torch._C._jit_get_operation(
-                "aten::cudnn_convolution_backward_weight"
-                if not transpose
-                else "aten::cudnn_convolution_transpose_backward_weight"
-            )
-            flags = [
-                torch.backends.cudnn.benchmark,
-                torch.backends.cudnn.deterministic,
-                torch.backends.cudnn.allow_tf32,
-            ]
-            grad_weight = op(
-                weight_shape,
-                grad_output,
-                input,
-                padding,
-                stride,
-                dilation,
-                groups,
-                *flags,
-            )
-            ctx.save_for_backward(grad_output, input)
-
-            return grad_weight
-
-        @staticmethod
-        def backward(ctx, grad_grad_weight):
-            grad_output, input = ctx.saved_tensors
-            grad_grad_output, grad_grad_input = None, None
-
-            if ctx.needs_input_grad[0]:
-                grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)
-
-            if ctx.needs_input_grad[1]:
-                p = calc_output_padding(
-                    input_shape=input.shape, output_shape=grad_output.shape
-                )
-                grad_grad_input = conv2d_gradfix(
-                    transpose=(not transpose),
-                    weight_shape=weight_shape,
-                    output_padding=p,
-                    **common_kwargs,
-                ).apply(grad_output, grad_grad_weight, None)
-
-            return grad_grad_output, grad_grad_input
-
-    conv2d_gradfix_cache[key] = Conv2d
-
-    return Conv2d
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py
DELETED
@@ -1,187 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import json
-import logging
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
-from detectron2.utils.file_io import PathManager
-
-"""
-This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
-"""
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
-    files = []
-    # scan through the directory
-    cities = PathManager.ls(image_dir)
-    logger.info(f"{len(cities)} cities found in '{image_dir}'.")
-    image_dict = {}
-    for city in cities:
-        city_img_dir = os.path.join(image_dir, city)
-        for basename in PathManager.ls(city_img_dir):
-            image_file = os.path.join(city_img_dir, basename)
-
-            suffix = "_leftImg8bit.png"
-            assert basename.endswith(suffix), basename
-            basename = os.path.basename(basename)[: -len(suffix)]
-
-            image_dict[basename] = image_file
-
-    for ann in json_info["annotations"]:
-        image_file = image_dict.get(ann["image_id"], None)
-        assert image_file is not None, "No image {} found for annotation {}".format(
-            ann["image_id"], ann["file_name"]
-        )
-        label_file = os.path.join(gt_dir, ann["file_name"])
-        segments_info = ann["segments_info"]
-
-        files.append((image_file, label_file, segments_info))
-
-    assert len(files), "No images found in {}".format(image_dir)
-    assert PathManager.isfile(files[0][0]), files[0][0]
-    assert PathManager.isfile(files[0][1]), files[0][1]
-    return files
-
-
-def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
-    """
-    Args:
-        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
-        gt_dir (str): path to the raw annotations. e.g.,
-            "~/cityscapes/gtFine/cityscapes_panoptic_train".
-        gt_json (str): path to the json file. e.g.,
-            "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
-        meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
-            and "stuff_dataset_id_to_contiguous_id" to map category ids to
-            contiguous ids for training.
-
-    Returns:
-        list[dict]: a list of dicts in Detectron2 standard format. (See
-        `Using Custom Datasets </tutorials/datasets.html>`_ )
-    """
-
-    def _convert_category_id(segment_info, meta):
-        if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
-            segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
-                segment_info["category_id"]
-            ]
-        else:
-            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
-                segment_info["category_id"]
-            ]
-        return segment_info
-
-    assert os.path.exists(
-        gt_json
-    ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files."  # noqa
-    with open(gt_json) as f:
-        json_info = json.load(f)
-    files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
-    ret = []
-    for image_file, label_file, segments_info in files:
-        sem_label_file = (
-            image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
-        )
-        segments_info = [_convert_category_id(x, meta) for x in segments_info]
-        ret.append(
-            {
-                "file_name": image_file,
-                "image_id": "_".join(
-                    os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
-                ),
-                "sem_seg_file_name": sem_label_file,
-                "pan_seg_file_name": label_file,
-                "segments_info": segments_info,
-            }
-        )
-    assert len(ret), f"No images found in {image_dir}!"
-    assert PathManager.isfile(
-        ret[0]["sem_seg_file_name"]
-    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
-    assert PathManager.isfile(
-        ret[0]["pan_seg_file_name"]
-    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
-    return ret
-
-
-_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
-    "cityscapes_fine_panoptic_train": (
-        "cityscapes/leftImg8bit/train",
-        "cityscapes/gtFine/cityscapes_panoptic_train",
-        "cityscapes/gtFine/cityscapes_panoptic_train.json",
-    ),
-    "cityscapes_fine_panoptic_val": (
-        "cityscapes/leftImg8bit/val",
-        "cityscapes/gtFine/cityscapes_panoptic_val",
-        "cityscapes/gtFine/cityscapes_panoptic_val.json",
-    ),
-    # "cityscapes_fine_panoptic_test": not supported yet
-}
-
-
-def register_all_cityscapes_panoptic(root):
-    meta = {}
-    # The following metadata maps contiguous id from [0, #thing categories +
-    # #stuff categories) to their names and colors. We have to replica of the
-    # same name and color under "thing_*" and "stuff_*" because the current
-    # visualization function in D2 handles thing and class classes differently
-    # due to some heuristic used in Panoptic FPN. We keep the same naming to
-    # enable reusing existing visualization functions.
-    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
-    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
-    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
-    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
-
-    meta["thing_classes"] = thing_classes
-    meta["thing_colors"] = thing_colors
-    meta["stuff_classes"] = stuff_classes
-    meta["stuff_colors"] = stuff_colors
-
-    # There are three types of ids in cityscapes panoptic segmentation:
-    # (1) category id: like semantic segmentation, it is the class id for each
-    # pixel. Since there are some classes not used in evaluation, the category
-    # id is not always contiguous and thus we have two set of category ids:
-    #   - original category id: category id in the original dataset, mainly
-    #     used for evaluation.
-    #   - contiguous category id: [0, #classes), in order to train the classifier
-    # (2) instance id: this id is used to differentiate different instances from
-    # the same category. For "stuff" classes, the instance id is always 0; for
-    # "thing" classes, the instance id starts from 1 and 0 is reserved for
-    # ignored instances (e.g. crowd annotation).
-    # (3) panoptic id: this is the compact id that encode both category and
-    # instance id by: category_id * 1000 + instance_id.
-    thing_dataset_id_to_contiguous_id = {}
-    stuff_dataset_id_to_contiguous_id = {}
-
-    for k in CITYSCAPES_CATEGORIES:
-        if k["isthing"] == 1:
-            thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
-        else:
-            stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
-
-    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
-    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
-
-    for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
-        image_dir = os.path.join(root, image_dir)
-        gt_dir = os.path.join(root, gt_dir)
-        gt_json = os.path.join(root, gt_json)
-
-        DatasetCatalog.register(
-            key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
-        )
-        MetadataCatalog.get(key).set(
-            panoptic_root=gt_dir,
-            image_root=image_dir,
-            panoptic_json=gt_json,
-            gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
-            evaluator_type="cityscapes_panoptic_seg",
-            ignore_label=255,
-            label_divisor=1000,
-            **meta,
-        )
spaces/BIASLab/sars-cov-2-classification-fcgr/src/models/resnet50_9mers.py
DELETED
@@ -1,103 +0,0 @@
-# https://github.com/c1ph3rr/Deep-Residual-Learning-for-Image-Recognition/blob/master/Resnet50.py
-from pathlib import Path
-from tensorflow.keras.models import Model
-from tensorflow.keras.layers import (
-    Input,
-    Conv2D,
-    Dense,
-    MaxPool2D,
-    GlobalAveragePooling2D,
-    Add,
-    Activation,
-    BatchNormalization,
-    ZeroPadding2D,
-)
-
-# Reference name of model
-MODEL_NAME = str(Path(__file__).resolve().stem)
-
-def identity_block(inp, filters, kernel_size, block, layer):
-
-    f1, f2, f3 = filters
-
-    conv_name = 'id_conv_b' + block + '_l' + layer
-    batch_name = 'id_batch_b' + block + '_l' + layer
-
-    x = Conv2D(filters=f1, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_a')(inp)
-    x = BatchNormalization(name=batch_name + '_a')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters=f2, kernel_size=kernel_size, padding='same', kernel_initializer='he_normal', name=conv_name + '_b')(x)
-    x = BatchNormalization(name=batch_name + '_b')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters=f3, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_c')(x)
-    x = BatchNormalization(name=batch_name + '_c')(x)
-
-    add = Add()([inp, x])
-    x = Activation('relu')(add)
-
-    return x
-
-
-def convolutional_block(inp, filters, kernel_size, block, layer, strides=2):
-
-    f1, f2, f3 = filters
-
-    conv_name = 'res_conv_b' + block + '_l' + layer
-    batch_name = 'res_batch_b' + block + '_l' + layer
-
-    y = Conv2D(filters=f1, kernel_size=1, padding='same', strides=strides, kernel_initializer='he_normal', name=conv_name + '_a')(inp)
-    y = BatchNormalization(name=batch_name + '_a')(y)
-    y = Activation('relu')(y)
-
-    y = Conv2D(filters=f2, kernel_size=kernel_size, padding='same', kernel_initializer='he_normal', name=conv_name + '_b')(y)
-    y = BatchNormalization(name=batch_name + '_b')(y)
-    y = Activation('relu')(y)
-
-    y = Conv2D(filters=f3, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_c')(y)
-    y = BatchNormalization(name=batch_name + '_c')(y)
-
-    shortcut = Conv2D(filters=f3, kernel_size=1, strides=strides, kernel_initializer='he_normal', name=conv_name + '_shortcut')(inp)
-    shortcut = BatchNormalization(name=batch_name + '_shortcut')(shortcut)
-
-    add = Add()([shortcut, y])
-    y = Activation('relu')(add)
-
-    return y
-
-def get_model(n_outputs):
-
-    inp = Input(shape=(512, 512, 1), name='input')
-    padd = ZeroPadding2D(3)(inp)
-
-    conv1 = Conv2D(64, 7, strides=2, padding='valid', name='conv1')(padd)
-    conv1 = BatchNormalization(name='batch2')(conv1)
-    conv1 = Activation('relu')(conv1)
-    conv1 = ZeroPadding2D(1)(conv1)
-    conv1 = MaxPool2D(3, 2)(conv1)
-
-    conv2 = convolutional_block(conv1, [64,64,256], 3, '2', '1', strides=1)
-    conv2 = identity_block(conv2, [64,64,256], 3, '2', '2')
-    conv2 = identity_block(conv2, [64,64,256], 3, '2', '3')
-
-    conv3 = convolutional_block(conv2, [128,128,512], 3, '3', '1')
-    conv3 = identity_block(conv3, [128,128,512], 3, '3', '2')
-    conv3 = identity_block(conv3, [128,128,512], 3, '3', '3')
-    conv3 = identity_block(conv3, [128,128,512], 3, '3', '4')
-
-    conv4 = convolutional_block(conv3, [256,256,1024], 3, '4', '1')
-    conv4 = identity_block(conv4, [256,256,1024], 3, '4', '2')
-    conv4 = identity_block(conv4, [256,256,1024], 3, '4', '3')
-    conv4 = identity_block(conv4, [256,256,1024], 3, '4', '4')
-    conv4 = identity_block(conv4, [256,256,1024], 3, '4', '5')
-    conv4 = identity_block(conv4, [256,256,1024], 3, '4', '6')
-
-    conv5 = convolutional_block(conv4, [512,512,2048], 3, '5', '1')
-    conv5 = identity_block(conv5, [512,512,2048], 3, '5', '2')
-    conv5 = identity_block(conv5, [512,512,2048], 3, '5', '3')
-
-    avg_pool = GlobalAveragePooling2D()(conv5)
-    out = Dense(n_outputs, activation='softmax')(avg_pool)
-
-    return Model(inp, out)
spaces/Bart92/RVC_HF/i18n.py DELETED
@@ -1,43 +0,0 @@
-import json
-
-def load_language_list(language):
-    try:
-        with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f:
-            return json.load(f)
-    except FileNotFoundError:
-        raise FileNotFoundError(
-            f"Failed to load language file for {language}. Check if the correct .json file exists."
-        )
-
-
-class I18nAuto:
-    """
-    A class used for internationalization using JSON language files.
-
-    Examples
-    --------
-    >>> i18n = I18nAuto('en_US')
-    >>> i18n.print()
-    Using Language: en_US
-    """
-    def __init__(self, language=None):
-        from locale import getdefaultlocale
-        language = language or getdefaultlocale()[0]
-        if not self._language_exists(language):
-            language = "en_US"
-
-        self.language_map = load_language_list(language)
-        self.language = language
-
-    @staticmethod
-    def _language_exists(language):
-        from os.path import exists
-        return exists(f"./i18n/locale/{language}.json")
-
-    def __call__(self, key):
-        """Returns the translation of the given key if it exists, else returns the key itself."""
-        return self.language_map.get(key, key)
-
-    def print(self):
-        """Prints the language currently in use."""
-        print(f"Using Language: {self.language}")
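
A short usage sketch of the deleted I18nAuto helper, following the docstring example above; it assumes ./i18n/locale/en_US.json exists, and the key "Convert" is an assumed UI string, not taken from the file:

    # Hypothetical usage sketch.
    i18n = I18nAuto("en_US")     # falls back to en_US when the requested locale file is missing
    i18n.print()                 # -> Using Language: en_US
    label = i18n("Convert")      # returns the translation, or the key itself if absent
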
spaces/Benson/text-generation/Examples/Apkaward Chess.md DELETED
@@ -1,55 +0,0 @@
-
-<h1>Apkaward Chess: Una Nueva Forma de Jugar Ajedrez Online</h1>
-<p>Si eres un fanático del ajedrez y estás buscando una nueva forma de jugarlo online, es posible que quieras echar un vistazo a Apkaward Chess. Este es un juego de ajedrez que cuenta con 5D ajedrez con multiverso de viaje en el tiempo, que se basa en el popular juego 5D Ajedrez con Multiverso de Viaje en el Tiempo por Thunkspace. En este artículo, le diremos qué es Apkaward, qué es Apkaward Chess, cómo jugarlo, cuáles son las reglas del ajedrez 5D y por qué debe jugarlo. </p>
-<h2>¿Qué es Apkaward? </h2>
-<p>Apkaward es un sitio web que ofrece juegos gratuitos y de pago para dispositivos Android. Puedes encontrar una variedad de juegos en diferentes géneros, como acción, aventura, rompecabezas y más. También puede descargar versiones modificadas de algunos juegos, que le dan recursos ilimitados o funciones desbloqueadas. Algunos de los juegos disponibles en Apkaward son Minecraft PE, GTA San Andreas, PUBG Mobile, y más. </p>
-<h2>apkaward chess</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6Mjt">https://bltlly.com/2v6Mjt</a></b></p><br /><br />
-<p>Apkaward también tiene un canal de YouTube donde muestran sus juegos y cómo descargarlos. Puedes ver sus videos para ver cómo se ven y juegan los juegos, y para obtener los enlaces para descargarlos. También puedes suscribirte a su canal para recibir notificaciones de sus últimas subidas. </p>
-<h2>¿Qué es Apkaward Chess? </h2>
-<p>Apkaward Chess es uno de los juegos disponibles en Apkaward. Es un juego de ajedrez que presenta ajedrez 5D con viajes en el tiempo multiverso. Esto significa que puede mover sus piezas no solo en el tablero, sino también a través de turnos y líneas de tiempo. Puedes crear nuevas líneas de tiempo moviendo tus piezas hacia atrás en el tiempo o hacia los lados en el tiempo. También puede capturar piezas de otras líneas de tiempo o enviar sus piezas a otras líneas de tiempo. El objetivo es hacer jaque mate al rey de tu oponente en cualquier línea de tiempo. </p>
-
-<h2>¿Cómo jugar al ajedrez apkaward? </h2>
-<p>Apkaward Chess se puede descargar desde el sitio web de Apkaward o el canal de YouTube. Puede encontrar los enlaces a ambos en las referencias a continuación . Una vez que haya descargado el archivo APK, debe instalarlo en su dispositivo. Es posible que necesite habilitar la instalación de aplicaciones de fuentes desconocidas en su configuración. Después de eso, puede iniciar el juego y comenzar a jugar. </p>
-<p>Apkaward Chess se puede jugar fuera de línea o en línea contra otros jugadores o una IA. Puedes elegir entre diferentes niveles de dificultad y modos de juego. También puede personalizar el tablero y las piezas a su gusto. El juego tiene un modo tutorial que te enseña los fundamentos del ajedrez 5D y cómo usar la interfaz. También puede acceder al menú de ayuda en cualquier momento durante el juego para obtener más información. </p>
-<p>Apkaward Chess tiene las mismas reglas que 5D Chess con Multiverse Time Travel, que se explican en la siguiente sección. </p>
-<h2>¿Cuáles son las Reglas del Ajedrez 5D con Multiverso Viaje en el Tiempo? </h2>
-<p>5D Chess with Multiverse Time Travel es una variante de ajedrez que introduce dos ejes adicionales de movimiento: el eje de giro y el eje de línea de tiempo. Todas las piezas conservan sus habilidades de movimiento de ajedrez estándar, pero también pueden moverse a través de turnos y líneas de tiempo. El juego comienza con una configuración de ajedrez normal, pero a medida que el juego progresa, se vuelve cada vez más complejo a través de una serie de líneas de tiempo alternativas que el jugador puede aprovechar. </p>
-<p>El eje de giro está representado por una fila horizontal de tableros, cada uno correspondiente a un giro diferente en el juego. El eje de la línea de tiempo está representado por una columna vertical de tableros, cada uno correspondiente a una línea de tiempo diferente en el juego. La línea de tiempo principal es la que comienza desde la posición inicial y sigue los movimientos realizados por ambos jugadores. Las líneas de tiempo alternativas se crean cuando una pieza se mueve hacia atrás en el tiempo o hacia los lados en el tiempo. </p>
-<p></p>
-
-<p>El objetivo del juego es hacer jaque mate al rey de tu oponente en cualquier cronología. Sin embargo, hay algunas reglas y conceptos adicionales que debes tener en cuenta:</p>
-<ul>
-<li>Una pieza solo puede retroceder en el tiempo o de lado en el tiempo si no crea una paradoja. Una paradoja ocurre cuando una pieza se mueve a una posición donde habría sido capturada o bloqueada por sí misma u otra pieza en un giro o línea de tiempo anterior. </li>
-<li>Una pieza puede capturar otra pieza de cualquier línea de tiempo, siempre y cuando no cree una paradoja. Capturar una pieza de otra línea de tiempo la elimina de todas las tablas posteriores donde habría existido. </li>
-<li>Una pieza puede estar en jaque o jaque mate desde cualquier línea de tiempo, siempre y cuando sea visible desde el tablero actual. Una pieza es visible si no hay otra pieza bloqueando su línea de visión a través de giros y líneas de tiempo. </li>
-<li>Un jugador puede mover su rey fuera de jaque moviéndolo en el tablero, moviéndolo a través de turnos o líneas de tiempo, o moviendo otra pieza para bloquear o capturar al atacante. </li>
-<li>Un jugador no puede hacer un movimiento que pondría a su rey en jaque en cualquier línea de tiempo, incluso si no es visible desde el tablero actual. </li>
-<li>Un reproductor no puede hacer un movimiento que cree un bucle infinito de líneas de tiempo. Un bucle infinito ocurre cuando una pieza se mueve hacia atrás en el tiempo o hacia los lados en el tiempo a una posición donde ya ha estado antes. </li>
-</ul>
-<p>Estas son las reglas básicas del ajedrez 5D, pero hay conceptos y estrategias más avanzadas que puedes aprender mientras juegas. También puede consultar el sitio web oficial de 5D Chess with Multiverse Time Travel para más detalles y ejemplos. </p>
-<h2>¿Por qué jugar al ajedrez apkaward? </h2>
-
-<p>Apkaward Chess es también un juego único e innovador que ofrece una nueva forma de jugar al ajedrez en línea. Puedes jugar contra otros jugadores de todo el mundo que hayan descargado Apkaward Chess, o contra una IA que se adapte a tu nivel de habilidad. También puedes chatear con tus oponentes y compartir tus consejos y trucos con ellos. También puedes personalizar la configuración y las preferencias del juego para adaptarlas a tu estilo y estado de ánimo. </p>
-<p>Apkaward Chess es un juego gratuito que puedes disfrutar en tu dispositivo Android en cualquier momento y en cualquier lugar. No necesitas una conexión a Internet para jugar sin conexión, y no necesitas pagar nada para descargar o jugar el juego. También puedes actualizar el juego regularmente para obtener nuevas características y mejoras. </p>
-<h2>Conclusión</h2>
-<p>Apkaward Chess es una nueva forma de jugar ajedrez en línea que cuenta con 5D ajedrez con multiverso de viaje en el tiempo. Se basa en el popular juego 5D Chess con Multiverse Time Travel de Thunkspace, que está disponible para Windows, macOS y Linux. Apkaward Chess se puede descargar gratis desde el sitio web de Apkaward o el canal de YouTube, y se puede jugar fuera de línea o en línea contra otros jugadores o una IA. Apkaward Chess es un juego divertido y desafiante que pone a prueba tu pensamiento estratégico y creatividad, así como un juego único e innovador que ofrece una nueva dimensión del ajedrez. Si eres un fan del ajedrez y estás buscando una nueva forma de jugar online, deberías probar Apkaward Chess. </p>
-<h2>Preguntas frecuentes</h2>
-<h3>¿Cuál es la diferencia entre el ajedrez 5D y el ajedrez estándar? </h3>
-<p>5D chess es una variante de ajedrez que introduce dos ejes adicionales de movimiento: el eje de giro y el eje de línea de tiempo. Esto significa que puede mover sus piezas no solo en el tablero, sino también a través de turnos y líneas de tiempo. Puedes crear nuevas líneas de tiempo moviendo tus piezas hacia atrás en el tiempo o hacia los lados en el tiempo. También puede capturar piezas de otras líneas de tiempo o enviar sus piezas a otras líneas de tiempo. El objetivo es hacer jaque mate al rey de tu oponente en cualquier línea de tiempo. </p>
-
-<p>Puede descargar Apkaward Chess desde el sitio web de Apkaward o el canal de YouTube. Puede encontrar los enlaces a ambos en las referencias a continuación . Una vez que haya descargado el archivo APK, debe instalarlo en su dispositivo. Es posible que necesite habilitar la instalación de aplicaciones de fuentes desconocidas en su configuración. Después de eso, puede iniciar el juego y comenzar a jugar. </p>
-<h3>¿Cómo juego Apkaward Chess online? </h3>
-<p>Puedes jugar Apkaward Chess online contra otros jugadores o una IA. Necesitas tener una conexión a Internet para jugar online. Puedes elegir entre diferentes niveles de dificultad y modos de juego. También puedes chatear con tus oponentes y compartir tus consejos y trucos con ellos. </p>
-<h3>¿Cómo aprendo las reglas del ajedrez 5D? </h3>
-<p>Puedes aprender las reglas del ajedrez 5D jugando el modo tutorial en Apkaward Chess. Este modo le enseña los fundamentos del ajedrez 5D y cómo usar la interfaz. También puede acceder al menú de ayuda en cualquier momento durante el juego para obtener más información. También puede consultar el sitio web oficial de 5D Chess with Multiverse Time Travel para más detalles y ejemplos. </p>
-<h3>¿Cuáles son algunos consejos y trucos para jugar al ajedrez 5D? </h3>
-<p>Algunos consejos y trucos para jugar al ajedrez 5D son:</p>
-<ul>
-<li>Piensa con anticipación tus movimientos y considera cómo afectarán no solo tu posición actual, sino también tus posiciones pasadas y futuras, así como los movimientos de tu oponente a través de giros y líneas de tiempo. </li>
-<li>Usa la capacidad de tus piezas para moverte a través de turnos y líneas de tiempo para crear nuevas oportunidades o evitar amenazas. También puedes usarlas para confundir o sorprender a tu oponente. </li>
-<li>Tenga cuidado de no crear paradojas o bucles infinitos al mover sus piezas a través de giros y líneas de tiempo. Estos movimientos son ilegales y resultarán en una pérdida. </li>
-<li>Tenga en cuenta todas las posibles comprobaciones y checkmates de cualquier línea de tiempo, incluso si no son visibles desde el tablero actual. Puedes usar la interfaz para ver todos los tableros del juego. </li>
-
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descargar El Juego Talking Tom Hero Dash Para PC.md DELETED
@@ -1,85 +0,0 @@
-<br />
-<h1>Talking Tom Hero Dash: Un juego divertido y lleno de acción para PC</h1>
-<p>¿Te encanta jugar en tu PC? ¿Te gusta correr, saltar y salvar el mundo con tus personajes favoritos? Si respondiste que sí, entonces deberías probar Talking Tom Hero Dash, un juego popular y emocionante que puedes descargar y jugar en tu PC. En este artículo, le diremos qué es Talking Tom Hero Dash, cómo descargarlo para PC y por qué debe jugarlo en su computadora. </p>
-<h2>Descargar el juego Talking Tom hero dash para PC</h2><br /><p><b><b>Download File</b> ✸✸✸ <a href="https://bltlly.com/2v6JNF">https://bltlly.com/2v6JNF</a></b></p><br /><br />
-<h2>¿Qué es Talking Tom Hero Dash? </h2>
-<p>Talking Tom Hero Dash es un juego desarrollado por Outfit7 Limited, los creadores de My Talking Tom, My Talking Angela y Talking Tom Gold Run. Es un juego de corredor sin fin que cuenta con Talking Tom y sus amigos como superhéroes que tienen que detener el mal Rakoonz de destruir el mundo. </p>
-<h3>La historia y el juego</h3>
-Los Rakoonz han capturado a Angela, Ben, Hank y Ginger, y depende de Tom salvarlos. Para ello, tiene que correr y perseguirlos a través de diferentes lugares, como templos perdidos, ciudades antiguas, dunas del desierto y picos nevados. En el camino, tiene que esquivar obstáculos, recoger monedas y gemas, usar power-ups y gadgets, y luchar contra el Rakoonz. También puede desbloquear nuevos atuendos y mejorar sus habilidades a medida que avanza. </p>
-<h3>Las características y los gráficos</h3>
-<p>Talking Tom Hero Dash tiene muchas características que lo hacen divertido y atractivo. Por ejemplo, puedes jugar con diferentes personajes, cada uno con sus propios poderes especiales. También puedes completar misiones y eventos para ganar recompensas y desbloquear nuevos mundos. También puedes ver videos de los personajes animados de Outfit7 en YouTube dentro del juego. El juego también tiene gráficos increíbles que son coloridos, vibrantes y detallados. Las animaciones son suaves y realistas, y los efectos de sonido son vivos e inmersivos. </p>
-<h2> ¿Cómo descargar Talking Tom Hero Dash para PC? </h2>
-<p>Hay varias maneras de descargar Talking Tom Hero Dash para PC. Aquí están algunas de ellas:</p>
-
-<p>La forma más fácil de descargar Talking Tom Hero Dash para PC es desde la tienda de Microsoft. Todo lo que necesita es un dispositivo Windows 10 y una conexión a Internet. Estos son los pasos:</p>
-<p></p>
-<ol>
-<li>Abra la aplicación de Microsoft Store en su PC.</li>
-<li>Buscar "Talking Tom Hero Dash" en la barra de búsqueda. </li>
-<li>Seleccione el juego de los resultados y haga clic en "Obtener" o "Comprar" dependiendo de si es gratis o de pago. </li>
-<li>Espere a que finalicen la descarga y la instalación. </li>
-<li>Inicie el juego desde su menú Inicio o escritorio. </li>
-</ol>
-<h3>Descarga desde una plataforma de terceros</h3>
-<p>Otra forma de descargar Talking Tom Hero Dash para PC es desde una plataforma de terceros como Steam o Epic Games Store. Estas son plataformas de distribución digital que te permiten comprar y descargar juegos para tu PC. Primero tendrá que crear una cuenta e instalar su lanzador en su PC. Estos son los pasos:</p>
-<ol>
-<li>Ir a la página web de la plataforma que desea utilizar (por ejemplo, [Steam]( 4 ) o [Epic Games Store]( 5 )). </li>
-<li>Crea una cuenta o inicia sesión con la existente. </li>
-<li>Descargue e instale su lanzador en su PC.</li>
-<li>Abra su lanzador y busque "Talking Tom Hero Dash" en su tienda. </li>
-<li>Seleccione el juego de los resultados y haga clic en "Añadir al carrito" o "Obtener" dependiendo de si es gratis o de pago. </li>
-<li>Proceda a pagar el juego si es necesario. </li>
-<li>Espera a que finalicen la descarga y la instalación. </ <li>Inicia el juego desde su lanzador o tu escritorio. </li>
-</ol>
-<h3>Descargar desde el sitio web oficial</h3>
-<p>La tercera forma de descargar Talking Tom Hero Dash para PC es desde el sitio web oficial de Outfit7 Limited. Esta es la forma más directa y fiable de conseguir el juego, pero puede requerir más pasos y habilidades técnicas. Estos son los pasos:</p>
-<ol>
-<li>Ir al [sitio web oficial] de Outfit7 Limited.</li>
-<li>Haga clic en "Juegos" y seleccione "Talking Tom Hero Dash" de la lista. </li>
-
-<li>Guarde el archivo en su PC y ejecútelo como administrador. </li>
-<li>Siga las instrucciones en la pantalla para instalar el juego. </li>
-<li>Inicie el juego desde su escritorio o menú Inicio. </li>
-</ol>
-<h2>¿Por qué jugar Talking Tom Hero Dash en PC? </h2>
-<p>Ahora que sabe cómo descargar Talking Tom Hero Dash para PC, puede que se pregunte por qué debe jugar en su ordenador en lugar de su dispositivo móvil. Bueno, hay muchas razones por las que jugar juegos para PC puede ser más agradable y gratificante que jugar juegos para móviles. Estas son algunas de ellas:</p>
-<h3>Los beneficios de jugar juegos de PC</h3>
-<p>Jugar juegos de PC puede tener muchos beneficios para su salud, habilidades y estado de ánimo. Por ejemplo, jugar juegos de PC puede:</p>
-<ul>
-<li>Mejora tus habilidades cognitivas, como la memoria, la atención, la resolución de problemas y la creatividad. </li>
-<li>Mejora la coordinación mano-ojo, el tiempo de reacción y la conciencia espacial. </li>
-<li>Reduce tu estrés, ansiedad y depresión, y aumenta tu felicidad y autoestima. </li>
-<li>Aumenta tus habilidades sociales, comunicación y trabajo en equipo, especialmente si juegas en línea o con amigos. </li>
-<li>Educarte sobre diferentes temas, culturas y perspectivas, e inspirarte a aprender más. </li>
-</ul>
-<h3>Las ventajas de jugar Talking Tom Hero Dash en PC</h3>
-<p>Jugar Talking Tom Hero Dash en PC también puede tener algunas ventajas específicas sobre jugarlo en su dispositivo móvil. Por ejemplo, jugar Talking Tom Hero Dash en PC puede:</p>
-<ul>
-<li>Darle una pantalla más grande y mejor, que puede mejorar su visibilidad, inmersión y disfrute. </li>
-<li>Le ofrece un rendimiento más suave y rápido, que puede evitar el retraso, estrellarse o congelarse. </li>
-<li>Le proporciona más opciones de control, como el uso de un teclado, ratón o controlador, que puede adaptarse a sus preferencias y comodidad. </li>
-<li>Ahorra batería y espacio de almacenamiento en tu dispositivo móvil, lo que puede extender su vida útil y funcionalidad. </li>
-
-</ul>
-<h2>Conclusión</h2>
-<p>Talking Tom Hero Dash es un juego divertido y lleno de acción que puedes descargar y jugar en tu PC. Tiene una historia cautivadora, un juego emocionante y unos gráficos impresionantes. También tiene muchas características que lo hacen atractivo y entretenido. Puede descargarlo desde Microsoft Store, una plataforma de terceros o el sitio web oficial. También puede disfrutar de muchos beneficios y ventajas de jugar en su PC. Entonces, ¿qué estás esperando? Descargar Talking Tom Hero Dash para PC hoy y unirse a Tom y sus amigos en su aventura heroica! </p>
-<h2>Preguntas frecuentes</h2>
-<p>Aquí hay algunas preguntas frecuentes sobre Talking Tom Hero Dash:</p>
-<h4>Q: ¿Es Talking Tom Hero Dash libre para jugar? </h4>
-<p>A: Sí, Talking Tom Hero Dash es gratis para jugar en todas las plataformas. Sin embargo, puede contener compras en la aplicación que le permiten comprar monedas, gemas u otros artículos con dinero real. </p>
-<h4>P: ¿Es seguro para los niños Talking Tom Hero Dash? </h4>
-<p>A: Sí, Talking Tom Hero Dash es seguro para los niños. Tiene una calificación de 4+ en la Microsoft Store y 9+ en la App Store. No contiene ninguna violencia, gore, o blasfemia. Sin embargo, puede tener algunos anuncios o enlaces que conducen a otros sitios web o aplicaciones que pueden no ser adecuados para los niños. Por lo tanto, se recomienda orientación parental. </p>
-<h4>P: ¿Cómo puedo obtener más monedas y gemas en Talking Tom Hero Dash? </h4>
-<p>A: Hay muchas maneras de obtener más monedas y gemas en Talking Tom Hero Dash. Por ejemplo, puedes:</p>
-<ul>
-<li>Corre tan lejos como puedas y recíbelos en el camino. </li>
-<li>Completa misiones y eventos para ganarlos como recompensas. </li>
-<li>Ver vídeos o anuncios para obtenerlos gratis. </li>
-<li>Hazte miembro VIP para <li>Cómpralos con dinero real si quieres. </li>
-</ul>
-<h4>Q: ¿Cómo puedo desbloquear nuevos personajes y trajes en Talking Tom Hero Dash? </h4>
-
-<h4>Q: ¿Cómo puedo actualizar Talking Tom Hero Dash en PC? </h4>
-<p>A: Para actualizar Talking Tom Hero Dash en PC, debe verificar si hay una nueva versión disponible en la plataforma donde la descargó. Si lo hay, puede seguir las instrucciones en la pantalla para descargar e instalar la actualización. Alternativamente, puedes desinstalar el juego y reinstalarlo con la última versión. </p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py DELETED
@@ -1,170 +0,0 @@
-import itertools
-
-from .compat import collections_abc
-
-
-class DirectedGraph(object):
-    """A graph structure with directed edges."""
-
-    def __init__(self):
-        self._vertices = set()
-        self._forwards = {}  # <key> -> Set[<key>]
-        self._backwards = {}  # <key> -> Set[<key>]
-
-    def __iter__(self):
-        return iter(self._vertices)
-
-    def __len__(self):
-        return len(self._vertices)
-
-    def __contains__(self, key):
-        return key in self._vertices
-
-    def copy(self):
-        """Return a shallow copy of this graph."""
-        other = DirectedGraph()
-        other._vertices = set(self._vertices)
-        other._forwards = {k: set(v) for k, v in self._forwards.items()}
-        other._backwards = {k: set(v) for k, v in self._backwards.items()}
-        return other
-
-    def add(self, key):
-        """Add a new vertex to the graph."""
-        if key in self._vertices:
-            raise ValueError("vertex exists")
-        self._vertices.add(key)
-        self._forwards[key] = set()
-        self._backwards[key] = set()
-
-    def remove(self, key):
-        """Remove a vertex from the graph, disconnecting all edges from/to it."""
-        self._vertices.remove(key)
-        for f in self._forwards.pop(key):
-            self._backwards[f].remove(key)
-        for t in self._backwards.pop(key):
-            self._forwards[t].remove(key)
-
-    def connected(self, f, t):
-        return f in self._backwards[t] and t in self._forwards[f]
-
-    def connect(self, f, t):
-        """Connect two existing vertices.
-
-        Nothing happens if the vertices are already connected.
-        """
-        if t not in self._vertices:
-            raise KeyError(t)
-        self._forwards[f].add(t)
-        self._backwards[t].add(f)
-
-    def iter_edges(self):
-        for f, children in self._forwards.items():
-            for t in children:
-                yield f, t
-
-    def iter_children(self, key):
-        return iter(self._forwards[key])
-
-    def iter_parents(self, key):
-        return iter(self._backwards[key])
-
-
-class IteratorMapping(collections_abc.Mapping):
-    def __init__(self, mapping, accessor, appends=None):
-        self._mapping = mapping
-        self._accessor = accessor
-        self._appends = appends or {}
-
-    def __repr__(self):
-        return "IteratorMapping({!r}, {!r}, {!r})".format(
-            self._mapping,
-            self._accessor,
-            self._appends,
-        )
-
-    def __bool__(self):
-        return bool(self._mapping or self._appends)
-
-    __nonzero__ = __bool__  # XXX: Python 2.
-
-    def __contains__(self, key):
-        return key in self._mapping or key in self._appends
-
-    def __getitem__(self, k):
-        try:
-            v = self._mapping[k]
-        except KeyError:
-            return iter(self._appends[k])
-        return itertools.chain(self._accessor(v), self._appends.get(k, ()))
-
-    def __iter__(self):
-        more = (k for k in self._appends if k not in self._mapping)
-        return itertools.chain(self._mapping, more)
-
-    def __len__(self):
-        more = sum(1 for k in self._appends if k not in self._mapping)
-        return len(self._mapping) + more
-
-
-class _FactoryIterableView(object):
-    """Wrap an iterator factory returned by `find_matches()`.
-
-    Calling `iter()` on this class would invoke the underlying iterator
-    factory, making it a "collection with ordering" that can be iterated
-    through multiple times, but lacks random access methods presented in
-    built-in Python sequence types.
-    """
-
-    def __init__(self, factory):
-        self._factory = factory
-        self._iterable = None
-
-    def __repr__(self):
-        return "{}({})".format(type(self).__name__, list(self))
-
-    def __bool__(self):
-        try:
-            next(iter(self))
-        except StopIteration:
-            return False
-        return True
-
-    __nonzero__ = __bool__  # XXX: Python 2.
-
-    def __iter__(self):
-        iterable = (
-            self._factory() if self._iterable is None else self._iterable
-        )
-        self._iterable, current = itertools.tee(iterable)
-        return current
-
-
-class _SequenceIterableView(object):
-    """Wrap an iterable returned by find_matches().
-
-    This is essentially just a proxy to the underlying sequence that provides
-    the same interface as `_FactoryIterableView`.
-    """
-
-    def __init__(self, sequence):
-        self._sequence = sequence
-
-    def __repr__(self):
-        return "{}({})".format(type(self).__name__, self._sequence)
-
-    def __bool__(self):
-        return bool(self._sequence)
-
-    __nonzero__ = __bool__  # XXX: Python 2.
-
-    def __iter__(self):
-        return iter(self._sequence)
-
-
-def build_iter_view(matches):
-    """Build an iterable view from the value returned by `find_matches()`."""
-    if callable(matches):
-        return _FactoryIterableView(matches)
-    if not isinstance(matches, collections_abc.Sequence):
-        matches = list(matches)
-    return _SequenceIterableView(matches)
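
A minimal sketch of how the DirectedGraph class from the deleted module is meant to be driven; the vertex names are illustrative assumptions:

    # Hypothetical usage sketch (vertex names "a" and "b" are made up).
    g = DirectedGraph()
    g.add("a")
    g.add("b")
    g.connect("a", "b")                        # directed edge a -> b
    assert g.connected("a", "b")               # forward and backward maps agree
    assert list(g.iter_children("a")) == ["b"]
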
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/wait.py DELETED
@@ -1,152 +0,0 @@
-import errno
-import select
-import sys
-from functools import partial
-
-try:
-    from time import monotonic
-except ImportError:
-    from time import time as monotonic
-
-__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
-
-
-class NoWayToWaitForSocketError(Exception):
-    pass
-
-
-# How should we wait on sockets?
-#
-# There are two types of APIs you can use for waiting on sockets: the fancy
-# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
-# select/poll. The stateful APIs are more efficient when you have a lots of
-# sockets to keep track of, because you can set them up once and then use them
-# lots of times. But we only ever want to wait on a single socket at a time
-# and don't want to keep track of state, so the stateless APIs are actually
-# more efficient. So we want to use select() or poll().
-#
-# Now, how do we choose between select() and poll()? On traditional Unixes,
-# select() has a strange calling convention that makes it slow, or fail
-# altogether, for high-numbered file descriptors. The point of poll() is to fix
-# that, so on Unixes, we prefer poll().
-#
-# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
-# for it), but that's OK, because on Windows, select() doesn't have this
-# strange calling convention; plain select() works fine.
-#
-# So: on Windows we use select(), and everywhere else we use poll(). We also
-# fall back to select() in case poll() is somehow broken or missing.
-
-if sys.version_info >= (3, 5):
-    # Modern Python, that retries syscalls by default
-    def _retry_on_intr(fn, timeout):
-        return fn(timeout)
-
-else:
-    # Old and broken Pythons.
-    def _retry_on_intr(fn, timeout):
-        if timeout is None:
-            deadline = float("inf")
-        else:
-            deadline = monotonic() + timeout
-
-        while True:
-            try:
-                return fn(timeout)
-            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
-            except (OSError, select.error) as e:
-                # 'e.args[0]' incantation works for both OSError and select.error
-                if e.args[0] != errno.EINTR:
-                    raise
-                else:
-                    timeout = deadline - monotonic()
-                    if timeout < 0:
-                        timeout = 0
-                    if timeout == float("inf"):
-                        timeout = None
-                    continue
-
-
-def select_wait_for_socket(sock, read=False, write=False, timeout=None):
-    if not read and not write:
-        raise RuntimeError("must specify at least one of read=True, write=True")
-    rcheck = []
-    wcheck = []
-    if read:
-        rcheck.append(sock)
-    if write:
-        wcheck.append(sock)
-    # When doing a non-blocking connect, most systems signal success by
-    # marking the socket writable. Windows, though, signals success by marked
-    # it as "exceptional". We paper over the difference by checking the write
-    # sockets for both conditions. (The stdlib selectors module does the same
-    # thing.)
-    fn = partial(select.select, rcheck, wcheck, wcheck)
-    rready, wready, xready = _retry_on_intr(fn, timeout)
-    return bool(rready or wready or xready)
-
-
-def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
-    if not read and not write:
-        raise RuntimeError("must specify at least one of read=True, write=True")
-    mask = 0
-    if read:
-        mask |= select.POLLIN
-    if write:
-        mask |= select.POLLOUT
-    poll_obj = select.poll()
-    poll_obj.register(sock, mask)
-
-    # For some reason, poll() takes timeout in milliseconds
-    def do_poll(t):
-        if t is not None:
-            t *= 1000
-        return poll_obj.poll(t)
-
-    return bool(_retry_on_intr(do_poll, timeout))
-
-
-def null_wait_for_socket(*args, **kwargs):
-    raise NoWayToWaitForSocketError("no select-equivalent available")
-
-
-def _have_working_poll():
-    # Apparently some systems have a select.poll that fails as soon as you try
-    # to use it, either due to strange configuration or broken monkeypatching
-    # from libraries like eventlet/greenlet.
-    try:
-        poll_obj = select.poll()
-        _retry_on_intr(poll_obj.poll, 0)
-    except (AttributeError, OSError):
-        return False
-    else:
-        return True
-
-
-def wait_for_socket(*args, **kwargs):
-    # We delay choosing which implementation to use until the first time we're
-    # called. We could do it at import time, but then we might make the wrong
-    # decision if someone goes wild with monkeypatching select.poll after
-    # we're imported.
-    global wait_for_socket
-    if _have_working_poll():
-        wait_for_socket = poll_wait_for_socket
-    elif hasattr(select, "select"):
-        wait_for_socket = select_wait_for_socket
-    else:  # Platform-specific: Appengine.
-        wait_for_socket = null_wait_for_socket
-    return wait_for_socket(*args, **kwargs)
-
-
-def wait_for_read(sock, timeout=None):
-    """Waits for reading to be available on a given socket.
-    Returns True if the socket is readable, or False if the timeout expired.
-    """
-    return wait_for_socket(sock, read=True, timeout=timeout)
-
-
-def wait_for_write(sock, timeout=None):
-    """Waits for writing to be available on a given socket.
-    Returns True if the socket is readable, or False if the timeout expired.
-    """
-    return wait_for_socket(sock, write=True, timeout=timeout)
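
A small sketch of how the deleted wait helpers are typically called, assuming the functions above are importable and that a local socketpair and a one-second timeout are acceptable stand-ins:

    # Hypothetical usage sketch (socketpair and timeout values are assumptions).
    import socket
    a, b = socket.socketpair()
    b.sendall(b"x")                      # make one end readable
    print(wait_for_read(a, timeout=1))   # -> True: data is waiting
    print(wait_for_write(a, timeout=1))  # -> True: the send buffer can accept writes
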
spaces/CForGETaass/vits-uma-genshin-honkai/utils.py DELETED
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict= {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})" .format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10,2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
-    audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
-    return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams =HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams =HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
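
A brief sketch of how the deleted HParams container and config loader fit together; the config path and the nested keys below are illustrative assumptions about the JSON layout, not values confirmed by the deleted file:

    # Hypothetical usage sketch (path and keys are assumptions).
    hps = get_hparams_from_file("./configs/base.json")
    print(hps.data.sampling_rate)   # nested dicts become nested HParams, so attribute access works
    print("train" in hps)           # __contains__ checks top-level keys
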
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/transforms.py DELETED
@@ -1,102 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-from torch.nn import functional as F
-from torchvision.transforms.functional import resize, to_pil_image  # type: ignore
-
-from copy import deepcopy
-from typing import Tuple
-
-
-class ResizeLongestSide:
-    """
-    Resizes images to longest side 'target_length', as well as provides
-    methods for resizing coordinates and boxes. Provides methods for
-    transforming both numpy array and batched torch tensors.
-    """
-
-    def __init__(self, target_length: int) -> None:
-        self.target_length = target_length
-
-    def apply_image(self, image: np.ndarray) -> np.ndarray:
-        """
-        Expects a numpy array with shape HxWxC in uint8 format.
-        """
-        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
-        return np.array(resize(to_pil_image(image), target_size))
-
-    def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
-        """
-        Expects a numpy array of length 2 in the final dimension. Requires the
-        original image size in (H, W) format.
-        """
-        old_h, old_w = original_size
-        new_h, new_w = self.get_preprocess_shape(
-            original_size[0], original_size[1], self.target_length
-        )
-        coords = deepcopy(coords).astype(float)
-        coords[..., 0] = coords[..., 0] * (new_w / old_w)
-        coords[..., 1] = coords[..., 1] * (new_h / old_h)
-        return coords
-
-    def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
-        """
-        Expects a numpy array shape Bx4. Requires the original image size
-        in (H, W) format.
-        """
-        boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
-        return boxes.reshape(-1, 4)
-
-    def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
-        """
-        Expects batched images with shape BxCxHxW and float format. This
-        transformation may not exactly match apply_image. apply_image is
-        the transformation expected by the model.
-        """
-        # Expects an image in BCHW format. May not exactly match apply_image.
-        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
-        return F.interpolate(
-            image, target_size, mode="bilinear", align_corners=False, antialias=True
-        )
-
-    def apply_coords_torch(
-        self, coords: torch.Tensor, original_size: Tuple[int, ...]
-    ) -> torch.Tensor:
-        """
-        Expects a torch tensor with length 2 in the last dimension. Requires the
-        original image size in (H, W) format.
-        """
-        old_h, old_w = original_size
-        new_h, new_w = self.get_preprocess_shape(
-            original_size[0], original_size[1], self.target_length
-        )
-        coords = deepcopy(coords).to(torch.float)
-        coords[..., 0] = coords[..., 0] * (new_w / old_w)
-        coords[..., 1] = coords[..., 1] * (new_h / old_h)
-        return coords
-
-    def apply_boxes_torch(
-        self, boxes: torch.Tensor, original_size: Tuple[int, ...]
-    ) -> torch.Tensor:
-        """
-        Expects a torch tensor with shape Bx4. Requires the original image
-        size in (H, W) format.
-        """
-        boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
-        return boxes.reshape(-1, 4)
-
-    @staticmethod
-    def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
-        """
-        Compute the output size given input size and target long side length.
-        """
-        scale = long_side_length * 1.0 / max(oldh, oldw)
-        newh, neww = oldh * scale, oldw * scale
-        neww = int(neww + 0.5)
-        newh = int(neww + 0.5) if False else int(newh + 0.5)
-        return (newh, neww)
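
A worked example of the get_preprocess_shape arithmetic above; the input size and target length are illustrative assumptions:

    # Hypothetical worked example: a 600x800 image with target long side 1024.
    # scale = 1024 / max(600, 800) = 1.28
    # newh = int(600 * 1.28 + 0.5) = 768, neww = int(800 * 1.28 + 0.5) = 1024
    print(ResizeLongestSide.get_preprocess_shape(600, 800, 1024))  # -> (768, 1024)
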
spaces/Cat125/text-generator-v3/classes.py DELETED
@@ -1,28 +0,0 @@
-# import pymorphy3
-
-# morph = pymorphy3.MorphAnalyzer()
-
-# The Token class takes in a token, previous token, text, all tokens, token index, and a boolean value and creates a
-# token object with attributes such as score and contexts.
-class Token:
-    def __init__(self, token, prev_token, text, tokens, i, turbo = False):
-        self.token = token
-        self.prev_token = prev_token
-        self.score = 0
-        self.contexts = []
-        for t in tokens[i-10:i+10]:
-            # if turbo:
-            self.contexts.append(t)
-            # continue
-            # result = morph.parse(w)
-            # if len(result) == 0:
-            # continue
-            # result = result[0]
-            # if 'LATN' in result.tag:
-            # continue
-            # if result.tag.POS == 'NOUN':
-            # self.contexts.append(t)
-            # self.contexts.append(result.normal_form)
-
-    def __repr__(self):
-        return f"'{self.prev_token} > {self.token} ({len(self.contexts)} contexts)'"
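
A tiny sketch of constructing the deleted Token class directly; the token ids and text are made-up values, not data from the repository:

    # Hypothetical usage sketch (ids and text are assumptions).
    tokens = [101, 2023, 2003, 1037, 3231, 102]
    t = Token(tokens[2], tokens[1], "this is a test", tokens, 2)
    print(t)  # -> '2023 > 2003 (6 contexts)': the +/-10 window around index 2 covers all six tokens
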
spaces/Cat125/text-generator-v3/train.py DELETED
@@ -1,65 +0,0 @@
-import argparse
-import re
-from pprint import pprint
-
-from tokenizers import Tokenizer
-from tqdm import trange
-
-from classes import Token
-from datamanager import get_data, get_texts, models, set_data, set_data_v3
-
-turbo = False
-tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
-
-def process_text(db, db3, text):
-    tokens = tokenizer.encode(text).ids
-    for i in trange(0, len(tokens), colour="green", unit="tokens"):
-        token = tokens[i]
-        prev_token = 0 if i == 0 else tokens[i - 1]
-        t = Token(token, prev_token, text, tokens, turbo)
-        if t not in db:
-            db.append(t)
-        if prev_token not in db3:
-            db3[prev_token] = []
-        db3[prev_token].append(t)
-
-def train(model_name):
-    db = []
-    db3 = {}
-    print(f'Rebuilding database for "{model_name}"...')
-    k = 0
-    texts = get_texts(model_name)
-    total_texts = len(texts)
-    for text in texts:
-        k += 1
-        print(f'Processing text {k} of {total_texts}...')
-        process_text(db, db3, text)
-
-    set_data(model_name, db)
-    models[model_name]["db"] = db
-    set_data_v3(model_name, db3)
-    models[model_name]["db3"] = db3
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(
-        prog='Text Generator v3',
-        description='Generates text from a text file')
-    parser.add_argument('-r', '--rebuild', action='extend', nargs="+", type=str)
-    parser.add_argument('-l', '--log', action='extend', nargs="+", type=str)
-    parser.add_argument('-t', '--turbo', action='store_true')
-    args = parser.parse_args()
-
-    if args.rebuild:
-        models_to_rebuild = args.rebuild
-        if args.rebuild[0] in ('*', 'all'):
-            models_to_rebuild = list(models.keys())
-        for model in models_to_rebuild:
-            if model not in models:
-                raise ValueError("Model '%s' not found" % model)
-            turbo = args.turbo
-            train(model)
-    if args.log:
-        for model in args.log:
-            if model not in models:
-                raise ValueError("Model '%s' not found" % model)
-            pprint(get_data(model))
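
For reference, a hedged sketch of invoking the deleted script's CLI, based only on the argparse flags above; the model names depend on whatever datamanager.models registers and are assumptions here:

    # Hypothetical invocations (model names are assumptions).
    #   python train.py --rebuild all --turbo     # rebuild every registered model
    #   python train.py -r my_model -l my_model   # rebuild one model, then pretty-print its data
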
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/server.py DELETED
@@ -1,148 +0,0 @@
-"""
-A Simple server used to show altair graphics from a prompt or script.
-
-This is adapted from the mpld3 package; see
-https://github.com/mpld3/mpld3/blob/master/mpld3/_server.py
-"""
-import sys
-import threading
-import webbrowser
-import socket
-from http import server
-from io import BytesIO as IO
-import itertools
-import random
-
-JUPYTER_WARNING = """
-Note: if you're in the Jupyter notebook, Chart.serve() is not the best
-way to view plots. Consider using Chart.display().
-You must interrupt the kernel to cancel this command.
-"""
-
-
-# Mock server used for testing
-
-
-class MockRequest:
-    def makefile(self, *args, **kwargs):
-        return IO(b"GET /")
-
-    def sendall(self, response):
-        pass
-
-
-class MockServer:
-    def __init__(self, ip_port, Handler):
-        Handler(MockRequest(), ip_port[0], self)
-
-    def serve_forever(self):
-        pass
-
-    def server_close(self):
-        pass
-
-
-def generate_handler(html, files=None):
-    if files is None:
-        files = {}
-
-    class MyHandler(server.BaseHTTPRequestHandler):
-        def do_GET(self):
-            """Respond to a GET request."""
-            if self.path == "/":
-                self.send_response(200)
-                self.send_header("Content-type", "text/html")
-                self.end_headers()
-                self.wfile.write(html.encode())
-            elif self.path in files:
-                content_type, content = files[self.path]
-                self.send_response(200)
-                self.send_header("Content-type", content_type)
-                self.end_headers()
-                self.wfile.write(content.encode())
-            else:
-                self.send_error(404)
-
-    return MyHandler
-
-
-def find_open_port(ip, port, n=50):
-    """Find an open port near the specified port"""
-    ports = itertools.chain(
-        (port + i for i in range(n)), (port + random.randint(-2 * n, 2 * n))
-    )
-
-    for port in ports:
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        result = s.connect_ex((ip, port))
-        s.close()
-        if result != 0:
-            return port
-    raise ValueError("no open ports found")
-
-
-def serve(
-    html,
-    ip="127.0.0.1",
-    port=8888,
-    n_retries=50,
-    files=None,
-    jupyter_warning=True,
-    open_browser=True,
-    http_server=None,
-):
-    """Start a server serving the given HTML, and (optionally) open a browser
-
-    Parameters
-    ----------
-    html : string
-        HTML to serve
-    ip : string (default = '127.0.0.1')
-        ip address at which the HTML will be served.
-    port : int (default = 8888)
-        the port at which to serve the HTML
-    n_retries : int (default = 50)
-        the number of nearby ports to search if the specified port is in use.
-    files : dictionary (optional)
-        dictionary of extra content to serve
-    jupyter_warning : bool (optional)
-        if True (default), then print a warning if this is used within Jupyter
-    open_browser : bool (optional)
-        if True (default), then open a web browser to the given HTML
-    http_server : class (optional)
-        optionally specify an HTTPServer class to use for showing the
-        figure. The default is Python's basic HTTPServer.
-    """
-    port = find_open_port(ip, port, n_retries)
-    Handler = generate_handler(html, files)
-
-    if http_server is None:
-        srvr = server.HTTPServer((ip, port), Handler)
-    else:
-        srvr = http_server((ip, port), Handler)
-
-    if jupyter_warning:
-        try:
-            __IPYTHON__  # noqa
-        except NameError:
-            pass
-        else:
-            print(JUPYTER_WARNING)
-
-    # Start the server
-    print("Serving to http://{}:{}/ [Ctrl-C to exit]".format(ip, port))
-    sys.stdout.flush()
-
-    if open_browser:
-        # Use a thread to open a web browser pointing to the server
-        def b():
-            return webbrowser.open("http://{}:{}".format(ip, port))
-
-        threading.Thread(target=b).start()
-
-    try:
-        srvr.serve_forever()
-    except (KeyboardInterrupt, SystemExit):
-        print("\nstopping Server...")
-
-    srvr.server_close()
|
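The `serve()` helper above pairs `generate_handler()` with `find_open_port()` and then blocks until interrupted. A minimal usage sketch (the page content and the `/style.css` entry are hypothetical values, not part of the deleted module):

page = "<html><body><h1>Hello</h1><link rel='stylesheet' href='/style.css'></body></html>"
extra = {"/style.css": ("text/css", "h1 { color: steelblue; }")}

# Serves the page plus the extra file on a nearby free port; blocks until Ctrl-C.
serve(page, ip="127.0.0.1", port=8888, files=extra, open_browser=False)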
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_sockets.py
DELETED
@@ -1,160 +0,0 @@
-from __future__ import annotations
-
-import socket
-from abc import abstractmethod
-from contextlib import AsyncExitStack
-from io import IOBase
-from ipaddress import IPv4Address, IPv6Address
-from socket import AddressFamily
-from typing import (
-    Any,
-    Callable,
-    Collection,
-    Mapping,
-    Tuple,
-    TypeVar,
-    Union,
-)
-
-from .._core._tasks import create_task_group
-from .._core._typedattr import (
-    TypedAttributeProvider,
-    TypedAttributeSet,
-    typed_attribute,
-)
-from ._streams import ByteStream, Listener, UnreliableObjectStream
-from ._tasks import TaskGroup
-
-IPAddressType = Union[str, IPv4Address, IPv6Address]
-IPSockAddrType = Tuple[str, int]
-SockAddrType = Union[IPSockAddrType, str]
-UDPPacketType = Tuple[bytes, IPSockAddrType]
-T_Retval = TypeVar("T_Retval")
-
-
-class SocketAttribute(TypedAttributeSet):
-    #: the address family of the underlying socket
-    family: AddressFamily = typed_attribute()
-    #: the local socket address of the underlying socket
-    local_address: SockAddrType = typed_attribute()
-    #: for IP addresses, the local port the underlying socket is bound to
-    local_port: int = typed_attribute()
-    #: the underlying stdlib socket object
-    raw_socket: socket.socket = typed_attribute()
-    #: the remote address the underlying socket is connected to
-    remote_address: SockAddrType = typed_attribute()
-    #: for IP addresses, the remote port the underlying socket is connected to
-    remote_port: int = typed_attribute()
-
-
-class _SocketProvider(TypedAttributeProvider):
-    @property
-    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
-        from .._core._sockets import convert_ipv6_sockaddr as convert
-
-        attributes: dict[Any, Callable[[], Any]] = {
-            SocketAttribute.family: lambda: self._raw_socket.family,
-            SocketAttribute.local_address: lambda: convert(
-                self._raw_socket.getsockname()
-            ),
-            SocketAttribute.raw_socket: lambda: self._raw_socket,
-        }
-        try:
-            peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
-        except OSError:
-            peername = None
-
-        # Provide the remote address for connected sockets
-        if peername is not None:
-            attributes[SocketAttribute.remote_address] = lambda: peername
-
-        # Provide local and remote ports for IP based sockets
-        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
-            attributes[
-                SocketAttribute.local_port
-            ] = lambda: self._raw_socket.getsockname()[1]
-            if peername is not None:
-                remote_port = peername[1]
-                attributes[SocketAttribute.remote_port] = lambda: remote_port
-
-        return attributes
-
-    @property
-    @abstractmethod
-    def _raw_socket(self) -> socket.socket:
-        pass
-
-
-class SocketStream(ByteStream, _SocketProvider):
-    """
-    Transports bytes over a socket.
-
-    Supports all relevant extra attributes from :class:`~SocketAttribute`.
-    """
-
-
-class UNIXSocketStream(SocketStream):
-    @abstractmethod
-    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
-        """
-        Send file descriptors along with a message to the peer.
-
-        :param message: a non-empty bytestring
-        :param fds: a collection of files (either numeric file descriptors or open file or socket
-            objects)
-        """
-
-    @abstractmethod
-    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
-        """
-        Receive file descriptors along with a message from the peer.
-
-        :param msglen: length of the message to expect from the peer
-        :param maxfds: maximum number of file descriptors to expect from the peer
-        :return: a tuple of (message, file descriptors)
-        """
-
-
-class SocketListener(Listener[SocketStream], _SocketProvider):
-    """
-    Listens to incoming socket connections.
-
-    Supports all relevant extra attributes from :class:`~SocketAttribute`.
-    """
-
-    @abstractmethod
-    async def accept(self) -> SocketStream:
-        """Accept an incoming connection."""
-
-    async def serve(
-        self,
-        handler: Callable[[SocketStream], Any],
-        task_group: TaskGroup | None = None,
-    ) -> None:
-        async with AsyncExitStack() as exit_stack:
-            if task_group is None:
-                task_group = await exit_stack.enter_async_context(create_task_group())
-
-            while True:
-                stream = await self.accept()
-                task_group.start_soon(handler, stream)
-
-
-class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
-    """
-    Represents an unconnected UDP socket.
-
-    Supports all relevant extra attributes from :class:`~SocketAttribute`.
-    """
-
-    async def sendto(self, data: bytes, host: str, port: int) -> None:
-        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port)))."""
-        return await self.send((data, (host, port)))
-
-
-class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
-    """
-    Represents a connected UDP socket.
-
-    Supports all relevant extra attributes from :class:`~SocketAttribute`.
-    """
spaces/Dagfinn1962/prodia2/flipper.py
DELETED
@@ -1,31 +0,0 @@
-import numpy as np
-import gradio as gr
-
-
-def flip_text(x):
-    return x[::-1]
-
-
-def flip_image(x):
-    return np.fliplr(x)
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("Flip text or image files using this demo.")
-    with gr.Tab("Flip Text"):
-        text_input = gr.Textbox()
-        text_output = gr.Textbox()
-        text_button = gr.Button("Flip")
-    with gr.Tab("Flip Image"):
-        with gr.Row():
-            image_input = gr.Image()
-            image_output = gr.Image()
-        image_button = gr.Button("Flip")
-
-    with gr.Accordion("Open for More!"):
-        gr.Markdown("Look at me...")
-
-    text_button.click(flip_text, inputs=text_input, outputs=text_output)
-    image_button.click(flip_image, inputs=image_input, outputs=image_output)
-
-demo.launch()
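The two callbacks in the deleted demo are plain functions, so they can be exercised without launching the Blocks UI; a quick sanity check with arbitrary inputs:

import numpy as np

assert flip_text("abc") == "cba"  # string reversal
img = np.arange(6).reshape(2, 3)
assert (flip_image(img) == img[:, ::-1]).all()  # np.fliplr mirrors columns left to right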
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/other/init_env.py
DELETED
@@ -1,37 +0,0 @@
-"""
-@Date: 2021/08/15
-@description:
-"""
-import random
-import torch
-import torch.backends.cudnn as cudnn
-import numpy as np
-import os
-import cv2
-
-
-def init_env(seed, deterministic=False, loader_work_num=0):
-    # Fix seed
-    # Python & NumPy
-    np.random.seed(seed)
-    random.seed(seed)
-    os.environ['PYTHONHASHSEED'] = str(seed)
-
-    # PyTorch
-    torch.manual_seed(seed)  # seed the CPU RNG
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed(seed)  # seed the current GPU
-        torch.cuda.manual_seed_all(seed)  # seed all GPUs
-
-    # cuDNN
-    if deterministic:
-        # reproducible runs
-        torch.backends.cudnn.benchmark = False
-        torch.backends.cudnn.deterministic = True  # always pick the same (default) convolution algorithm
-    else:
-        cudnn.benchmark = True  # enable autotuning when input shapes/types barely change
-        torch.backends.cudnn.deterministic = False
-
-    # Using multiple threads in Opencv can cause deadlocks
-    if loader_work_num != 0:
-        cv2.setNumThreads(0)
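`init_env()` is intended to be called once at startup, before models and dataloaders are built; a hypothetical call site (seed and worker count are placeholder values):

# Seed every RNG and trade cuDNN autotuning speed for reproducibility.
init_env(seed=123, deterministic=True, loader_work_num=4)

Passing a non-zero `loader_work_num` (mirroring the DataLoader's `num_workers`) disables OpenCV threading, which the comment above flags as a deadlock risk.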
spaces/DollieHell/pisa/Dockerfile
DELETED
@@ -1,21 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && \
-
-    apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
spaces/DuckyPolice/DeciDiffusion-v1-0/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: DeciDiffusion-v1-0
-emoji: 🐨
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.43.2
-app_file: app.py
-pinned: true
-disable_embedding: true
-inference: true
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/EDGAhab/Paimon-Talking/app.py
DELETED
@@ -1,97 +0,0 @@
-import gradio as gr
-import os
-os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
-import torch
-
-import commons
-import utils
-from models import SynthesizerTrn
-from text.symbols import symbols
-from text import text_to_sequence
-
-import IPython.display as ipd
-
-import json
-import math
-
-# new imports
-import matplotlib.pyplot as plt
-import re
-
-from torch import nn
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-
-from models import SynthesizerTrn
-import unicodedata
-import openai
-
-def get_text(text, hps):
-    text_norm = text_to_sequence(text, hps.data.text_cleaners)
-    if hps.data.add_blank:
-        text_norm = commons.intersperse(text_norm, 0)
-    text_norm = torch.LongTensor(text_norm)
-    return text_norm
-
-hps = utils.get_hparams_from_file("configs/biaobei_base.json")
-
-net_g = SynthesizerTrn(
-    len(symbols),
-    hps.data.filter_length // 2 + 1,
-    hps.train.segment_size // hps.data.hop_length,
-    **hps.model)
-_ = net_g.eval()
-
-_ = utils.load_checkpoint("G_1434000.pth", net_g, None)
-
-def friend_chat(text, tts_input3):
-    call_name = "亚托克斯"
-    openai.api_key = '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'
-    identity = tts_input3
-    start_sequence = '\n'+str(call_name)+':'
-    restart_sequence = "\nYou: "
-    all_text = identity + restart_sequence
-    if 1 == 1:
-        prompt0 = text  # current prompt
-        if text == 'quit':
-            return prompt0
-        prompt = identity + prompt0 + start_sequence
-
-        response = openai.Completion.create(
-            model="text-davinci-003",
-            prompt=prompt,
-            temperature=0.5,
-            max_tokens=1000,
-            top_p=1.0,
-            frequency_penalty=0.5,
-            presence_penalty=0.0,
-            stop=["\nYou:"]
-        )
-        return response['choices'][0]['text'].strip()
-
-def sle(text, tts_input3):
-    text = friend_chat(text, tts_input3).replace('\n','。').replace(' ',',')
-    return text
-
-def infer(text, tts_input3):
-    stn_tst = get_text(sle(text, tts_input3), hps)
-    with torch.no_grad():
-        x_tst = stn_tst.unsqueeze(0)
-        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
-        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
-    sampling_rate = 22050
-    return (sampling_rate, audio)
-
-app = gr.Blocks()
-
-with app:
-    with gr.Tabs():
-
-        with gr.TabItem("Basic"):
-
-            tts_input1 = gr.TextArea(label="输入你想跟剑魔说的话", value="我是暮光星灵佐伊,我要三天之内杀了你")
-            tts_input3 = gr.TextArea(label="写上你给他的设定", value="你叫亚托克斯,俗称剑魔,世界的终结者。")
-            tts_submit = gr.Button("Generate", variant="primary")
-            tts_output2 = gr.Audio(label="Output")
-            tts_submit.click(infer, [tts_input1,tts_input3], [tts_output2])
-app.launch()
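The deleted app hardcodes an OpenAI API key inside `friend_chat()`, and its Gradio labels and default prompts are Chinese (roughly: "enter what you want to say to Aatrox" and "write the persona you give him"). A safer sketch for the key, assuming the same legacy openai 0.x client, reads it from the environment instead:

import os
import openai

# Take the key from the environment rather than committing it to the repository.
openai.api_key = os.environ.get("OPENAI_API_KEY")
if not openai.api_key:
    raise RuntimeError("OPENAI_API_KEY is not set")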