Commit 983febd
Parent(s): 6d437f7

Update parquet files (step 76 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Skin Pack 32 Bit For Windows 7 WORK.md +0 -43
- spaces/1gistliPinn/ChatGPT4/Examples/Bootstrap Studio 4.5.8 Crack License Key Full 2020 TOP.md +0 -41
- spaces/1gistliPinn/ChatGPT4/Examples/Cara Menghilangkan Windows License Valid For 90 Days Hit.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Crack See Electrical V7 U Torrent [UPD].md +0 -6
- spaces/1phancelerku/anime-remove-background/Cannon Shot! APK Free Download - Enjoy the Fun and Challenge of Shooting Cannons.md +0 -150
- spaces/1phancelerku/anime-remove-background/Enjoy Unlimited Access to Exclusive Anime Content with Bstation MOD Premium APK.md +0 -107
- spaces/AI-Edify/demo-gpt3.5-turbo/README.md +0 -14
- spaces/AISuperheroes/02GR-ASR-Memory/README.md +0 -13
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/sde_team.py +0 -30
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/clock.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/interception-plugin.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch2/NinePatch.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/ResetChildPosition.js +0 -15
- spaces/AhmedRashwan369/ChatGPT4/app.py +0 -193
- spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/avd_network.py +0 -65
- spaces/Ali-C137/Motivation-Letter-Generator/README.md +0 -13
- spaces/AliUsama98/Usama_TextClassifier/app.py +0 -3
- spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/custom_ops.py +0 -198
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/README.md +0 -143
- spaces/Anew5128/Anew51/server.py +0 -964
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/__init__.py +0 -29
- spaces/AquaSuisei/ChatGPTXE/modules/config.py +0 -145
- spaces/Archan/ArXivAudio/app.py +0 -106
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/__init__.py +0 -1
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/zoom_out_app.py +0 -140
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/locations/_sysconfig.py +0 -213
- spaces/Atualli/yoloxTeste/configs/yolox_l.py +0 -15
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin_meta.py +0 -350
- spaces/Banbri/zcvzcv/README.md +0 -158
- spaces/Benson/text-generation/Examples/Descarga Gratuita De Fuego Mx Mod Apk 50 Mb.md +0 -69
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/models/vqgan.py +0 -363
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/special.py +0 -52
- spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/exceptions.py +0 -122
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/more.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/upload_docs.py +0 -213
- spaces/CVPR/GFPGAN-example/gfpgan/models/__init__.py +0 -10
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/copy_if.h +0 -23
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reduce_by_key.h +0 -44
- spaces/CVPR/WALT/cwalt/utils.py +0 -168
- spaces/CVPR/WALT/mmdet/models/detectors/mask_rcnn.py +0 -24
- spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/bbox_head.py +0 -483
- spaces/CVPR/regionclip-demo/detectron2/checkpoint/detection_checkpoint.py +0 -134
- spaces/Chaitanya01/InvestingPlatform/googleNewsSlackAlerts.py +0 -47
- spaces/CikeyQI/Yunzai/README.md +0 -10
- spaces/CikeyQI/meme-api/meme_generator/memes/incivilization/__init__.py +0 -43
- spaces/ClassCat/mnist-classification/app.py +0 -83
- spaces/DEEMOSTECH/ChatAvatar/static/js/main.1b1ee80c.js +0 -0
- spaces/DEEMOSTECH/ChatAvatar/static/js/main.c187623b.js +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-93c91554.css +0 -1
- spaces/DaFujaTyping/hf-Chat-ui/src/lib/types/Conversation.ts +0 -19
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Skin Pack 32 Bit For Windows 7 WORK.md
DELETED
@@ -1,43 +0,0 @@
-
-<h1>How to Download Skin Pack 32 Bit for Windows 7</h1>
-<p>If you are bored with the default look of your Windows 7 desktop, you might want to try a skin pack. A skin pack is a collection of themes, icons, wallpapers, and other elements that can change the appearance of your system. Skin packs are easy to install and uninstall, and they can give your computer a fresh and unique look.</p>
-<h2>download skin pack 32 bit for windows 7</h2><br /><p><b><b>DOWNLOAD</b> ►►► <a href="https://byltly.com/2uKveQ">https://byltly.com/2uKveQ</a></b></p><br /><br />
-<p>One of the most popular skin packs for Windows 7 is the 32 bit version. This skin pack is compatible with both 32 bit and 64 bit versions of Windows 7, but it is designed to optimize the performance and memory usage of the 32 bit system. The 32 bit skin pack includes various themes inspired by different operating systems, such as Windows 8, Mac OS X, Android, iOS, and more. It also comes with custom icons, cursors, sounds, fonts, and boot screens.</p>
-<p>To download the skin pack 32 bit for Windows 7, you need to follow these simple steps:</p>
-<ol>
-<li>Go to the official website of the skin pack creator: <a href="https://skinpacks.com/download/windows-7/32-bit-skin-pack/">https://skinpacks.com/download/windows-7/32-bit-skin-pack/</a></li>
-<li>Scroll down and click on the download link that matches your system architecture (32 bit or 64 bit).</li>
-<li>Wait for the download to finish and then run the installer file.</li>
-<li>Follow the instructions on the screen and choose the components you want to install.</li>
-<li>Restart your computer and enjoy your new skin pack.</li>
-</ol>
-<p>Note: Before installing any skin pack, it is recommended to create a system restore point or backup your data in case something goes wrong. You can also uninstall the skin pack anytime from the control panel or by running the uninstaller file.</p>
-<p>With the skin pack 32 bit for Windows 7, you can transform your desktop into a modern and stylish one. Download it today and see for yourself!</p>
-
-<h2>Benefits of Using Skin Pack 32 Bit for Windows 7</h2>
-<p>Using a skin pack can have many benefits for your Windows 7 system. Here are some of them:</p>
-<p></p>
-<ul>
-<li>It can improve the visual appeal of your desktop and make it more attractive and enjoyable to use.</li>
-<li>It can enhance the functionality of your system by adding new features and shortcuts.</li>
-<li>It can boost the performance and speed of your system by optimizing the memory usage and reducing the resource consumption.</li>
-<li>It can personalize your system according to your preferences and tastes.</li>
-<li>It can make your system more secure by hiding or disabling unwanted elements and settings.</li>
-</ul>
-<p>With the skin pack 32 bit for Windows 7, you can experience all these benefits and more. You can choose from a variety of themes and customize them to suit your needs. You can also switch between different themes easily and quickly.</p>
-
-<h2>How to Customize Skin Pack 32 Bit for Windows 7</h2>
-<p>One of the best things about the skin pack 32 bit for Windows 7 is that it is very customizable. You can change many aspects of the skin pack to make it fit your style and preferences. Here are some of the things you can customize:</p>
-<ul>
-<li>The theme: You can select from different themes that mimic different operating systems, such as Windows 8, Mac OS X, Android, iOS, and more. You can also mix and match different elements from different themes to create your own unique theme.</li>
-<li>The icons: You can change the icons of your desktop, taskbar, start menu, folders, drives, and more. You can choose from hundreds of icons that come with the skin pack or download more icons from the internet.</li>
-<li>The wallpaper: You can change the wallpaper of your desktop and lock screen. You can choose from dozens of wallpapers that come with the skin pack or use your own images.</li>
-<li>The sounds: You can change the sounds of your system, such as the startup sound, shutdown sound, error sound, notification sound, and more. You can choose from various sounds that come with the skin pack or use your own sounds.</li>
-<li>The fonts: You can change the fonts of your system, such as the title bar font, menu font, icon font, and more. You can choose from various fonts that come with the skin pack or install more fonts from the internet.</li>
-</ul>
-<p>To customize the skin pack 32 bit for Windows 7, you need to open the skin pack tool that comes with the installer. You can access it from the start menu or the desktop shortcut. From there, you can select the components you want to customize and apply the changes. You may need to restart your computer for some changes to take effect.</p>
-
-<h2>Conclusion</h2>
-<p>The skin pack 32 bit for Windows 7 is a great way to change the look and feel of your system. It is easy to install and uninstall, compatible with both 32 bit and 64 bit versions of Windows 7, and offers many benefits and customization options. If you want to give your system a makeover, download the skin pack 32 bit for Windows 7 today!</p> 81aa517590<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Bootstrap Studio 4.5.8 Crack License Key Full 2020 TOP.md
DELETED
@@ -1,41 +0,0 @@
-<br />
-<h1>Bootstrap Studio 4.5.8 Crack License Key Full 2020: A Powerful Web Design Tool</h1>
-
-<p>Bootstrap Studio 4.5.8 Crack is a desktop application that helps you create beautiful websites using the Bootstrap framework. It has a drag and drop interface that lets you easily add components, customize them, and preview your results in real time. You can also edit the HTML, CSS, and JavaScript code of your project with the built-in code editor.</p>
-
-<p>Bootstrap Studio 4.5.8 License Key is a premium feature that unlocks more advanced options and themes for your web design. You can use it to access hundreds of ready-made templates, icons, fonts, and components that you can mix and match to create stunning websites. You can also export your projects as static HTML files or publish them online with one click.</p>
-<h2>Bootstrap Studio 4.5.8 Crack License Key Full 2020</h2><br /><p><b><b>Download</b> >>>>> <a href="https://imgfil.com/2uxZ2T">https://imgfil.com/2uxZ2T</a></b></p><br /><br />
-
-<p>Bootstrap Studio 4.5.8 Full 2020 is the latest version of this software that comes with many improvements and bug fixes. It supports the latest Bootstrap 4 version and has a redesigned user interface that makes it easier to use. It also has a new smart forms feature that lets you create complex forms with validation and logic without writing any code.</p>
-
-<p>If you are looking for a powerful web design tool that can help you create responsive and modern websites with ease, then Bootstrap Studio 4.5.8 Crack License Key Full 2020 is the perfect choice for you. You can download it from the official website or use the crack file to activate it for free.</p>
-
-<h2>How to Use Bootstrap Studio 4.5.8 Crack License Key Full 2020</h2>
-
-<p>To use Bootstrap Studio 4.5.8 Crack License Key Full 2020, you need to follow these simple steps:</p>
-
-<ol>
-<li>Download the setup file from the official website or the crack file from the link below.</li>
-<li>Install the software on your computer and run it.</li>
-<li>Enter the license key that you received or generated from the crack file.</li>
-<li>Enjoy the full features of Bootstrap Studio 4.5.8.</li>
-</ol>
-
-<p>Note: You should always use a reliable antivirus program to scan any downloaded files before opening them. Also, you should only use the crack file for educational purposes and not for commercial use.</p>
-
-<h2>Why Choose Bootstrap Studio 4.5.8 Crack License Key Full 2020</h2>
-
-<p>Bootstrap Studio 4.5.8 Crack License Key Full 2020 is a great web design tool for many reasons. Here are some of the benefits of using it:</p>
-
-<ul>
-<li>It is easy to use and learn. You can create websites without any coding skills or experience.</li>
-<li>It is fast and efficient. You can design and preview your websites in real time and see how they look on different devices and browsers.</li>
-<li>It is flexible and customizable. You can edit the HTML, CSS, and JavaScript code of your websites and add your own custom components and plugins.</li>
-<li>It is compatible and responsive. You can create websites that work well on all platforms and screen sizes using the Bootstrap framework.</li>
-<li>It is affordable and accessible. You can get the full version of Bootstrap Studio 4.5.8 for a one-time payment of $60 or use the crack file to activate it for free.</li>
-</ul>
-
-<p>With Bootstrap Studio 4.5.8 Crack License Key Full 2020, you can unleash your creativity and make amazing websites in no time.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Cara Menghilangkan Windows License Valid For 90 Days Hit.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Cara Menghilangkan Windows License Valid For 90 Days Hit</h2><br /><p><b><b>DOWNLOAD</b> →→→ <a href="https://imgfil.com/2uy1rc">https://imgfil.com/2uy1rc</a></b></p><br /><br />
-
-If you really do want to keep taking screenshots in Webtoon to ... The utility for easy and quick creation of screenshots on Windows OS running PC. ... unlimited reading across all WEBTOON Originals update every day so there's new ... *Please use your real name and valid ID number to submit your real name ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Crack See Electrical V7 U Torrent [UPD].md
DELETED
@@ -1,6 +0,0 @@
-<h2>Crack See Electrical V7 U Torrent</h2><br /><p><b><b>Download File</b> ✑ ✑ ✑ <a href="https://imgfil.com/2uxZF7">https://imgfil.com/2uxZF7</a></b></p><br /><br />
-
-Official mirror of SarasSoft products on Rapidshare: . ... samsung tools v2.2.0.3 hwk by sarassoft 94 · crack see electrical v7 u torrent ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Cannon Shot! APK Free Download - Enjoy the Fun and Challenge of Shooting Cannons.md
DELETED
@@ -1,150 +0,0 @@
-<br />
-<h1>Cannon Shot APK: A Fun and Casual Shooting Game for Android Devices</h1>
-<p>If you are looking for a simple yet addictive shooting game that you can play on your Android device, you might want to check out <strong>Cannon Shot APK</strong>. This game is developed by SayGames Ltd, a popular developer of hyper-casual games such as Johnny Trigger, Jelly Shift, and Drive and Park. In this game, you have to fill all the buckets with balls by using your finger to move various objects and change the direction of the balls you shoot. Aim smart, complete levels, and unlock new cannons. Can you find the rare one?</p>
-<h2>cannon shot apk</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://jinyurl.com/2uNKIU">https://jinyurl.com/2uNKIU</a></b></p><br /><br />
-<p>In this article, we will give you a brief overview of what Cannon Shot APK is, how to download and install it on your Android device, how to play it, why you should play it, and some alternatives to it. We will also answer some frequently asked questions about this game. Let's get started!</p>
-<h2>What is Cannon Shot APK?</h2>
-<h3>A brief introduction to the game and its features</h3>
-<p>Cannon Shot APK is a free-to-play casual shooting game that is available on Google Play Store and other third-party websites. The game has a simple picture style, fun adventure challenge mode, and easy-to-use controls. The game features:</p>
-<ul>
-<li>Over 100 levels with different difficulty levels and obstacles</li>
-<li>Various cannons with different shapes, colors, and effects</li>
-<li>Boss fights where you have to shoot at monsters instead of buckets</li>
-<li>Floors where you can collect stars and keys to unlock chests with rewards</li>
-<li>In-app purchases where you can buy coins, no ads, or special offers</li>
-</ul>
-<h3>How to download and install Cannon Shot APK on your Android device</h3>
-<p>If you want to download and install Cannon Shot APK on your Android device, you have two options:</p>
-<ol>
-<li>You can download it from Google Play Store by searching for "Cannon Shot" or by clicking <a href="(^7^)">here</a>. This is the official and recommended way to get the game, as it ensures that you get the latest version and updates.</li>
-<li>You can download it from a third-party website by searching for "Cannon Shot APK" or by clicking <a href="(^1^)">here</a>. This is an alternative way to get the game, but it may not be as safe or secure as the first option. You may also need to enable "Unknown Sources" in your device settings to install the game.</li>
-</ol>
-<p>Once you have downloaded the game file, you can tap on it to install it on your device. The installation process may take a few seconds or minutes depending on your device performance. After the installation is complete, you can launch the game and enjoy playing it.</p>
-<h2>How to Play Cannon Shot APK?</h2>
-<h3>The basic gameplay and controls of Cannon Shot APK</h3>
-<p>The gameplay of Cannon Shot APK is very simple and intuitive. You just have to fill all the buckets with balls by shooting them from a cannon. You can use your finger to move various objects such as trampolines, fans, and magnets to change the direction of the balls. You can also tap on the screen to shoot more balls from the cannon. You have to fill all the buckets with balls before you run out of balls or time. You can see the number of balls and the time left at the top of the screen. You can also see the number of stars you have earned at the bottom of the screen. You can earn up to three stars per level depending on how well you perform.</p>
-<p>The controls of Cannon Shot APK are very easy and responsive. You just have to swipe your finger on the screen to move the objects and tap on the screen to shoot more balls. You can also pause the game by tapping on the pause button at the top right corner of the screen. You can resume, restart, or quit the game from there. You can also access the settings, shop, and floors from there.</p>
-<h3>The different levels, obstacles, and cannons in Cannon Shot APK</h3>
-<p>Cannon Shot APK has over 100 levels with different difficulty levels and obstacles. The levels are divided into floors, each with 10 levels and a boss fight. The floors have different themes such as forest, desert, ice, and lava. The obstacles include walls, platforms, spikes, portals, lasers, and more. The buckets also have different shapes, sizes, and colors. Some buckets are fixed, while others are moving or rotating. Some buckets are empty, while others are already filled with balls or other objects. You have to be careful not to shoot at the wrong buckets or waste your balls.</p>
-<p>cannon shot game apk download<br />
-cannon shot android game free download<br />
-cannon shot apk mod unlimited balls<br />
-cannon shot apk latest version<br />
-cannon shot apk for pc<br />
-cannon shot app download for android<br />
-cannon shot ball game apk<br />
-cannon shot bucket game apk<br />
-cannon shot casual game apk<br />
-cannon shot challenge game apk<br />
-cannon shot com.cannonshot.game apk<br />
-cannon shot com.JLabs.CannonShot apk<br />
-cannon shot com.cannonshot apk<br />
-cannon shot download apk pure<br />
-cannon shot free download apk<br />
-cannon shot full unlocked apk<br />
-cannon shot game hack apk<br />
-cannon shot game online apk<br />
-cannon shot game offline apk<br />
-cannon shot itechpro apk<br />
-cannon shot level 1000 apk<br />
-cannon shot mod apk android 1<br />
-cannon shot mod apk revdl<br />
-cannon shot mod apk unlimited money<br />
-cannon shot new version apk<br />
-cannon shot no ads apk<br />
-cannon shot offline mod apk<br />
-cannon shot online mod apk<br />
-cannon shot premium apk<br />
-cannon shot pro apk<br />
-cannon shot puzzle game apk<br />
-cannon shot shooting game apk<br />
-cannon shot strategy game apk<br />
-cannon shot unlocked all cannons apk<br />
-cannon shot update version apk<br />
-download cannon shot game for android<br />
-download cannon shot mod apk 2023<br />
-how to play cannon shot game on android<br />
-how to install cannon shot app on android<br />
-how to update cannon shot app on android</p>
-<p>Cannon Shot APK also has various cannons with different shapes, colors, and effects. You can unlock new cannons by collecting stars and keys and opening chests. You can also buy coins with real money and use them to buy cannons in the shop. Some cannons are common, while others are rare or legendary. Some cannons have special effects such as shooting multiple balls, shooting fireballs, shooting rainbow balls, and more. You can switch between your unlocked cannons by tapping on the cannon icon at the bottom left corner of the screen.</p>
-<h3>Some tips and tricks to master Cannon Shot APK</h3>
-<p>If you want to master Cannon Shot APK and complete all the levels with three stars, you might want to follow these tips and tricks:</p>
-<ul>
-<li>Plan your shots carefully and aim smartly. Don't just shoot randomly or blindly. Try to find the best angle and trajectory for your shots. Use the objects wisely and avoid hitting the obstacles.</li>
-<li>Shoot more balls when necessary. Sometimes you need to shoot more balls to fill all the buckets or to clear some obstacles. Don't be afraid to use your extra balls if you think they will help you.</li>
-<li>Watch ads to get more rewards. Sometimes you can watch ads to get more coins, stars, keys, or balls. This can help you unlock more cannons or retry a level if you fail.</li>
-<li>Try different cannons and find your favorite one. Each cannon has its own advantages and disadvantages. Some cannons may suit your play style better than others. Experiment with different cannons and see which one works best for you.</li>
-<li>Have fun and enjoy the game. Don't get frustrated or bored if you get stuck on a level or lose a star. Remember that this is a casual game that is meant to be fun and relaxing. Just keep playing and have fun!</li>
-</ul>
-<h2>Why Should You Play Cannon Shot APK?</h2>
-<h3>The benefits and advantages of playing Cannon Shot APK</h3>
-<p>There are many benefits and advantages of playing Cannon Shot APK, such as:</p>
-<ul>
-<li>It is free to play and easy to download and install.</li>
-<li>It has a simple picture style that is pleasing to the eye.</li>
-<li>It has a fun adventure challenge mode that is engaging and addictive.</li>
-<li>It has easy-to-use controls that are suitable for all ages.</li>
-<li>It has over 100 levels with different difficulty levels and obstacles that are challenging and satisfying.</li>
-<li>It has various cannons with different shapes, colors, and effects that are cool and fun.</li>
-<li>It has boss fights where you have to shoot at monsters instead of buckets that are exciting and thrilling.</li>
-<li>It has floors where you can collect stars and keys to unlock chests with rewards that are rewarding and motivating.</li>
-<li>It has in-app purchases where you can buy coins, no ads, or special offers that are optional and affordable.</li>
-</ul>
-<h3>The challenges and drawbacks of playing Cannon Shot APK</h3>
-<p>There are also some challenges and drawbacks of playing Cannon Shot APK, such as:</p>
-<ul>
-<li>It may require internet connection to play or access some features.</li>
-<li>It may contain ads that may be annoying or distracting.</li> <li>It may have some bugs or glitches that may affect the gameplay or performance.</li>
-<li>It may be repetitive or boring after a while if you play it too much or too often.</li>
-<li>It may be too easy or too hard for some players depending on their skill level or preference.</li>
-</ul>
-<h3>Some alternatives to Cannon Shot APK</h3>
-<p>If you are looking for some alternatives to Cannon Shot APK, you might want to try these games:</p>
-<table>
-<tr>
-<th>Name</th>
-<th>Description</th>
-<th>Link</th>
-</tr>
-<tr>
-<td>Knock Balls</td>
-<td>A game where you have to shoot balls at towers of blocks and knock them down.</td>
-<td><a href="">Knock Balls - Apps on Google Play</a></td>
-</tr>
-<tr>
-<td>Ball Blast</td>
-<td>A game where you have to shoot balls at flying objects and make them explode.</td>
-<td><a href="">Ball Blast - Apps on Google Play</a></td>
-</tr>
-<tr>
-<td>Tank Stars</td>
-<td>A game where you have to shoot tanks at other tanks and destroy them.</td>
-<td><a href="">Tank Stars - Apps on Google Play</a></td>
-</tr>
-<tr>
-<td>Mr Bullet</td>
-<td>A game where you have to shoot bullets at enemies and objects and eliminate them.</td>
-<td><a href="">Mr Bullet - Spy Puzzles - Apps on Google Play</a></td>
-</tr>
-<tr>
-<td>Angry Birds 2</td>
-<td>A game where you have to shoot birds at pigs and structures and make them collapse.</td>
-<td><a href="">Angry Birds 2 - Apps on Google Play</a></td>
-</tr>
-</table>
-<h2>Conclusion</h2>
-<p>Cannon Shot APK is a fun and casual shooting game for Android devices that you can play for free. It has a simple picture style, fun adventure challenge mode, and easy-to-use controls. It has over 100 levels with different difficulty levels and obstacles, various cannons with different shapes, colors, and effects, boss fights where you have to shoot at monsters instead of buckets, floors where you can collect stars and keys to unlock chests with rewards, and in-app purchases where you can buy coins, no ads, or special offers. It also has some challenges and drawbacks such as requiring internet connection, containing ads, having bugs or glitches, being repetitive or boring, or being too easy or too hard. It also has some alternatives such as Knock Balls, Ball Blast, Tank Stars, Mr Bullet, and Angry Birds 2.</p>
-<p>If you are looking for a simple yet addictive shooting game that you can play on your Android device, you might want to check out Cannon Shot APK. You can download it from Google Play Store or from a third-party website. You can also read this article to learn more about the game and how to play it. We hope you enjoy playing Cannon Shot APK and have fun!</p>
-<h2>FAQs</h2>
-<h4>What are the system requirements for Cannon Shot APK?</h4>
-<p>Cannon Shot APK requires Android 4.4 or higher and about 60 MB of free storage space on your device. It also requires internet connection to play or access some features.</p>
-<h4>Is Cannon Shot APK safe and secure to use?</h4>
-<p>Cannon Shot APK is safe and secure to use if you download it from Google Play Store or from a trusted third-party website. However, you should always be careful when downloading and installing any app from unknown sources. You should also read the app's privacy policy and permissions before using it.</p>
-<h4>How can I get more stars and unlock more cannons in Cannon Shot APK?</h4>
-<p>You can get more stars by completing levels with three stars. You can also watch ads to get more stars. You can unlock more cannons by collecting stars and keys and opening chests. You can also buy coins with real money and use them to buy cannons in the shop.</p>
-<h4>How can I contact the developer of Cannon Shot APK?</h4>
-<p>You can contact the developer of Cannon Shot APK by emailing them at <a href="mailto:[email protected]">[email protected]</a>. You can also visit their website at <a href="">https://saygames.by/</a>. You can also follow them on Facebook at <a href="">https://www.facebook.com/SayGamesBy/</a>.</p>
-<h4>Can I play Cannon Shot APK offline?</h4>
-<p>You can play Cannon Shot APK offline if you have already downloaded the game file and installed it on your device. However, you may not be able to access some features or updates that require internet connection. You may also miss out on some rewards or offers that are available online. Therefore, it is recommended that you play Cannon Shot APK online whenever possible.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Enjoy Unlimited Access to Exclusive Anime Content with Bstation MOD Premium APK.md
DELETED
@@ -1,107 +0,0 @@
-<br />
-<h1>How to Download Bstation Mod Premium 2023 for Free</h1>
-<p>If you are an anime lover, you might have heard of Bstation, a popular app that allows you to watch various anime shows and movies online. But did you know that there is a mod version of Bstation that gives you access to premium features for free? In this article, we will tell you what Bstation mod premium 2023 is, what features it offers, and how to download and install it on your device.</p>
-<h2>download bstation mod premium 2023</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://jinyurl.com/2uNPOk">https://jinyurl.com/2uNPOk</a></b></p><br /><br />
-<h2>What is Bstation and Why You Need the Mod Version</h2>
-<h3>Bstation: A Popular App for Anime Lovers</h3>
-<p>Bstation is an app developed by Bilibili, a Chinese company that specializes in online video services. Bstation provides a platform for anime fans to watch their favorite shows and movies, as well as other content from different countries. You can find a wide range of genres, such as action, comedy, romance, horror, sci-fi, and more. You can also read comics and interact with other users on the app.</p>
-<h3>Bstation Mod: A Modified App with Premium Features Unlocked</h3>
-<p>While Bstation is a great app for anime lovers, it has some limitations for free users. For example, you have to watch ads before and during the videos, you cannot watch some exclusive and latest content, and you cannot enjoy HD quality. To get rid of these restrictions, you have to upgrade to a premium account, which costs money per month or per year.</p>
-<p>However, if you do not want to spend money on a premium account, you can opt for Bstation mod premium 2023, which is a modified version of the app that unlocks all the premium features for free. This means that you can watch any content without ads, in HD quality, and with a mini screen option. You can also enjoy a user-friendly interface that makes it easy to navigate the app.</p>
-<p>download bstation mod apk premium unlocked 2023 free<br />
-download bstation mod apk premium v1.35.0 update 2023<br />
-download bstation mod apk premium v2.31.2 vip unlocked<br />
-download bstation mod apk premium versi terbaru<br />
-download bstation mod apk premium no ads<br />
-download bstation mod apk premium hd resolution<br />
-download bstation mod apk premium mini screen<br />
-download bstation mod apk premium user friendly interface<br />
-download bstation mod apk premium unlimited access<br />
-download bstation mod apk premium anime collection<br />
-download bstation mod apk premium video creator<br />
-download bstation mod apk premium acg community<br />
-download bstation mod apk premium latest anime<br />
-download bstation mod apk premium classic anime<br />
-download bstation mod apk premium various genres<br />
-download bstation mod apk premium action anime<br />
-download bstation mod apk premium adventure anime<br />
-download bstation mod apk premium comedy anime<br />
-download bstation mod apk premium fantasy anime<br />
-download bstation mod apk premium romance anime<br />
-download bstation mod apk premium school anime<br />
-download bstation mod apk premium sci-fi anime<br />
-download bstation mod apk premium sports anime<br />
-download bstation mod apk premium new episodes daily<br />
-download bstation mod apk premium direct install<br />
-download bstation mod apk premium only 107.08 mb<br />
-download bstation mod apk premium only 89 mb<br />
-download bstation mod apk premium for android<br />
-download bstation mod apk premium for ios<br />
-download bstation mod apk premium for pc<br />
-how to download bstation mod apk premium 2023<br />
-where to download bstation mod apk premium 2023<br />
-why to download bstation mod apk premium 2023<br />
-what is bstation mod apk premium 2023<br />
-who is behind bstation mod apk premium 2023<br />
-benefits of downloading bstation mod apk premium 2023<br />
-drawbacks of downloading bstation mod apk premium 2023<br />
-alternatives to downloading bstation mod apk premium 2023<br />
-reviews of downloading bstation mod apk premium 2023<br />
-tips and tricks for downloading bstation mod apk premium 2023<br />
-best sites to download bstation mod apk premium 2023<br />
-best apps to download bstation mod apk premium 2023<br />
-best tools to download bstation mod apk premium 2023<br />
-best methods to download bstation mod apk premium 2023<br />
-best sources to download bstation mod apk premium 2023<br />
-best guides to download bstation mod apk premium 2023<br />
-best tutorials to download bstation mod apk premium 2023<br />
-best strategies to download bstation mod apk premium 2023</p>
-<h2>Features of Bstation Mod Premium 2023</h2>
-<h3>No Ads</h3>
-<p>One of the best features of Bstation mod premium 2023 is that it removes all the ads from the app. This means that you can watch your favorite anime shows and movies without any interruptions or distractions. You can also save your data and battery by not loading unnecessary ads.</p>
-<h3>HD Quality</h3>
-<p>Another feature of Bstation mod premium 2023 is that it allows you to watch videos in HD quality. This means that you can enjoy crisp and clear images and sounds that enhance your viewing experience. You can also adjust the video quality according to your preference and network speed.</p>
-<h3>Mini Screen</h3>
-<p>Bstation mod premium 2023 also offers a mini screen feature that lets you watch videos in a small window while doing other tasks on your device. For example, you can browse the web, check your messages, or play games while watching anime. You can also move and resize the mini screen as you like.</p>
-<h3>User-Friendly Interface</h3>
-<p>Bstation mod premium 2023 has a user-friendly interface that makes it easy to use the app. You can find various categories and genres of anime on the homepage, as well as search for specific titles or keywords. You can also access your history, favorites, downloads, and settings from the menu bar.</p>
-<h2>How to Download and Install Bstation Mod Premium 2023</h2>
-<h3>Download the APK File from a Trusted Source</h3>
-<p>To download Bstation mod premium 2023, you need to find a reliable source that provides the APK file of the app. You can use one of the links below to download the APK file:</p>
-<ul>
-<li>[Download B station Mod Premium 2023]</li>
-<li>[Download Bstation Mod Premium 2023]</li>
-<li>[Download Bstation Mod Premium 2023]</li>
-</ul>
-<p>Make sure to download the latest version of the app, which is 6.0.0 as of June 2023.</p>
-<h3>Enable Unknown Sources on Your Device</h3>
-<p>Before you can install Bstation mod premium 2023, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the official app store. To enable unknown sources, follow these steps:</p>
-<ol>
-<li>Go to your device's settings and tap on security or privacy.</li>
-<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li>
-<li>Confirm your choice by tapping on OK or allow.</li>
-</ol>
-<p>You can now install Bstation mod premium 2023 on your device.</p>
-<h3>Install the APK File and Enjoy</h3>
-<p>After you have downloaded the APK file and enabled unknown sources, you can install Bstation mod premium 2023 by following these steps:</p>
-<ol>
-<li>Locate the APK file on your device's file manager or downloads folder and tap on it.</li>
-<li>Tap on install and wait for the installation process to complete.</li>
-<li>Tap on open and launch the app.</li>
-</ol>
-<p>You can now enjoy watching anime with Bstation mod premium 2023 for free.</p>
-<h2>Conclusion</h2>
-<p>Bstation mod premium 2023 is a modified version of Bstation, a popular app for anime lovers. It unlocks all the premium features of the app for free, such as no ads, HD quality, mini screen, and user-friendly interface. You can download and install Bstation mod premium 2023 by following the steps in this article. However, you should be careful when downloading apps from unknown sources, as they may contain viruses or malware that can harm your device. You should also respect the rights of the original developers and creators of the app and the content. If you like Bstation, you should consider supporting them by purchasing a premium account or subscribing to their services.</p>
-<h2>FAQs</h2>
-<h4>What is the difference between Bstation and Bilibili?</h4>
-<p>Bstation is an app developed by Bilibili, a Chinese company that specializes in online video services. Bilibili is a website that hosts various types of videos, such as anime, games, music, movies, and more. Bstation is an app that focuses on anime content from different countries.</p>
-<h4>Is Bstation mod premium 2023 safe to use?</h4>
-<p>Bstation mod premium 2023 is a modified version of Bstation that unlocks all the premium features for free. However, it is not an official app and it may not be safe to use. It may contain viruses or malware that can harm your device or steal your personal information. You should always download apps from trusted sources and scan them with antivirus software before installing them.</p>
-<h4>How can I update Bstation mod premium 2023?</h4>
-<p>To update Bstation mod premium 2023, you need to download the latest version of the APK file from a reliable source and install it over the existing app. You should not update the app from within the app itself, as it may revert to the original version and lose all the mod features.</p>
-<h4>Can I watch offline with Bstation mod premium 2023?</h4>
-<p>Bstation mod premium 2023 allows you to watch videos offline by downloading them to your device. You can find the download option on the video page or in the menu bar. You can also manage your downloads from the settings section of the app.</p>
-<h4>Can I use Bstation mod premium 2023 on PC or TV?</h4>
-<p>Bstation mod premium 2023 is an app designed for Android devices. However, you can use it on PC or TV by using an emulator or a casting device. An emulator is a software that simulates an Android device on your PC, such as Bluestacks or Nox Player. A casting device is a hardware that connects your Android device to your TV, such as Chromecast or Firestick.</p> 401be4b1e0<br />
-<br />
-<br />
spaces/AI-Edify/demo-gpt3.5-turbo/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Demo Gpt3.5-turbo Model
-emoji: 📈
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-license: cc-by-nc-4.0
-duplicated_from: ramon1992/demo-gpt3.5-turbo
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
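Note: the front matter in the deleted README above is Hugging Face Spaces configuration: `sdk: gradio` with `app_file: app.py` means the Space installs the pinned Gradio version and boots by running `app.py`. For reference, a minimal sketch of the kind of `app.py` such a config can launch (illustrative only; the Space's actual app is not part of this 50-file view):

```python
import gradio as gr

# Smallest app a `sdk: gradio` Space can serve from its declared app_file.
def greet(name: str) -> str:
    return f"Hello, {name}!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()  # Spaces run this module and expose the resulting UI
```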
spaces/AISuperheroes/02GR-ASR-Memory/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: 02GR ASR Memory
-emoji: 😻
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/sde_team.py
DELETED
@@ -1,30 +0,0 @@
-from __future__ import annotations
-
-import logging
-import re
-import random
-from typing import TYPE_CHECKING, Any, List, Optional
-
-from . import order_registry as OrderRegistry
-from .base import BaseOrder
-
-if TYPE_CHECKING:
-    from agentverse.environments import BaseEnvironment
-
-
-@OrderRegistry.register("sde_team")
-class SdeTeamOrder(BaseOrder):
-    """The order for a code problem solving
-    """
-    next_agent_idx: int = 2
-
-    def get_next_agent_idx(self, environment: BaseEnvironment) -> List[int]:
-        if self.next_agent_idx == 2:
-            self.next_agent_idx = 0
-            return [2] * 5  # TODO set the number in yaml
-        elif self.next_agent_idx == 0:
-            self.next_agent_idx = 1
-            return [0]
-        elif self.next_agent_idx == 1:
-            self.next_agent_idx = 0
-            return [1]
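Note: read as a turn scheduler, the deleted `SdeTeamOrder` starts in state 2, hands agent 2 five consecutive turns, then alternates agents 0 and 1 indefinitely. A minimal standalone sketch of that rotation (hypothetical, written without the agentverse registry or `BaseOrder` machinery):

```python
# Replays the turn rotation SdeTeamOrder.get_next_agent_idx produces,
# without the agentverse registry/BaseOrder machinery.
def sde_team_turns(num_rounds: int) -> list:
    state = 2                          # mirrors `next_agent_idx: int = 2`
    schedule = []
    for _ in range(num_rounds):
        if state == 2:
            state = 0
            schedule.append([2] * 5)   # agent 2 gets five turns up front
        elif state == 0:
            state = 1
            schedule.append([0])
        else:                          # state == 1
            state = 0
            schedule.append([1])
    return schedule

print(sde_team_turns(4))  # [[2, 2, 2, 2, 2], [0], [1], [0]]
```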
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/clock.js
DELETED
@@ -1,2 +0,0 @@
-import Clock from './time/clock/Clock.js';
-export default Clock;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/interception-plugin.js
DELETED
@@ -1,13 +0,0 @@
-import Interception from './interception.js';
-
-class InterceptionPlugin extends Phaser.Plugins.BasePlugin {
-
-    constructor(pluginManager) {
-        super(pluginManager);
-    }
-
-    add(gameObject, config) {
-        return new Interception(gameObject, config);
-    }
-}
-export default InterceptionPlugin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch2/NinePatch.d.ts
DELETED
@@ -1,2 +0,0 @@
-import NinePatch from '../../../plugins/ninepatch2';
-export default NinePatch;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/ResetChildPosition.js
DELETED
@@ -1,15 +0,0 @@
-var ResetChildPosition = function () {
-    var x = this.left;
-    var y = this.top;
-    if (this.scrollMode === 0) {
-        y += this.childOY;
-    } else {
-        x += this.childOY;
-    }
-    this.child.setPosition(x, y);
-    this.resetChildPositionState(this.child);
-
-    this.setMaskChildrenFlag();
-};
-
-export default ResetChildPosition;
spaces/AhmedRashwan369/ChatGPT4/app.py
DELETED
@@ -1,193 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import os
|
3 |
-
import json
|
4 |
-
import requests
|
5 |
-
|
6 |
-
#Streaming endpoint
|
7 |
-
API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
|
8 |
-
|
9 |
-
#Huggingface provided GPT4 OpenAI API Key
|
10 |
-
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
11 |
-
|
12 |
-
#Inferenec function
|
13 |
-
def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
|
14 |
-
|
15 |
-
headers = {
|
16 |
-
"Content-Type": "application/json",
|
17 |
-
"Authorization": f"Bearer {OPENAI_API_KEY}"
|
18 |
-
}
|
19 |
-
print(f"system message is ^^ {system_msg}")
|
20 |
-
if system_msg.strip() == '':
|
21 |
-
initial_message = [{"role": "user", "content": f"{inputs}"},]
|
22 |
-
multi_turn_message = []
|
23 |
-
else:
|
24 |
-
initial_message= [{"role": "system", "content": system_msg},
|
25 |
-
{"role": "user", "content": f"{inputs}"},]
|
26 |
-
multi_turn_message = [{"role": "system", "content": system_msg},]
|
27 |
-
|
28 |
-
if chat_counter == 0 :
|
29 |
-
payload = {
|
30 |
-
"model": "gpt-4",
|
31 |
-
"messages": initial_message ,
|
32 |
-
"temperature" : 1.0,
|
33 |
-
"top_p":1.0,
|
34 |
-
"n" : 1,
|
35 |
-
"stream": True,
|
36 |
-
"presence_penalty":0,
|
37 |
-
"frequency_penalty":0,
|
38 |
-
}
|
39 |
-
print(f"chat_counter - {chat_counter}")
|
40 |
-
else: #if chat_counter != 0 :
|
41 |
-
messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},]
|
42 |
-
for data in chatbot:
|
43 |
-
user = {}
|
44 |
-
user["role"] = "user"
|
45 |
-
user["content"] = data[0]
|
46 |
-
assistant = {}
|
47 |
-
assistant["role"] = "assistant"
|
48 |
-
assistant["content"] = data[1]
|
49 |
-
messages.append(user)
|
50 |
-
messages.append(assistant)
|
51 |
-
temp = {}
|
52 |
-
temp["role"] = "user"
|
53 |
-
temp["content"] = inputs
|
54 |
-
messages.append(temp)
|
55 |
-
#messages
|
56 |
-
payload = {
|
57 |
-
"model": "gpt-4",
|
58 |
-
"messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}],
|
59 |
-
"temperature" : temperature, #1.0,
|
60 |
-
"top_p": top_p, #1.0,
|
61 |
-
"n" : 1,
|
62 |
-
"stream": True,
|
63 |
-
"presence_penalty":0,
|
64 |
-
"frequency_penalty":0,}
|
65 |
-
|
66 |
-
chat_counter+=1
|
67 |
-
|
68 |
-
history.append(inputs)
|
69 |
-
print(f"Logging : payload is - {payload}")
|
70 |
-
# make a POST request to the API endpoint using the requests.post method, passing in stream=True
|
71 |
-
response = requests.post(API_URL, headers=headers, json=payload, stream=True)
|
72 |
-
print(f"Logging : response code - {response}")
|
73 |
-
token_counter = 0
|
74 |
-
partial_words = ""
|
75 |
-
|
76 |
-
counter=0
|
77 |
-
for chunk in response.iter_lines():
|
78 |
-
#Skipping first chunk
|
79 |
-
if counter == 0:
|
80 |
-
counter+=1
|
81 |
-
continue
|
82 |
-
# check whether each line is non-empty
|
83 |
-
if chunk.decode() :
|
84 |
-
chunk = chunk.decode()
|
85 |
-
# decode each line as response data is in bytes
|
86 |
-
if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
|
87 |
-
partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
|
88 |
-
if token_counter == 0:
|
89 |
-
history.append(" " + partial_words)
|
90 |
-
else:
|
91 |
-
history[-1] = partial_words
|
92 |
-
chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
|
93 |
-
token_counter+=1
|
94 |
-
yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history}
|
95 |
-
|
96 |
-
#Resetting to blank
|
97 |
-
def reset_textbox():
|
98 |
-
return gr.update(value='')
|
99 |
-
|
100 |
-
#to set a component as visible=False
|
101 |
-
def set_visible_false():
|
102 |
-
return gr.update(visible=False)
|
103 |
-
|
104 |
-
#to set a component as visible=True
|
105 |
-
def set_visible_true():
|
106 |
-
return gr.update(visible=True)
|
107 |
-
|
108 |
-
title = """<h1 align="center">🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming</h1>"""
|
109 |
-
|
110 |
-
#display message for themes feature
|
111 |
-
theme_addon_msg = """<center>🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple <code>theme.push_to_hub()</code>.
|
112 |
-
<br>🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - <a href="https://huggingface.co/Gradio-Themes" target="_blank">Gradio-Themes-Party🎨</a> 🏆</center>
|
113 |
-
"""
|
114 |
-
|
115 |
-
#Using info to add additional information about System message in GPT4
|
116 |
-
system_msg_info = """A conversation could begin with a system message to gently instruct the assistant.
|
117 |
-
System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'"""
|
118 |
-
|
119 |
-
#Modifying existing Gradio Theme
|
120 |
-
theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green",
|
121 |
-
text_size=gr.themes.sizes.text_lg)
|
122 |
-
|
123 |
-
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
|
124 |
-
theme=theme) as demo:
|
125 |
-
gr.HTML(title)
|
126 |
-
gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌</h1>""")
|
127 |
-
gr.HTML(theme_addon_msg)
|
128 |
-
gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
|
129 |
-
|
130 |
-
with gr.Column(elem_id = "col_container"):
|
131 |
-
#GPT4 API Key is provided by Huggingface
|
132 |
-
with gr.Accordion(label="System message:", open=False):
|
133 |
-
system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="")
|
134 |
-
accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
|
135 |
-
chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot")
|
136 |
-
-    inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")
-    state = gr.State([])
-    with gr.Row():
-        with gr.Column(scale=7):
-            b1 = gr.Button().style(full_width=True)
-        with gr.Column(scale=3):
-            server_status_code = gr.Textbox(label="Status code from OpenAI server")
-
-    # top_p, temperature
-    with gr.Accordion("Parameters", open=False):
-        top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)")
-        temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature")
-        chat_counter = gr.Number(value=0, visible=False, precision=0)
-
-    # Event handling
-    inputs.submit(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code])  # openai_api_key
-    b1.click(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code])  # openai_api_key
-
-    inputs.submit(set_visible_false, [], [system_msg])
-    b1.click(set_visible_false, [], [system_msg])
-    inputs.submit(set_visible_true, [], [accordion_msg])
-    b1.click(set_visible_true, [], [accordion_msg])
-
-    b1.click(reset_textbox, [], [inputs])
-    inputs.submit(reset_textbox, [], [inputs])
-
-    # Examples
-    with gr.Accordion(label="Examples for System message:", open=False):
-        gr.Examples(
-            examples=[
-                ["""You are an AI programming assistant.
-
-    - Follow the user's requirements carefully and to the letter.
-    - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail.
-    - Then output the code in a single code block.
-    - Minimize any other prose."""],
-                ["You are ComedianGPT who is a helpful assistant. You answer everything with a joke and witty replies."],
-                ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."],
-                ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."],
-                ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."],
-                ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."],
-                ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."],
-                ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."],
-                ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."],
-                ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."],
-                ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."],
-                ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."],
-                ["You are a helpful assistant that provides detailed and accurate information."],
-                ["You are an assistant that speaks like Shakespeare."],
-                ["You are a friendly assistant who uses casual language and humor."],
-                ["You are a financial advisor who gives expert advice on investments and budgeting."],
-                ["You are a health and fitness expert who provides advice on nutrition and exercise."],
-                ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."],
-                ["You are a movie critic who shares insightful opinions on films and their themes."],
-                ["You are a history enthusiast who loves to discuss historical events and figures."],
-                ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."],
-                ["You are an AI poet who can compose creative and evocative poems on any given topic."],
-            ],
-            inputs=system_msg,
-        )
-
-demo.queue(max_size=99, concurrency_count=20).launch(debug=True)
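For reference, the event wiring above pins down the handler's interface even though `predict` itself is defined earlier in the file: seven inputs in, four outputs back. A minimal sketch of the implied signature (component names taken from the lists above; the body is hypothetical):

```python
def predict(system_msg, user_input, top_p, temperature, chat_counter, chatbot, history):
    # Hypothetical body: call the chat API with the sampling parameters,
    # append the new exchange, and surface the upstream HTTP status so the
    # "Status code from OpenAI server" textbox has something to show.
    chatbot = chatbot + [(user_input, "...model reply...")]
    history = history + [user_input]
    return chatbot, history, chat_counter + 1, "200"
```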
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/avd_network.py
DELETED
@@ -1,65 +0,0 @@
-
-import torch
-from torch import nn
-
-
-class AVDNetwork(nn.Module):
-    """
-    Animation via Disentanglement network
-    """
-
-    def __init__(self, num_tps, id_bottle_size=64, pose_bottle_size=64):
-        super(AVDNetwork, self).__init__()
-        input_size = 5 * 2 * num_tps
-        self.num_tps = num_tps
-
-        self.id_encoder = nn.Sequential(
-            nn.Linear(input_size, 256),
-            nn.BatchNorm1d(256),
-            nn.ReLU(inplace=True),
-            nn.Linear(256, 512),
-            nn.BatchNorm1d(512),
-            nn.ReLU(inplace=True),
-            nn.Linear(512, 1024),
-            nn.BatchNorm1d(1024),
-            nn.ReLU(inplace=True),
-            nn.Linear(1024, id_bottle_size)
-        )
-
-        self.pose_encoder = nn.Sequential(
-            nn.Linear(input_size, 256),
-            nn.BatchNorm1d(256),
-            nn.ReLU(inplace=True),
-            nn.Linear(256, 512),
-            nn.BatchNorm1d(512),
-            nn.ReLU(inplace=True),
-            nn.Linear(512, 1024),
-            nn.BatchNorm1d(1024),
-            nn.ReLU(inplace=True),
-            nn.Linear(1024, pose_bottle_size)
-        )
-
-        self.decoder = nn.Sequential(
-            nn.Linear(pose_bottle_size + id_bottle_size, 1024),
-            nn.BatchNorm1d(1024),
-            nn.ReLU(),
-            nn.Linear(1024, 512),
-            nn.BatchNorm1d(512),
-            nn.ReLU(),
-            nn.Linear(512, 256),
-            nn.BatchNorm1d(256),
-            nn.ReLU(),
-            nn.Linear(256, input_size)
-        )
-
-    def forward(self, kp_source, kp_random):
-        bs = kp_source['fg_kp'].shape[0]
-
-        pose_emb = self.pose_encoder(kp_random['fg_kp'].view(bs, -1))
-        id_emb = self.id_encoder(kp_source['fg_kp'].view(bs, -1))
-
-        rec = self.decoder(torch.cat([pose_emb, id_emb], dim=1))
-
-        rec = {'fg_kp': rec.view(bs, self.num_tps * 5, -1)}
-        return rec
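For context, a quick shape check of the network above (a sketch; the keypoint layout is inferred from `forward`, where `fg_kp` is viewed as a flat vector of length `5 * 2 * num_tps`):

```python
import torch

# With num_tps=10: input_size = 5 * 2 * 10 = 100, i.e. 50 (x, y) keypoints.
net = AVDNetwork(num_tps=10)
kp = {'fg_kp': torch.randn(2, 50, 2)}  # batch of 2
out = net(kp, kp)
print(out['fg_kp'].shape)  # torch.Size([2, 50, 2])
```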
spaces/Ali-C137/Motivation-Letter-Generator/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Motivation Letter Generator
-emoji: 📝
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.1.7
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AliUsama98/Usama_TextClassifier/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/krupper/text-complexity-classification").launch()
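`gr.Interface.load` was later deprecated in Gradio in favor of the top-level loader; a rough modern equivalent of the one-liner above (assuming a recent Gradio release) would be:

```python
import gradio as gr

# gr.load fetches the hosted model and builds a matching demo UI,
# as Interface.load did before it.
gr.load("models/krupper/text-complexity-classification").launch()
```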
spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/custom_ops.py
DELETED
@@ -1,198 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-#
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-"""TensorFlow custom ops builder.
-"""
-
-import os
-import re
-import uuid
-import hashlib
-import tempfile
-import shutil
-import tensorflow as tf
-from tensorflow.python.client import device_lib  # pylint: disable=no-name-in-module
-
-# ----------------------------------------------------------------------------
-# Global options.
-
-cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
-cuda_cache_version_tag = 'v1'
-# Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
-do_not_hash_included_headers = False
-verbose = True  # Print status messages to stdout.
-
-compiler_bindir_search_path = [
-    'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
-    'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
-    'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
-]
-
-# ----------------------------------------------------------------------------
-# Internal helper funcs.
-
-
-def _find_compiler_bindir():
-    for compiler_path in compiler_bindir_search_path:
-        if os.path.isdir(compiler_path):
-            return compiler_path
-    return None
-
-
-def _get_compute_cap(device):
-    caps_str = device.physical_device_desc
-    m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
-    major = m.group(1)
-    minor = m.group(2)
-    return (major, minor)
-
-
-def _get_cuda_gpu_arch_string():
-    gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
-    if len(gpus) == 0:
-        raise RuntimeError('No GPU devices found')
-    (major, minor) = _get_compute_cap(gpus[0])
-    return 'sm_%s%s' % (major, minor)
-
-
-def _run_cmd(cmd):
-    with os.popen(cmd) as pipe:
-        output = pipe.read()
-        status = pipe.close()
-    if status is not None:
-        raise RuntimeError(
-            'NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
-
-
-def _prepare_nvcc_cli(opts):
-    cmd = 'nvcc ' + opts.strip()
-    cmd += ' --disable-warnings'
-    cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
-    cmd += ' --include-path "%s"' % os.path.join(
-        tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
-    cmd += ' --include-path "%s"' % os.path.join(
-        tf.sysconfig.get_include(), 'external', 'com_google_absl')
-    cmd += ' --include-path "%s"' % os.path.join(
-        tf.sysconfig.get_include(), 'external', 'eigen_archive')
-
-    compiler_bindir = _find_compiler_bindir()
-    if compiler_bindir is None:
-        # Require that _find_compiler_bindir succeeds on Windows. Allow
-        # nvcc to use whatever is the default on Linux.
-        if os.name == 'nt':
-            raise RuntimeError(
-                'Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
-    else:
-        cmd += ' --compiler-bindir "%s"' % compiler_bindir
-    cmd += ' 2>&1'
-    return cmd
-
-# ----------------------------------------------------------------------------
-# Main entry point.
-
-
-_plugin_cache = dict()
-
-
-def get_plugin(cuda_file):
-    cuda_file_base = os.path.basename(cuda_file)
-    cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
-
-    # Already in cache?
-    if cuda_file in _plugin_cache:
-        return _plugin_cache[cuda_file]
-
-    # Setup plugin.
-    if verbose:
-        print('Setting up TensorFlow plugin "%s": ' %
-              cuda_file_base, end='', flush=True)
-    try:
-        # Hash CUDA source.
-        md5 = hashlib.md5()
-        with open(cuda_file, 'rb') as f:
-            md5.update(f.read())
-        md5.update(b'\n')
-
-        # Hash headers included by the CUDA code by running it through the preprocessor.
-        if not do_not_hash_included_headers:
-            if verbose:
-                print('Preprocessing... ', end='', flush=True)
-            with tempfile.TemporaryDirectory() as tmp_dir:
-                tmp_file = os.path.join(
-                    tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
-                _run_cmd(_prepare_nvcc_cli(
-                    '"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
-                with open(tmp_file, 'rb') as f:
-                    # __FILE__ in error check macros
-                    bad_file_str = (
-                        '"' + cuda_file.replace('\\', '/') + '"').encode('utf-8')
-                    good_file_str = ('"' + cuda_file_base +
-                                     '"').encode('utf-8')
-                    for ln in f:
-                        # ignore line number pragmas
-                        if not ln.startswith(b'# ') and not ln.startswith(b'#line '):
-                            ln = ln.replace(bad_file_str, good_file_str)
-                            md5.update(ln)
-                    md5.update(b'\n')
-
-        # Select compiler options.
-        compile_opts = ''
-        if os.name == 'nt':
-            compile_opts += '"%s"' % os.path.join(
-                tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
-        elif os.name == 'posix':
-            compile_opts += '"%s"' % os.path.join(
-                tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
-            compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
-        else:
-            assert False  # not Windows or Linux, w00t?
-        compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
-        compile_opts += ' --use_fast_math'
-        nvcc_cmd = _prepare_nvcc_cli(compile_opts)
-
-        # Hash build configuration.
-        md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
-        md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
-        md5.update(('cuda_cache_version_tag: ' +
-                    cuda_cache_version_tag).encode('utf-8') + b'\n')
-
-        # Compile if not already compiled.
-        bin_file_ext = '.dll' if os.name == 'nt' else '.so'
-        bin_file = os.path.join(
-            cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
-        if not os.path.isfile(bin_file):
-            if verbose:
-                print('Compiling... ', end='', flush=True)
-            with tempfile.TemporaryDirectory() as tmp_dir:
-                tmp_file = os.path.join(
-                    tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
-                _run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' %
-                         (cuda_file, tmp_file, tmp_dir))
-                os.makedirs(cuda_cache_path, exist_ok=True)
-                intermediate_file = os.path.join(
-                    cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
-                shutil.copyfile(tmp_file, intermediate_file)
-                os.rename(intermediate_file, bin_file)  # atomic
-
-        # Load.
-        if verbose:
-            print('Loading... ', end='', flush=True)
-        plugin = tf.load_op_library(bin_file)
-
-        # Add to cache.
-        _plugin_cache[cuda_file] = plugin
-        if verbose:
-            print('Done.', flush=True)
-        return plugin
-
-    except:
-        if verbose:
-            print('Failed!', flush=True)
-        raise
-
-# ----------------------------------------------------------------------------
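For context, `get_plugin` above is the whole public surface of this builder: hand it a `.cu` file and get back a loaded `tf.load_op_library` module, compiled once and cached by content hash. A hypothetical call site (the kernel file and op name are illustrative, not taken from the repo):

```python
import os

# Compile-and-load a CUDA kernel sitting next to this file; the attribute
# name comes from whatever REGISTER_OP the .cu source declares.
plugin = get_plugin(os.path.join(os.path.dirname(__file__), 'my_kernel.cu'))
y = plugin.my_kernel(x)  # x: a tf.Tensor defined elsewhere; 'my_kernel' is a placeholder name
```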
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/README.md
DELETED
@@ -1,143 +0,0 @@
-## [Deprecated] Multi Token Textual Inversion
-
-**IMPORTANT: This research project is deprecated. Multi Token Textual Inversion is now supported natively in [the official textual inversion example](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion#running-locally-with-pytorch).**
-
-The author of this project is [Isamu Isozaki](https://github.com/isamu-isozaki) - please make sure to tag the author for issues and PRs as well as @patrickvonplaten.
-
-We add multi token support to textual inversion. I added
-1. num_vec_per_token for the number of vectors used to reference that token
-2. progressive_tokens for progressively training the token from 1 token to 2 tokens etc.
-3. progressive_tokens_max_steps for the max number of steps until we start full training
-4. vector_shuffle to shuffle vectors
-
-Feel free to add these options to your training! In practice, num_vec_per_token around 10 plus vector shuffle works great!
-
-## Textual Inversion fine-tuning example
-
-[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples.
-The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
-
-## Running on Colab
-
-Colab for training
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
-
-Colab for inference
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)
-
-## Running locally with PyTorch
-### Installing the dependencies
-
-Before running the scripts, make sure to install the library's training dependencies:
-
-**Important**
-
-To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
-```bash
-git clone https://github.com/huggingface/diffusers
-cd diffusers
-pip install .
-```
-
-Then cd into the example folder and run
-```bash
-pip install -r requirements.txt
-```
-
-And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
-
-```bash
-accelerate config
-```
-
-
-### Cat toy example
-
-You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/runwayml/stable-diffusion-v1-5), read the license and tick the checkbox if you agree.
-
-You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).
-
-Run the following command to authenticate your token
-
-```bash
-huggingface-cli login
-```
-
-If you have already cloned the repo, then you won't need to go through these steps.
-
-<br>
-
-Now let's get our dataset. Download 3-4 images from [here](https://drive.google.com/drive/folders/1fmJMs25nxS_rSNqS5hTcRdLem_YQXbq5) and save them in a directory. This will be our training data.
-
-And launch the training using
-
-**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
-
-```bash
-export MODEL_NAME="runwayml/stable-diffusion-v1-5"
-export DATA_DIR="path-to-dir-containing-images"
-
-accelerate launch textual_inversion.py \
-  --pretrained_model_name_or_path=$MODEL_NAME \
-  --train_data_dir=$DATA_DIR \
-  --learnable_property="object" \
-  --placeholder_token="<cat-toy>" --initializer_token="toy" \
-  --resolution=512 \
-  --train_batch_size=1 \
-  --gradient_accumulation_steps=4 \
-  --max_train_steps=3000 \
-  --learning_rate=5.0e-04 --scale_lr \
-  --lr_scheduler="constant" \
-  --lr_warmup_steps=0 \
-  --output_dir="textual_inversion_cat"
-```
-
-A full training run takes ~1 hour on one V100 GPU.
-
-### Inference
-
-Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.
-
-```python
-import torch
-from diffusers import StableDiffusionPipeline
-
-model_id = "path-to-your-trained-model"
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
-
-prompt = "A <cat-toy> backpack"
-
-image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
-
-image.save("cat-backpack.png")
-```
-
-
-## Training with Flax/JAX
-
-For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.
-
-Before running the scripts, make sure to install the library's training dependencies:
-
-```bash
-pip install -U -r requirements_flax.txt
-```
-
-```bash
-export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
-export DATA_DIR="path-to-dir-containing-images"
-
-python textual_inversion_flax.py \
-  --pretrained_model_name_or_path=$MODEL_NAME \
-  --train_data_dir=$DATA_DIR \
-  --learnable_property="object" \
-  --placeholder_token="<cat-toy>" --initializer_token="toy" \
-  --resolution=512 \
-  --train_batch_size=1 \
-  --max_train_steps=3000 \
-  --learning_rate=5.0e-04 --scale_lr \
-  --output_dir="textual_inversion_cat"
-```
-It should be at least 70% faster than the PyTorch script with the same configuration.
-
-### Training with xformers:
-You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
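Of the four options listed at the top of this README, `vector_shuffle` is the easiest to picture. A minimal illustrative sketch, not the script's actual code, assuming the placeholder token is backed by several embedding vectors:

```python
import torch

def shuffle_placeholder_vectors(vectors: torch.Tensor) -> torch.Tensor:
    """Randomly permute the vectors that jointly stand in for one token.

    vectors: (num_vec_per_token, embed_dim). Re-ordering them each step
    discourages the model from memorizing a fixed positional arrangement.
    """
    perm = torch.randperm(vectors.shape[0])
    return vectors[perm]
```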
spaces/Anew5128/Anew51/server.py
DELETED
@@ -1,964 +0,0 @@
-from functools import wraps
-from flask import (
-    Flask,
-    jsonify,
-    request,
-    Response,
-    render_template_string,
-    abort,
-    send_from_directory,
-    send_file,
-)
-from flask_cors import CORS
-from flask_compress import Compress
-import markdown
-import argparse
-from transformers import AutoTokenizer, AutoProcessor, pipeline
-from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM
-from transformers import BlipForConditionalGeneration
-import unicodedata
-import torch
-import time
-import os
-import gc
-import sys
-import secrets
-from PIL import Image
-import base64
-from io import BytesIO
-from random import randint
-import webuiapi
-import hashlib
-from constants import *
-from colorama import Fore, Style, init as colorama_init
-
-colorama_init()
-
-if sys.hexversion < 0x030b0000:
-    print(f"{Fore.BLUE}{Style.BRIGHT}Python 3.11 or newer is recommended to run this program.{Style.RESET_ALL}")
-    time.sleep(2)
-
-class SplitArgs(argparse.Action):
-    def __call__(self, parser, namespace, values, option_string=None):
-        setattr(
-            namespace, self.dest, values.replace('"', "").replace("'", "").split(",")
-        )
-
-# Setting root folders for Silero generations so it is compatible with STSL; should not affect regular runs. - Rolyat
-parent_dir = os.path.dirname(os.path.abspath(__file__))
-SILERO_SAMPLES_PATH = os.path.join(parent_dir, "tts_samples")
-SILERO_SAMPLE_TEXT = os.path.join(parent_dir)
-
-# Create directories if they don't exist
-if not os.path.exists(SILERO_SAMPLES_PATH):
-    os.makedirs(SILERO_SAMPLES_PATH)
-if not os.path.exists(SILERO_SAMPLE_TEXT):
-    os.makedirs(SILERO_SAMPLE_TEXT)
-
-# Script arguments
-parser = argparse.ArgumentParser(
-    prog="SillyTavern Extras", description="Web API for transformers models"
-)
-parser.add_argument(
-    "--port", type=int, help="Specify the port on which the application is hosted"
-)
-parser.add_argument(
-    "--listen", action="store_true", help="Host the app on the local network"
-)
-parser.add_argument(
-    "--share", action="store_true", help="Share the app on CloudFlare tunnel"
-)
-parser.add_argument("--cpu", action="store_true", help="Run the models on the CPU")
-parser.add_argument("--cuda", action="store_false", dest="cpu", help="Run the models on the GPU")
-parser.add_argument("--cuda-device", help="Specify the CUDA device to use")
-parser.add_argument("--mps", "--apple", "--m1", "--m2", action="store_false", dest="cpu", help="Run the models on Apple Silicon")
-parser.set_defaults(cpu=True)
-parser.add_argument("--summarization-model", help="Load a custom summarization model")
-parser.add_argument(
-    "--classification-model", help="Load a custom text classification model"
-)
-parser.add_argument("--captioning-model", help="Load a custom captioning model")
-parser.add_argument("--embedding-model", help="Load a custom text embedding model")
-parser.add_argument("--chroma-host", help="Host IP for a remote ChromaDB instance")
-parser.add_argument("--chroma-port", help="HTTP port for a remote ChromaDB instance (defaults to 8000)")
-parser.add_argument("--chroma-folder", help="Path for chromadb persistence folder", default='.chroma_db')
-parser.add_argument('--chroma-persist', help="ChromaDB persistence", default=True, action=argparse.BooleanOptionalAction)
-parser.add_argument(
-    "--secure", action="store_true", help="Enforces the use of an API key"
-)
-sd_group = parser.add_mutually_exclusive_group()
-
-local_sd = sd_group.add_argument_group("sd-local")
-local_sd.add_argument("--sd-model", help="Load a custom SD image generation model")
-local_sd.add_argument("--sd-cpu", help="Force the SD pipeline to run on the CPU", action="store_true")
-
-remote_sd = sd_group.add_argument_group("sd-remote")
-remote_sd.add_argument(
-    "--sd-remote", action="store_true", help="Use a remote backend for SD"
-)
-remote_sd.add_argument(
-    "--sd-remote-host", type=str, help="Specify the host of the remote SD backend"
-)
-remote_sd.add_argument(
-    "--sd-remote-port", type=int, help="Specify the port of the remote SD backend"
-)
-remote_sd.add_argument(
-    "--sd-remote-ssl", action="store_true", help="Use SSL for the remote SD backend"
-)
-remote_sd.add_argument(
-    "--sd-remote-auth",
-    type=str,
-    help="Specify the username:password for the remote SD backend (if required)",
-)
-
-parser.add_argument(
-    "--enable-modules",
-    action=SplitArgs,
-    default=[],
-    help="Override a list of enabled modules",
-)
-
-args = parser.parse_args()
-# [HF, Huggingface] Set port to 7860, set host to remote.
-port = 7860
-host = "0.0.0.0"
-summarization_model = (
-    args.summarization_model
-    if args.summarization_model
-    else DEFAULT_SUMMARIZATION_MODEL
-)
-classification_model = (
-    args.classification_model
-    if args.classification_model
-    else DEFAULT_CLASSIFICATION_MODEL
-)
-captioning_model = (
-    args.captioning_model if args.captioning_model else DEFAULT_CAPTIONING_MODEL
-)
-embedding_model = (
-    args.embedding_model if args.embedding_model else DEFAULT_EMBEDDING_MODEL
-)
-
-sd_use_remote = False if args.sd_model else True
-sd_model = args.sd_model if args.sd_model else DEFAULT_SD_MODEL
-sd_remote_host = args.sd_remote_host if args.sd_remote_host else DEFAULT_REMOTE_SD_HOST
-sd_remote_port = args.sd_remote_port if args.sd_remote_port else DEFAULT_REMOTE_SD_PORT
-sd_remote_ssl = args.sd_remote_ssl
-sd_remote_auth = args.sd_remote_auth
-
-modules = (
-    args.enable_modules if args.enable_modules and len(args.enable_modules) > 0 else []
-)
-
-if len(modules) == 0:
-    print(
-        f"{Fore.RED}{Style.BRIGHT}You did not select any modules to run! Choose them by adding an --enable-modules option"
-    )
-    print(f"Example: --enable-modules=caption,summarize{Style.RESET_ALL}")
-
-# Models init
-cuda_device = DEFAULT_CUDA_DEVICE if not args.cuda_device else args.cuda_device
-device_string = cuda_device if torch.cuda.is_available() and not args.cpu else 'mps' if torch.backends.mps.is_available() and not args.cpu else 'cpu'
-device = torch.device(device_string)
-torch_dtype = torch.float32 if device_string != cuda_device else torch.float16
-
-if not torch.cuda.is_available() and not args.cpu:
-    print(f"{Fore.YELLOW}{Style.BRIGHT}torch-cuda is not supported on this device.{Style.RESET_ALL}")
-if not torch.backends.mps.is_available() and not args.cpu:
-    print(f"{Fore.YELLOW}{Style.BRIGHT}torch-mps is not supported on this device.{Style.RESET_ALL}")
-
-
-print(f"{Fore.GREEN}{Style.BRIGHT}Using torch device: {device_string}{Style.RESET_ALL}")
-
-if "caption" in modules:
-    print("Initializing an image captioning model...")
-    captioning_processor = AutoProcessor.from_pretrained(captioning_model)
-    if "blip" in captioning_model:
-        captioning_transformer = BlipForConditionalGeneration.from_pretrained(
-            captioning_model, torch_dtype=torch_dtype
-        ).to(device)
-    else:
-        captioning_transformer = AutoModelForCausalLM.from_pretrained(
-            captioning_model, torch_dtype=torch_dtype
-        ).to(device)
-
-if "summarize" in modules:
-    print("Initializing a text summarization model...")
-    summarization_tokenizer = AutoTokenizer.from_pretrained(summarization_model)
-    summarization_transformer = AutoModelForSeq2SeqLM.from_pretrained(
-        summarization_model, torch_dtype=torch_dtype
-    ).to(device)
-
-if "classify" in modules:
-    print("Initializing a sentiment classification pipeline...")
-    classification_pipe = pipeline(
-        "text-classification",
-        model=classification_model,
-        top_k=None,
-        device=device,
-        torch_dtype=torch_dtype,
-    )
-
-if "sd" in modules and not sd_use_remote:
-    from diffusers import StableDiffusionPipeline
-    from diffusers import EulerAncestralDiscreteScheduler
-
-    print("Initializing Stable Diffusion pipeline...")
-    sd_device_string = cuda_device if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
-    sd_device = torch.device(sd_device_string)
-    sd_torch_dtype = torch.float32 if sd_device_string != cuda_device else torch.float16
-    sd_pipe = StableDiffusionPipeline.from_pretrained(
-        sd_model, custom_pipeline="lpw_stable_diffusion", torch_dtype=sd_torch_dtype
-    ).to(sd_device)
-    sd_pipe.safety_checker = lambda images, clip_input: (images, False)
-    sd_pipe.enable_attention_slicing()
-    # pipe.scheduler = KarrasVeScheduler.from_config(pipe.scheduler.config)
-    sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
-        sd_pipe.scheduler.config
-    )
-elif "sd" in modules and sd_use_remote:
-    print("Initializing Stable Diffusion connection")
-    try:
-        sd_remote = webuiapi.WebUIApi(
-            host=sd_remote_host, port=sd_remote_port, use_https=sd_remote_ssl
-        )
-        if sd_remote_auth:
-            username, password = sd_remote_auth.split(":")
-            sd_remote.set_auth(username, password)
-        sd_remote.util_wait_for_ready()
-    except Exception as e:
-        # remove sd from modules
-        print(
-            f"{Fore.RED}{Style.BRIGHT}Could not connect to remote SD backend at http{'s' if sd_remote_ssl else ''}://{sd_remote_host}:{sd_remote_port}! Disabling SD module...{Style.RESET_ALL}"
-        )
-        modules.remove("sd")
-
-if "tts" in modules:
-    print("tts module is deprecated. Please use silero-tts instead.")
-    modules.remove("tts")
-    modules.append("silero-tts")
-
-
-if "silero-tts" in modules:
-    if not os.path.exists(SILERO_SAMPLES_PATH):
-        os.makedirs(SILERO_SAMPLES_PATH)
-    print("Initializing Silero TTS server")
-    from silero_api_server import tts
-
-    tts_service = tts.SileroTtsService(SILERO_SAMPLES_PATH)
-    if len(os.listdir(SILERO_SAMPLES_PATH)) == 0:
-        print("Generating Silero TTS samples...")
-        tts_service.update_sample_text(SILERO_SAMPLE_TEXT)
-        tts_service.generate_samples()
-
-
-if "edge-tts" in modules:
-    print("Initializing Edge TTS client")
-    import tts_edge as edge
-
-
-if "chromadb" in modules:
-    print("Initializing ChromaDB")
-    import chromadb
-    import posthog
-    from chromadb.config import Settings
-    from sentence_transformers import SentenceTransformer
-
-    # Assume that the user wants in-memory unless a host is specified
-    # Also disable chromadb telemetry
-    posthog.capture = lambda *args, **kwargs: None
-    if args.chroma_host is None:
-        if args.chroma_persist:
-            chromadb_client = chromadb.PersistentClient(path=args.chroma_folder, settings=Settings(anonymized_telemetry=False))
-            print(f"ChromaDB is running in-memory with persistence. Persistence is stored in {args.chroma_folder}. Can be cleared by deleting the folder or purging db.")
-        else:
-            chromadb_client = chromadb.EphemeralClient(Settings(anonymized_telemetry=False))
-            print("ChromaDB is running in-memory without persistence.")
-    else:
-        chroma_port = (
-            args.chroma_port if args.chroma_port else DEFAULT_CHROMA_PORT
-        )
-        chromadb_client = chromadb.HttpClient(host=args.chroma_host, port=chroma_port, settings=Settings(anonymized_telemetry=False))
-        print(f"ChromaDB is remotely configured at {args.chroma_host}:{chroma_port}")
-
-    chromadb_embedder = SentenceTransformer(embedding_model, device=device_string)
-    chromadb_embed_fn = lambda *args, **kwargs: chromadb_embedder.encode(*args, **kwargs).tolist()
-
-    # Check if the db is connected and running, otherwise tell the user
-    try:
-        chromadb_client.heartbeat()
-        print("Successfully pinged ChromaDB! Your client is successfully connected.")
-    except:
-        print("Could not ping ChromaDB! If you are running remotely, please check your host and port!")
-
-# Flask init
-app = Flask(__name__)
-CORS(app)  # allow cross-domain requests
-Compress(app)  # compress responses
-app.config["MAX_CONTENT_LENGTH"] = 100 * 1024 * 1024
-
-
-def require_module(name):
-    def wrapper(fn):
-        @wraps(fn)
-        def decorated_view(*args, **kwargs):
-            if name not in modules:
-                abort(403, "Module is disabled by config")
-            return fn(*args, **kwargs)
-
-        return decorated_view
-
-    return wrapper
-
-
-# AI stuff
-def classify_text(text: str) -> list:
-    output = classification_pipe(
-        text,
-        truncation=True,
-        max_length=classification_pipe.model.config.max_position_embeddings,
-    )[0]
-    return sorted(output, key=lambda x: x["score"], reverse=True)
-
-
-def caption_image(raw_image: Image, max_new_tokens: int = 20) -> str:
-    inputs = captioning_processor(raw_image.convert("RGB"), return_tensors="pt").to(
-        device, torch_dtype
-    )
-    outputs = captioning_transformer.generate(**inputs, max_new_tokens=max_new_tokens)
-    caption = captioning_processor.decode(outputs[0], skip_special_tokens=True)
-    return caption
-
-
-def summarize_chunks(text: str, params: dict) -> str:
-    try:
-        return summarize(text, params)
-    except IndexError:
-        print(
-            "Sequence length too large for model, cutting text in half and calling again"
-        )
-        new_params = params.copy()
-        new_params["max_length"] = new_params["max_length"] // 2
-        new_params["min_length"] = new_params["min_length"] // 2
-        return summarize_chunks(
-            text[: (len(text) // 2)], new_params
-        ) + summarize_chunks(text[(len(text) // 2) :], new_params)
-
-
-def summarize(text: str, params: dict) -> str:
-    # Tokenize input
-    inputs = summarization_tokenizer(text, return_tensors="pt").to(device)
-    token_count = len(inputs[0])
-
-    bad_words_ids = [
-        summarization_tokenizer(bad_word, add_special_tokens=False).input_ids
-        for bad_word in params["bad_words"]
-    ]
-    summary_ids = summarization_transformer.generate(
-        inputs["input_ids"],
-        num_beams=2,
-        max_new_tokens=max(token_count, int(params["max_length"])),
-        min_new_tokens=min(token_count, int(params["min_length"])),
-        repetition_penalty=float(params["repetition_penalty"]),
-        temperature=float(params["temperature"]),
-        length_penalty=float(params["length_penalty"]),
-        bad_words_ids=bad_words_ids,
-    )
-    summary = summarization_tokenizer.batch_decode(
-        summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
-    )[0]
-    summary = normalize_string(summary)
-    return summary
-
-
-def normalize_string(input: str) -> str:
-    output = " ".join(unicodedata.normalize("NFKC", input).strip().split())
-    return output
-
-
-def generate_image(data: dict) -> Image:
-    prompt = normalize_string(f'{data["prompt_prefix"]} {data["prompt"]}')
-
-    if sd_use_remote:
-        image = sd_remote.txt2img(
-            prompt=prompt,
-            negative_prompt=data["negative_prompt"],
-            sampler_name=data["sampler"],
-            steps=data["steps"],
-            cfg_scale=data["scale"],
-            width=data["width"],
-            height=data["height"],
-            restore_faces=data["restore_faces"],
-            enable_hr=data["enable_hr"],
-            save_images=True,
-            send_images=True,
-            do_not_save_grid=False,
-            do_not_save_samples=False,
-        ).image
-    else:
-        image = sd_pipe(
-            prompt=prompt,
-            negative_prompt=data["negative_prompt"],
-            num_inference_steps=data["steps"],
-            guidance_scale=data["scale"],
-            width=data["width"],
-            height=data["height"],
-        ).images[0]
-
-    image.save("./debug.png")
-    return image
-
-
-def image_to_base64(image: Image, quality: int = 75) -> str:
-    buffer = BytesIO()
-    image.convert("RGB")
-    image.save(buffer, format="JPEG", quality=quality)
-    img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
-    return img_str
-
-
-ignore_auth = []
-# [HF, Huggingface] Get password instead of text file.
-api_key = os.environ.get("password")
-
-def is_authorize_ignored(request):
-    view_func = app.view_functions.get(request.endpoint)
-
-    if view_func is not None:
-        if view_func in ignore_auth:
-            return True
-    return False
-
-@app.before_request
-def before_request():
-    # Request time measuring
-    request.start_time = time.time()
-
-    # Checks if an API key is present and valid, otherwise return unauthorized
-    # The options check is required so CORS doesn't get angry
-    try:
-        if request.method != 'OPTIONS' and is_authorize_ignored(request) == False and getattr(request.authorization, 'token', '') != api_key:
-            print(f"WARNING: Unauthorized API key access from {request.remote_addr}")
-            if request.method == 'POST':
-                print(f"Incoming POST request with {request.headers.get('Authorization')}")
-            response = jsonify({ 'error': '401: Invalid API key' })
-            response.status_code = 401
-            return "https://(hf_name)-(space_name).hf.space/"
-    except Exception as e:
-        print(f"API key check error: {e}")
-        return "https://(hf_name)-(space_name).hf.space/"
-
-
-@app.after_request
-def after_request(response):
-    duration = time.time() - request.start_time
-    response.headers["X-Request-Duration"] = str(duration)
-    return response
-
-
-@app.route("/", methods=["GET"])
-def index():
-    with open("./README.md", "r", encoding="utf8") as f:
-        content = f.read()
-    return render_template_string(markdown.markdown(content, extensions=["tables"]))
-
-
-@app.route("/api/extensions", methods=["GET"])
-def get_extensions():
-    extensions = dict(
-        {
-            "extensions": [
-                {
-                    "name": "not-supported",
-                    "metadata": {
-                        "display_name": """<span style="white-space:break-spaces;">Extensions serving using Extensions API is no longer supported. Please update the mod from: <a href="https://github.com/Cohee1207/SillyTavern">https://github.com/Cohee1207/SillyTavern</a></span>""",
-                        "requires": [],
-                        "assets": [],
-                    },
-                }
-            ]
-        }
-    )
-    return jsonify(extensions)
-
-
-@app.route("/api/caption", methods=["POST"])
-@require_module("caption")
-def api_caption():
-    data = request.get_json()
-
-    if "image" not in data or not isinstance(data["image"], str):
-        abort(400, '"image" is required')
-
-    image = Image.open(BytesIO(base64.b64decode(data["image"])))
-    image = image.convert("RGB")
-    image.thumbnail((512, 512))
-    caption = caption_image(image)
-    thumbnail = image_to_base64(image)
-    print("Caption:", caption, sep="\n")
-    gc.collect()
-    return jsonify({"caption": caption, "thumbnail": thumbnail})
-
-
-@app.route("/api/summarize", methods=["POST"])
-@require_module("summarize")
-def api_summarize():
-    data = request.get_json()
-
-    if "text" not in data or not isinstance(data["text"], str):
-        abort(400, '"text" is required')
-
-    params = DEFAULT_SUMMARIZE_PARAMS.copy()
-
-    if "params" in data and isinstance(data["params"], dict):
-        params.update(data["params"])
-
-    print("Summary input:", data["text"], sep="\n")
-    summary = summarize_chunks(data["text"], params)
-    print("Summary output:", summary, sep="\n")
-    gc.collect()
-    return jsonify({"summary": summary})
-
-
-@app.route("/api/classify", methods=["POST"])
-@require_module("classify")
-def api_classify():
-    data = request.get_json()
-
-    if "text" not in data or not isinstance(data["text"], str):
-        abort(400, '"text" is required')
-
-    print("Classification input:", data["text"], sep="\n")
-    classification = classify_text(data["text"])
-    print("Classification output:", classification, sep="\n")
-    gc.collect()
-    return jsonify({"classification": classification})
-
-
-@app.route("/api/classify/labels", methods=["GET"])
-@require_module("classify")
-def api_classify_labels():
-    classification = classify_text("")
-    labels = [x["label"] for x in classification]
-    return jsonify({"labels": labels})
-
-
-@app.route("/api/image", methods=["POST"])
-@require_module("sd")
-def api_image():
-    required_fields = {
-        "prompt": str,
-    }
-
-    optional_fields = {
-        "steps": 30,
-        "scale": 6,
-        "sampler": "DDIM",
-        "width": 512,
-        "height": 512,
-        "restore_faces": False,
-        "enable_hr": False,
-        "prompt_prefix": PROMPT_PREFIX,
-        "negative_prompt": NEGATIVE_PROMPT,
-    }
-
-    data = request.get_json()
-
-    # Check required fields
-    for field, field_type in required_fields.items():
-        if field not in data or not isinstance(data[field], field_type):
-            abort(400, f'"{field}" is required')
-
-    # Set optional fields to default values if not provided
-    for field, default_value in optional_fields.items():
-        type_match = (
-            (int, float)
-            if isinstance(default_value, (int, float))
-            else type(default_value)
-        )
-        if field not in data or not isinstance(data[field], type_match):
-            data[field] = default_value
-
-    try:
-        print("SD inputs:", data, sep="\n")
-        image = generate_image(data)
-        base64image = image_to_base64(image, quality=90)
-        return jsonify({"image": base64image})
-    except RuntimeError as e:
-        abort(400, str(e))
-
-
-@app.route("/api/image/model", methods=["POST"])
-@require_module("sd")
-def api_image_model_set():
-    data = request.get_json()
-
-    if not sd_use_remote:
-        abort(400, "Changing model for local sd is not supported.")
-    if "model" not in data or not isinstance(data["model"], str):
-        abort(400, '"model" is required')
-
-    old_model = sd_remote.util_get_current_model()
-    sd_remote.util_set_model(data["model"], find_closest=False)
-    # sd_remote.util_set_model(data['model'])
-    sd_remote.util_wait_for_ready()
-    new_model = sd_remote.util_get_current_model()
-
-    return jsonify({"previous_model": old_model, "current_model": new_model})
-
-
-@app.route("/api/image/model", methods=["GET"])
-@require_module("sd")
-def api_image_model_get():
-    model = sd_model
-
-    if sd_use_remote:
-        model = sd_remote.util_get_current_model()
-
-    return jsonify({"model": model})
-
-
-@app.route("/api/image/models", methods=["GET"])
-@require_module("sd")
-def api_image_models():
-    models = [sd_model]
-
-    if sd_use_remote:
-        models = sd_remote.util_get_model_names()
-
-    return jsonify({"models": models})
-
-
-@app.route("/api/image/samplers", methods=["GET"])
-@require_module("sd")
-def api_image_samplers():
-    samplers = ["Euler a"]
-
-    if sd_use_remote:
-        samplers = [sampler["name"] for sampler in sd_remote.get_samplers()]
-
-    return jsonify({"samplers": samplers})
-
-
-@app.route("/api/modules", methods=["GET"])
-def get_modules():
-    return jsonify({"modules": modules})
-
-
-@app.route("/api/tts/speakers", methods=["GET"])
-@require_module("silero-tts")
-def tts_speakers():
-    voices = [
-        {
-            "name": speaker,
-            "voice_id": speaker,
-            "preview_url": f"{str(request.url_root)}api/tts/sample/{speaker}",
-        }
-        for speaker in tts_service.get_speakers()
-    ]
-    return jsonify(voices)
-
-# Added fix for Silero not working as new files were unable to be created if one already existed. - Rolyat 7/7/23
-@app.route("/api/tts/generate", methods=["POST"])
-@require_module("silero-tts")
-def tts_generate():
-    voice = request.get_json()
-    if "text" not in voice or not isinstance(voice["text"], str):
-        abort(400, '"text" is required')
-    if "speaker" not in voice or not isinstance(voice["speaker"], str):
-        abort(400, '"speaker" is required')
-    # Remove asterisks
-    voice["text"] = voice["text"].replace("*", "")
-    try:
-        # Remove the destination file if it already exists
-        if os.path.exists('test.wav'):
-            os.remove('test.wav')
-
-        audio = tts_service.generate(voice["speaker"], voice["text"])
-        audio_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.basename(audio))
-
-        os.rename(audio, audio_file_path)
-        return send_file(audio_file_path, mimetype="audio/x-wav")
-    except Exception as e:
-        print(e)
-        abort(500, voice["speaker"])
-
-
-@app.route("/api/tts/sample/<speaker>", methods=["GET"])
-@require_module("silero-tts")
-def tts_play_sample(speaker: str):
-    return send_from_directory(SILERO_SAMPLES_PATH, f"{speaker}.wav")
-
-
-@app.route("/api/edge-tts/list", methods=["GET"])
-@require_module("edge-tts")
-def edge_tts_list():
-    voices = edge.get_voices()
-    return jsonify(voices)
-
-
-@app.route("/api/edge-tts/generate", methods=["POST"])
-@require_module("edge-tts")
-def edge_tts_generate():
-    data = request.get_json()
-    if "text" not in data or not isinstance(data["text"], str):
-        abort(400, '"text" is required')
-    if "voice" not in data or not isinstance(data["voice"], str):
-        abort(400, '"voice" is required')
-    if "rate" in data and isinstance(data['rate'], int):
-        rate = data['rate']
-    else:
-        rate = 0
-    # Remove asterisks
-    data["text"] = data["text"].replace("*", "")
-    try:
-        audio = edge.generate_audio(text=data["text"], voice=data["voice"], rate=rate)
-        return Response(audio, mimetype="audio/mpeg")
-    except Exception as e:
-        print(e)
-        abort(500, data["voice"])
-
-
-@app.route("/api/chromadb", methods=["POST"])
-@require_module("chromadb")
-def chromadb_add_messages():
-    data = request.get_json()
-    if "chat_id" not in data or not isinstance(data["chat_id"], str):
-        abort(400, '"chat_id" is required')
-    if "messages" not in data or not isinstance(data["messages"], list):
-        abort(400, '"messages" is required')
-
-    chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
-    collection = chromadb_client.get_or_create_collection(
-        name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-    )
-
-    documents = [m["content"] for m in data["messages"]]
-    ids = [m["id"] for m in data["messages"]]
-    metadatas = [
-        {"role": m["role"], "date": m["date"], "meta": m.get("meta", "")}
-        for m in data["messages"]
-    ]
-
-    collection.upsert(
-        ids=ids,
-        documents=documents,
-        metadatas=metadatas,
-    )
-
-    return jsonify({"count": len(ids)})
-
-
-@app.route("/api/chromadb/purge", methods=["POST"])
-@require_module("chromadb")
-def chromadb_purge():
-    data = request.get_json()
-    if "chat_id" not in data or not isinstance(data["chat_id"], str):
-        abort(400, '"chat_id" is required')
-
-    chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
-    collection = chromadb_client.get_or_create_collection(
-        name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-    )
-
-    count = collection.count()
-    collection.delete()
-    print("ChromaDB embeddings deleted", count)
-    return 'Ok', 200
-
-
-@app.route("/api/chromadb/query", methods=["POST"])
-@require_module("chromadb")
-def chromadb_query():
-    data = request.get_json()
-    if "chat_id" not in data or not isinstance(data["chat_id"], str):
-        abort(400, '"chat_id" is required')
-    if "query" not in data or not isinstance(data["query"], str):
-        abort(400, '"query" is required')
-
-    if "n_results" not in data or not isinstance(data["n_results"], int):
-        n_results = 1
-    else:
-        n_results = data["n_results"]
-
-    chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
-    collection = chromadb_client.get_or_create_collection(
-        name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-    )
-
-    if collection.count() == 0:
-        print(f"Queried empty/missing collection for {repr(data['chat_id'])}.")
-        return jsonify([])
-
-    n_results = min(collection.count(), n_results)
-    query_result = collection.query(
-        query_texts=[data["query"]],
-        n_results=n_results,
-    )
-
-    documents = query_result["documents"][0]
-    ids = query_result["ids"][0]
-    metadatas = query_result["metadatas"][0]
-    distances = query_result["distances"][0]
-
-    messages = [
-        {
-            "id": ids[i],
-            "date": metadatas[i]["date"],
-            "role": metadatas[i]["role"],
-            "meta": metadatas[i]["meta"],
-            "content": documents[i],
-            "distance": distances[i],
-        }
-        for i in range(len(ids))
-    ]
-
-    return jsonify(messages)
-
-@app.route("/api/chromadb/multiquery", methods=["POST"])
-@require_module("chromadb")
-def chromadb_multiquery():
-    data = request.get_json()
-    if "chat_list" not in data or not isinstance(data["chat_list"], list):
-        abort(400, '"chat_list" is required and should be a list')
-    if "query" not in data or not isinstance(data["query"], str):
-        abort(400, '"query" is required')
-
-    if "n_results" not in data or not isinstance(data["n_results"], int):
-        n_results = 1
-    else:
-        n_results = data["n_results"]
-
-    messages = []
-
-    for chat_id in data["chat_list"]:
-        if not isinstance(chat_id, str):
-            continue
-
-        try:
-            chat_id_md5 = hashlib.md5(chat_id.encode()).hexdigest()
-            collection = chromadb_client.get_collection(
-                name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-            )
-
-            # Skip this chat if the collection is empty
-            if collection.count() == 0:
-                continue
-
-            n_results_per_chat = min(collection.count(), n_results)
-            query_result = collection.query(
-                query_texts=[data["query"]],
-                n_results=n_results_per_chat,
-            )
-            documents = query_result["documents"][0]
-            ids = query_result["ids"][0]
-            metadatas = query_result["metadatas"][0]
-            distances = query_result["distances"][0]
-
-            chat_messages = [
-                {
-                    "id": ids[i],
-                    "date": metadatas[i]["date"],
-                    "role": metadatas[i]["role"],
-                    "meta": metadatas[i]["meta"],
-                    "content": documents[i],
-                    "distance": distances[i],
-                }
-                for i in range(len(ids))
-            ]
-
-            messages.extend(chat_messages)
-        except Exception as e:
-            print(e)
-
-    # Remove duplicate messages, then filter down to the right number
-    seen = set()
-    messages = [d for d in messages if not (d['content'] in seen or seen.add(d['content']))]
-    messages = sorted(messages, key=lambda x: x['distance'])[0:n_results]
-
-    return jsonify(messages)
-
-
-@app.route("/api/chromadb/export", methods=["POST"])
-@require_module("chromadb")
-def chromadb_export():
-    data = request.get_json()
-    if "chat_id" not in data or not isinstance(data["chat_id"], str):
-        abort(400, '"chat_id" is required')
-
-    chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
-    try:
-        collection = chromadb_client.get_collection(
-            name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-        )
-    except Exception as e:
-        print(e)
-        abort(400, "Chat collection not found in chromadb")
-
-    collection_content = collection.get()
-    documents = collection_content.get('documents', [])
-    ids = collection_content.get('ids', [])
-    metadatas = collection_content.get('metadatas', [])
-
-    unsorted_content = [
-        {
-            "id": ids[i],
-            "metadata": metadatas[i],
-            "document": documents[i],
-        }
-        for i in range(len(ids))
-    ]
-
-    sorted_content = sorted(unsorted_content, key=lambda x: x['metadata']['date'])
-
-    export = {
-        "chat_id": data["chat_id"],
-        "content": sorted_content
-    }
-
-    return jsonify(export)
-
-@app.route("/api/chromadb/import", methods=["POST"])
-@require_module("chromadb")
-def chromadb_import():
-    data = request.get_json()
-    content = data['content']
-    if "chat_id" not in data or not isinstance(data["chat_id"], str):
-        abort(400, '"chat_id" is required')
-
-    chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
-    collection = chromadb_client.get_or_create_collection(
-        name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
-    )
-
-    documents = [item['document'] for item in content]
-    metadatas = [item['metadata'] for item in content]
-    ids = [item['id'] for item in content]
-
-    collection.upsert(documents=documents, metadatas=metadatas, ids=ids)
-    print(f"Imported {len(ids)} (total {collection.count()}) content entries into {repr(data['chat_id'])}")
-
-    return jsonify({"count": len(ids)})
-
-
-if args.share:
-    from flask_cloudflared import _run_cloudflared
-    import inspect
-
-    sig = inspect.signature(_run_cloudflared)
-    sum = sum(
-        1
-        for param in sig.parameters.values()
-        if param.kind == param.POSITIONAL_OR_KEYWORD
-    )
-    if sum > 1:
-        metrics_port = randint(8100, 9000)
-        cloudflare = _run_cloudflared(port, metrics_port)
-    else:
-        cloudflare = _run_cloudflared(port)
-    print("Running on", cloudflare)
-
-ignore_auth.append(tts_play_sample)
-app.run(host=host, port=port)
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/__init__.py
DELETED
@@ -1,29 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint import CheckpointHook
from .closure import ClosureHook
from .ema import EMAHook
from .evaluation import DistEvalHook, EvalHook
from .hook import HOOKS, Hook
from .iter_timer import IterTimerHook
from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
                     NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
                     TextLoggerHook, WandbLoggerHook)
from .lr_updater import LrUpdaterHook
from .memory import EmptyCacheHook
from .momentum_updater import MomentumUpdaterHook
from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
                        GradientCumulativeOptimizerHook, OptimizerHook)
from .profiler import ProfilerHook
from .sampler_seed import DistSamplerSeedHook
from .sync_buffer import SyncBuffersHook

__all__ = [
    'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
    'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
    'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
    'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
    'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
    'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
    'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
    'GradientCumulativeFp16OptimizerHook'
]
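
As a hedged illustration of the hook interface this module exported: custom hooks are registered through the HOOKS registry and subclass Hook, following the standard mmcv pattern. The hook name and body below are invented for the example.

# Minimal custom hook sketch using the mmcv registry pattern.
# `EveryNIterPrinterHook` is hypothetical, not part of mmcv.
from mmcv.runner import HOOKS, Hook


@HOOKS.register_module()
class EveryNIterPrinterHook(Hook):
    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        # every_n_iters is a helper provided by the base Hook class
        if self.every_n_iters(runner, self.interval):
            print(f"iter {runner.iter}: still training")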
spaces/AquaSuisei/ChatGPTXE/modules/config.py
DELETED
@@ -1,145 +0,0 @@
from collections import defaultdict
from contextlib import contextmanager
import os
import logging
import sys
import json

from . import shared


__all__ = [
    "my_api_key",
    "authflag",
    "auth_list",
    "dockerflag",
    "retrieve_proxy",
    "log_level",
    "advance_docs",
    "update_doc_config",
    "multi_api_key",
]

# Add a unified config file to avoid the confusion caused by too many scattered files (lowest priority).
# It also gives future user-defined features a place to put their config.
if os.path.exists("config.json"):
    with open("config.json", "r", encoding='utf-8') as f:
        config = json.load(f)
else:
    config = {}

## Handle Docker: check if we are running in Docker
dockerflag = config.get("dockerflag", False)
if os.environ.get("dockerrun") == "yes":
    dockerflag = True

## Handle the API key and the list of allowed users
my_api_key = config.get("openai_api_key", "")  # Put your API key here
my_api_key = os.environ.get("my_api_key", my_api_key)

## Multi-account mechanism
multi_api_key = config.get("multi_api_key", False)  # Whether the multi-account mechanism is enabled
if multi_api_key:
    api_key_list = config.get("api_key_list", [])
    if len(api_key_list) == 0:
        logging.error("Multi-account mode is enabled but api_key_list is empty; please check config.json")
        sys.exit(1)
    shared.state.set_api_key_queue(api_key_list)

auth_list = config.get("users", [])  # Effectively the list of allowed users
authflag = len(auth_list) > 0  # Whether authentication is enabled, decided by the length of auth_list

# Handle a custom api_host; the environment variable takes priority and is applied automatically when present
api_host = os.environ.get("api_host", config.get("api_host", ""))
if api_host:
    shared.state.set_api_host(api_host)

if dockerflag:
    if my_api_key == "empty":
        logging.error("Please provide an API key!")
        sys.exit(1)
    # auth
    username = os.environ.get("USERNAME")
    password = os.environ.get("PASSWORD")
    if not (isinstance(username, type(None)) or isinstance(password, type(None))):
        auth_list.append((os.environ.get("USERNAME"), os.environ.get("PASSWORD")))
        authflag = True
else:
    if (
        not my_api_key
        and os.path.exists("api_key.txt")
        and os.path.getsize("api_key.txt")
    ):
        with open("api_key.txt", "r") as f:
            my_api_key = f.read().strip()
    if os.path.exists("auth.json"):
        authflag = True
        with open("auth.json", "r", encoding='utf-8') as f:
            auth = json.load(f)
            for _ in auth:
                if auth[_]["username"] and auth[_]["password"]:
                    auth_list.append((auth[_]["username"], auth[_]["password"]))
                else:
                    logging.error("Please check the usernames and passwords in auth.json!")
                    sys.exit(1)


@contextmanager
def retrieve_openai_api(api_key=None):
    old_api_key = os.environ.get("OPENAI_API_KEY", "")
    if api_key is None:
        os.environ["OPENAI_API_KEY"] = my_api_key
        yield my_api_key
    else:
        os.environ["OPENAI_API_KEY"] = api_key
        yield api_key
    os.environ["OPENAI_API_KEY"] = old_api_key


## Handle logging
log_level = config.get("log_level", "INFO")
logging.basicConfig(
    level=log_level,
    format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
)

## Handle proxies:
http_proxy = config.get("http_proxy", "")
https_proxy = config.get("https_proxy", "")
http_proxy = os.environ.get("HTTP_PROXY", http_proxy)
https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)

# Reset the environment variables: keep them empty when no proxy is needed,
# so that a global proxy does not cause errors
os.environ["HTTP_PROXY"] = ""
os.environ["HTTPS_PROXY"] = ""


@contextmanager
def retrieve_proxy(proxy=None):
    """
    1. If proxy is None, set the environment variables and return the most recently configured proxy
    2. If proxy is not None, update the current proxy configuration without updating the environment variables
    """
    global http_proxy, https_proxy
    if proxy is not None:
        http_proxy = proxy
        https_proxy = proxy
        yield http_proxy, https_proxy
    else:
        old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
        os.environ["HTTP_PROXY"] = http_proxy
        os.environ["HTTPS_PROXY"] = https_proxy
        yield http_proxy, https_proxy  # return new proxy

        # return old proxy
        os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var


## Handle advance docs
advance_docs = defaultdict(lambda: defaultdict(dict))
advance_docs.update(config.get("advance_docs", {}))


def update_doc_config(two_column_pdf):
    global advance_docs
    if two_column_pdf:
        advance_docs["pdf"]["two_column"] = True
    else:
        advance_docs["pdf"]["two_column"] = False

    logging.info(f"Updated document parameters: {advance_docs}")
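
A hedged usage sketch of the two context managers above. The import path is assumed from the file's location (modules/config.py) and the proxy address is illustrative.

# Hypothetical usage of retrieve_proxy; the module path is an assumption.
from modules.config import retrieve_proxy

# Temporarily export the configured proxies, then restore the old values.
with retrieve_proxy() as (http, https):
    print("requests made in this block see", http, https)

# Override the proxy for one block without touching the environment variables.
with retrieve_proxy("http://127.0.0.1:7890"):
    pass  # e.g. download a model file through a local proxy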
spaces/Archan/ArXivAudio/app.py
DELETED
@@ -1,106 +0,0 @@
import os
import streamlit as st
from pdfminer.high_level import extract_pages
from search import search
from get_paper import get_paper
from get_pages import get_pages
from tts import inference

st.title("ArXiV Audio")

with st.form(key="search_form"):
    col1, col2, col3 = st.columns(3)
    with col1:
        query = st.text_input("Search Paper")
    with col2:
        sort_by = st.selectbox(label="Sort By", options=(
            'Relevance', 'Last Updated Date', 'Submitted Date'))
    with col3:
        order_by = st.selectbox(
            label="Order By", options=('Ascending', 'Descending'))
    submit = st.form_submit_button(label="Search")

lst = search(query=query, sort_by=sort_by, sort_order=order_by)
if len(lst) != 0:
    label = "Papers for " + query
    with st.form(key="paper_form"):
        pname = st.selectbox(label=label, options=lst)
        submit_paper = st.form_submit_button(label="Fetch Paper")
else:
    with st.form(key="paper_form"):
        pname = st.selectbox(label="NO PAPERS", options=lst)
        submit_paper = st.form_submit_button(label="Fetch Paper")

paper = ""
if submit_paper or os.path.exists('downloads/paper.pdf'):
    paper = get_paper(pname)

print("Submit_paper = ", submit_paper)

name = ""
tpages = 0
lst_idx = 1
if paper:
    name = "./downloads/paper.pdf"
    tpages = len(list(extract_pages(name)))
    lst_idx = tpages - 1

    pgs = [i + 1 for i in range(tpages)]

    start_page = 1
    end_page = 1
    #content = get_pages(name, start_page, end_page)
    #audio_path = inference(content, "english")
    #audio_file = open(audio_path, "rb")
    #audio_bytes = audio_file.read()
    #st.audio(audio_bytes, format='audio/wav')

    with st.form(key="page_form"):
        print("inside page form")
        col4, col5 = st.columns(2)
        with col4:
            print("column 1")
            s_page = st.selectbox(label="Start Page", options=pgs)
            print(s_page)
            start_page = s_page
        with col5:
            print("column 2")
            e_page = st.selectbox(label="End Page", options=pgs, index=lst_idx)
            print(e_page)
            end_page = e_page
        st.text("*")
        submit_pages = st.form_submit_button(label="Convert To Audio")
        print("Submit_pages' = ", submit_pages)
        print(start_page, end_page)

    print("Submit_pages = ", submit_pages)
    if submit_pages:
        content = get_pages(name, start_page, end_page)
        x = st.text("Converting to Audio..... Please Wait")
        audio_path = inference(content, "english")
        audio_file = open(audio_path, "rb")
        audio_bytes = audio_file.read()
        x = st.text("Done")
        st.audio(audio_bytes, format='audio/wav')
        os.remove('downloads/paper.pdf')

    print("Submit_paper at end state = ", submit_paper)

else:
    with st.form(key="page_form"):
        col1, col2 = st.columns(2)
        with col1:
            s_page = st.selectbox(label="Start Page", options=[])
        with col2:
            e_page = st.selectbox(label="End Page", options=[])
        submit_pages2 = st.form_submit_button(label="Convert To Audio")
st.text(" ")
st.text(" ")
st.text(" ")
st.text(" ")
st.text(" ")
st.markdown("Created by [Archan Ghosh](https://github.com/ArchanGhosh) & [Madhurima Maji](https://github.com/madhurima99). Special Thanks to [Herumb](https://github.com/krypticmouse) for helping us with the deployment.", unsafe_allow_html=True)
st.markdown("Do Support us on [Github](https://github.com/ArchanGhosh/ArxivAudio)", unsafe_allow_html =True)
st.text(" ")
st.text("* - Please limit to 3 pages as we are currently rate limited on CPU, we are planning to move to a GPU in the coming future. ")
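
The helper modules (search, get_paper, get_pages, tts) are not part of this diff. As a hedged sketch only, get_pages could be implemented with pdfminer's extract_text, which the app already depends on; the signature and page-numbering behavior below are inferred from the call sites above, not taken from the real helper.

# Hypothetical sketch of get_pages(), inferred from how the app calls it.
# pdfminer.high_level.extract_text accepts zero-based page_numbers.
from pdfminer.high_level import extract_text


def get_pages(pdf_path, start_page, end_page):
    # The app passes one-based page numbers, so shift to zero-based indices.
    pages = range(start_page - 1, end_page)
    return extract_text(pdf_path, page_numbers=pages)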
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/__init__.py
DELETED
@@ -1 +0,0 @@
__version__ = "0.0.1"
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/zoom_out_app.py
DELETED
@@ -1,140 +0,0 @@
import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from PIL import Image

from video_diffusion.inpaint_zoom.utils.zoom_out_utils import (
    dummy,
    preprocess_image,
    preprocess_mask_image,
    write_video,
)

os.environ["CUDA_VISIBLE_DEVICES"] = "0"


stable_paint_model_list = ["stabilityai/stable-diffusion-2-inpainting", "runwayml/stable-diffusion-inpainting"]

stable_paint_prompt_list = [
    "children running in the forest , sunny, bright, by studio ghibli painting, superior quality, masterpiece, traditional Japanese colors, by Grzegorz Rutkowski, concept art",
    "A beautiful landscape of a mountain range with a lake in the foreground",
]

stable_paint_negative_prompt_list = [
    "blurry, bad art, blurred, text, watermark",
]


class StableDiffusionZoomOut:
    def __init__(self):
        self.pipe = None

    def load_model(self, model_id):
        if self.pipe is None:
            self.pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
            self.pipe.set_use_memory_efficient_attention_xformers(True)
            self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
            self.pipe = self.pipe.to("cuda")
            self.pipe.safety_checker = dummy
            self.g_cuda = torch.Generator(device="cuda")

        return self.pipe

    def generate_video(
        self,
        model_id,
        prompt,
        negative_prompt,
        guidance_scale,
        num_inference_steps,
        num_frames,
        step_size,
    ):
        pipe = self.load_model(model_id)

        new_image = Image.new(mode="RGBA", size=(512, 512))
        current_image, mask_image = preprocess_mask_image(new_image)

        current_image = pipe(
            prompt=[prompt],
            negative_prompt=[negative_prompt],
            image=current_image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        ).images[0]

        all_frames = []
        all_frames.append(current_image)

        for i in range(num_frames):
            prev_image = preprocess_image(current_image, step_size, 512)
            current_image = prev_image
            current_image, mask_image = preprocess_mask_image(current_image)
            current_image = pipe(
                prompt=[prompt],
                negative_prompt=[negative_prompt],
                image=current_image,
                mask_image=mask_image,
                num_inference_steps=num_inference_steps,
            ).images[0]
            current_image.paste(prev_image, mask=prev_image)
            all_frames.append(current_image)

        save_path = "output.mp4"
        write_video(save_path, all_frames, fps=30)
        return save_path

    def app():
        with gr.Blocks():
            with gr.Row():
                with gr.Column():
                    text2image_out_model_path = gr.Dropdown(
                        choices=stable_paint_model_list, value=stable_paint_model_list[0], label="Text-Image Model Id"
                    )

                    text2image_out_prompt = gr.Textbox(lines=2, value=stable_paint_prompt_list[0], label="Prompt")

                    text2image_out_negative_prompt = gr.Textbox(
                        lines=1, value=stable_paint_negative_prompt_list[0], label="Negative Prompt"
                    )

                    with gr.Row():
                        with gr.Column():
                            text2image_out_guidance_scale = gr.Slider(
                                minimum=0.1, maximum=15, step=0.1, value=7.5, label="Guidance Scale"
                            )

                            text2image_out_num_inference_step = gr.Slider(
                                minimum=1, maximum=100, step=1, value=50, label="Num Inference Step"
                            )
                    with gr.Row():
                        with gr.Column():
                            text2image_out_step_size = gr.Slider(
                                minimum=1, maximum=100, step=1, value=10, label="Step Size"
                            )

                            text2image_out_num_frames = gr.Slider(
                                minimum=1, maximum=100, step=1, value=10, label="Frames"
                            )

                    text2image_out_predict = gr.Button(value="Generator")

                with gr.Column():
                    output_image = gr.Video(label="Output")

            text2image_out_predict.click(
                fn=StableDiffusionZoomOut().generate_video,
                inputs=[
                    text2image_out_model_path,
                    text2image_out_prompt,
                    text2image_out_negative_prompt,
                    text2image_out_guidance_scale,
                    text2image_out_num_inference_step,
                    # pass num_frames before step_size to match
                    # generate_video's parameter order
                    text2image_out_num_frames,
                    text2image_out_step_size,
                ],
                outputs=output_image,
            )
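
A hedged driver sketch for the class above, outside the Gradio UI. It requires a CUDA device (the pipeline is moved to "cuda"), and the argument values simply mirror the slider defaults in app().

# Hypothetical standalone use of StableDiffusionZoomOut.
generator = StableDiffusionZoomOut()
video_path = generator.generate_video(
    model_id="stabilityai/stable-diffusion-2-inpainting",
    prompt=stable_paint_prompt_list[1],
    negative_prompt=stable_paint_negative_prompt_list[0],
    guidance_scale=7.5,
    num_inference_steps=50,
    num_frames=10,
    step_size=10,
)
print("wrote", video_path)  # output.mp4 at 30 fps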
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/locations/_sysconfig.py
DELETED
@@ -1,213 +0,0 @@
import logging
import os
import sys
import sysconfig
import typing

from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid
from pip._internal.models.scheme import SCHEME_KEYS, Scheme
from pip._internal.utils.virtualenv import running_under_virtualenv

from .base import change_root, get_major_minor_version, is_osx_framework

logger = logging.getLogger(__name__)


# Notes on _infer_* functions.
# Unfortunately ``get_default_scheme()`` didn't exist before 3.10, so there's no
# way to ask things like "what is the '_prefix' scheme on this platform". These
# functions try to answer that with some heuristics while accounting for ad-hoc
# platforms not covered by CPython's default sysconfig implementation. If the
# ad-hoc implementation does not fully implement sysconfig, we'll fall back to
# a POSIX scheme.

_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())

_PREFERRED_SCHEME_API = getattr(sysconfig, "get_preferred_scheme", None)


def _should_use_osx_framework_prefix() -> bool:
    """Check for Apple's ``osx_framework_library`` scheme.

    Python distributed by Apple's Command Line Tools has this special scheme
    that's used when:

    * This is a framework build.
    * We are installing into the system prefix.

    This does not account for ``pip install --prefix`` (also means we're not
    installing to the system prefix), which should use ``posix_prefix``, but
    logic here means ``_infer_prefix()`` outputs ``osx_framework_library``. But
    since ``prefix`` is not available for ``sysconfig.get_default_scheme()``,
    which is the stdlib replacement for ``_infer_prefix()``, presumably Apple
    wouldn't be able to magically switch between ``osx_framework_library`` and
    ``posix_prefix``. ``_infer_prefix()`` returning ``osx_framework_library``
    means its behavior is consistent whether we use the stdlib implementation
    or our own, and we deal with this special case in ``get_scheme()`` instead.
    """
    return (
        "osx_framework_library" in _AVAILABLE_SCHEMES
        and not running_under_virtualenv()
        and is_osx_framework()
    )


def _infer_prefix() -> str:
    """Try to find a prefix scheme for the current platform.

    This tries:

    * A special ``osx_framework_library`` for Python distributed by Apple's
      Command Line Tools, when not running in a virtual environment.
    * Implementation + OS, used by PyPy on Windows (``pypy_nt``).
    * Implementation without OS, used by PyPy on POSIX (``pypy``).
    * OS + "prefix", used by CPython on POSIX (``posix_prefix``).
    * Just the OS name, used by CPython on Windows (``nt``).

    If none of the above works, fall back to ``posix_prefix``.
    """
    if _PREFERRED_SCHEME_API:
        return _PREFERRED_SCHEME_API("prefix")
    if _should_use_osx_framework_prefix():
        return "osx_framework_library"
    implementation_suffixed = f"{sys.implementation.name}_{os.name}"
    if implementation_suffixed in _AVAILABLE_SCHEMES:
        return implementation_suffixed
    if sys.implementation.name in _AVAILABLE_SCHEMES:
        return sys.implementation.name
    suffixed = f"{os.name}_prefix"
    if suffixed in _AVAILABLE_SCHEMES:
        return suffixed
    if os.name in _AVAILABLE_SCHEMES:  # On Windows, prefix is just called "nt".
        return os.name
    return "posix_prefix"


def _infer_user() -> str:
    """Try to find a user scheme for the current platform."""
    if _PREFERRED_SCHEME_API:
        return _PREFERRED_SCHEME_API("user")
    if is_osx_framework() and not running_under_virtualenv():
        suffixed = "osx_framework_user"
    else:
        suffixed = f"{os.name}_user"
    if suffixed in _AVAILABLE_SCHEMES:
        return suffixed
    if "posix_user" not in _AVAILABLE_SCHEMES:  # User scheme unavailable.
        raise UserInstallationInvalid()
    return "posix_user"


def _infer_home() -> str:
    """Try to find a home for the current platform."""
    if _PREFERRED_SCHEME_API:
        return _PREFERRED_SCHEME_API("home")
    suffixed = f"{os.name}_home"
    if suffixed in _AVAILABLE_SCHEMES:
        return suffixed
    return "posix_home"


# Update these keys if the user sets a custom home.
_HOME_KEYS = [
    "installed_base",
    "base",
    "installed_platbase",
    "platbase",
    "prefix",
    "exec_prefix",
]
if sysconfig.get_config_var("userbase") is not None:
    _HOME_KEYS.append("userbase")


def get_scheme(
    dist_name: str,
    user: bool = False,
    home: typing.Optional[str] = None,
    root: typing.Optional[str] = None,
    isolated: bool = False,
    prefix: typing.Optional[str] = None,
) -> Scheme:
    """
    Get the "scheme" corresponding to the input parameters.

    :param dist_name: the name of the package to retrieve the scheme for, used
        in the headers scheme path
    :param user: indicates to use the "user" scheme
    :param home: indicates to use the "home" scheme
    :param root: root under which other directories are re-based
    :param isolated: ignored, but kept for distutils compatibility (where
        this controls whether the user-site pydistutils.cfg is honored)
    :param prefix: indicates to use the "prefix" scheme and provides the
        base directory for the same
    """
    if user and prefix:
        raise InvalidSchemeCombination("--user", "--prefix")
    if home and prefix:
        raise InvalidSchemeCombination("--home", "--prefix")

    if home is not None:
        scheme_name = _infer_home()
    elif user:
        scheme_name = _infer_user()
    else:
        scheme_name = _infer_prefix()

    # Special case: When installing into a custom prefix, use posix_prefix
    # instead of osx_framework_library. See _should_use_osx_framework_prefix()
    # docstring for details.
    if prefix is not None and scheme_name == "osx_framework_library":
        scheme_name = "posix_prefix"

    if home is not None:
        variables = {k: home for k in _HOME_KEYS}
    elif prefix is not None:
        variables = {k: prefix for k in _HOME_KEYS}
    else:
        variables = {}

    paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)

    # Logic here is very arbitrary, we're doing it for compatibility, don't ask.
    # 1. Pip historically uses a special header path in virtual environments.
    # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We
    #    only do the same when not running in a virtual environment because
    #    pip's historical header path logic (see point 1) did not do this.
    if running_under_virtualenv():
        if user:
            base = variables.get("userbase", sys.prefix)
        else:
            base = variables.get("base", sys.prefix)
        python_xy = f"python{get_major_minor_version()}"
        paths["include"] = os.path.join(base, "include", "site", python_xy)
    elif not dist_name:
        dist_name = "UNKNOWN"

    scheme = Scheme(
        platlib=paths["platlib"],
        purelib=paths["purelib"],
        headers=os.path.join(paths["include"], dist_name),
        scripts=paths["scripts"],
        data=paths["data"],
    )
    if root is not None:
        for key in SCHEME_KEYS:
            value = change_root(root, getattr(scheme, key))
            setattr(scheme, key, value)
    return scheme


def get_bin_prefix() -> str:
    # Forcing to use /usr/local/bin for standard macOS framework installs.
    if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/":
        return "/usr/local/bin"
    return sysconfig.get_paths()["scripts"]


def get_purelib() -> str:
    return sysconfig.get_paths()["purelib"]


def get_platlib() -> str:
    return sysconfig.get_paths()["platlib"]
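
A hedged demonstration of the scheme resolution above. These helpers live in pip's private _internal package, so the import is suitable for exploration only; "example-dist" is an invented distribution name.

# Hypothetical exploration of pip's install-location logic.
from pip._internal.locations._sysconfig import get_bin_prefix, get_scheme

scheme = get_scheme("example-dist")
print("purelib:", scheme.purelib)    # site-packages for pure-Python code
print("platlib:", scheme.platlib)    # site-packages for extension modules
print("headers:", scheme.headers)    # include dir, suffixed with the dist name
print("scripts:", get_bin_prefix())  # where console-script wrappers land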
spaces/Atualli/yoloxTeste/configs/yolox_l.py
DELETED
@@ -1,15 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 1.0
        self.width = 1.0
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
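
The only thing this config changes relative to the base experiment is the depth and width multipliers (depth scales the number of blocks, width scales the channel counts). As a hedged sketch, a custom model size can be defined the same way; the multiplier values below are illustrative, not an official YOLOX preset.

# Hypothetical custom-size experiment in the same style as yolox_l.py.
import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.85  # illustrative value between the m and l presets
        self.width = 0.90
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]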
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin_meta.py
DELETED
@@ -1,350 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
"""
|
5 |
-
Note:
|
6 |
-
For your custom dataset, there is no need to hard-code metadata anywhere in the code.
|
7 |
-
For example, for COCO-format dataset, metadata will be obtained automatically
|
8 |
-
when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
|
9 |
-
during loading.
|
10 |
-
|
11 |
-
However, we hard-coded metadata for a few common dataset here.
|
12 |
-
The only goal is to allow users who don't have these dataset to use pre-trained models.
|
13 |
-
Users don't have to download a COCO json (which contains metadata), in order to visualize a
|
14 |
-
COCO model (with correct class names and colors).
|
15 |
-
"""
|
16 |
-
|
17 |
-
|
18 |
-
# All coco categories, together with their nice-looking visualization colors
|
19 |
-
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
|
20 |
-
COCO_CATEGORIES = [
|
21 |
-
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
|
22 |
-
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
|
23 |
-
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
|
24 |
-
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
|
25 |
-
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
|
26 |
-
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
|
27 |
-
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
|
28 |
-
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
|
29 |
-
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
|
30 |
-
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
|
31 |
-
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
|
32 |
-
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
|
33 |
-
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
|
34 |
-
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
|
35 |
-
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
|
36 |
-
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
|
37 |
-
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
|
38 |
-
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
|
39 |
-
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
|
40 |
-
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
|
41 |
-
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
|
42 |
-
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
|
43 |
-
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
|
44 |
-
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
|
45 |
-
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
|
46 |
-
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
|
47 |
-
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
|
48 |
-
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
|
49 |
-
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
|
50 |
-
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
|
51 |
-
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
|
52 |
-
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
|
53 |
-
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
|
54 |
-
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
|
55 |
-
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
|
56 |
-
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
|
57 |
-
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
|
58 |
-
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
|
59 |
-
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
|
60 |
-
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
|
61 |
-
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
|
62 |
-
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
|
63 |
-
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
|
64 |
-
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
|
65 |
-
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
|
66 |
-
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
|
67 |
-
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
|
68 |
-
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
|
69 |
-
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
|
70 |
-
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
|
71 |
-
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
|
72 |
-
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
|
73 |
-
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
|
74 |
-
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
|
75 |
-
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
|
76 |
-
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
|
77 |
-
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
|
78 |
-
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
|
79 |
-
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
|
80 |
-
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
|
81 |
-
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
|
82 |
-
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
|
83 |
-
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
|
84 |
-
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
|
85 |
-
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
|
86 |
-
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
|
87 |
-
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
|
88 |
-
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
|
89 |
-
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
|
90 |
-
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
|
91 |
-
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
|
92 |
-
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
|
93 |
-
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
|
94 |
-
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
|
95 |
-
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
|
96 |
-
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
|
97 |
-
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
|
98 |
-
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
|
99 |
-
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
|
100 |
-
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
|
101 |
-
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
|
102 |
-
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
|
103 |
-
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
|
104 |
-
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
|
105 |
-
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
|
106 |
-
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
|
107 |
-
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
|
108 |
-
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
|
109 |
-
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
|
110 |
-
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
|
111 |
-
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
|
112 |
-
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
|
113 |
-
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
|
114 |
-
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
|
115 |
-
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
|
116 |
-
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
|
117 |
-
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
|
118 |
-
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
|
119 |
-
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
|
120 |
-
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
|
121 |
-
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
|
122 |
-
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
|
123 |
-
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
|
124 |
-
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
|
125 |
-
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
|
126 |
-
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
|
127 |
-
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
|
128 |
-
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
|
129 |
-
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
|
130 |
-
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
|
131 |
-
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
|
132 |
-
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
|
133 |
-
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
|
134 |
-
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
|
135 |
-
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
|
136 |
-
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
|
137 |
-
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
|
138 |
-
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
|
139 |
-
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
|
140 |
-
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
|
141 |
-
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
|
142 |
-
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
|
143 |
-
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
|
144 |
-
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
|
145 |
-
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
|
146 |
-
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
|
147 |
-
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
|
148 |
-
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
|
149 |
-
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
|
150 |
-
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
|
151 |
-
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
|
152 |
-
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
|
153 |
-
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
|
154 |
-
]
|
155 |
-
|
156 |
-
# fmt: off
|
157 |
-
COCO_PERSON_KEYPOINT_NAMES = (
|
158 |
-
"nose",
|
159 |
-
"left_eye", "right_eye",
|
160 |
-
"left_ear", "right_ear",
|
161 |
-
"left_shoulder", "right_shoulder",
|
162 |
-
"left_elbow", "right_elbow",
|
163 |
-
"left_wrist", "right_wrist",
|
164 |
-
"left_hip", "right_hip",
|
165 |
-
"left_knee", "right_knee",
|
166 |
-
"left_ankle", "right_ankle",
|
167 |
-
)
|
168 |
-
# fmt: on
|
169 |
-
|
170 |
-
# Pairs of keypoints that should be exchanged under horizontal flipping
|
171 |
-
COCO_PERSON_KEYPOINT_FLIP_MAP = (
|
172 |
-
("left_eye", "right_eye"),
|
173 |
-
("left_ear", "right_ear"),
|
174 |
-
("left_shoulder", "right_shoulder"),
|
175 |
-
("left_elbow", "right_elbow"),
|
176 |
-
("left_wrist", "right_wrist"),
|
177 |
-
("left_hip", "right_hip"),
|
178 |
-
("left_knee", "right_knee"),
|
179 |
-
("left_ankle", "right_ankle"),
|
180 |
-
)
|
181 |
-
|
182 |
-
# rules for pairs of keypoints to draw a line between, and the line color to use.
|
183 |
-
KEYPOINT_CONNECTION_RULES = [
|
184 |
-
# face
|
185 |
-
("left_ear", "left_eye", (102, 204, 255)),
|
186 |
-
("right_ear", "right_eye", (51, 153, 255)),
|
187 |
-
("left_eye", "nose", (102, 0, 204)),
|
188 |
-
("nose", "right_eye", (51, 102, 255)),
|
189 |
-
# upper-body
|
190 |
-
("left_shoulder", "right_shoulder", (255, 128, 0)),
|
191 |
-
("left_shoulder", "left_elbow", (153, 255, 204)),
|
192 |
-
("right_shoulder", "right_elbow", (128, 229, 255)),
|
193 |
-
("left_elbow", "left_wrist", (153, 255, 153)),
|
194 |
-
("right_elbow", "right_wrist", (102, 255, 224)),
|
195 |
-
# lower-body
|
196 |
-
("left_hip", "right_hip", (255, 102, 0)),
|
197 |
-
("left_hip", "left_knee", (255, 255, 77)),
|
198 |
-
("right_hip", "right_knee", (153, 255, 204)),
|
199 |
-
("left_knee", "left_ankle", (191, 255, 128)),
|
200 |
-
("right_knee", "right_ankle", (255, 195, 77)),
|
201 |
-
]
|
202 |
-
|
203 |
-
# All Cityscapes categories, together with their nice-looking visualization colors
|
204 |
-
# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
|
205 |
-
CITYSCAPES_CATEGORIES = [
|
206 |
-
{"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
|
207 |
-
{"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
|
208 |
-
{"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
|
209 |
-
{"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
|
210 |
-
{"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
|
211 |
-
{"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
|
212 |
-
{"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
|
213 |
-
{"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
|
214 |
-
{"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
|
215 |
-
{"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
|
216 |
-
{"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
|
217 |
-
{"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
|
218 |
-
{"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
|
219 |
-
{"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
|
220 |
-
{"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
|
221 |
-
{"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
|
222 |
-
{"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
|
223 |
-
{"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
|
224 |
-
{"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
|
225 |
-
]
|
226 |
-
|
227 |
-
# fmt: off
|
228 |
-
ADE20K_SEM_SEG_CATEGORIES = [
|
229 |
-
"wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
|
230 |
-
]
|
231 |
-
# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
|
232 |
-
# fmt: on
|
233 |
-
|
234 |
-
|
235 |
-
def _get_coco_instances_meta():
|
236 |
-
thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
|
237 |
-
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
|
238 |
-
assert len(thing_ids) == 80, len(thing_ids)
|
239 |
-
# Mapping from the incontiguous COCO category id to an id in [0, 79]
|
240 |
-
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
|
241 |
-
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
|
242 |
-
ret = {
|
243 |
-
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
|
244 |
-
"thing_classes": thing_classes,
|
245 |
-
"thing_colors": thing_colors,
|
246 |
-
}
|
247 |
-
return ret
|
248 |
-
|
249 |
-
|
250 |
-
def _get_coco_panoptic_separated_meta():
|
251 |
-
"""
|
252 |
-
Returns metadata for "separated" version of the panoptic segmentation dataset.
|
253 |
-
"""
|
254 |
-
stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
|
255 |
-
assert len(stuff_ids) == 53, len(stuff_ids)
|
256 |
-
|
257 |
-
# For semantic segmentation, this mapping maps from contiguous stuff id
|
258 |
-
# (in [0, 53], used in models) to ids in the dataset (used for processing results)
|
259 |
-
# The id 0 is mapped to an extra category "thing".
|
260 |
-
stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
|
261 |
-
# When converting COCO panoptic annotations to semantic annotations
|
262 |
-
# We label the "thing" category to 0
|
263 |
-
stuff_dataset_id_to_contiguous_id[0] = 0
|
264 |
-
|
265 |
-
# 54 names for COCO stuff categories (including "things")
|
266 |
-
stuff_classes = ["things"] + [
|
267 |
-
k["name"].replace("-other", "").replace("-merged", "")
|
268 |
-
for k in COCO_CATEGORIES
|
269 |
-
if k["isthing"] == 0
|
270 |
-
]
|
271 |
-
|
272 |
-
# NOTE: I randomly picked a color for things
|
273 |
-
stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
|
274 |
-
ret = {
|
275 |
-
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
|
276 |
-
"stuff_classes": stuff_classes,
|
277 |
-
"stuff_colors": stuff_colors,
|
278 |
-
}
|
279 |
-
ret.update(_get_coco_instances_meta())
|
280 |
-
return ret
|
281 |
-
|
282 |
-
|
283 |
-
def _get_builtin_metadata(dataset_name):
|
284 |
-
if dataset_name == "coco":
|
285 |
-
return _get_coco_instances_meta()
|
286 |
-
if dataset_name == "coco_panoptic_separated":
|
287 |
-
return _get_coco_panoptic_separated_meta()
|
288 |
-
elif dataset_name == "coco_panoptic_standard":
|
289 |
-
meta = {}
|
290 |
-
# The following metadata maps contiguous id from [0, #thing categories +
|
291 |
-
# #stuff categories) to their names and colors. We have to replica of the
|
292 |
-
# same name and color under "thing_*" and "stuff_*" because the current
|
293 |
-
# visualization function in D2 handles thing and class classes differently
|
294 |
-
# due to some heuristic used in Panoptic FPN. We keep the same naming to
|
295 |
-
# enable reusing existing visualization functions.
|
296 |
-
thing_classes = [k["name"] for k in COCO_CATEGORIES]
|
297 |
-
thing_colors = [k["color"] for k in COCO_CATEGORIES]
|
298 |
-
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
|
299 |
-
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
|
300 |
-
|
301 |
-
meta["thing_classes"] = thing_classes
|
302 |
-
meta["thing_colors"] = thing_colors
|
303 |
-
meta["stuff_classes"] = stuff_classes
|
304 |
-
meta["stuff_colors"] = stuff_colors
|
305 |
-
|
306 |
-
# Convert category id for training:
|
307 |
-
# category id: like semantic segmentation, it is the class id for each
|
308 |
-
# pixel. Since there are some classes not used in evaluation, the category
|
309 |
-
# id is not always contiguous and thus we have two set of category ids:
|
310 |
-
# - original category id: category id in the original dataset, mainly
|
311 |
-
# used for evaluation.
|
312 |
-
# - contiguous category id: [0, #classes), in order to train the linear
|
313 |
-
# softmax classifier.
|
314 |
-
thing_dataset_id_to_contiguous_id = {}
|
315 |
-
stuff_dataset_id_to_contiguous_id = {}
|
316 |
-
|
317 |
-
for i, cat in enumerate(COCO_CATEGORIES):
|
318 |
-
if cat["isthing"]:
|
319 |
-
thing_dataset_id_to_contiguous_id[cat["id"]] = i
|
320 |
-
else:
|
321 |
-
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
|
322 |
-
|
323 |
-
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
|
324 |
-
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
|
325 |
-
|
326 |
-
return meta
|
327 |
-
elif dataset_name == "coco_person":
|
328 |
-
return {
|
329 |
-
"thing_classes": ["person"],
|
330 |
-
"keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
|
331 |
-
"keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
|
332 |
-
"keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
|
333 |
-
}
|
334 |
-
elif dataset_name == "cityscapes":
|
335 |
-
# fmt: off
|
336 |
-
CITYSCAPES_THING_CLASSES = [
|
337 |
-
"person", "rider", "car", "truck",
|
338 |
-
"bus", "train", "motorcycle", "bicycle",
|
339 |
-
]
|
340 |
-
CITYSCAPES_STUFF_CLASSES = [
|
341 |
-
"road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
|
342 |
-
"traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
|
343 |
-
"truck", "bus", "train", "motorcycle", "bicycle",
|
344 |
-
]
|
345 |
-
# fmt: on
|
346 |
-
return {
|
347 |
-
"thing_classes": CITYSCAPES_THING_CLASSES,
|
348 |
-
"stuff_classes": CITYSCAPES_STUFF_CLASSES,
|
349 |
-
}
|
350 |
-
raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
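
These helpers feed dataset registration. Below is a minimal usage sketch, assuming detectron2 is installed; the dataset name and the annotation/image paths are hypothetical placeholders, while `register_coco_instances` and `MetadataCatalog` are detectron2's public API:

```python
# Minimal sketch: attach the built-in COCO metadata to a custom COCO-format split.
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata

meta = _get_builtin_metadata("coco")  # thing_classes, thing_colors, id mapping

# Register a hypothetical COCO-format split and attach the metadata to it.
register_coco_instances("my_coco_split", meta, "annotations.json", "images/")
print(MetadataCatalog.get("my_coco_split").thing_classes[:5])
```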
spaces/Banbri/zcvzcv/README.md
DELETED
@@ -1,158 +0,0 @@
---
title: cbv
colorFrom: blue
colorTo: yellow
sdk: docker
pinned: true
app_port: 3000
---

# AI Comic Factory

*(note: the website "aicomicfactory.com" is not affiliated with the AI Comic Factory project, nor is it created or maintained by the AI Comic Factory team. If you see that their website has an issue, please contact them directly)*

## Running the project at home

First, I would like to highlight that everything is open-source (see [here](https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory/tree/main), [here](https://huggingface.co/spaces/jbilcke-hf/VideoChain-API/tree/main), [here](https://huggingface.co/spaces/hysts/SD-XL/tree/main), [here](https://github.com/huggingface/text-generation-inference)).

However, the project isn't a monolithic Space that can be duplicated and run immediately:
it requires various components to run for the frontend, backend, LLM, SDXL etc.

If you try to duplicate the project and open the `.env` file, you will see it requires some variables.

Provider config:
- `LLM_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "OPENAI"
- `RENDERING_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "REPLICATE", "VIDEOCHAIN" for now, unless you code your own custom solution

Auth config:
- `AUTH_HF_API_TOKEN`: necessary if you decide to use an Inference API model or a custom inference endpoint
- `AUTH_OPENAI_TOKEN`: only if you decide to use OpenAI for the LLM engine
- `AUTH_VIDEOCHAIN_API_TOKEN`: secret token to access the VideoChain API server
- `AUTH_REPLICATE_API_TOKEN`: in case you want to use Replicate.com

Rendering config:
- `RENDERING_HF_INFERENCE_ENDPOINT_URL`: necessary if you decide to use a custom inference endpoint
- `RENDERING_VIDEOCHAIN_API_URL`: url to the VideoChain API server
- `RENDERING_HF_INFERENCE_ENDPOINT_URL`: optional, defaults to nothing
- `RENDERING_HF_INFERENCE_API_BASE_MODEL`: optional, defaults to "stabilityai/stable-diffusion-xl-base-1.0"
- `RENDERING_HF_INFERENCE_API_REFINER_MODEL`: optional, defaults to "stabilityai/stable-diffusion-xl-refiner-1.0"
- `RENDERING_REPLICATE_API_MODEL`: optional, defaults to "stabilityai/sdxl"
- `RENDERING_REPLICATE_API_MODEL_VERSION`: optional, in case you want to change the version

Language model config:
- `LLM_HF_INFERENCE_ENDPOINT_URL`: "https://llama-v2-70b-chat.ngrok.io"
- `LLM_HF_INFERENCE_API_MODEL`: "codellama/CodeLlama-7b-hf"

In addition, there are some community sharing variables that you can just ignore.
Those variables are not required to run the AI Comic Factory on your own website or computer
(they are meant to create a connection with the Hugging Face community,
and thus only make sense for official Hugging Face apps):
- `NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING`: you don't need this
- `COMMUNITY_API_URL`: you don't need this
- `COMMUNITY_API_TOKEN`: you don't need this
- `COMMUNITY_API_ID`: you don't need this

Please read the default `.env` config file for more information.
To customise a variable locally, you should create a `.env.local` file
(do not commit this file, as it will contain your secrets).

-> If you intend to run it with local, cloud-hosted and/or proprietary models **you are going to need to code 👨‍💻**.

## The LLM API (Large Language Model)

Currently the AI Comic Factory uses [Llama-2 70b](https://huggingface.co/blog/llama2) through an [Inference Endpoint](https://huggingface.co/docs/inference-endpoints/index).

You have three options:

### Option 1: Use an Inference API model

This is a new option added recently, where you can use one of the models from the Hugging Face Hub. By default we suggest using CodeLlama 34b, as it will provide better results than the 7b model.

To activate it, create a `.env.local` configuration file:

```bash
LLM_ENGINE="INFERENCE_API"

HF_API_TOKEN="Your Hugging Face token"

# "codellama/CodeLlama-7b-hf" is used by default, but you can change this
# note: you should use a model able to generate JSON responses,
# so it is strongly suggested to use at least the 34b model
HF_INFERENCE_API_MODEL="codellama/CodeLlama-7b-hf"
```

### Option 2: Use an Inference Endpoint URL

If you would like to run the AI Comic Factory on a private LLM running on the Hugging Face Inference Endpoint service, create a `.env.local` configuration file:

```bash
LLM_ENGINE="INFERENCE_ENDPOINT"

HF_API_TOKEN="Your Hugging Face token"

HF_INFERENCE_ENDPOINT_URL="path to your inference endpoint url"
```

To run this kind of LLM locally, you can use [TGI](https://github.com/huggingface/text-generation-inference) (please read [this post](https://github.com/huggingface/text-generation-inference/issues/726) for more information about the licensing).

### Option 3: Use an OpenAI API Key

This is a new option added recently, where you can use the OpenAI API with an OpenAI API Key.

To activate it, create a `.env.local` configuration file:

```bash
LLM_ENGINE="OPENAI"

# default openai api base url is: https://api.openai.com/v1
LLM_OPENAI_API_BASE_URL="Your OpenAI API Base URL"

LLM_OPENAI_API_MODEL="gpt-3.5-turbo"

AUTH_OPENAI_API_KEY="Your OpenAI API Key"
```
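
For reference, this is roughly what the OPENAI engine does under the hood. A minimal sketch in Python (not the app's actual TypeScript code; the prompt text is made up, and it assumes the official `openai` Python package v1+):

```python
# Minimal sketch of an OpenAI-compatible chat call, mirroring the env vars above.
import os
from openai import OpenAI  # assumes openai>=1.0

client = OpenAI(
    base_url=os.environ.get("LLM_OPENAI_API_BASE_URL", "https://api.openai.com/v1"),
    api_key=os.environ["AUTH_OPENAI_API_KEY"],
)

response = client.chat.completions.create(
    model=os.environ.get("LLM_OPENAI_API_MODEL", "gpt-3.5-turbo"),
    messages=[
        {"role": "system", "content": "You write comic panel descriptions as JSON."},
        {"role": "user", "content": "A 4-panel story about a robot barista."},
    ],
)
print(response.choices[0].message.content)
```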

### Option 4: Fork and modify the code to use a different LLM system

Another option could be to disable the LLM completely and replace it with another LLM protocol and/or provider (e.g. Claude, Replicate), or with a human-generated story instead (by returning mock or static data), as in the sketch below.
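
A minimal sketch of that "mock or static data" idea, in Python for brevity (the real app is TypeScript; the panel fields shown are hypothetical, not the app's actual schema):

```python
# A hypothetical static "LLM" that always returns the same story panels.
import json

STATIC_PANELS = [
    {"panel": 1, "caption": "Dawn over the city.", "instructions": "wide shot, sunrise"},
    {"panel": 2, "caption": "A knock at the door.", "instructions": "close-up, door"},
]

def generate_story(_prompt: str) -> str:
    """Stand-in for the LLM call: ignore the prompt, return fixed JSON."""
    return json.dumps(STATIC_PANELS)

print(generate_story("anything"))
```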

### Notes

It is possible that I will modify the AI Comic Factory to make this easier in the future (e.g. add support for Claude or Replicate).

## The Rendering API

This API is used to generate the panel images. This is an API I created for my various projects at Hugging Face.

I haven't written documentation for it yet, but basically it is "just a wrapper ™" around other existing APIs:

- The [hysts/SD-XL](https://huggingface.co/spaces/hysts/SD-XL?duplicate=true) Space by [@hysts](https://huggingface.co/hysts)
- And other APIs for making videos, adding audio etc., but you won't need them for the AI Comic Factory

### Option 1: Deploy VideoChain yourself

You will have to [clone](https://huggingface.co/spaces/jbilcke-hf/VideoChain-API?duplicate=true) the [source code](https://huggingface.co/spaces/jbilcke-hf/VideoChain-API/tree/main).

Unfortunately, I haven't had the time to write the documentation for VideoChain yet.
(When I do, I will update this document to point to VideoChain's README.)

### Option 2: Use Replicate

To use Replicate, create a `.env.local` configuration file:

```bash
RENDERING_ENGINE="REPLICATE"

RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"

RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"

AUTH_REPLICATE_API_TOKEN="Your Replicate token"
```

### Option 3: Use another SDXL API

If you fork the project, you will be able to modify the code to use the Stable Diffusion technology of your choice (local, open-source, proprietary, your custom HF Space, etc).

It could even be something else entirely, such as Dall-E.
spaces/Benson/text-generation/Examples/Descarga Gratuita De Fuego Mx Mod Apk 50 Mb.md
DELETED
@@ -1,69 +0,0 @@

<h1>Free Fire Max: A Premium Version of Free Fire with Improved Graphics and Features</h1>
<p>If you are a fan of Free Fire, the popular mobile battle royale game, you may have heard of Free Fire Max, an enhanced version of the game that offers better graphics, animations and gameplay. But what exactly is Free Fire Max, and how can you download it on your device? And what are the benefits of using a mod apk file that claims to give you unlimited resources and access to everything in the game? In this article, we will answer these questions and more.</p>
<h2>free fire max mod apk 50 mb free download</h2><br /><p><b><b>Download</b> 🔗 <a href="https://bltlly.com/2v6J0Y">https://bltlly.com/2v6J0Y</a></b></p><br /><br />
<h2>What is Free Fire Max?</h2>
<p>Free Fire Max is a standalone app that provides the same Free Fire gameplay that millions of players love, but with improved specifications. It is designed to deliver a premium, immersive experience in a battle royale setting. You can enjoy a variety of exciting game modes with all Free Fire players through the exclusive Firelink technology. You can also experience combat like never before, with Ultra HD resolutions and stunning effects.</p>
<p>Free Fire Max differs from the original Free Fire game in several ways. Some of the differences include:</p>
<ul>
<li>Better graphics quality: Free Fire Max has HD graphics, improved special effects and smoother gameplay. It also has Ultra HD textures, realistic map designs, immersive sound effects and new weapon animations.</li>
<li>New features: Free Fire Max has exclusive features that are not available in the original game, such as a 360-degree lobby where you can show off your items, a Craftland mode where you can create and play on your own custom maps, and a new Bermuda Max map with revamped areas.</li>
<li>Firelink technology: With Firelink, you can log in with your existing Free Fire account to play Free Fire Max without any problems. Progress and items are synced across both apps in real time. You can also play with Free Fire and Free Fire Max players together, no matter which app they use.</li>
</ul>

<h3>How to download the Free Fire Max mod apk (50 MB)</h3>
<p>If you want to download Free Fire Max on your device, you can do so by following these steps:</p>
<ol>
<li>Go to [this link]( 1 ) to download the mod apk file for Free Fire Max. The file size is around 50 MB.</li>
<li>Once the download is complete, locate and install the file on your device. You may need to enable installation from unknown sources in your settings.</li>
<li>Open the app and enjoy playing Free Fire Max with unlimited resources and features.</li>
</ol>
<p>However, before downloading and using the mod apk file, you should be aware of some risks and legal issues. Mod apk files are modified versions of the original app that bypass security measures and alter game data. They are not authorized by Garena, the developer of Free Fire, and may contain viruses, malware or spyware that can damage your device or steal your personal information. They may also violate the game's terms of service and privacy policy, and result in your account being banned or suspended. Therefore, you should use the mod apk file at your own risk and discretion, and only from trusted sources.</p>
<h4>What are the benefits of using the Free Fire Max mod apk (50 MB)?</h4>
<p>If you decide to use the mod apk file for Free Fire Max, you can enjoy some benefits that are not available in the official app. Some of these benefits are:</p>
<table>
<tr>
<th>Benefit</th>
<th>Description</th>
</tr>
<tr>
<td>Unlimited diamonds and coins</td>
<td>You can get unlimited in-game currency that you can use to buy whatever you want in the game, such as characters, weapons, skins, items and more. You don't have to spend real money or complete tasks to earn them.</td>
</tr>
<tr>
<td>Access to all characters, weapons, skins and items</td>
<td></td>
</tr>
<tr>
<td>Mod menu with various cheats and hacks</td>
<td>You can access a mod menu that lets you enable or disable various cheats and hacks in the game, such as aimbot, wallhack, speed hack, auto headshot, unlimited health, unlimited ammo and more. You can gain an advantage over your enemies and win every match easily.</td>
</tr>
<tr>
<td>No ads and no root required</td>
<td>You can play the game without annoying ads or pop-ups that can interrupt your gameplay or consume your data. You also don't need to root your device to use the mod apk file.</td>
</tr>
</table>
<h2>Conclusion</h2>
<p>Free Fire Max is a premium version of Free Fire that offers improved graphics and features for a more immersive and exciting battle royale experience. You can download it on your device by following the steps above, or you can use a mod apk file that gives you unlimited resources and access to everything in the game. However, you should be careful and responsible when using mod apk files, as they can carry risks and legal issues. If you are interested in trying the Free Fire Max mod apk (50 MB), you can download it from [this link] and enjoy the game with all the benefits.</p>
<p></p>
<p>Have you tried the Free Fire Max mod apk (50 MB)? What do you think of it? Let us know in the comments below!</p>
<h3>Frequently asked questions</h3>
<p>Here are some frequently asked questions about the Free Fire Max mod apk (50 MB):</p>
<ul>
<li><b>Is it safe to use the Free Fire Max mod apk (50 MB)?</b></li>
<p>The Free Fire Max mod apk (50 MB) is not an official Garena app, and it may contain viruses, malware or spyware that can damage your device or steal your personal information. It may also violate the game's terms of service and privacy policy, and result in your account being banned or suspended. Therefore, you should use it at your own risk and discretion, and only from trusted sources.</p>
<li><b>How can I update the Free Fire Max mod apk (50 MB)?</b></li>

<li><b>Can I play with my friends who use Free Fire or Free Fire Max?</b></li>
<p>Yes, you can play with your friends who use Free Fire or Free Fire Max through the Firelink technology. You just need to log in with your existing Free Fire account to play Free Fire Max with them. Progress and items are synced across both apps in real time. You can also play with Free Fire and Free Fire Max players together, no matter which app they use.</p>
<li><b>Will I get banned for using the Free Fire Max mod apk (50 MB)?</b></li>
<p>There is a chance you may get banned for using the Free Fire Max mod apk (50 MB), since it is not an app authorized by Garena and it alters game data. Garena has a strict anti-cheat system that detects and punishes any player who uses cheats or hacks in the game. If you are caught using the Free Fire Max mod apk (50 MB), you may face consequences such as account suspension, account deletion or legal action. Therefore, you should be careful and responsible when using the Free Fire Max mod apk (50 MB), and avoid using it in ranked or competitive matches.</p>
<li><b>What are some alternatives to the Free Fire Max mod apk (50 MB)?</b></li>
<p>If you are looking for some alternatives to the Free Fire Max mod apk (50 MB), you can try these options:</p>
<ul>
<li>Download the official Free Fire Max app from the Google Play Store or the App Store. You can enjoy the same gameplay and features as the Free Fire Max mod apk (50 MB), but without any risks or legal issues. You can also support the developers and the game by purchasing in-game currency and items legitimately.</li>
<li>Use a VPN app to change your location and access Free Fire Max servers in other regions. You can play the game with players from different countries and experience different game modes and events. You can also bypass any geo-restrictions or network issues that may prevent you from playing the game.</li>

</ul></p>
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/models/vqgan.py
DELETED
@@ -1,363 +0,0 @@
import torch
import torch.nn.functional as F
import pytorch_lightning as pl

from main import instantiate_from_config

from taming.modules.diffusionmodules.model import Encoder, Decoder
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from taming.modules.vqvae.quantize import GumbelQuantize


class VQModel(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 ):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap, sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.image_key = image_key
        if colorize_nlabels is not None:
            assert type(colorize_nlabels) == int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input):
        quant, diff, _ = self.encode(input)
        dec = self.decode(quant)
        return dec, diff

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()

    def training_step(self, batch, batch_idx, optimizer_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")

            self.log("train/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +
                                  list(self.decoder.parameters()) +
                                  list(self.quantize.parameters()) +
                                  list(self.quant_conv.parameters()) +
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    def log_images(self, batch, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


class VQSegmentationModel(VQModel):
    def __init__(self, n_labels, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer("colorize", torch.randn(3, n_labels, 1, 1))

    def configure_optimizers(self):
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +
                                  list(self.decoder.parameters()) +
                                  list(self.quantize.parameters()) +
                                  list(self.quant_conv.parameters()) +
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        return opt_ae

    def training_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="train")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return aeloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="val")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        total_loss = log_dict_ae["val/total_loss"]
        self.log("val/total_loss", total_loss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        return aeloss

    @torch.no_grad()
    def log_images(self, batch, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            # convert logits to indices
            xrec = torch.argmax(xrec, dim=1, keepdim=True)
            xrec = F.one_hot(xrec, num_classes=x.shape[1])
            xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log


class VQNoDiscModel(VQModel):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None
                 ):
        super().__init__(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=n_embed, embed_dim=embed_dim,
                         ckpt_path=ckpt_path, ignore_keys=ignore_keys, image_key=image_key,
                         colorize_nlabels=colorize_nlabels)

    def training_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        # autoencode
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="train")
        output = pl.TrainResult(minimize=aeloss)
        output.log("train/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return output

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        output = pl.EvalResult(checkpoint_on=rec_loss)
        output.log("val/rec_loss", rec_loss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log("val/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae)

        return output

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(list(self.encoder.parameters()) +
                                     list(self.decoder.parameters()) +
                                     list(self.quantize.parameters()) +
                                     list(self.quant_conv.parameters()) +
                                     list(self.post_quant_conv.parameters()),
                                     lr=self.learning_rate, betas=(0.5, 0.9))
        return optimizer


class GumbelVQ(VQModel):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 temperature_scheduler_config,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 kl_weight=1e-8,
                 remap=None,
                 ):

        z_channels = ddconfig["z_channels"]
        super().__init__(ddconfig,
                         lossconfig,
                         n_embed,
                         embed_dim,
                         ckpt_path=None,
                         ignore_keys=ignore_keys,
                         image_key=image_key,
                         colorize_nlabels=colorize_nlabels,
                         monitor=monitor,
                         )

        self.loss.n_classes = n_embed
        self.vocab_size = n_embed

        self.quantize = GumbelQuantize(z_channels, embed_dim,
                                       n_embed=n_embed,
                                       kl_weight=kl_weight, temp_init=1.0,
                                       remap=remap)

        self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config)  # annealing of temp

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def temperature_scheduling(self):
        self.quantize.temperature = self.temperature_scheduler(self.global_step)

    def encode_to_prequant(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode_code(self, code_b):
        raise NotImplementedError

    def training_step(self, batch, batch_idx, optimizer_idx):
        self.temperature_scheduling()
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            self.log("temperature", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def log_images(self, batch, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        # encode
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, _, _ = self.quantize(h)
        # decode
        x_rec = self.decode(quant)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        return log
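
For orientation, a minimal usage sketch of `VQModel` doing an encode/decode round trip. The `ddconfig` keys follow the published VQGAN configs and `taming.modules.losses.DummyLoss` is assumed to exist as a stub loss; treat both as assumptions about the repo layout rather than guaranteed entry points:

```python
# A minimal sketch, assuming the taming-transformers package layout above.
import torch

ddconfig = dict(  # keys follow the repo's published VQGAN configs
    double_z=False, z_channels=256, resolution=256, in_channels=3,
    out_ch=3, ch=128, ch_mult=[1, 1, 2, 2, 4], num_res_blocks=2,
    attn_resolutions=[16], dropout=0.0,
)
lossconfig = {"target": "taming.modules.losses.DummyLoss"}  # assumed stub loss

model = VQModel(ddconfig, lossconfig, n_embed=1024, embed_dim=256).eval()

x = torch.randn(1, 3, 256, 256)               # fake image batch in [-1, 1]
with torch.no_grad():
    quant, emb_loss, info = model.encode(x)   # quantized latents
    x_rec = model.decode(quant)               # reconstruction
print(quant.shape, x_rec.shape)
```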
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/special.py
DELETED
@@ -1,52 +0,0 @@
"""Special cased retries.

These are additional retry cases we still have to handle from the legacy
retry handler. They don't make sense as part of the standard mode retry
module. Ideally we should be able to remove this module.

"""
import logging
from binascii import crc32

from botocore.retries.base import BaseRetryableChecker

logger = logging.getLogger(__name__)


# TODO: This is an ideal candidate for the retryable trait once that's
# available.
class RetryIDPCommunicationError(BaseRetryableChecker):

    _SERVICE_NAME = 'sts'

    def is_retryable(self, context):
        service_name = context.operation_model.service_model.service_name
        if service_name != self._SERVICE_NAME:
            return False
        error_code = context.get_error_code()
        return error_code == 'IDPCommunicationError'


class RetryDDBChecksumError(BaseRetryableChecker):

    _CHECKSUM_HEADER = 'x-amz-crc32'
    _SERVICE_NAME = 'dynamodb'

    def is_retryable(self, context):
        service_name = context.operation_model.service_model.service_name
        if service_name != self._SERVICE_NAME:
            return False
        if context.http_response is None:
            return False
        checksum = context.http_response.headers.get(self._CHECKSUM_HEADER)
        if checksum is None:
            return False
        actual_crc32 = crc32(context.http_response.content) & 0xFFFFFFFF
        if actual_crc32 != int(checksum):
            logger.debug(
                "DynamoDB crc32 checksum does not match, "
                "expected: %s, actual: %s",
                checksum,
                actual_crc32,
            )
            return True
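
The checksum comparison above is easy to reproduce standalone. A minimal sketch (the response body and header value are made up):

```python
# Standalone sketch of the CRC32 check used by RetryDDBChecksumError.
from binascii import crc32

body = b'{"Item": {"id": {"S": "42"}}}'        # hypothetical response body
header_value = str(crc32(body) & 0xFFFFFFFF)   # what x-amz-crc32 would carry

# Masking with 0xFFFFFFFF keeps the value an unsigned 32-bit integer,
# matching how the service computes the header.
assert crc32(body) & 0xFFFFFFFF == int(header_value)
```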
spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/exceptions.py
DELETED
@@ -1,122 +0,0 @@
from jmespath.compat import with_str_method


class JMESPathError(ValueError):
    pass


@with_str_method
class ParseError(JMESPathError):
    _ERROR_MESSAGE = 'Invalid jmespath expression'

    def __init__(self, lex_position, token_value, token_type,
                 msg=_ERROR_MESSAGE):
        super(ParseError, self).__init__(lex_position, token_value, token_type)
        self.lex_position = lex_position
        self.token_value = token_value
        self.token_type = token_type.upper()
        self.msg = msg
        # Whatever catches the ParseError can fill in the full expression
        self.expression = None

    def __str__(self):
        # self.lex_position + 1 to account for the starting double quote char.
        underline = ' ' * (self.lex_position + 1) + '^'
        return (
            '%s: Parse error at column %s, '
            'token "%s" (%s), for expression:\n"%s"\n%s' % (
                self.msg, self.lex_position, self.token_value, self.token_type,
                self.expression, underline))


@with_str_method
class IncompleteExpressionError(ParseError):
    def set_expression(self, expression):
        self.expression = expression
        self.lex_position = len(expression)
        self.token_type = None
        self.token_value = None

    def __str__(self):
        # self.lex_position + 1 to account for the starting double quote char.
        underline = ' ' * (self.lex_position + 1) + '^'
        return (
            'Invalid jmespath expression: Incomplete expression:\n'
            '"%s"\n%s' % (self.expression, underline))


@with_str_method
class LexerError(ParseError):
    def __init__(self, lexer_position, lexer_value, message, expression=None):
        self.lexer_position = lexer_position
        self.lexer_value = lexer_value
        self.message = message
        super(LexerError, self).__init__(lexer_position,
                                         lexer_value,
                                         message)
        # Whatever catches LexerError can set this.
        self.expression = expression

    def __str__(self):
        underline = ' ' * self.lexer_position + '^'
        return 'Bad jmespath expression: %s:\n%s\n%s' % (
            self.message, self.expression, underline)


@with_str_method
class ArityError(ParseError):
    def __init__(self, expected, actual, name):
        self.expected_arity = expected
        self.actual_arity = actual
        self.function_name = name
        self.expression = None

    def __str__(self):
        return ("Expected %s %s for function %s(), "
                "received %s" % (
                    self.expected_arity,
                    self._pluralize('argument', self.expected_arity),
                    self.function_name,
                    self.actual_arity))

    def _pluralize(self, word, count):
        if count == 1:
            return word
        else:
            return word + 's'


@with_str_method
class VariadictArityError(ArityError):
    def __str__(self):
        return ("Expected at least %s %s for function %s(), "
                "received %s" % (
                    self.expected_arity,
                    self._pluralize('argument', self.expected_arity),
                    self.function_name,
                    self.actual_arity))


@with_str_method
class JMESPathTypeError(JMESPathError):
    def __init__(self, function_name, current_value, actual_type,
                 expected_types):
        self.function_name = function_name
        self.current_value = current_value
        self.actual_type = actual_type
        self.expected_types = expected_types

    def __str__(self):
        return ('In function %s(), invalid type for value: %s, '
                'expected one of: %s, received: "%s"' % (
                    self.function_name, self.current_value,
                    self.expected_types, self.actual_type))


class EmptyExpressionError(JMESPathError):
    def __init__(self):
        super(EmptyExpressionError, self).__init__(
            "Invalid JMESPath expression: cannot be empty.")


class UnknownFunctionError(JMESPathError):
    pass
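
A minimal sketch of how these exceptions surface in practice (assumes the `jmespath` package; the broken expression is deliberately incomplete):

```python
# Triggering and catching the parse-time errors defined above.
import jmespath
from jmespath.exceptions import ParseError

try:
    jmespath.compile("foo[?bar ==")  # incomplete filter expression
except ParseError as e:             # IncompleteExpressionError subclasses ParseError
    print(e)                        # message includes the expression and a ^ underline
```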
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/more.py
DELETED
The diff for this file is too large to render.
See raw diff
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/upload_docs.py
DELETED
@@ -1,213 +0,0 @@
# -*- coding: utf-8 -*-
"""upload_docs

Implements a Distutils 'upload_docs' subcommand (upload documentation to
sites other than PyPi such as devpi).
"""

from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
import http.client
import urllib.parse
import warnings

from .._importlib import metadata
from .. import SetuptoolsDeprecationWarning

from .upload import upload


def _encode(s):
    return s.encode('utf-8', 'surrogateescape')


class upload_docs(upload):
    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to sites other than PyPi such as devpi'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        return bool(
            self.upload_dir is None
            and metadata.entry_points(group='distutils.commands', name='build_sphinx')
        )

    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        log.warn(
            "Upload_docs command is deprecated. Use Read the Docs "
            "(https://readthedocs.org) instead.")
        upload.finalize_options(self)
        if self.upload_dir is None:
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = dict(build_sphinx.builder_target_dirs)['html']
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    tmpl = "no files found in upload directory '%s'"
                    raise DistutilsOptionError(tmpl % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        warnings.warn(
            "upload_docs is deprecated and will be removed in a future "
            "version. Use tools like httpie or curl instead.",
            SetuptoolsDeprecationWarning,
        )

        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data
        """
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary.encode('ascii')
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials).decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        msg = "Submitting documentation to %s" % (self.repository)
        self.announce(msg, log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http.client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http.client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            msg = 'Server response (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.INFO)
        elif r.status == 301:
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            msg = 'Upload successful. Visit %s' % location
            self.announce(msg, log.INFO)
        else:
            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
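
`_build_multipart` is self-contained enough to exercise directly. A minimal sketch (the field values mirror `upload_file` above; the project name and zip bytes are made up):

```python
# Building a multipart/form-data body with the classmethod above.
payload = {
    ":action": "doc_upload",
    "name": "example-project",                  # hypothetical project name
    "content": ("docs.zip", b"PK\x03\x04"),     # (filename, raw bytes) tuple
}
body, content_type = upload_docs._build_multipart(payload)
print(content_type)      # multipart/form-data; boundary=------------GH...
print(len(body), "bytes")
```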
spaces/CVPR/GFPGAN-example/gfpgan/models/__init__.py
DELETED
@@ -1,10 +0,0 @@
import importlib
from basicsr.utils import scandir
from os import path as osp

# automatically scan and import model modules for registry
# scan all the files that end with '_model.py' under the model folder
model_folder = osp.dirname(osp.abspath(__file__))
model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
# import all the model modules
_model_modules = [importlib.import_module(f'gfpgan.models.{file_name}') for file_name in model_filenames]
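
The point of importing every `*_model.py` is the import side effect: each module registers its model class with basicsr's registry at import time. A minimal sketch of the same pattern using only the standard library, with a hypothetical package name:

```python
# Generic auto-import pattern: import modules for their registration side effects.
import importlib
import pkgutil

import mypackage.models  # hypothetical package whose submodules self-register

for mod_info in pkgutil.iter_modules(mypackage.models.__path__):
    if mod_info.name.endswith("_model"):
        # each import runs module-level code such as @MODEL_REGISTRY.register()
        importlib.import_module(f"mypackage.models.{mod_info.name}")
```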
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/copy_if.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits copy_if
#include <thrust/system/detail/sequential/copy_if.h>
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/reduce_by_key.h
DELETED
@@ -1,44 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to #include the reduce_by_key.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch reduce_by_key
-
-#include <thrust/system/detail/sequential/reduce_by_key.h>
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include <thrust/system/cpp/detail/reduce_by_key.h>
-#include <thrust/system/cuda/detail/reduce_by_key.h>
-#include <thrust/system/omp/detail/reduce_by_key.h>
-#include <thrust/system/tbb/detail/reduce_by_key.h>
-#endif
-
-#define __THRUST_HOST_SYSTEM_REDUCE_BY_KEY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/reduce_by_key.h>
-#include __THRUST_HOST_SYSTEM_REDUCE_BY_KEY_HEADER
-#undef __THRUST_HOST_SYSTEM_REDUCE_BY_KEY_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_REDUCE_BY_KEY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/reduce_by_key.h>
-#include __THRUST_DEVICE_SYSTEM_REDUCE_BY_KEY_HEADER
-#undef __THRUST_DEVICE_SYSTEM_REDUCE_BY_KEY_HEADER
-
spaces/CVPR/WALT/cwalt/utils.py
DELETED
@@ -1,168 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri May 20 15:16:56 2022
-
-@author: dinesh
-"""
-
-import json
-import cv2
-from PIL import Image
-import numpy as np
-from dateutil.parser import parse
-
-def bb_intersection_over_union(box1, box2):
-    #print(box1, box2)
-    boxA = box1.copy()
-    boxB = box2.copy()
-    boxA[2] = boxA[0]+boxA[2]
-    boxA[3] = boxA[1]+boxA[3]
-    boxB[2] = boxB[0]+boxB[2]
-    boxB[3] = boxB[1]+boxB[3]
-    # determine the (x, y)-coordinates of the intersection rectangle
-    xA = max(boxA[0], boxB[0])
-    yA = max(boxA[1], boxB[1])
-    xB = min(boxA[2], boxB[2])
-    yB = min(boxA[3], boxB[3])
-
-    # compute the area of intersection rectangle
-    interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
-
-    if interArea == 0:
-        return 0
-    # compute the area of both the prediction and ground-truth
-    # rectangles
-    boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
-    boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
-
-    # compute the intersection over union by taking the intersection
-    # area and dividing it by the sum of prediction + ground-truth
-    # areas - the interesection area
-    iou = interArea / float(boxAArea + boxBArea - interArea)
-    return iou
-
-def bb_intersection_over_union_unoccluded(box1, box2, threshold=0.01):
-    #print(box1, box2)
-    boxA = box1.copy()
-    boxB = box2.copy()
-    boxA[2] = boxA[0]+boxA[2]
-    boxA[3] = boxA[1]+boxA[3]
-    boxB[2] = boxB[0]+boxB[2]
-    boxB[3] = boxB[1]+boxB[3]
-    # determine the (x, y)-coordinates of the intersection rectangle
-    xA = max(boxA[0], boxB[0])
-    yA = max(boxA[1], boxB[1])
-    xB = min(boxA[2], boxB[2])
-    yB = min(boxA[3], boxB[3])
-
-    # compute the area of intersection rectangle
-    interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
-
-    if interArea == 0:
-        return 0
-    # compute the area of both the prediction and ground-truth
-    # rectangles
-    boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
-    boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
-
-    # compute the intersection over union by taking the intersection
-    # area and dividing it by the sum of prediction + ground-truth
-    # areas - the interesection area
-    iou = interArea / float(boxAArea + boxBArea - interArea)
-
-    #print(iou)
-    # return the intersection over union value
-    occlusion = False
-    if iou > threshold and iou < 1:
-        #print(boxA[3], boxB[3], boxB[1])
-        if boxA[3] < boxB[3]:# and boxA[3] > boxB[1]:
-            if boxB[2] > boxA[0]:# and boxB[2] < boxA[2]:
-                #print('first', (boxB[2] - boxA[0])/(boxA[2] - boxA[0]))
-                if (min(boxB[2],boxA[2]) - boxA[0])/(boxA[2] - boxA[0]) > threshold:
-                    occlusion = True
-
-            if boxB[0] < boxA[2]: # boxB[0] > boxA[0] and
-                #print('second', (boxA[2] - boxB[0])/(boxA[2] - boxA[0]))
-                if (boxA[2] - max(boxB[0],boxA[0]))/(boxA[2] - boxA[0]) > threshold:
-                    occlusion = True
-    if occlusion == False:
-        iou = iou*0
-        #asas
-    # asas
-    #iou = 0.9 #iou*0
-    #print(box1, box2, iou, occlusion)
-    return iou
-def draw_tracks(image, tracks):
-    """
-    Draw on input image.
-
-    Args:
-        image (numpy.ndarray): image
-        tracks (list): list of tracks to be drawn on the image.
-
-    Returns:
-        numpy.ndarray: image with the track-ids drawn on it.
-    """
-
-    for trk in tracks:
-
-        trk_id = trk[1]
-        xmin = trk[2]
-        ymin = trk[3]
-        width = trk[4]
-        height = trk[5]
-
-        xcentroid, ycentroid = int(xmin + 0.5*width), int(ymin + 0.5*height)
-
-        text = "ID {}".format(trk_id)
-
-        cv2.putText(image, text, (xcentroid - 10, ycentroid - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
-        cv2.circle(image, (xcentroid, ycentroid), 4, (0, 255, 0), -1)
-
-    return image
-
-
-def draw_bboxes(image, tracks):
-    """
-    Draw the bounding boxes about detected objects in the image.
-
-    Args:
-        image (numpy.ndarray): Image or video frame.
-        bboxes (numpy.ndarray): Bounding boxes pixel coordinates as (xmin, ymin, width, height)
-        confidences (numpy.ndarray): Detection confidence or detection probability.
-        class_ids (numpy.ndarray): Array containing class ids (aka label ids) of each detected object.
-
-    Returns:
-        numpy.ndarray: image with the bounding boxes drawn on it.
-    """
-
-    for trk in tracks:
-        xmin = int(trk[2])
-        ymin = int(trk[3])
-        width = int(trk[4])
-        height = int(trk[5])
-        clr = (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255))
-        cv2.rectangle(image, (xmin, ymin), (xmin + width, ymin + height), clr, 2)
-
-    return image
-
-
-def num(v):
-    number_as_float = float(v)
-    number_as_int = int(number_as_float)
-    return number_as_int if number_as_float == number_as_int else number_as_float
-
-
-def parse_bbox(bbox_str):
-    bbox_list = bbox_str.strip('{').strip('}').split(',')
-    bbox_list = [num(elem) for elem in bbox_list]
-    return bbox_list
-
-def parse_seg(bbox_str):
-    bbox_list = bbox_str.strip('{').strip('}').split(',')
-    bbox_list = [num(elem) for elem in bbox_list]
-    ret = bbox_list # []
-    # for i in range(0, len(bbox_list) - 1, 2):
-    #     ret.append((bbox_list[i], bbox_list[i + 1]))
-    return ret
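
The two IoU helpers above take boxes as (x, y, width, height) and convert them to corner form in place before intersecting. A quick worked check of `bb_intersection_over_union` with hand-computable numbers, assuming the function is importable from this module; the values below are illustrative:

# Two 10x10 boxes offset by 5 pixels in x: intersection = 5*10 = 50,
# union = 100 + 100 - 50 = 150, so IoU = 50 / 150 = 1/3.
box1 = [0, 0, 10, 10]   # (x, y, w, h)
box2 = [5, 0, 10, 10]
print(bb_intersection_over_union(box1, box2))  # -> 0.333...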
spaces/CVPR/WALT/mmdet/models/detectors/mask_rcnn.py
DELETED
@@ -1,24 +0,0 @@
-from ..builder import DETECTORS
-from .two_stage import TwoStageDetector
-
-
-@DETECTORS.register_module()
-class MaskRCNN(TwoStageDetector):
-    """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
-
-    def __init__(self,
-                 backbone,
-                 rpn_head,
-                 roi_head,
-                 train_cfg,
-                 test_cfg,
-                 neck=None,
-                 pretrained=None):
-        super(MaskRCNN, self).__init__(
-            backbone=backbone,
-            neck=neck,
-            rpn_head=rpn_head,
-            roi_head=roi_head,
-            train_cfg=train_cfg,
-            test_cfg=test_cfg,
-            pretrained=pretrained)
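
MaskRCNN adds no behaviour of its own; it is a named configuration of `TwoStageDetector` that the `DETECTORS` registry can resolve from a config's `type` string. A pure-Python sketch of that subclass-as-named-configuration idiom; the class and argument names here are invented for illustration, not mmdetection's API:

# Illustrative only: a registry resolving a named subclass from a config dict.
class TwoStage:
    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg,
                 neck=None, pretrained=None):
        self.parts = dict(backbone=backbone, neck=neck, rpn_head=rpn_head,
                          roi_head=roi_head, train_cfg=train_cfg,
                          test_cfg=test_cfg, pretrained=pretrained)

class MaskRCNNLike(TwoStage):
    """Adds no behaviour; exists so a registry can resolve it by name."""

REGISTRY = {'MaskRCNNLike': MaskRCNNLike}

cfg = dict(type='MaskRCNNLike', backbone='resnet50', rpn_head='rpn',
           roi_head='roi', train_cfg=None, test_cfg=None)
detector = REGISTRY[cfg.pop('type')](**cfg)
print(detector.parts['backbone'])  # -> resnet50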
spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/bbox_head.py
DELETED
@@ -1,483 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.runner import auto_fp16, force_fp32
-from torch.nn.modules.utils import _pair
-
-from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
-from mmdet.models.builder import HEADS, build_loss
-from mmdet.models.losses import accuracy
-
-
-@HEADS.register_module()
-class BBoxHead(nn.Module):
-    """Simplest RoI head, with only two fc layers for classification and
-    regression respectively."""
-
-    def __init__(self,
-                 with_avg_pool=False,
-                 with_cls=True,
-                 with_reg=True,
-                 roi_feat_size=7,
-                 in_channels=256,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     clip_border=True,
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.1, 0.1, 0.2, 0.2]),
-                 reg_class_agnostic=False,
-                 reg_decoded_bbox=False,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(
-                     type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
-        super(BBoxHead, self).__init__()
-        assert with_cls or with_reg
-        self.with_avg_pool = with_avg_pool
-        self.with_cls = with_cls
-        self.with_reg = with_reg
-        self.roi_feat_size = _pair(roi_feat_size)
-        self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
-        self.in_channels = in_channels
-        self.num_classes = num_classes
-        self.reg_class_agnostic = reg_class_agnostic
-        self.reg_decoded_bbox = reg_decoded_bbox
-        self.fp16_enabled = False
-
-        self.bbox_coder = build_bbox_coder(bbox_coder)
-        self.loss_cls = build_loss(loss_cls)
-        self.loss_bbox = build_loss(loss_bbox)
-
-        in_channels = self.in_channels
-        if self.with_avg_pool:
-            self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
-        else:
-            in_channels *= self.roi_feat_area
-        if self.with_cls:
-            # need to add background class
-            self.fc_cls = nn.Linear(in_channels, num_classes + 1)
-        if self.with_reg:
-            out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
-            self.fc_reg = nn.Linear(in_channels, out_dim_reg)
-        self.debug_imgs = None
-
-    def init_weights(self):
-        # conv layers are already initialized by ConvModule
-        if self.with_cls:
-            nn.init.normal_(self.fc_cls.weight, 0, 0.01)
-            nn.init.constant_(self.fc_cls.bias, 0)
-        if self.with_reg:
-            nn.init.normal_(self.fc_reg.weight, 0, 0.001)
-            nn.init.constant_(self.fc_reg.bias, 0)
-
-    @auto_fp16()
-    def forward(self, x):
-        if self.with_avg_pool:
-            x = self.avg_pool(x)
-        x = x.view(x.size(0), -1)
-        cls_score = self.fc_cls(x) if self.with_cls else None
-        bbox_pred = self.fc_reg(x) if self.with_reg else None
-        return cls_score, bbox_pred
-
-    def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
-                           pos_gt_labels, cfg):
-        """Calculate the ground truth for proposals in the single image
-        according to the sampling results.
-
-        Args:
-            pos_bboxes (Tensor): Contains all the positive boxes,
-                has shape (num_pos, 4), the last dimension 4
-                represents [tl_x, tl_y, br_x, br_y].
-            neg_bboxes (Tensor): Contains all the negative boxes,
-                has shape (num_neg, 4), the last dimension 4
-                represents [tl_x, tl_y, br_x, br_y].
-            pos_gt_bboxes (Tensor): Contains all the gt_boxes,
-                has shape (num_gt, 4), the last dimension 4
-                represents [tl_x, tl_y, br_x, br_y].
-            pos_gt_labels (Tensor): Contains all the gt_labels,
-                has shape (num_gt).
-            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
-
-        Returns:
-            Tuple[Tensor]: Ground truth for proposals
-            in a single image. Containing the following Tensors:
-
-                - labels(Tensor): Gt_labels for all proposals, has
-                  shape (num_proposals,).
-                - label_weights(Tensor): Labels_weights for all
-                  proposals, has shape (num_proposals,).
-                - bbox_targets(Tensor):Regression target for all
-                  proposals, has shape (num_proposals, 4), the
-                  last dimension 4 represents [tl_x, tl_y, br_x, br_y].
-                - bbox_weights(Tensor):Regression weights for all
-                  proposals, has shape (num_proposals, 4).
-        """
-        num_pos = pos_bboxes.size(0)
-        num_neg = neg_bboxes.size(0)
-        num_samples = num_pos + num_neg
-
-        # original implementation uses new_zeros since BG are set to be 0
-        # now use empty & fill because BG cat_id = num_classes,
-        # FG cat_id = [0, num_classes-1]
-        labels = pos_bboxes.new_full((num_samples, ),
-                                     self.num_classes,
-                                     dtype=torch.long)
-        label_weights = pos_bboxes.new_zeros(num_samples)
-        bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
-        bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
-        if num_pos > 0:
-            labels[:num_pos] = pos_gt_labels
-            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
-            label_weights[:num_pos] = pos_weight
-            if not self.reg_decoded_bbox:
-                pos_bbox_targets = self.bbox_coder.encode(
-                    pos_bboxes, pos_gt_bboxes)
-            else:
-                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
-                # is applied directly on the decoded bounding boxes, both
-                # the predicted boxes and regression targets should be with
-                # absolute coordinate format.
-                pos_bbox_targets = pos_gt_bboxes
-            bbox_targets[:num_pos, :] = pos_bbox_targets
-            bbox_weights[:num_pos, :] = 1
-        if num_neg > 0:
-            label_weights[-num_neg:] = 1.0
-
-        return labels, label_weights, bbox_targets, bbox_weights
-
-    def get_targets(self,
-                    sampling_results,
-                    gt_bboxes,
-                    gt_labels,
-                    rcnn_train_cfg,
-                    concat=True):
-        """Calculate the ground truth for all samples in a batch according to
-        the sampling_results.
-
-        Almost the same as the implementation in bbox_head, we passed
-        additional parameters pos_inds_list and neg_inds_list to
-        `_get_target_single` function.
-
-        Args:
-            sampling_results (List[obj:SamplingResults]): Assign results of
-                all images in a batch after sampling.
-            gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
-                each tensor has shape (num_gt, 4), the last dimension 4
-                represents [tl_x, tl_y, br_x, br_y].
-            gt_labels (list[Tensor]): Gt_labels of all images in a batch,
-                each tensor has shape (num_gt,).
-            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
-            concat (bool): Whether to concatenate the results of all
-                the images in a single batch.
-
-        Returns:
-            Tuple[Tensor]: Ground truth for proposals in a single image.
-            Containing the following list of Tensors:
-
-                - labels (list[Tensor],Tensor): Gt_labels for all
-                  proposals in a batch, each tensor in list has
-                  shape (num_proposals,) when `concat=False`, otherwise
-                  just a single tensor has shape (num_all_proposals,).
-                - label_weights (list[Tensor]): Labels_weights for
-                  all proposals in a batch, each tensor in list has
-                  shape (num_proposals,) when `concat=False`, otherwise
-                  just a single tensor has shape (num_all_proposals,).
-                - bbox_targets (list[Tensor],Tensor): Regression target
-                  for all proposals in a batch, each tensor in list
-                  has shape (num_proposals, 4) when `concat=False`,
-                  otherwise just a single tensor has shape
-                  (num_all_proposals, 4), the last dimension 4 represents
-                  [tl_x, tl_y, br_x, br_y].
-                - bbox_weights (list[tensor],Tensor): Regression weights for
-                  all proposals in a batch, each tensor in list has shape
-                  (num_proposals, 4) when `concat=False`, otherwise just a
-                  single tensor has shape (num_all_proposals, 4).
-        """
-        pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
-        neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
-        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
-        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
-        labels, label_weights, bbox_targets, bbox_weights = multi_apply(
-            self._get_target_single,
-            pos_bboxes_list,
-            neg_bboxes_list,
-            pos_gt_bboxes_list,
-            pos_gt_labels_list,
-            cfg=rcnn_train_cfg)
-
-        if concat:
-            labels = torch.cat(labels, 0)
-            label_weights = torch.cat(label_weights, 0)
-            bbox_targets = torch.cat(bbox_targets, 0)
-            bbox_weights = torch.cat(bbox_weights, 0)
-        return labels, label_weights, bbox_targets, bbox_weights
-
-    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
-    def loss(self,
-             cls_score,
-             bbox_pred,
-             rois,
-             labels,
-             label_weights,
-             bbox_targets,
-             bbox_weights,
-             reduction_override=None):
-        losses = dict()
-        if cls_score is not None:
-            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
-            if cls_score.numel() > 0:
-                losses['loss_cls'] = self.loss_cls(
-                    cls_score,
-                    labels,
-                    label_weights,
-                    avg_factor=avg_factor,
-                    reduction_override=reduction_override)
-                losses['acc'] = accuracy(cls_score, labels)
-        if bbox_pred is not None:
-            bg_class_ind = self.num_classes
-            # 0~self.num_classes-1 are FG, self.num_classes is BG
-            pos_inds = (labels >= 0) & (labels < bg_class_ind)
-            # do not perform bounding box regression for BG anymore.
-            if pos_inds.any():
-                if self.reg_decoded_bbox:
-                    # When the regression loss (e.g. `IouLoss`,
-                    # `GIouLoss`, `DIouLoss`) is applied directly on
-                    # the decoded bounding boxes, it decodes the
-                    # already encoded coordinates to absolute format.
-                    bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
-                if self.reg_class_agnostic:
-                    pos_bbox_pred = bbox_pred.view(
-                        bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
-                else:
-                    pos_bbox_pred = bbox_pred.view(
-                        bbox_pred.size(0), -1,
-                        4)[pos_inds.type(torch.bool),
-                           labels[pos_inds.type(torch.bool)]]
-                losses['loss_bbox'] = self.loss_bbox(
-                    pos_bbox_pred,
-                    bbox_targets[pos_inds.type(torch.bool)],
-                    bbox_weights[pos_inds.type(torch.bool)],
-                    avg_factor=bbox_targets.size(0),
-                    reduction_override=reduction_override)
-            else:
-                losses['loss_bbox'] = bbox_pred[pos_inds].sum()
-        return losses
-
-    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
-    def get_bboxes(self,
-                   rois,
-                   cls_score,
-                   bbox_pred,
-                   img_shape,
-                   scale_factor,
-                   rescale=False,
-                   cfg=None):
-        """Transform network output for a batch into bbox predictions.
-
-        If the input rois has batch dimension, the function would be in
-        `batch_mode` and return is a tuple[list[Tensor], list[Tensor]],
-        otherwise, the return is a tuple[Tensor, Tensor].
-
-        Args:
-            rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5)
-                or (B, num_boxes, 5)
-            cls_score (list[Tensor] or Tensor): Box scores for
-                each scale level, each is a 4D-tensor, the channel number is
-                num_points * num_classes.
-            bbox_pred (Tensor, optional): Box energies / deltas for each scale
-                level, each is a 4D-tensor, the channel number is
-                num_classes * 4.
-            img_shape (Sequence[int] or torch.Tensor or Sequence[
-                Sequence[int]], optional): Maximum bounds for boxes, specifies
-                (H, W, C) or (H, W). If rois shape is (B, num_boxes, 4), then
-                the max_shape should be a Sequence[Sequence[int]]
-                and the length of max_shape should also be B.
-            scale_factor (tuple[ndarray] or ndarray): Scale factor of the
-                image arange as (w_scale, h_scale, w_scale, h_scale). In
-                `batch_mode`, the scale_factor shape is tuple[ndarray].
-            rescale (bool): If True, return boxes in original image space.
-                Default: False.
-            cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
-
-        Returns:
-            tuple[list[Tensor], list[Tensor]] or tuple[Tensor, Tensor]:
-                If the input has a batch dimension, the return value is
-                a tuple of the list. The first list contains the boxes of
-                the corresponding image in a batch, each tensor has the
-                shape (num_boxes, 5) and last dimension 5 represent
-                (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second
-                list is the labels with shape (num_boxes, ). The length of
-                both lists should be equal to batch_size. Otherwise return
-                value is a tuple of two tensors, the first tensor is the
-                boxes with scores, the second tensor is the labels, both
-                have the same shape as the first case.
-        """
-        if isinstance(cls_score, list):
-            cls_score = sum(cls_score) / float(len(cls_score))
-
-        scores = F.softmax(
-            cls_score, dim=-1) if cls_score is not None else None
-
-        batch_mode = True
-        if rois.ndim == 2:
-            # e.g. AugTest, Cascade R-CNN, HTC, SCNet...
-            batch_mode = False
-
-            # add batch dimension
-            if scores is not None:
-                scores = scores.unsqueeze(0)
-            if bbox_pred is not None:
-                bbox_pred = bbox_pred.unsqueeze(0)
-            rois = rois.unsqueeze(0)
-
-        if bbox_pred is not None:
-            bboxes = self.bbox_coder.decode(
-                rois[..., 1:], bbox_pred, max_shape=img_shape)
-        else:
-            bboxes = rois[..., 1:].clone()
-            if img_shape is not None:
-                max_shape = bboxes.new_tensor(img_shape)[..., :2]
-                min_xy = bboxes.new_tensor(0)
-                max_xy = torch.cat(
-                    [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
-                bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
-                bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
-
-        if rescale and bboxes.size(-2) > 0:
-            if not isinstance(scale_factor, tuple):
-                scale_factor = tuple([scale_factor])
-            # B, 1, bboxes.size(-1)
-            scale_factor = bboxes.new_tensor(scale_factor).unsqueeze(1).repeat(
-                1, 1,
-                bboxes.size(-1) // 4)
-            bboxes /= scale_factor
-
-        det_bboxes = []
-        det_labels = []
-        for (bbox, score) in zip(bboxes, scores):
-            if cfg is not None:
-                det_bbox, det_label = multiclass_nms(bbox, score,
-                                                     cfg.score_thr, cfg.nms,
-                                                     cfg.max_per_img)
-            else:
-                det_bbox, det_label = bbox, score
-            det_bboxes.append(det_bbox)
-            det_labels.append(det_label)
-
-        if not batch_mode:
-            det_bboxes = det_bboxes[0]
-            det_labels = det_labels[0]
-        return det_bboxes, det_labels
-
-    @force_fp32(apply_to=('bbox_preds', ))
-    def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
-        """Refine bboxes during training.
-
-        Args:
-            rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
-                and bs is the sampled RoIs per image. The first column is
-                the image id and the next 4 columns are x1, y1, x2, y2.
-            labels (Tensor): Shape (n*bs, ).
-            bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
-            pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
-                is a gt bbox.
-            img_metas (list[dict]): Meta info of each image.
-
-        Returns:
-            list[Tensor]: Refined bboxes of each image in a mini-batch.
-
-        Example:
-            >>> # xdoctest: +REQUIRES(module:kwarray)
-            >>> import kwarray
-            >>> import numpy as np
-            >>> from mmdet.core.bbox.demodata import random_boxes
-            >>> self = BBoxHead(reg_class_agnostic=True)
-            >>> n_roi = 2
-            >>> n_img = 4
-            >>> scale = 512
-            >>> rng = np.random.RandomState(0)
-            >>> img_metas = [{'img_shape': (scale, scale)}
-            ...              for _ in range(n_img)]
-            >>> # Create rois in the expected format
-            >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
-            >>> img_ids = torch.randint(0, n_img, (n_roi,))
-            >>> img_ids = img_ids.float()
-            >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
-            >>> # Create other args
-            >>> labels = torch.randint(0, 2, (n_roi,)).long()
-            >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
-            >>> # For each image, pretend random positive boxes are gts
-            >>> is_label_pos = (labels.numpy() > 0).astype(np.int)
-            >>> lbl_per_img = kwarray.group_items(is_label_pos,
-            ...                                   img_ids.numpy())
-            >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
-            ...                for gid in range(n_img)]
-            >>> pos_is_gts = [
-            >>>     torch.randint(0, 2, (npos,)).byte().sort(
-            >>>         descending=True)[0]
-            >>>     for npos in pos_per_img
-            >>> ]
-            >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
-            >>>                                  pos_is_gts, img_metas)
-            >>> print(bboxes_list)
-        """
-        img_ids = rois[:, 0].long().unique(sorted=True)
-        assert img_ids.numel() <= len(img_metas)
-
-        bboxes_list = []
-        for i in range(len(img_metas)):
-            inds = torch.nonzero(
-                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
-            num_rois = inds.numel()
-
-            bboxes_ = rois[inds, 1:]
-            label_ = labels[inds]
-            bbox_pred_ = bbox_preds[inds]
-            img_meta_ = img_metas[i]
-            pos_is_gts_ = pos_is_gts[i]
-
-            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
-                                           img_meta_)
-
-            # filter gt bboxes
-            pos_keep = 1 - pos_is_gts_
-            keep_inds = pos_is_gts_.new_ones(num_rois)
-            keep_inds[:len(pos_is_gts_)] = pos_keep
-
-            bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
-
-        return bboxes_list
-
-    @force_fp32(apply_to=('bbox_pred', ))
-    def regress_by_class(self, rois, label, bbox_pred, img_meta):
-        """Regress the bbox for the predicted class. Used in Cascade R-CNN.
-
-        Args:
-            rois (Tensor): shape (n, 4) or (n, 5)
-            label (Tensor): shape (n, )
-            bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
-            img_meta (dict): Image meta info.
-
-        Returns:
-            Tensor: Regressed bboxes, the same shape as input rois.
-        """
-        assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
-
-        if not self.reg_class_agnostic:
-            label = label * 4
-            inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
-            bbox_pred = torch.gather(bbox_pred, 1, inds)
-        assert bbox_pred.size(1) == 4
-
-        if rois.size(1) == 4:
-            new_rois = self.bbox_coder.decode(
-                rois, bbox_pred, max_shape=img_meta['img_shape'])
-        else:
-            bboxes = self.bbox_coder.decode(
-                rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
-            new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
-
-        return new_rois
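
The target layout built by `_get_target_single` above is worth pinning down: foreground classes occupy ids 0..num_classes-1, the background is the extra id num_classes, so `labels` starts out all-background and only the leading `num_pos` entries are overwritten. A tiny pure-PyTorch check of that convention; this is standalone and the class count, box counts, and label values are made up for illustration:

import torch

num_classes, num_pos, num_neg = 80, 3, 5
pos_gt_labels = torch.tensor([2, 7, 41])

# Same initialization as above: everything starts as background (= num_classes)
labels = torch.full((num_pos + num_neg,), num_classes, dtype=torch.long)
labels[:num_pos] = pos_gt_labels

pos_inds = (labels >= 0) & (labels < num_classes)  # mask used by loss()
print(labels.tolist())        # [2, 7, 41, 80, 80, 80, 80, 80]
print(pos_inds.sum().item())  # 3 positives receive a bbox regression loss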
spaces/CVPR/regionclip-demo/detectron2/checkpoint/detection_checkpoint.py
DELETED
@@ -1,134 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import os
-import pickle
-import torch
-from fvcore.common.checkpoint import Checkpointer
-from torch.nn.parallel import DistributedDataParallel
-
-import detectron2.utils.comm as comm
-from detectron2.utils.env import TORCH_VERSION
-from detectron2.utils.file_io import PathManager
-
-from .c2_model_loading import align_and_update_state_dicts
-from .clip_model_loading import align_and_update_state_dicts_for_CLIP
-
-class DetectionCheckpointer(Checkpointer):
-    """
-    Same as :class:`Checkpointer`, but is able to:
-    1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
-    2. correctly load checkpoints that are only available on the master worker
-    """
-
-    def __init__(self, model, save_dir="", *, save_to_disk=None, bb_rpn_weights=False, **checkpointables):
-        is_main_process = comm.is_main_process()
-        super().__init__(
-            model,
-            save_dir,
-            save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
-            **checkpointables,
-        )
-        self.path_manager = PathManager
-        self.bb_rpn_weights = bb_rpn_weights
-
-    def load(self, path, *args, **kwargs):
-        need_sync = False
-
-        if path and isinstance(self.model, DistributedDataParallel):
-            logger = logging.getLogger(__name__)
-            path = self.path_manager.get_local_path(path)
-            has_file = os.path.isfile(path)
-            all_has_file = comm.all_gather(has_file)
-            if not all_has_file[0]:
-                raise OSError(f"File {path} not found on main worker.")
-            if not all(all_has_file):
-                logger.warning(
-                    f"Not all workers can read checkpoint {path}. "
-                    "Training may fail to fully resume."
-                )
-                # TODO: broadcast the checkpoint file contents from main
-                # worker, and load from it instead.
-                need_sync = True
-            if not has_file:
-                path = None  # don't load if not readable
-        ret = super().load(path, *args, **kwargs)
-
-        if need_sync:
-            logger.info("Broadcasting model states from main worker ...")
-            if TORCH_VERSION >= (1, 7):
-                self.model._sync_params_and_buffers()
-        return ret
-
-    def _load_file(self, filename):
-        if filename.endswith(".pkl"):
-            with PathManager.open(filename, "rb") as f:
-                data = pickle.load(f, encoding="latin1")
-            if "model" in data and "__author__" in data:
-                # file is in Detectron2 model zoo format
-                self.logger.info("Reading a file from '{}'".format(data["__author__"]))
-                return data
-            else:
-                # assume file is from Caffe2 / Detectron1 model zoo
-                if "blobs" in data:
-                    # Detection models have "blobs", but ImageNet models don't
-                    data = data["blobs"]
-                data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
-                return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
-        elif filename.endswith(".pyth"):
-            # assume file is from pycls; no one else seems to use the ".pyth" extension
-            with PathManager.open(filename, "rb") as f:
-                data = torch.load(f)
-            assert (
-                "model_state" in data
-            ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
-            model_state = {
-                k: v
-                for k, v in data["model_state"].items()
-                if not k.endswith("num_batches_tracked")
-            }
-            return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
-        elif "OAI_CLIP" in filename:
-            # assume file is from OpenAI CLIP pre-trained model
-            loaded = super()._load_file(filename)  # load native pth checkpoint
-            if "model" not in loaded:
-                loaded = {"model": loaded}
-            return {"model": loaded["model"], "__author__": "OAI_CLIP", "matching_heuristics": True}
-
-        loaded = super()._load_file(filename)  # load native pth checkpoint
-        if "model" not in loaded:
-            loaded = {"model": loaded}
-        return loaded
-
-    def _load_model(self, checkpoint):
-        # if checkpoint.get("matching_heuristics", False) or self.bb_rpn_weights:
-        #     self._convert_ndarray_to_tensor(checkpoint["model"])
-        #     # convert weights by name-matching heuristics
-        #     if checkpoint.get("__author__", "NA") == "OAI_CLIP" or self.bb_rpn_weights:  # for OAI_CLIP or 2nd ckpt (offline modules)
-        #         checkpoint["model"] = align_and_update_state_dicts_for_CLIP(
-        #             self.model.state_dict(),
-        #             checkpoint["model"],
-        #             bb_rpn_weights=self.bb_rpn_weights,
-        #         )
-        #     else:  # default loading
-        #         checkpoint["model"] = align_and_update_state_dicts(
-        #             self.model.state_dict(),
-        #             checkpoint["model"],
-        #             c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
-        #         )
-        # for non-caffe2 models, use standard ways to load it
-        # if not self.bb_rpn_weights:
-        #     checkpoint = {'model': {'backbone.' + key: val for key, val in checkpoint['model'].items()}}
-        incompatible = super()._load_model(checkpoint)
-        del checkpoint  # try saving memory
-
-        model_buffers = dict(self.model.named_buffers(recurse=False))
-        for k in ["pixel_mean", "pixel_std"]:
-            # Ignore missing key message about pixel_mean/std.
-            # Though they may be missing in old checkpoints, they will be correctly
-            # initialized from config anyway.
-            if k in model_buffers:
-                try:
-                    incompatible.missing_keys.remove(k)
-                except ValueError:
-                    pass
-        return incompatible
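
Typical use of this checkpointer is a single `load` call after the model is built; the `_load_file` branches above only decide how the raw file is parsed into a `{"model": state_dict}` payload. A hedged usage sketch under assumptions: the checkpoint path is a placeholder, and the default detectron2 config targets CUDA, so it is switched to CPU here for portability:

# Sketch: load weights into a freshly built detectron2 model.
from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"   # default is "cuda"; override on CPU-only hosts
model = build_model(cfg)   # weights are uninitialized at this point

checkpointer = DetectionCheckpointer(model, save_dir="./output")
checkpointer.load("path/to/checkpoint.pkl")  # .pkl/.pyth/.pth all dispatch in _load_file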
spaces/Chaitanya01/InvestingPlatform/googleNewsSlackAlerts.py
DELETED
@@ -1,47 +0,0 @@
-from GoogleNews import GoogleNews
-import pandas as pd
-import numpy as np
-import slack
-import time
-from datetime import datetime
-
-# Slack token
-SLACK_TOKEN = "xoxb-2557354538181-2570404709172-oNr1bsP5hQoFyOL1HqgqF8lv"
-# Initialize the slack client
-client = slack.WebClient(token = SLACK_TOKEN)
-# Google News Api
-googlenews = GoogleNews()
-googlenews = GoogleNews(lang='en', region='US')
-googlenews = GoogleNews(period='1h')
-
-googlenews.set_encode('utf-8')
-
-arr = []
-while True:
-    # Run this in for loop and is to be run continously
-    today = datetime.now()
-    # If its midnight reset the array
-    if today.hour + today.minute == 0 and today.second<2:
-        arr = []
-    # Search for the word crypto in googlenews
-    googlenews.search("crypto")
-    # Sort the results
-    result = googlenews.results(sort=True)
-    for i in result:
-        # Now if a news has already been scraped, ignore it
-        if i["title"] in arr:
-            continue
-        if "min" in i["date"]:
-            # If the time for the news is in minute then only fetch it
-            if "$" in i["desc"] or "$" in i["title"]:
-                # If the title or decription contains dollar symbol, then go ahead
-                if "million" in i["desc"].lower() or "raised" in i["desc"].lower():
-                    # If million or raised keywords are present then go ahead
-                    arr.append(i["title"])
-                    # Post the news on slack bot
-                    client.chat_postMessage(channel = "#bot_alerts",
-                                            text = f'{i["datetime"]} {i["date"]} {i["title"]} {i["link"]} {i["desc"]}')
-    # Clear the google news
-    googlenews.clear()
-    # Wait for 30seconds for next query
-    time.sleep(30)
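
The nested conditions in the loop above amount to one predicate: the item is minutes-old, mentions a dollar amount, and mentions funding. A small refactor sketch that makes the filter testable on its own; this is illustrative, and `item` simply mirrors the dict shape the script reads from `googlenews.results`:

def is_fundraising_alert(item):
    """True for minutes-old items mentioning a dollar amount and funding."""
    desc = item["desc"].lower()
    return ("min" in item["date"]
            and ("$" in item["desc"] or "$" in item["title"])
            and ("million" in desc or "raised" in desc))

# Example with the assumed result-dict shape:
item = {"title": "Startup X raised $5M",
        "desc": "Crypto startup raised $5 million",
        "date": "12 mins ago"}
print(is_fundraising_alert(item))  # -> True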
spaces/CikeyQI/Yunzai/README.md
DELETED
@@ -1,10 +0,0 @@
----
-title: Yunzai
-emoji: 🏃
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CikeyQI/meme-api/meme_generator/memes/incivilization/__init__.py
DELETED
@@ -1,43 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from PIL import ImageEnhance
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.exception import TextOverLength
-
-img_dir = Path(__file__).parent / "images"
-
-
-def incivilization(images: List[BuildImage], texts: List[str], args):
-    frame = BuildImage.open(img_dir / "0.png")
-    points = ((0, 20), (154, 0), (164, 153), (22, 180))
-    img = images[0].convert("RGBA").circle().resize((150, 150)).perspective(points)
-    image = ImageEnhance.Brightness(img.image).enhance(0.8)
-    frame.paste(image, (137, 151), alpha=True)
-    text = texts[0] if texts else "你刚才说的话不是很礼貌!"
-    try:
-        frame.draw_text(
-            (57, 42, 528, 117),
-            text,
-            weight="bold",
-            max_fontsize=50,
-            min_fontsize=20,
-            allow_wrap=True,
-        )
-    except ValueError:
-        raise TextOverLength(text)
-    return frame.save_jpg()
-
-
-add_meme(
-    "incivilization",
-    incivilization,
-    min_images=1,
-    max_images=1,
-    min_texts=0,
-    max_texts=1,
-    default_texts=["你刚才说的话不是很礼貌!"],
-    keywords=["不文明"],
-)
spaces/ClassCat/mnist-classification/app.py
DELETED
@@ -1,83 +0,0 @@
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-from torchvision.transforms import ToTensor
-
-# Define model
-class ConvNet(nn.Module):
-    def __init__(self):
-        super(ConvNet, self).__init__()
-        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
-        self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
-        self.conv3 = nn.Conv2d(32,64, kernel_size=5)
-        self.fc1 = nn.Linear(3*3*64, 256)
-        self.fc2 = nn.Linear(256, 10)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        #x = F.dropout(x, p=0.5, training=self.training)
-        x = F.relu(F.max_pool2d(self.conv2(x), 2))
-        x = F.dropout(x, p=0.5, training=self.training)
-        x = F.relu(F.max_pool2d(self.conv3(x),2))
-        x = F.dropout(x, p=0.5, training=self.training)
-        x = x.view(-1,3*3*64 )
-        x = F.relu(self.fc1(x))
-        x = F.dropout(x, training=self.training)
-        logits = self.fc2(x)
-        return logits
-
-
-model = ConvNet()
-model.load_state_dict(
-    torch.load("weights/mnist_convnet_model.pth",
-    map_location=torch.device('cpu'))
-)
-
-model.eval()
-
-import gradio as gr
-from torchvision import transforms
-
-import os
-import glob
-
-examples_dir = './examples'
-example_files = glob.glob(os.path.join(examples_dir, '*.png'))
-
-def predict(image):
-    tsr_image = transforms.ToTensor()(image)
-
-    with torch.no_grad():
-        pred = model(tsr_image)
-        prob = torch.nn.functional.softmax(pred[0], dim=0)
-
-    confidences = {i: float(prob[i]) for i in range(10)}
-    return confidences
-
-
-with gr.Blocks(css=".gradio-container {background:honeydew;}", title="MNIST Classification"
-    ) as demo:
-    gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">MNIST Classification</div>""")
-
-    with gr.Row():
-        with gr.Tab("Canvas"):
-            input_image1 = gr.Image(source="canvas", type="pil", image_mode="L", shape=(28,28), invert_colors=True)
-            send_btn1 = gr.Button("Infer")
-
-        with gr.Tab("Image file"):
-            input_image2 = gr.Image(type="pil", image_mode="L", shape=(28, 28), invert_colors=True)
-            send_btn2 = gr.Button("Infer")
-            gr.Examples(example_files, inputs=input_image2)
-            #gr.Examples(['examples/sample02.png', 'examples/sample04.png'], inputs=input_image2)
-
-    output_label=gr.Label(label="Probabilities", num_top_classes=3)
-
-    send_btn1.click(fn=predict, inputs=input_image1, outputs=output_label)
-    send_btn2.click(fn=predict, inputs=input_image2, outputs=output_label)
-
-# demo.queue(concurrency_count=3)
-demo.launch()
-
-
-### EOF ###
spaces/DEEMOSTECH/ChatAvatar/static/js/main.1b1ee80c.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/DEEMOSTECH/ChatAvatar/static/js/main.c187623b.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-93c91554.css
DELETED
@@ -1 +0,0 @@
-div.svelte-15lo0d8{display:flex;flex-wrap:wrap;gap:var(--layout-gap);width:var(--size-full)}.hide.svelte-15lo0d8{display:none}.compact.svelte-15lo0d8>*,.compact.svelte-15lo0d8 .box{border-radius:0}.compact.svelte-15lo0d8,.panel.svelte-15lo0d8{border-radius:var(--container-radius);background:var(--background-fill-secondary);padding:var(--size-2)}.unequal-height.svelte-15lo0d8{align-items:flex-start}.stretch.svelte-15lo0d8{align-items:stretch}div.svelte-15lo0d8>*,div.svelte-15lo0d8>.form>*{flex:1 1 0%;flex-wrap:wrap;min-width:min(160px,100%)}
spaces/DaFujaTyping/hf-Chat-ui/src/lib/types/Conversation.ts
DELETED
@@ -1,19 +0,0 @@
-import type { ObjectId } from "mongodb";
-import type { Message } from "./Message";
-import type { Timestamps } from "./Timestamps";
-
-export interface Conversation extends Timestamps {
-	_id: ObjectId;
-
-	// Can be undefined for shared convo then deleted
-	sessionId: string;
-
-	model: string;
-
-	title: string;
-	messages: Message[];
-
-	meta?: {
-		fromShareId?: string;
-	};
-}