Commit 879e951
1 Parent(s): a916245
Update parquet files (step 54 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/0xAnders/ama-bot/README.md +0 -13
- spaces/1gistliPinn/ChatGPT4/Examples/Cartilha Alegria De Saber Em Pdf Free.md +0 -54
- spaces/1gistliPinn/ChatGPT4/Examples/Download Bigfile.000 For Tomb Raider [BETTER].md +0 -6
- spaces/1phancelerku/anime-remove-background/AirMax TV APK The Best Way to Stream Movies and Shows on Android.md +0 -157
- spaces/1phancelerku/anime-remove-background/Bluejacking APK Everything You Need to Know About Bluetooth Hacking.md +0 -101
- spaces/1phancelerku/anime-remove-background/Descarga Stumble Guys Primera Version APK y Divirtete con tus Amigos.md +0 -142
- spaces/1phancelerku/anime-remove-background/Download Your Love Has Taken Over Me MP3 - The Best Gospel Song by Frank Edwards.md +0 -78
- spaces/1phancelerku/anime-remove-background/Dummy Images for Any Project - Free and High-Quality.md +0 -102
- spaces/7hao/bingo/src/components/providers.tsx +0 -15
- spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers.py +0 -118
- spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/_explorers.py +0 -66
- spaces/AIFILMS/StyleGANEX/utils/__init__.py +0 -0
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/pyglet_platform.py +0 -90
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/metrics/laplace_var.py +0 -4
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/model.py +0 -77
- spaces/AILab-CVC/SEED-LLaMA/gradio_demo/seed_llama_flask.py +0 -231
- spaces/AchyuthGamer/Free-Accounts-Generator/style.css +0 -28
- spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/openpose/body.py +0 -211
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/perlin.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/BadgeLabel.d.ts +0 -30
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/rotate/Rotate.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/OnDragThumb.js +0 -22
- spaces/Ali-Maq/Calorie_Calculator/app.py +0 -106
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/training_stats.py +0 -285
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/device.py +0 -24
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion.py +0 -347
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/README.md +0 -176
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py +0 -1182
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim_parallel.py +0 -188
- spaces/Andy1621/uniformer_image_detection/mmdet/core/visualization/__init__.py +0 -4
- spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/benchmark.py +0 -113
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py +0 -9
- spaces/Andy1621/uniformer_video_demo/transforms.py +0 -443
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2_hf.py +0 -152
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/info.py +0 -36
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/distributions/__init__.py +0 -0
- spaces/Ashish17/Ashish_Open_Chat_AI_17/app.py +0 -34
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/default_styles.py +0 -190
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_visualizer.py +0 -278
- spaces/Benson/text-generation/Examples/Casa Flip Mster Apk.md +0 -95
- spaces/Benson/text-generation/Examples/Descargar Cristal Informe 32 Bit Para Espt Pph 21.md +0 -57
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/debug.py +0 -199
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/hash.py +0 -59
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py +0 -18
- spaces/BraydenMoore/MARCI-NFL-Betting/Source/Build/update.py +0 -59
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/compute_softscore.py +0 -268
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/optimize_patch.py +0 -237
- spaces/CVPR/LIVE/thrust/thrust/device_ptr.h +0 -192
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/logical.h +0 -22
spaces/0xAnders/ama-bot/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Ama Bot
emoji: 🌍
colorFrom: gray
colorTo: yellow
sdk: gradio
sdk_version: 3.32.0
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1gistliPinn/ChatGPT4/Examples/Cartilha Alegria De Saber Em Pdf Free.md
DELETED
@@ -1,54 +0,0 @@
<h2>Cartilha Alegria De Saber Em Pdf Free</h2><br /><p><b><b>Download</b> ↔ <a href="https://imgfil.com/2uy13J">https://imgfil.com/2uy13J</a></b></p><br /><br />
[remaining deleted lines: machine-generated filler text in Portuguese and Japanese]
spaces/1gistliPinn/ChatGPT4/Examples/Download Bigfile.000 For Tomb Raider [BETTER].md
DELETED
@@ -1,6 +0,0 @@
<h2>download bigfile.000 for tomb raider</h2><br /><p><b><b>Download Zip</b> ★★★★★ <a href="https://imgfil.com/2uxZy0">https://imgfil.com/2uxZy0</a></b></p><br /><br />

bigfile.002.tiger free download, bigfile.000.tiger download, ... Tomb,,Raider/benchmarkmode.docx,,18.4,,KB,,Tomb,,Raider/bigfile.000. tiger,,2,,GB,,Tomb, ... 4d29de3e1b<br />
<br />
<br />
<p></p>
spaces/1phancelerku/anime-remove-background/AirMax TV APK The Best Way to Stream Movies and Shows on Android.md
DELETED
@@ -1,157 +0,0 @@
<h1>Airmax TV APK: A Media Player App for Android Devices</h1>
[remaining deleted lines: promotional article covering Airmax TV APK features, download and installation steps, activation, pros and cons, alternatives, and FAQs]
spaces/1phancelerku/anime-remove-background/Bluejacking APK Everything You Need to Know About Bluetooth Hacking.md
DELETED
@@ -1,101 +0,0 @@
<h1>Bluejacking APK: What Is It and How to Protect Yourself?</h1>
[remaining deleted lines: promotional article explaining bluejacking attacks, the bluejacking APK tool, prevention tips, and FAQs]
spaces/1phancelerku/anime-remove-background/Descarga Stumble Guys Primera Version APK y Divirtete con tus Amigos.md
DELETED
@@ -1,142 +0,0 @@
<h1>Stumble Guys Primera Version APK: How to Download and Play the Fun Knockout Game on Your Android Device</h1>
[remaining deleted lines: promotional article covering the game's features, APK download and installation from Uptodown, playing on PC with BlueStacks, gameplay tips, and FAQs]
spaces/1phancelerku/anime-remove-background/Download Your Love Has Taken Over Me MP3 - The Best Gospel Song by Frank Edwards.md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download "Your Love Has Taken Over Me" MP3 by Frank Edwards</h1>
|
3 |
-
<p>If you are looking for a song that will inspire you to trust in God's love and protection, you might want to download "Your Love Has Taken Over Me" by Frank Edwards. This is a gospel song that celebrates God's faithfulness and goodness in every situation. In this article, we will show you how to download this song from different sources, so you can enjoy it anytime and anywhere.</p>
|
4 |
-
<h2>download your love has taken over me mp3</h2><br /><p><b><b>Download Zip</b> ☆☆☆ <a href="https://jinyurl.com/2uNKG8">https://jinyurl.com/2uNKG8</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>What is the song about?</h3>
|
7 |
-
<p>"Your Love Has Taken Over Me" is a song by Frank Edwards, a Nigerian gospel singer and producer. The song was released in 2016 as part of his album "Frankincense". The song expresses gratitude to God for His love that has taken over the singer's life. The singer declares that he depends on God and has confidence in Him, and that God covers him under His canopy and gives him security. The song also proclaims that God is not a deceiver, but a blesser who makes all things work together for good.</p>
|
8 |
-
<h3>Why should you download it?</h3>
|
9 |
-
<p>This song is a great way to remind yourself of God's love and power in your life. It can uplift your spirit and encourage you to trust in God's promises. It can also help you worship God and praise Him for His goodness and mercy. The song has a catchy melody and a lively beat that will make you want to dance and sing along. The song is also available in different formats, such as MP3, video, and lyrics, so you can choose the one that suits your preference.</p>
|
10 |
-
<h2>How to download the song from different sources</h2>
|
11 |
-
<h3>From PraiseZion.com</h3>
|
12 |
-
<p>PraiseZion.com is a website that offers free gospel music downloads from various artists. You can find "Your Love Has Taken Over Me" by Frank Edwards on this website by following these steps:</p>
|
13 |
-
<h4>Step 1: Visit the website</h4>
|
14 |
-
<p>Go to [PraiseZion.com] on your browser. You will see a homepage with different categories of gospel music, such as Nigerian Gospel Music, Foreign Gospel Music, Gospel Mixtapes, etc.</p>
|
15 |
-
<h4>Step 2: Search for the song</h4>
|
16 |
-
<p>Type "Frank Edwards Under The Canopy" in the search box at the top right corner of the homepage. You will see a list of results related to your search query. Click on the one that says "Song Mp3 Download: Frank Edwards - Under The Canopy".</p>
|
17 |
-
<p>* download frank edwards under the canopy mp3<br />
|
18 |
-
* download your love is taking over me by life worship mp3<br />
|
19 |
-
* download your love has taken over me lyrics mp3<br />
|
20 |
-
* download your love has taken over me gospel song mp3<br />
|
21 |
-
* download your love has taken over me frank edwards mp3<br />
|
22 |
-
* download your love is taking over me life worship lyrics mp3<br />
|
23 |
-
* download your love has taken over me video mp3<br />
|
24 |
-
* download your love has taken over me praisezion mp3<br />
|
25 |
-
* download your love is taking over me official lyric video mp3<br />
|
26 |
-
* download your love has taken over me genius lyrics mp3<br />
|
27 |
-
* download under the canopy by frank edwards audio mp3<br />
|
28 |
-
* download life worship your love is taking over me mp3<br />
|
29 |
-
* download your love has taken over me song mp3<br />
|
30 |
-
* download your love has taken over me worship song mp3<br />
|
31 |
-
* download your love has taken over me frank edwards lyrics mp3<br />
|
32 |
-
* download life worship your love is taking over me lyrics mp3<br />
|
33 |
-
* download your love has taken over me live mp3<br />
|
34 |
-
* download your love has taken over me instrumental mp3<br />
|
35 |
-
* download your love is taking over me life worship chords mp3<br />
|
36 |
-
* download your love has taken over me genius mp3<br />
|
37 |
-
* download under the canopy by frank edwards video mp3<br />
|
38 |
-
* download life worship your love is taking over me chords mp3<br />
|
39 |
-
* download your love has taken over me free mp3<br />
|
40 |
-
* download your love has taken over me piano mp3<br />
|
41 |
-
* download your love has taken over me frank edwards chords mp3<br />
|
42 |
-
* download life worship your love is taking over me instrumental mp3<br />
|
43 |
-
* download your love has taken over me acoustic mp3<br />
|
44 |
-
* download your love has taken over me karaoke mp3<br />
|
45 |
-
* download your love is taking over me life worship piano mp3<br />
|
46 |
-
* download your love has taken over me remix mp3</p>
|
47 |
-
<h4>Step 3: Click on the download link</h4>
|
48 |
-
<p>You will be directed to a page with more information about the song, such as the lyrics, video, and download link. Scroll down to find the download link that says "Download Mp3 Here". Click on it and wait for the download to start. You can also watch the video or read the lyrics of the song on this page.</p>
|
49 |
-
<h3>From Genius.com</h3>
|
50 |
-
<p>Genius.com is a website that provides lyrics and annotations for various songs. You can find "Your Love Has Taken Over Me" by Frank Edwards on this website by following these steps:</p>
|
51 |
-
<h4>Step 1: Visit the website</h4>
|
52 |
-
<p>Go to [Genius.com] on your browser. You will see a homepage with different genres of music, such as Pop, Hip-Hop, Rock, etc.</p>
|
53 |
-
<h4>Step 2: Search for the song</h4>
|
54 |
-
<p>Type "Frank Edwards Under The Canopy" in the search box at the top of the homepage. You will see a list of results related to your search query. Click on the one that says "Frank Edwards - Under The Canopy".</p>
|
55 |
-
<h4>Step 3: Click on the play button</h4>
|
56 |
-
<p>You will be directed to a page with the lyrics and annotations of the song. You will also see a play button at the top right corner of the page. Click on it and wait for the song to load. You can also read the lyrics and annotations of the song on this page.</p>
|
57 |
-
<h4>Step 4: Right-click on the audio and save as MP3</h4>
|
58 |
-
<p>Once the song is playing, you can right-click on the audio and choose "Save audio as" from the menu. You will be prompted to choose a location and a name for the MP3 file. Click on "Save" and wait for the download to finish.</p>
|
59 |
-
<h3>From YouTube.com</h3>
|
60 |
-
<p>YouTube.com is a website that hosts videos from various creators and channels. You can find "Your Love Has Taken Over Me" by Frank Edwards on this website by following these steps:</p>
|
61 |
-
<h4>Step 1: Visit the website</h4>
|
62 |
-
<p>Go to [YouTube.com] on your browser. You will see a homepage with different categories of videos, such as Music, Gaming, News, etc.</p>
|
63 |
-
<h4>Step 2: Search for the song</h4>
|
64 |
-
<p>Type "Frank Edwards Under The Canopy" in the search box at the top of the homepage. You will see a list of results related to your search query. Click on the one that says "Frank Edwards - Under The Canopy (Official Music Video)".</p>
|
65 |
-
<h4>Step 3: Copy the video URL</h4>
|
66 |
-
<p>You will be directed to a page with the video and some information about it, such as the title, description, views, likes, etc. You will also see a URL in the address bar of your browser. This is the link to the video. Copy it by selecting it and pressing Ctrl+C or right-clicking and choosing "Copy".</p>
|
67 |
-
<h4>Step 4: Paste the URL into a YouTube to MP3 converter</h4>
|
68 |
-
<p>Go to a YouTube to MP3 converter website, such as [ytmp3.cc]. You will see a box where you can paste the URL of the video you want to convert. Paste it by pressing Ctrl+V or right-clicking and choosing "Paste". Then, click on "Convert".</p>
|
69 |
-
<h4>Step 5: Download the MP3 file</h4>
|
70 |
-
<p>You will see a page with a download link for the MP3 file. Click on it and wait for the download to start. You can also choose to download the video or edit it before downloading.</p>
|
71 |
-
<h2>Conclusion</h2>
|
72 |
-
<h3>Summary of the main points</h3>
|
73 |
-
<p>In this article, we have shown you how to download "Your Love Has Taken Over Me" by Frank Edwards from different sources, such as PraiseZion.com, Genius.com, and YouTube.com. This is a gospel song that celebrates God's love and protection in every situation. It can inspire you to trust in God's promises and worship Him for His goodness and mercy.</p>
|
74 |
-
<h3>Call to action</h3>
|
75 |
-
<p>If you have not downloaded this song yet, we encourage you to do so now and enjoy its uplifting message and melody. You can also share it with your friends and family who might need some encouragement and hope in their lives. We hope you have found this article helpful and informative. Thank you for reading!</p>
|
76 |
-
FAQs - Q: Who is Frank Edwards? - A: Frank Edwards is a Nigerian gospel singer and producer who has won several awards and recognition for his music. - Q: What is the name of his album that contains "Your Love Has Taken Over Me"? - A: The name of his album is "Frankincense", which was released in 2016. - Q: What are some other songs by Frank Edwards that you can download? - A: Some other songs by Frank Edwards that you can download are "Mma Mma", "Okaka", "I See Him", "Miracle Rain", etc. - Q: How can you support Frank Edwards and his music? - A: You can support Frank Edwards and his music by following him on his social media platforms, such as Facebook, Twitter, Instagram, etc. You can also buy his albums or songs from online stores, such as iTunes, Amazon, Spotify, etc. You can also attend his concerts or events if he is performing near you. - Q: How can you learn more about gospel music and its benefits? - A: You can learn more about gospel music and its benefits by reading articles, books, blogs, magazines, etc. that talk about the history, culture, genres, artists, and impact of gospel music. You can also listen to gospel radio stations or podcasts that feature gospel music and interviews with gospel musicians. You can also join gospel music communities or groups online or offline that share your passion and interest in gospel music.</p> 197e85843d<br />
|
77 |
-
<br />
|
78 |
-
<br />
spaces/1phancelerku/anime-remove-background/Dummy Images for Any Project - Free and High-Quality.md
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Dummy Images for Your Web Design Projects</h1>
|
3 |
-
<p>If you are a web designer, you know how important it is to have good images for your website. Images can make or break your design, attract or repel your visitors, and convey or confuse your message. However, finding and creating the perfect images for your website can be challenging and time-consuming. Sometimes, you may not have the final images ready when you are working on your layout, or you may want to experiment with different options before committing to one.</p>
|
4 |
-
<h2>download dummy images</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://jinyurl.com/2uNNm9">https://jinyurl.com/2uNNm9</a></b></p><br /><br />
|
5 |
-
<p>That's where dummy images come in handy. Dummy images are placeholder images that you can use to fill in the gaps in your web design projects until you have the final images ready. They can help you to visualize how your layout will look, test different sizes and formats, and avoid delays in your design process.</p>
|
6 |
-
<p>In this article, we will show you how to download dummy images for your web design projects using some of the best online tools available. We will also give you some tips on how to use them effectively and replace them with real ones when you are done.</p>
|
7 |
-
<h2>What are Dummy Images and Why Use Them?</h2>
|
8 |
-
<h3>Dummy images are placeholder images that you can use to fill in the gaps in your web design projects until you have the final images ready.</h3>
|
9 |
-
<p>Dummy images are exactly what they sound like – they’re temporary, universal images that temporarily replace graphics or text on a webpage. They’re important for designers and developers who want to present a design concept to their client before finalizing the layout and content.</p>
|
10 |
-
<p>Dummy images can be anything from a solid color block, a random photo, a text overlay, or a custom image that matches your theme or style. You can create them yourself using an image editor, or use one of the many online tools that can generate them for you.</p>
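If you do want to roll your own placeholder rather than lean on an online generator, a few lines of scripting are usually enough. The sketch below is purely illustrative and is not part of the deleted article above; it assumes the Pillow library is installed, and the size, colour, and file name are arbitrary choices:

```python
# Minimal sketch: render a solid-colour placeholder with its dimensions
# printed in the middle, then save it as a PNG. Requires Pillow.
from PIL import Image, ImageDraw

width, height = 300, 200
image = Image.new("RGB", (width, height), color=(9, 153, 255))  # solid block
draw = ImageDraw.Draw(image)

label = f"{width} x {height}"
# textbbox returns (left, top, right, bottom) for the rendered label.
_, _, text_w, text_h = draw.textbbox((0, 0), label)
draw.text(((width - text_w) / 2, (height - text_h) / 2), label, fill="white")

image.save("placeholder-300x200.png")
```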
|
11 |
-
<h3>Dummy images can help you to visualize how your layout will look, test different sizes and formats, and avoid delays in your design process.</h3>
|
12 |
-
<p>Using dummy images has many benefits for web designers. Here are some of them:</p>
|
13 |
-
<ul>
|
14 |
-
<li>They allow you to see how your layout will look with different types of images, such as photos, illustrations, icons, logos, etc.</li>
|
15 |
-
<li>They help you to test how your layout will respond to different image sizes and formats, such as landscape, portrait, square, circle, etc.</li>
|
16 |
-
<li>They enable you to experiment with different image styles and effects, such as grayscale, blur, opacity, etc.</li>
|
17 |
-
<li>They save you time and hassle by allowing you to work on your layout without waiting for the final images to be ready or approved.</li>
|
18 |
-
<li>They prevent you from using low-quality or inappropriate images that may ruin your design or cause legal issues.</li>
|
19 |
-
</ul>
|
20 |
-
<h2>How to Find and Download Dummy Images Online</h2>
|
21 |
-
<h3>There are many online tools that can help you to generate and download dummy images for free. Here are some of the best ones:</h3>
|
22 |
-
<h4>Lorem Picsum: The Lorem Ipsum for Photos</h4>
|
23 |
-
<p>Lorem Picsum is a simple and elegant tool that allows you to download random photos from the popular online photo platform Unsplash. You can specify the dimensions, format, and category of the photos you want, or let the tool choose them for you. You can also add filters, blur, and grayscale effects to your photos. To use Lorem Picsum, simply visit their website and enter the URL of the image you want, such as https://picsum.photos/200/300. You can also download multiple images at once by using the list feature, such as https://picsum.photos/v2/list?limit=10.</p>
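Because the whole interface is just a URL, the same pattern can also be scripted. A minimal sketch using only the Python standard library is shown below; the URL format is the one quoted above, while the output file name is an arbitrary choice:

```python
# Minimal sketch: fetch one random 200x300 placeholder photo from the
# URL pattern described above and write it to disk. Standard library only.
import urllib.request

url = "https://picsum.photos/200/300"   # width and height as path segments
output_path = "dummy-200x300.jpg"       # arbitrary local file name

with urllib.request.urlopen(url) as response, open(output_path, "wb") as fh:
    fh.write(response.read())

print(f"Saved placeholder image to {output_path}")
```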
|
24 |
-
<p>download dummy images for free<br />
|
25 |
-
download dummy images for web design<br />
|
26 |
-
download dummy images for testing<br />
|
27 |
-
download dummy images for placeholders<br />
|
28 |
-
download dummy images for mockups<br />
|
29 |
-
download dummy images for layout<br />
|
30 |
-
download dummy images for commercial use<br />
|
31 |
-
download dummy images for creative commons<br />
|
32 |
-
download dummy images from freepik<br />
|
33 |
-
download dummy images from placeimg<br />
|
34 |
-
download dummy images from unsplash<br />
|
35 |
-
download dummy images from lorem picsum<br />
|
36 |
-
download dummy images from pixabay<br />
|
37 |
-
download dummy images from pexels<br />
|
38 |
-
download dummy images from placeholder.com<br />
|
39 |
-
download dummy photos of people<br />
|
40 |
-
download dummy photos of animals<br />
|
41 |
-
download dummy photos of nature<br />
|
42 |
-
download dummy photos of food<br />
|
43 |
-
download dummy photos of cars<br />
|
44 |
-
download dummy photos of fashion<br />
|
45 |
-
download dummy photos of travel<br />
|
46 |
-
download dummy photos of sports<br />
|
47 |
-
download dummy photos of business<br />
|
48 |
-
download dummy photos of technology<br />
|
49 |
-
download dummy vectors of icons<br />
|
50 |
-
download dummy vectors of logos<br />
|
51 |
-
download dummy vectors of shapes<br />
|
52 |
-
download dummy vectors of patterns<br />
|
53 |
-
download dummy vectors of illustrations<br />
|
54 |
-
download dummy vectors of backgrounds<br />
|
55 |
-
download dummy vectors of infographics<br />
|
56 |
-
download dummy vectors of banners<br />
|
57 |
-
download dummy vectors of stickers<br />
|
58 |
-
download dummy vectors of cartoons<br />
|
59 |
-
download dummy PSD files for photoshop<br />
|
60 |
-
download dummy PSD files for editing<br />
|
61 |
-
download dummy PSD files for layers<br />
|
62 |
-
download dummy PSD files for templates<br />
|
63 |
-
download dummy PSD files for graphics<br />
|
64 |
-
download high-quality dummy images <br />
|
65 |
-
download low-quality dummy images <br />
|
66 |
-
download random dummy images <br />
|
67 |
-
download specific dummy images <br />
|
68 |
-
how to download dummy images <br />
|
69 |
-
where to download dummy images <br />
|
70 |
-
why to download dummy images <br />
|
71 |
-
best sites to download dummy images <br />
|
72 |
-
best tools to download dummy images</p>
|
73 |
-
<h4>Placeholder.com: A Simple and Versatile Image Generator</h4>
|
74 |
-
<p>Placeholder.com is a handy tool that allows you to create and download dummy images of any size, color, and text. You can use it to generate solid color blocks, gradients, patterns, text overlays, and more. You can also customize the font, size, alignment, and color of the text. To use Placeholder.com, simply visit their website and enter the URL of the image you want, such as https://via.placeholder.com/300x200.png/09f/fff?text=Dummy+Image. You can also use their API to generate images dynamically in your code.</p>
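Since every option is encoded in the URL, generating a whole batch of placeholders for a mockup comes down to string formatting. The sketch below follows the URL layout of the example above; the sizes, colours, and text are made-up values for illustration:

```python
# Minimal sketch: build Placeholder.com-style URLs for a few layout slots
# and print matching <img> tags that could be pasted into a mockup.
sizes = [(1200, 400), (600, 400), (300, 300)]
background, foreground = "09f", "fff"   # hex colours without the leading '#'

for width, height in sizes:
    url = (
        f"https://via.placeholder.com/{width}x{height}.png/"
        f"{background}/{foreground}?text={width}x{height}"
    )
    print(f'<img src="{url}" alt="Placeholder {width}x{height}">')
```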
|
75 |
-
<h4>LoremFlickr: A Flickr-Based Image Generator</h4>
|
76 |
-
<p>LoremFlickr is a useful tool that allows you to download random photos from the popular online photo platform Flickr. You can specify the dimensions, format, and keyword of the photos you want, or let the tool choose them for you. You can also add filters, blur, and grayscale effects to your photos. To use LoremFlickr, simply visit their website and enter the URL of the image you want, such as https://loremflickr.com/320/240/dog. You can also download multiple images at once by using the g feature, such as https://loremflickr.com/g/320/240/dog/all.</p>
|
77 |
-
<h4>Dummy Image Generator: A Customizable Image Tool</h4>
|
78 |
-
<p>Dummy Image Generator is a powerful tool that allows you to create and download dummy images of any size, color, shape, and text. You can use it to generate circles, squares, triangles, stars, hearts, and more. You can also customize the background color, foreground color, border color, border width, font family, font size, font style, font color, and text content of your images. To use Dummy Image Generator, simply visit their website and enter the parameters of the image you want in the form. You can also use their API to generate images dynamically in your code.</p>
|
79 |
-
<h2>How to Use Dummy Images in Your Web Design Projects</h2>
|
80 |
-
<h3>Once you have downloaded your dummy images, you can use them in your web design projects in various ways. Here are some tips:</h3>
|
81 |
-
<h4>Use the same dimensions and formats as your final images</h4>
|
82 |
-
<p>One of the main purposes of using dummy images is to test how your layout will look with different types of images. Therefore, it is important to use dummy images that have the same dimensions and formats as your final images. This will help you to avoid any surprises or errors when you replace them with real ones. For example, if your final images are 300x200 pixels in JPEG format, you should use dummy images that are 300x200 pixels in JPEG format as well.</p>
|
83 |
-
<h4>Use descriptive file names and alt text for your dummy images</h4>
|
84 |
-
<p>Another purpose of using dummy images is to remind yourself and others what kind of images you need for your web design projects. Therefore, it is helpful to use descriptive file names and alt text for your dummy images. This will help you to keep track of what each image is supposed to represent and what content it should convey. For example, if your dummy image is a placeholder for a logo of a company called ABC Inc., you could name it abc-logo.jpg and use alt text like "Logo of ABC Inc."</p>
|
85 |
-
<h4>Replace your dummy images with real ones as soon as possible</h4>
|
86 |
-
<p>The final purpose of using dummy images is to speed up your web design process by allowing you to work on your layout without waiting for the final images to be ready or approved. However, you should always remember to replace your dummy images with real ones as soon as possible. This will help you to avoid any confusion or misunderstanding with your clients or users. It will also help you to improve the quality and credibility of your website.</p>
|
87 |
-
<h2>Conclusion</h2>
|
88 |
-
<h3>Dummy images are a useful way to create realistic mockups and prototypes for your web design projects. They can save you time, hassle, and frustration. However, you should always remember to replace them with high-quality images before launching your website.</h3>
|
89 |
-
<p>In this article, we have shown you how to download dummy images for your web design projects using some of the best online tools available. We have also given you some tips on how to use them effectively and replace them with real ones when you are done. We hope you have found this article helpful and informative. If you have any questions or comments, please feel free to leave them below.</p>
|
90 |
-
<h2>FAQs</h2>
|
91 |
-
<h3>What are dummy images?</h3>
|
92 |
-
<p>Dummy images are placeholder images that you can use to fill in the gaps in your web design projects until you have the final images ready.</p>
|
93 |
-
<h3>Why use dummy images?</h3>
|
94 |
-
<p>Dummy images can help you to visualize how your layout will look, test different sizes and formats, and avoid delays in your design process.</p>
|
95 |
-
<h3>How to download dummy images?</h3>
|
96 |
-
<p>You can download dummy images from various online tools that can generate them for you, such as Lorem Picsum, Placeholder.com, LoremFlickr, and Dummy Image Generator.</p>
|
97 |
-
<h3>How to use dummy images?</h3>
|
98 |
-
<p>You can use dummy images in your web design projects by inserting them in your code or image editor. You should use the same dimensions and formats as your final images, use descriptive file names and alt text for your dummy images, and replace them with real ones as soon as possible.</p>
|
99 |
-
<h3>Where to find high-quality images for your website?</h3>
|
100 |
-
<p>You can find high-quality images for your website from various online sources, such as stock photo websites, free image websites, or your own photography. You should always make sure that the images you use are relevant, appropriate, and legal for your website.</p> 401be4b1e0<br />
|
101 |
-
<br />
|
102 |
-
<br />
spaces/7hao/bingo/src/components/providers.tsx
DELETED
@@ -1,15 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { ThemeProvider as NextThemesProvider } from 'next-themes'
-import { ThemeProviderProps } from 'next-themes/dist/types'
-
-import { TooltipProvider } from '@/components/ui/tooltip'
-
-export function Providers({ children, ...props }: ThemeProviderProps) {
-  return (
-    <NextThemesProvider {...props}>
-      <TooltipProvider>{children}</TooltipProvider>
-    </NextThemesProvider>
-  )
-}
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers.py
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
from uvr5_pack.lib_v5 import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class SeperableConv2DBNActiv(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
31 |
-
super(SeperableConv2DBNActiv, self).__init__()
|
32 |
-
self.conv = nn.Sequential(
|
33 |
-
nn.Conv2d(
|
34 |
-
nin,
|
35 |
-
nin,
|
36 |
-
kernel_size=ksize,
|
37 |
-
stride=stride,
|
38 |
-
padding=pad,
|
39 |
-
dilation=dilation,
|
40 |
-
groups=nin,
|
41 |
-
bias=False,
|
42 |
-
),
|
43 |
-
nn.Conv2d(nin, nout, kernel_size=1, bias=False),
|
44 |
-
nn.BatchNorm2d(nout),
|
45 |
-
activ(),
|
46 |
-
)
|
47 |
-
|
48 |
-
def __call__(self, x):
|
49 |
-
return self.conv(x)
|
50 |
-
|
51 |
-
|
52 |
-
class Encoder(nn.Module):
|
53 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
54 |
-
super(Encoder, self).__init__()
|
55 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
56 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
|
57 |
-
|
58 |
-
def __call__(self, x):
|
59 |
-
skip = self.conv1(x)
|
60 |
-
h = self.conv2(skip)
|
61 |
-
|
62 |
-
return h, skip
|
63 |
-
|
64 |
-
|
65 |
-
class Decoder(nn.Module):
|
66 |
-
def __init__(
|
67 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
68 |
-
):
|
69 |
-
super(Decoder, self).__init__()
|
70 |
-
self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
71 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
72 |
-
|
73 |
-
def __call__(self, x, skip=None):
|
74 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
75 |
-
if skip is not None:
|
76 |
-
skip = spec_utils.crop_center(skip, x)
|
77 |
-
x = torch.cat([x, skip], dim=1)
|
78 |
-
h = self.conv(x)
|
79 |
-
|
80 |
-
if self.dropout is not None:
|
81 |
-
h = self.dropout(h)
|
82 |
-
|
83 |
-
return h
|
84 |
-
|
85 |
-
|
86 |
-
class ASPPModule(nn.Module):
|
87 |
-
def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
|
88 |
-
super(ASPPModule, self).__init__()
|
89 |
-
self.conv1 = nn.Sequential(
|
90 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
91 |
-
Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
|
92 |
-
)
|
93 |
-
self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
|
94 |
-
self.conv3 = SeperableConv2DBNActiv(
|
95 |
-
nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
|
96 |
-
)
|
97 |
-
self.conv4 = SeperableConv2DBNActiv(
|
98 |
-
nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
|
99 |
-
)
|
100 |
-
self.conv5 = SeperableConv2DBNActiv(
|
101 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
102 |
-
)
|
103 |
-
self.bottleneck = nn.Sequential(
|
104 |
-
Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
|
105 |
-
)
|
106 |
-
|
107 |
-
def forward(self, x):
|
108 |
-
_, _, h, w = x.size()
|
109 |
-
feat1 = F.interpolate(
|
110 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
111 |
-
)
|
112 |
-
feat2 = self.conv2(x)
|
113 |
-
feat3 = self.conv3(x)
|
114 |
-
feat4 = self.conv4(x)
|
115 |
-
feat5 = self.conv5(x)
|
116 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
|
117 |
-
bottle = self.bottleneck(out)
|
118 |
-
return bottle
spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/_explorers.py
DELETED
@@ -1,66 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import treetable as tt
|
8 |
-
|
9 |
-
from .._base_explorers import BaseExplorer
|
10 |
-
|
11 |
-
|
12 |
-
class DiffusionExplorer(BaseExplorer):
|
13 |
-
eval_metrics = ["sisnr", "visqol"]
|
14 |
-
|
15 |
-
def stages(self):
|
16 |
-
return ["train", "valid", "valid_ema", "evaluate", "evaluate_ema"]
|
17 |
-
|
18 |
-
def get_grid_meta(self):
|
19 |
-
"""Returns the list of Meta information to display for each XP/job.
|
20 |
-
"""
|
21 |
-
return [
|
22 |
-
tt.leaf("index", align=">"),
|
23 |
-
tt.leaf("name", wrap=140),
|
24 |
-
tt.leaf("state"),
|
25 |
-
tt.leaf("sig", align=">"),
|
26 |
-
]
|
27 |
-
|
28 |
-
def get_grid_metrics(self):
|
29 |
-
"""Return the metrics that should be displayed in the tracking table.
|
30 |
-
"""
|
31 |
-
return [
|
32 |
-
tt.group(
|
33 |
-
"train",
|
34 |
-
[
|
35 |
-
tt.leaf("epoch"),
|
36 |
-
tt.leaf("loss", ".3%"),
|
37 |
-
],
|
38 |
-
align=">",
|
39 |
-
),
|
40 |
-
tt.group(
|
41 |
-
"valid",
|
42 |
-
[
|
43 |
-
tt.leaf("loss", ".3%"),
|
44 |
-
# tt.leaf("loss_0", ".3%"),
|
45 |
-
],
|
46 |
-
align=">",
|
47 |
-
),
|
48 |
-
tt.group(
|
49 |
-
"valid_ema",
|
50 |
-
[
|
51 |
-
tt.leaf("loss", ".3%"),
|
52 |
-
# tt.leaf("loss_0", ".3%"),
|
53 |
-
],
|
54 |
-
align=">",
|
55 |
-
),
|
56 |
-
tt.group(
|
57 |
-
"evaluate", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"),
|
58 |
-
tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"),
|
59 |
-
tt.leaf("rvm_3", ".4f"), ], align=">"
|
60 |
-
),
|
61 |
-
tt.group(
|
62 |
-
"evaluate_ema", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"),
|
63 |
-
tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"),
|
64 |
-
tt.leaf("rvm_3", ".4f")], align=">"
|
65 |
-
),
|
66 |
-
]
spaces/AIFILMS/StyleGANEX/utils/__init__.py
DELETED
File without changes
|
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/pyglet_platform.py
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
from pyrender.constants import (TARGET_OPEN_GL_MAJOR, TARGET_OPEN_GL_MINOR,
|
2 |
-
MIN_OPEN_GL_MAJOR, MIN_OPEN_GL_MINOR)
|
3 |
-
from .base import Platform
|
4 |
-
|
5 |
-
import OpenGL
|
6 |
-
|
7 |
-
|
8 |
-
__all__ = ['PygletPlatform']
|
9 |
-
|
10 |
-
|
11 |
-
class PygletPlatform(Platform):
|
12 |
-
"""Renders on-screen using a 1x1 hidden Pyglet window for getting
|
13 |
-
an OpenGL context.
|
14 |
-
"""
|
15 |
-
|
16 |
-
def __init__(self, viewport_width, viewport_height):
|
17 |
-
super(PygletPlatform, self).__init__(viewport_width, viewport_height)
|
18 |
-
self._window = None
|
19 |
-
|
20 |
-
def init_context(self):
|
21 |
-
import pyglet
|
22 |
-
pyglet.options['shadow_window'] = False
|
23 |
-
|
24 |
-
try:
|
25 |
-
pyglet.lib.x11.xlib.XInitThreads()
|
26 |
-
except Exception:
|
27 |
-
pass
|
28 |
-
|
29 |
-
self._window = None
|
30 |
-
confs = [pyglet.gl.Config(sample_buffers=1, samples=4,
|
31 |
-
depth_size=24,
|
32 |
-
double_buffer=True,
|
33 |
-
major_version=TARGET_OPEN_GL_MAJOR,
|
34 |
-
minor_version=TARGET_OPEN_GL_MINOR),
|
35 |
-
pyglet.gl.Config(depth_size=24,
|
36 |
-
double_buffer=True,
|
37 |
-
major_version=TARGET_OPEN_GL_MAJOR,
|
38 |
-
minor_version=TARGET_OPEN_GL_MINOR),
|
39 |
-
pyglet.gl.Config(sample_buffers=1, samples=4,
|
40 |
-
depth_size=24,
|
41 |
-
double_buffer=True,
|
42 |
-
major_version=MIN_OPEN_GL_MAJOR,
|
43 |
-
minor_version=MIN_OPEN_GL_MINOR),
|
44 |
-
pyglet.gl.Config(depth_size=24,
|
45 |
-
double_buffer=True,
|
46 |
-
major_version=MIN_OPEN_GL_MAJOR,
|
47 |
-
minor_version=MIN_OPEN_GL_MINOR)]
|
48 |
-
for conf in confs:
|
49 |
-
try:
|
50 |
-
self._window = pyglet.window.Window(config=conf, visible=False,
|
51 |
-
resizable=False,
|
52 |
-
width=1, height=1)
|
53 |
-
break
|
54 |
-
except pyglet.window.NoSuchConfigException as e:
|
55 |
-
pass
|
56 |
-
|
57 |
-
if not self._window:
|
58 |
-
raise ValueError(
|
59 |
-
'Failed to initialize Pyglet window with an OpenGL >= 3+ '
|
60 |
-
'context. If you\'re logged in via SSH, ensure that you\'re '
|
61 |
-
'running your script with vglrun (i.e. VirtualGL). The '
|
62 |
-
'internal error message was "{}"'.format(e)
|
63 |
-
)
|
64 |
-
|
65 |
-
def make_current(self):
|
66 |
-
if self._window:
|
67 |
-
self._window.switch_to()
|
68 |
-
|
69 |
-
def make_uncurrent(self):
|
70 |
-
try:
|
71 |
-
import pyglet
|
72 |
-
pyglet.gl.xlib.glx.glXMakeContextCurrent(self._window.context.x_display, 0, 0, None)
|
73 |
-
except Exception:
|
74 |
-
pass
|
75 |
-
|
76 |
-
def delete_context(self):
|
77 |
-
if self._window is not None:
|
78 |
-
self.make_current()
|
79 |
-
cid = OpenGL.contextdata.getContext()
|
80 |
-
try:
|
81 |
-
self._window.context.destroy()
|
82 |
-
self._window.close()
|
83 |
-
except Exception:
|
84 |
-
pass
|
85 |
-
self._window = None
|
86 |
-
OpenGL.contextdata.cleanupContext(cid)
|
87 |
-
del cid
|
88 |
-
|
89 |
-
def supports_framebuffers(self):
|
90 |
-
return True
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/metrics/laplace_var.py
DELETED
@@ -1,4 +0,0 @@
-import scipy.ndimage
-
-def laplace_var(x):
-    return scipy.ndimage.laplace(x).var()
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/model.py
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
|
4 |
-
|
5 |
-
class VGGishish(nn.Module):
|
6 |
-
|
7 |
-
def __init__(self, conv_layers, use_bn, num_classes):
|
8 |
-
'''
|
9 |
-
Mostly from
|
10 |
-
https://pytorch.org/vision/0.8/_modules/torchvision/models/vgg.html
|
11 |
-
'''
|
12 |
-
super().__init__()
|
13 |
-
layers = []
|
14 |
-
in_channels = 1
|
15 |
-
|
16 |
-
# a list of channels with 'MP' (maxpool) from config
|
17 |
-
for v in conv_layers:
|
18 |
-
if v == 'MP':
|
19 |
-
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
|
20 |
-
else:
|
21 |
-
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, stride=1)
|
22 |
-
if use_bn:
|
23 |
-
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
|
24 |
-
else:
|
25 |
-
layers += [conv2d, nn.ReLU(inplace=True)]
|
26 |
-
in_channels = v
|
27 |
-
self.features = nn.Sequential(*layers)
|
28 |
-
|
29 |
-
self.avgpool = nn.AdaptiveAvgPool2d((5, 10))
|
30 |
-
|
31 |
-
self.flatten = nn.Flatten()
|
32 |
-
self.classifier = nn.Sequential(
|
33 |
-
nn.Linear(512 * 5 * 10, 4096),
|
34 |
-
nn.ReLU(True),
|
35 |
-
nn.Linear(4096, 4096),
|
36 |
-
nn.ReLU(True),
|
37 |
-
nn.Linear(4096, num_classes)
|
38 |
-
)
|
39 |
-
|
40 |
-
# weight init
|
41 |
-
self.reset_parameters()
|
42 |
-
|
43 |
-
def forward(self, x):
|
44 |
-
# adding channel dim for conv2d (B, 1, F, T) <-
|
45 |
-
x = x.unsqueeze(1)
|
46 |
-
# backbone (B, 1, 5, 53) <- (B, 1, 80, 860)
|
47 |
-
x = self.features(x)
|
48 |
-
# adaptive avg pooling (B, 1, 5, 10) <- (B, 1, 5, 53) – if no MP is used as the end of VGG
|
49 |
-
x = self.avgpool(x)
|
50 |
-
# flatten
|
51 |
-
x = self.flatten(x)
|
52 |
-
# classify
|
53 |
-
x = self.classifier(x)
|
54 |
-
return x
|
55 |
-
|
56 |
-
def reset_parameters(self):
|
57 |
-
for m in self.modules():
|
58 |
-
if isinstance(m, nn.Conv2d):
|
59 |
-
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
|
60 |
-
if m.bias is not None:
|
61 |
-
nn.init.constant_(m.bias, 0)
|
62 |
-
elif isinstance(m, nn.BatchNorm2d):
|
63 |
-
nn.init.constant_(m.weight, 1)
|
64 |
-
nn.init.constant_(m.bias, 0)
|
65 |
-
elif isinstance(m, nn.Linear):
|
66 |
-
nn.init.normal_(m.weight, 0, 0.01)
|
67 |
-
nn.init.constant_(m.bias, 0)
|
68 |
-
|
69 |
-
|
70 |
-
if __name__ == '__main__':
|
71 |
-
num_classes = 309
|
72 |
-
inputs = torch.rand(3, 80, 848)
|
73 |
-
conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
|
74 |
-
# conv_layers = [64, 'MP', 128, 'MP', 256, 256, 'MP', 512, 512, 'MP']
|
75 |
-
model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes)
|
76 |
-
outputs = model(inputs)
|
77 |
-
print(outputs.shape)
spaces/AILab-CVC/SEED-LLaMA/gradio_demo/seed_llama_flask.py
DELETED
@@ -1,231 +0,0 @@
|
|
1 |
-
import hydra
|
2 |
-
|
3 |
-
import pyrootutils
|
4 |
-
import os
|
5 |
-
import torch
|
6 |
-
|
7 |
-
from omegaconf import OmegaConf
|
8 |
-
from flask import Flask, request
|
9 |
-
import json
|
10 |
-
from typing import Optional
|
11 |
-
import transformers
|
12 |
-
from dataclasses import dataclass, field
|
13 |
-
import io
|
14 |
-
import base64
|
15 |
-
from PIL import Image
|
16 |
-
import gc
|
17 |
-
|
18 |
-
pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
|
19 |
-
|
20 |
-
BOI_TOKEN = '<img>'
|
21 |
-
EOI_TOKEN = '</img>'
|
22 |
-
IMG_TOKEN = '<img_{:05d}>'
|
23 |
-
|
24 |
-
IMG_FLAG = '<image>'
|
25 |
-
NUM_IMG_TOKNES = 32
|
26 |
-
NUM_IMG_CODES = 8192
|
27 |
-
|
28 |
-
app = Flask(__name__)
|
29 |
-
|
30 |
-
|
31 |
-
def decode_image(encoded_image: str) -> Image:
|
32 |
-
decoded_bytes = base64.b64decode(encoded_image.encode('utf-8'))
|
33 |
-
buffer = io.BytesIO(decoded_bytes)
|
34 |
-
image = Image.open(buffer)
|
35 |
-
return image
|
36 |
-
|
37 |
-
|
38 |
-
def encode_image(image: Image.Image, format: str = 'PNG') -> str:
|
39 |
-
with io.BytesIO() as buffer:
|
40 |
-
image.save(buffer, format=format)
|
41 |
-
encoded_image = base64.b64encode(buffer.getvalue()).decode('utf-8')
|
42 |
-
return encoded_image
|
43 |
-
|
44 |
-
|
45 |
-
@dataclass
|
46 |
-
class Arguments:
|
47 |
-
image_transform: Optional[str] = field(default=None, metadata={"help": "config path of image transform"})
|
48 |
-
tokenizer: Optional[str] = field(default=None, metadata={"help": "config path of tokenizer used to initialize tokenizer"})
|
49 |
-
model: Optional[str] = field(default=None, metadata={"help": "config path of llm"})
|
50 |
-
port: Optional[str] = field(default=80, metadata={"help": "network port"})
|
51 |
-
llm_device: Optional[str] = field(default='cuda:0', metadata={"help": "llm device"})
|
52 |
-
tokenizer_device: Optional[str] = field(default='cuda:0', metadata={"help": "tokenizer device"})
|
53 |
-
offload_encoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})
|
54 |
-
offload_decoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})
|
55 |
-
|
56 |
-
|
57 |
-
parser = transformers.HfArgumentParser(Arguments)
|
58 |
-
args, = parser.parse_args_into_dataclasses()
|
59 |
-
|
60 |
-
|
61 |
-
class LLMService:
|
62 |
-
def __init__(self, args) -> None:
|
63 |
-
image_transform_cfg = OmegaConf.load(args.image_transform)
|
64 |
-
tokenizer_cfg = OmegaConf.load(args.tokenizer)
|
65 |
-
model_cfg = OmegaConf.load(args.model)
|
66 |
-
self.image_id_shift = 32000
|
67 |
-
|
68 |
-
self.image_transform = hydra.utils.instantiate(image_transform_cfg)
|
69 |
-
|
70 |
-
model = hydra.utils.instantiate(model_cfg, device_map=args.llm_device).eval()
|
71 |
-
self.model = model
|
72 |
-
print(model.get_memory_footprint())
|
73 |
-
|
74 |
-
self.tokenizer = hydra.utils.instantiate(tokenizer_cfg, device=args.tokenizer_device, load_diffusion=True)
|
75 |
-
if args.offload_encoder:
|
76 |
-
self.tokenizer.image_tokenizer.model.visual_encoder.to('cpu')
|
77 |
-
if args.offload_decoder:
|
78 |
-
self.tokenizer.image_tokenizer.diffusion_model.to('cpu')
|
79 |
-
|
80 |
-
# model = hydra.utils.instantiate(model_cfg, torch_dtype=torch.float16)
|
81 |
-
# self.model = model.eval().to(args.llm_device)
|
82 |
-
self.llm_device = args.llm_device
|
83 |
-
self.tokenizer_device = args.tokenizer_device
|
84 |
-
self.offload_encoder = args.offload_encoder
|
85 |
-
self.offload_decoder = args.offload_decoder
|
86 |
-
self.boi_token_id = self.tokenizer(BOI_TOKEN, add_special_tokens=False).input_ids[0]
|
87 |
-
self.eoi_token_id = self.tokenizer(EOI_TOKEN, add_special_tokens=False).input_ids[0]
|
88 |
-
print('Init Done...')
|
89 |
-
|
90 |
-
|
91 |
-
service = LLMService(args)
|
92 |
-
|
93 |
-
|
94 |
-
@app.route('/generate', methods=['GET', 'POST'])
|
95 |
-
def generate():
|
96 |
-
|
97 |
-
request_info = request.get_json()
|
98 |
-
|
99 |
-
text_list = request_info['text'].split(IMG_FLAG)
|
100 |
-
image_list = request_info['images']
|
101 |
-
temperature = request_info.get('temperature', 0.7)
|
102 |
-
num_beams = request_info.get('num_beams', 1)
|
103 |
-
max_new_tokens = request_info.get('max_new_tokens', 256)
|
104 |
-
top_p = request_info.get('top_p', 0.5)
|
105 |
-
force_boi = request_info.get('force_boi', False)
|
106 |
-
|
107 |
-
assert len(text_list) == len(image_list) + 1
|
108 |
-
|
109 |
-
if len(image_list) > 0:
|
110 |
-
images_tensor_list = []
|
111 |
-
images_tensor_indices = []
|
112 |
-
images_ids_list = []
|
113 |
-
images_ids_indices = []
|
114 |
-
for idx, image_item in enumerate(image_list):
|
115 |
-
if isinstance(image_item, str):
|
116 |
-
image = decode_image(image_item)
|
117 |
-
image_tensor = service.image_transform(image)
|
118 |
-
images_tensor_list.append(image_tensor)
|
119 |
-
images_tensor_indices.append(idx)
|
120 |
-
else:
|
121 |
-
images_ids_list.append(image_item)
|
122 |
-
images_ids_indices.append(idx)
|
123 |
-
|
124 |
-
if len(images_tensor_list) > 0:
|
125 |
-
images_tensor = torch.stack(images_tensor_list, dim=0).to(service.tokenizer_device)
|
126 |
-
if service.offload_encoder:
|
127 |
-
service.tokenizer.image_tokenizer.model.visual_encoder.to(service.tokenizer_device)
|
128 |
-
|
129 |
-
images_ids_1 = service.tokenizer.encode_image(image_torch=images_tensor).cpu()
|
130 |
-
if args.offload_encoder:
|
131 |
-
service.tokenizer.image_tokenizer.model.visual_encoder.to('cpu')
|
132 |
-
torch.cuda.empty_cache()
|
133 |
-
gc.collect()
|
134 |
-
num_image_ids = images_ids_1.shape[-1]
|
135 |
-
else:
|
136 |
-
num_image_ids = len(images_ids_list[-1])
|
137 |
-
images_ids_2 = torch.tensor(images_ids_list, dtype=torch.long)
|
138 |
-
|
139 |
-
images_ids = torch.zeros((len(image_list), num_image_ids), dtype=torch.long)
|
140 |
-
if len(images_tensor_indices) > 0:
|
141 |
-
images_ids[images_tensor_indices, :] = images_ids_1
|
142 |
-
if len(images_ids_indices) > 0:
|
143 |
-
images_ids[images_ids_indices, :] = images_ids_2
|
144 |
-
|
145 |
-
input_text = ''
|
146 |
-
for i in range(images_ids.shape[0]):
|
147 |
-
single_image_ids = images_ids[i].view(-1).tolist()
|
148 |
-
image_tokens = BOI_TOKEN + ''.join([IMG_TOKEN.format(int(item)) for item in single_image_ids]) + EOI_TOKEN
|
149 |
-
input_text += text_list[i] + image_tokens
|
150 |
-
|
151 |
-
input_text = service.tokenizer.bos_token + input_text + text_list[-1]
|
152 |
-
|
153 |
-
images_ids_list = images_ids.tolist()
|
154 |
-
else:
|
155 |
-
|
156 |
-
input_text = service.tokenizer.bos_token + ''.join(text_list)
|
157 |
-
images_ids_list = []
|
158 |
-
|
159 |
-
if force_boi:
|
160 |
-
input_text += BOI_TOKEN
|
161 |
-
|
162 |
-
print(input_text)
|
163 |
-
input_ids = service.tokenizer(input_text, add_special_tokens=False, return_tensors='pt').input_ids
|
164 |
-
input_ids = input_ids.to(service.llm_device)
|
165 |
-
generation_config = {
|
166 |
-
'temperature': temperature,
|
167 |
-
'num_beams': num_beams,
|
168 |
-
'max_new_tokens': max_new_tokens,
|
169 |
-
'top_p': top_p,
|
170 |
-
'do_sample': True
|
171 |
-
}
|
172 |
-
|
173 |
-
generate_ids = service.model.generate(input_ids=input_ids, **generation_config)
|
174 |
-
|
175 |
-
if force_boi:
|
176 |
-
generate_ids = generate_ids[0][input_ids.shape[1] - 1:]
|
177 |
-
else:
|
178 |
-
generate_ids = generate_ids[0][input_ids.shape[1]:]
|
179 |
-
print('generated_ids: ', generate_ids)
|
180 |
-
boi_indices = torch.where(generate_ids == service.boi_token_id)[0].tolist()
|
181 |
-
eoi_indices = torch.where(generate_ids == service.eoi_token_id)[0].tolist()
|
182 |
-
# assert len(boi_indices) == len(eoi_indices)
|
183 |
-
|
184 |
-
generated_image_base64_list = []
|
185 |
-
text_mask = torch.ones_like(generate_ids, dtype=torch.bool)
|
186 |
-
|
187 |
-
error_msg = []
|
188 |
-
if len(boi_indices) != len(eoi_indices):
|
189 |
-
error_msg.append(
|
190 |
-
f'Num of BOI (begain of image) tokens: {len(boi_indices)} is not equal to EOI(end of image tokens): {len(eoi_indices)}, some image Some images will fail to decode.'
|
191 |
-
)
|
192 |
-
|
193 |
-
num_images = min(len(boi_indices), len(eoi_indices))
|
194 |
-
for idx in range(num_images):
|
195 |
-
boi_index, eoi_index = boi_indices[idx], eoi_indices[idx]
|
196 |
-
# for boi_index, eoi_index in zip(boi_indices, eoi_indices):
|
197 |
-
image_ids = generate_ids[boi_index + 1:eoi_index].unsqueeze(0).to(service.tokenizer_device)
|
198 |
-
image_ids = image_ids - service.image_id_shift
|
199 |
-
if image_ids.shape[-1] != NUM_IMG_TOKNES:
|
200 |
-
error_msg.append(f'Len(image_ids) {image_ids.shape[-1]} is not equal to {NUM_IMG_TOKNES}')
|
201 |
-
image_base64 = ''
|
202 |
-
elif (image_ids < 0).any() or (image_ids >= NUM_IMG_CODES).any():
|
203 |
-
error_msg.append(f'Some image_id out of range: [0, {NUM_IMG_CODES})')
|
204 |
-
image_base64 = ''
|
205 |
-
else:
|
206 |
-
if service.offload_decoder:
|
207 |
-
service.tokenizer.image_tokenizer.diffusion_model.to(service.tokenizer_device)
|
208 |
-
image = service.tokenizer.decode_image(image_ids)[0]
|
209 |
-
if service.offload_decoder:
|
210 |
-
service.tokenizer.image_tokenizer.diffusion_model.to('cpu')
|
211 |
-
torch.cuda.empty_cache()
|
212 |
-
gc.collect()
|
213 |
-
image_base64 = encode_image(image)
|
214 |
-
|
215 |
-
generated_image_base64_list.append(image_base64)
|
216 |
-
text_mask[boi_index + 1:eoi_index] = False
|
217 |
-
images_ids_list.append(image_ids.view(-1).tolist())
|
218 |
-
generate_ids = generate_ids[text_mask]
|
219 |
-
|
220 |
-
# print('generate_ids: ', generate_ids)
|
221 |
-
# generate_text = service.tokenizer.decode(generate_ids, skip_special_tokens=True)
|
222 |
-
generate_text = service.tokenizer.decode(generate_ids, skip_special_tokens=False)
|
223 |
-
# print('generate_text before: ', generate_text)
|
224 |
-
generate_text = generate_text.replace(BOI_TOKEN + ' ' + EOI_TOKEN + ' ', IMG_FLAG)
|
225 |
-
generate_text = generate_text.replace(service.tokenizer.eos_token, '')
|
226 |
-
print('generate_text: ', generate_text)
|
227 |
-
return {'text': generate_text, 'images': generated_image_base64_list, 'images_ids': images_ids_list, 'error_msg': error_msg}
|
228 |
-
|
229 |
-
|
230 |
-
if __name__ == '__main__':
|
231 |
-
app.run(host='0.0.0.0', port=args.port)
spaces/AchyuthGamer/Free-Accounts-Generator/style.css
DELETED
@@ -1,28 +0,0 @@
-body {
-  padding: 2rem;
-  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
-  font-size: 16px;
-  margin-top: 0;
-}
-
-p {
-  color: rgb(107, 114, 128);
-  font-size: 15px;
-  margin-bottom: 10px;
-  margin-top: 5px;
-}
-
-.card {
-  max-width: 620px;
-  margin: 0 auto;
-  padding: 16px;
-  border: 1px solid lightgray;
-  border-radius: 16px;
-}
-
-.card p:last-child {
-  margin-bottom: 0;
-}
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/openpose/body.py
DELETED
@@ -1,211 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import math
|
3 |
-
import matplotlib
|
4 |
-
import matplotlib.pyplot as plt
|
5 |
-
import numpy as np
|
6 |
-
import time
|
7 |
-
import torch
|
8 |
-
from scipy.ndimage.filters import gaussian_filter
|
9 |
-
from torchvision import transforms
|
10 |
-
|
11 |
-
from . import util
|
12 |
-
from .model import bodypose_model
|
13 |
-
|
14 |
-
|
15 |
-
class Body(object):
|
16 |
-
|
17 |
-
def __init__(self, model_path):
|
18 |
-
self.model = bodypose_model()
|
19 |
-
if torch.cuda.is_available():
|
20 |
-
self.model = self.model.cuda()
|
21 |
-
print('cuda')
|
22 |
-
model_dict = util.transfer(self.model, torch.load(model_path))
|
23 |
-
self.model.load_state_dict(model_dict)
|
24 |
-
self.model.eval()
|
25 |
-
|
26 |
-
def __call__(self, oriImg):
|
27 |
-
# scale_search = [0.5, 1.0, 1.5, 2.0]
|
28 |
-
scale_search = [0.5]
|
29 |
-
boxsize = 368
|
30 |
-
stride = 8
|
31 |
-
padValue = 128
|
32 |
-
thre1 = 0.1
|
33 |
-
thre2 = 0.05
|
34 |
-
multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
|
35 |
-
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
|
36 |
-
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
|
37 |
-
|
38 |
-
for m in range(len(multiplier)):
|
39 |
-
scale = multiplier[m]
|
40 |
-
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
|
41 |
-
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
|
42 |
-
im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
|
43 |
-
im = np.ascontiguousarray(im)
|
44 |
-
|
45 |
-
data = torch.from_numpy(im).float()
|
46 |
-
if torch.cuda.is_available():
|
47 |
-
data = data.cuda()
|
48 |
-
# data = data.permute([2, 0, 1]).unsqueeze(0).float()
|
49 |
-
with torch.no_grad():
|
50 |
-
Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
|
51 |
-
Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
|
52 |
-
Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
|
53 |
-
|
54 |
-
# extract outputs, resize, and remove padding
|
55 |
-
# heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
|
56 |
-
heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
|
57 |
-
heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
|
58 |
-
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
|
59 |
-
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
|
60 |
-
|
61 |
-
# paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
|
62 |
-
paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
|
63 |
-
paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
|
64 |
-
paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
|
65 |
-
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
|
66 |
-
|
67 |
-
heatmap_avg += heatmap_avg + heatmap / len(multiplier)
|
68 |
-
paf_avg += +paf / len(multiplier)
|
69 |
-
|
70 |
-
all_peaks = []
|
71 |
-
peak_counter = 0
|
72 |
-
|
73 |
-
for part in range(18):
|
74 |
-
map_ori = heatmap_avg[:, :, part]
|
75 |
-
one_heatmap = gaussian_filter(map_ori, sigma=3)
|
76 |
-
|
77 |
-
map_left = np.zeros(one_heatmap.shape)
|
78 |
-
map_left[1:, :] = one_heatmap[:-1, :]
|
79 |
-
map_right = np.zeros(one_heatmap.shape)
|
80 |
-
map_right[:-1, :] = one_heatmap[1:, :]
|
81 |
-
map_up = np.zeros(one_heatmap.shape)
|
82 |
-
map_up[:, 1:] = one_heatmap[:, :-1]
|
83 |
-
map_down = np.zeros(one_heatmap.shape)
|
84 |
-
map_down[:, :-1] = one_heatmap[:, 1:]
|
85 |
-
|
86 |
-
peaks_binary = np.logical_and.reduce((one_heatmap >= map_left, one_heatmap >= map_right,
|
87 |
-
one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
|
88 |
-
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
|
89 |
-
peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
|
90 |
-
peak_id = range(peak_counter, peak_counter + len(peaks))
|
91 |
-
peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i], ) for i in range(len(peak_id))]
|
92 |
-
|
93 |
-
all_peaks.append(peaks_with_score_and_id)
|
94 |
-
peak_counter += len(peaks)
|
95 |
-
|
96 |
-
# find connection in the specified sequence, center 29 is in the position 15
|
97 |
-
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
|
98 |
-
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
|
99 |
-
[1, 16], [16, 18], [3, 17], [6, 18]]
|
100 |
-
# the middle joints heatmap correpondence
|
101 |
-
mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
|
102 |
-
[23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
|
103 |
-
[55, 56], [37, 38], [45, 46]]
|
104 |
-
|
105 |
-
connection_all = []
|
106 |
-
special_k = []
|
107 |
-
mid_num = 10
|
108 |
-
|
109 |
-
for k in range(len(mapIdx)):
|
110 |
-
score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
|
111 |
-
candA = all_peaks[limbSeq[k][0] - 1]
|
112 |
-
candB = all_peaks[limbSeq[k][1] - 1]
|
113 |
-
nA = len(candA)
|
114 |
-
nB = len(candB)
|
115 |
-
indexA, indexB = limbSeq[k]
|
116 |
-
if (nA != 0 and nB != 0):
|
117 |
-
connection_candidate = []
|
118 |
-
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                        norm = max(0.001, norm)
                        vec = np.divide(vec, norm)

                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))

                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                          for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                          for I in range(len(startend))])

                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
                            0.5 * oriImg.shape[0] / norm - 1, 0)
                        criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if (i not in connection[:, 3] and j not in connection[:, 4]):
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if (len(connection) >= min(nA, nB)):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, 20))
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in range(len(mapIdx)):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(limbSeq[k]) - 1

                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])
        # delete some rows of subset which has few parts occur
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)

        # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
        # candidate: x, y, score, id
        return candidate, subset
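A minimal, hypothetical sketch of how the returned `candidate` and `subset` arrays can be read back into per-person keypoints. Only the array layout documented in the comments above (a `n*20` subset with candidate indices in columns 0-17, score in 18, part count in 19; candidate rows of `x, y, score, id`) is taken from the code; the helper name and output dictionary shape are illustrative.

```python
# Sketch: turn (candidate, subset) into a list of people with (x, y, score) keypoints.
import numpy as np

def subset_to_keypoints(candidate: np.ndarray, subset: np.ndarray):
    people = []
    for person in subset:
        keypoints = []
        for part_idx in person[:18].astype(int):
            if part_idx < 0:
                keypoints.append(None)  # this body part was not detected
            else:
                x, y, score, _ = candidate[part_idx]
                keypoints.append((float(x), float(y), float(score)))
        people.append({"keypoints": keypoints, "score": float(person[18])})
    return people
```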
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/perlin.js
DELETED
@@ -1,2 +0,0 @@
import Perlin from './utils/math/noise/Perlin.js';
export default Perlin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/BadgeLabel.d.ts
DELETED
@@ -1,30 +0,0 @@
// import * as Phaser from 'phaser';
import OverlapSizer from '../overlapsizer/OverlapSizer';

export default BadgeLabel;

declare namespace BadgeLabel {

    interface IConfig extends OverlapSizer.IConfig {
        background?: Phaser.GameObjects.GameObject,
        main?: Phaser.GameObjects.GameObject,

        leftTop?: Phaser.GameObjects.GameObject,
        centerTop?: Phaser.GameObjects.GameObject,
        rightTop?: Phaser.GameObjects.GameObject,
        leftCenter?: Phaser.GameObjects.GameObject,
        center?: Phaser.GameObjects.GameObject,
        rightCenter?: Phaser.GameObjects.GameObject,
        leftBottom?: Phaser.GameObjects.GameObject,
        centerBottom?: Phaser.GameObjects.GameObject,
        rightBottom?: Phaser.GameObjects.GameObject,
    }
}

declare class BadgeLabel extends OverlapSizer {

    constructor(
        scene: Phaser.Scene,
        config?: BadgeLabel.IConfig
    );
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/rotate/Rotate.d.ts
DELETED
@@ -1,2 +0,0 @@
import { Rotate } from '../../../plugins/gestures';
export default Rotate;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/OnDragThumb.js
DELETED
@@ -1,22 +0,0 @@
import PositionToPercent from './PositionToPercent.js';

var OnDragThumb = function (pointer, dragX, dragY) {
    if (!this.enable) {
        return;
    }
    tmpPoint.x = dragX;
    tmpPoint.y = dragY;

    var startPoint, endPoint;
    if (!this.reverseAxis) {
        startPoint = this.getStartPoint();
        endPoint = this.getEndPoint();
    } else {
        startPoint = this.getEndPoint();
        endPoint = this.getStartPoint();
    }
    this.value = PositionToPercent(startPoint, endPoint, tmpPoint);
}
var tmpPoint = {};

export default OnDragThumb;
spaces/Ali-Maq/Calorie_Calculator/app.py
DELETED
@@ -1,106 +0,0 @@
import numpy as np
import gradio as gr
import requests
import json

def list_to_dict(data):
    results = {}

    for i in range(len(data)):
        # Access the i-th dictionary in the list using an integer index
        d = data[i]
        # Assign the value of the 'label' key to the 'score' value in the results dictionary
        results[d['label']] = d['score']

    # The results dictionary will now contain the label-score pairs from the data list
    return results

API_URL = "https://api-inference.huggingface.co/models/nateraw/food"
headers = {"Authorization": "Bearer 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"}

def query(filename):
    with open(filename, "rb") as f:
        data = f.read()
    response = requests.request("POST", API_URL, headers=headers, data=data)
    output = json.loads(response.content.decode("utf-8"))
    return list_to_dict(output), json.dumps(output, indent=2, sort_keys=True)

def get_nutrition_info(food_name):
    # Make request to Nutritionix API
    response = requests.get(
        "https://trackapi.nutritionix.com/v2/search/instant",
        params={"query": food_name},
        headers={
            "x-app-id": "63a710ef",
            "x-app-key": "3ddc7e3feda88e1cf6dd355fb26cb261"
        }
    )
    # Parse response and return relevant information
    data = response.json()
    response = data["branded"][0]["photo"]["thumb"]
    val = {
        "food_name": data["branded"][0]["food_name"],
        "calories": data["branded"][0]["nf_calories"],
        "serving_size": data["branded"][0]["serving_qty"],
        "serving_unit": data["branded"][0]["serving_unit"],
        # "images": data["branded"][0]["photo"]
    }
    # Open the image using PIL
    output = json.dumps(val, indent=2, sort_keys=True)
    return output, response

def volume_estimations(ali):
    return None

with gr.Blocks() as demo:
    gr.Markdown("Food-Classification-Calorie-Estimation and Volume-Estimation")
    with gr.Tab("Food Classification"):
        text_input = gr.Image(type="filepath", interactive=True, label="Upload the food Image and Zoom in to the item you want to get the calorie for")
        text_output = [gr.Label(num_top_classes=6),
                       gr.Textbox()
                       ]
        text_button = gr.Button("Food Classification")
    with gr.Tab("Food Calorie Estimation"):
        image_input = gr.Textbox(label="Please enter the name of the Food you want to get calorie")
        image_output = [gr.Textbox(),
                        gr.Image(type="filepath")
                        ]
        image_button = gr.Button("Estimate Calories!")
    with gr.Tab("Volume Estimation"):
        _image_input = gr.Textbox(label="Please Download the Photogrammetry File trained on APPLE AR KIT and follow the instruction mention below to generate the 3D Vortex of the object")
        _image_output = gr.Image()
        gr.Markdown("-----------------------------------------------------------------------------")
        gr.Markdown("Directory where HelloPhotogrammetry app Saved. Example:/Users/ali/Desktop/HelloPhotogrammetry")
        gr.Markdown("Directory where all the images are saved. Example:: ~/Desktop/Burger_Data_3")
        gr.Markdown("Directory where the usdz or obj file has to be saved. Example: ~/Desktop/Burger_Data_3/Burger.usdz")
        gr.Markdown("File Quality that you want your 3D model to be. Example: --detail medium ")
        gr.Markdown("-----------------------------------------------------------------------------")
        gr.Markdown("/Users/ali/Desktop/HelloPhotogrammetry ~/Desktop/Burger_Data_3 ~/Desktop/Burger_Data_3/Burger.obj --detail medium")
        gr.Markdown("You can download the photogrammetry demo and files using this Google drive link")
        gr.Markdown("-----------------------------------------------------------------------------")
        gr.Markdown("https://drive.google.com/drive/folders/1QrL0Vhvw5GvIQ8fbHfb9EOsnOlPMmXLG?usp=share_link")
        gr.Markdown("-----------------------------------------------------------------------------")

        _image_button = gr.Button("Volume Calculation")
    with gr.Tab("Future Works"):
        gr.Markdown("Future work on Food Classification")
        gr.Markdown(
            "Currently the Model is trained on food-101 Dataset, which has 100 classes, In the future iteration of the project we would like to train the model on UNIMIB Dataset with 256 Food Classes")
        gr.Markdown("Future work on Volume Estimation")
        gr.Markdown(
            "The volume model has been trained on Apple AR Toolkit and thus can be executred only on Apple devices ie a iOS platform, In futur we would like to train the volume model such that it is Platform independent")
        gr.Markdown("Future work on Calorie Estimation")
        gr.Markdown(
            "The Calorie Estimation currently relies on Nutritionix API , In Future Iteration we would like to build our own Custom Database of Major Food Product across New York Restaurent")
        gr.Markdown("https://github.com/Ali-Maq/Food-Classification-Volume-Estimation-and-Calorie-Estimation/blob/main/README.md")

    text_button.click(query, inputs=text_input, outputs=text_output, scroll_to_output=True, show_progress=True)
    image_button.click(get_nutrition_info, inputs=image_input, outputs=image_output, scroll_to_output=True, show_progress=True)
    # _image_button.click(get_nutrition_info, inputs=_image_input, outputs=_image_output)
    with gr.Accordion("Open for More!"):
        gr.Markdown("🍎 Designed and built by Ali Under the Guidance of Professor Dennis Shasha")
        gr.Markdown("Contact me at [email protected] 😊")

demo.launch()
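For reference, a small self-contained illustration of the label/score flattening that the `list_to_dict` helper above performs on the classifier response; the sample labels and scores are made up for the example.

```python
# Sketch: the hosted classifier returns a list of {"label", "score"} records;
# flattening them into a dict is what lets gr.Label display them directly.
def list_to_dict(data):
    return {d["label"]: d["score"] for d in data}

predictions = [
    {"label": "pizza", "score": 0.91},
    {"label": "lasagna", "score": 0.05},
]
assert list_to_dict(predictions) == {"pizza": 0.91, "lasagna": 0.05}
```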
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/training_stats.py
DELETED
@@ -1,285 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""

import re
import numpy as np
import torch
import dnnlib

from . import misc

# ----------------------------------------------------------------------------

_num_moments = 3  # [num_scalars, sum_of_scalars, sum_of_squares]
# Data type to use for initial per-tensor reduction.
_reduce_dtype = torch.float32
_counter_dtype = torch.float64  # Data type to use for the internal counters.
_rank = 0  # Rank of the current process.
# Device to use for multiprocess communication. None = single-process.
_sync_device = None
_sync_called = False  # Has _sync() been called yet?
# Running counters on each device, updated by report(): name => device => torch.Tensor
_counters = dict()
# Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
_cumulative = dict()

# ----------------------------------------------------------------------------


def init_multiprocessing(rank, sync_device):
    r"""Initializes `torch_utils.training_stats` for collecting statistics
    across multiple processes.

    This function must be called after
    `torch.distributed.init_process_group()` and before `Collector.update()`.
    The call is not necessary if multi-process collection is not needed.

    Args:
        rank:           Rank of the current process.
        sync_device:    PyTorch device to use for inter-process
                        communication, or None to disable multi-process
                        collection. Typically `torch.device('cuda', rank)`.
    """
    global _rank, _sync_device
    assert not _sync_called
    _rank = rank
    _sync_device = sync_device

# ----------------------------------------------------------------------------


@misc.profiled_function
def report(name, value):
    r"""Broadcasts the given set of scalars to all interested instances of
    `Collector`, across device and process boundaries.

    This function is expected to be extremely cheap and can be safely
    called from anywhere in the training loop, loss function, or inside a
    `torch.nn.Module`.

    Warning: The current implementation expects the set of unique names to
    be consistent across processes. Please make sure that `report()` is
    called at least once for each unique name by each process, and in the
    same order. If a given process has no scalars to broadcast, it can do
    `report(name, [])` (empty list).

    Args:
        name:   Arbitrary string specifying the name of the statistic.
                Averages are accumulated separately for each unique name.
        value:  Arbitrary set of scalars. Can be a list, tuple,
                NumPy array, PyTorch tensor, or Python scalar.

    Returns:
        The same `value` that was passed in.
    """
    if name not in _counters:
        _counters[name] = dict()

    elems = torch.as_tensor(value)
    if elems.numel() == 0:
        return value

    elems = elems.detach().flatten().to(_reduce_dtype)
    moments = torch.stack([
        torch.ones_like(elems).sum(),
        elems.sum(),
        elems.square().sum(),
    ])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)

    device = moments.device
    if device not in _counters[name]:
        _counters[name][device] = torch.zeros_like(moments)
    _counters[name][device].add_(moments)
    return value

# ----------------------------------------------------------------------------


def report0(name, value):
    r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
    but ignores any scalars provided by the other processes.
    See `report()` for further details.
    """
    report(name, value if _rank == 0 else [])
    return value

# ----------------------------------------------------------------------------


class Collector:
    r"""Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.

    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """

    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        self._cumulative = dict()
        self._moments = dict()
        self.update()
        self._moments.clear()

    def names(self):
        r"""Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]

    def update(self):
        r"""Copies current values of the internal counters to the
        user-visible state and resets them for the next round.

        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.

        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            if name not in self._cumulative:
                self._cumulative[name] = torch.zeros(
                    [_num_moments], dtype=_counter_dtype)
            delta = cumulative - self._cumulative[name]
            self._cumulative[name].copy_(cumulative)
            if float(delta[0]) != 0:
                self._moments[name] = delta

    def _get_delta(self, name):
        r"""Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros(
                [_num_moments], dtype=_counter_dtype)
        return self._moments[name]

    def num(self, name):
        r"""Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])

    def mean(self, name):
        r"""Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])

    def std(self, name):
        r"""Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if int(delta[0]) == 1:
            return float(0)
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        return np.sqrt(max(raw_var - np.square(mean), 0))

    def as_dict(self):
        r"""Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(
                name), mean=self.mean(name), std=self.std(name))
        return stats

    def __getitem__(self, name):
        r"""Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)

# ----------------------------------------------------------------------------


def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.
    """
    if len(names) == 0:
        return []
    global _sync_called
    _sync_called = True

    # Collect deltas within current rank.
    deltas = []
    device = _sync_device if _sync_device is not None else torch.device('cpu')
    for name in names:
        delta = torch.zeros(
            [_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            delta.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)

    # Sum deltas across ranks.
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)

    # Update cumulative values.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros(
                [_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])

    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]

# ----------------------------------------------------------------------------
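A hedged usage sketch of the `report()`/`Collector` pair defined above, assuming the module is importable as `torch_utils.training_stats` as in the surrounding repo layout; the statistic name "Loss/G" and the toy loop are illustrative, not taken from the diff.

```python
# Sketch: report scalars from anywhere in the loop, fold them into visible
# averages with Collector.update(), then query mean/std per statistic name.
from torch_utils import training_stats

collector = training_stats.Collector(regex="Loss/.*")
for step in range(100):
    loss = 1.0 / (step + 1)                  # stand-in for a real training loss
    training_stats.report("Loss/G", loss)    # cheap, safe to call every step
    if step % 10 == 0:
        collector.update()                   # copy counters to user-visible state
        print(step, collector.mean("Loss/G"), collector.std("Loss/G"))
```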
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/device.py
DELETED
@@ -1,24 +0,0 @@
from typing import Any
import torch


def detach(obj: Any):
    """Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283
    Arguments:
        obj {dict, list} -- Object to be moved to cpu
    Raises:
        TypeError: Invalid type for detach
    Returns:
        type(obj) -- same object but moved to cpu
    """
    if torch.is_tensor(obj):
        return obj.detach()
    elif isinstance(obj, dict):
        res = {k: detach(v) for k, v in obj.items()}
        return res
    elif isinstance(obj, list):
        return [detach(v) for v in obj]
    elif isinstance(obj, tuple):
        return tuple(detach(list(obj)))
    else:
        raise TypeError("Invalid type for detach")
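A small illustrative check of the recursive behaviour, assuming the `detach()` defined above is in scope; the sample batch shape is made up for the example.

```python
# Sketch: detach() walks nested dict/list/tuple containers and detaches every
# tensor it finds, preserving the container structure.
import torch

batch = {"logits": torch.randn(2, 3, requires_grad=True),
         "aux": [torch.ones(1, requires_grad=True)]}
detached = detach(batch)
assert detached["logits"].requires_grad is False
assert detached["aux"][0].requires_grad is False
```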
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion.py
DELETED
@@ -1,347 +0,0 @@
import inspect
from typing import List, Optional, Union

import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput


class MakeCutouts(nn.Module):
    def __init__(self, cut_size, cut_power=1.0):
        super().__init__()

        self.cut_size = cut_size
        self.cut_power = cut_power

    def forward(self, pixel_values, num_cutouts):
        sideY, sideX = pixel_values.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(num_cutouts):
            size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
            cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
        return torch.cat(cutouts)


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedStableDiffusion(DiffusionPipeline):
    """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
    - https://github.com/Jack000/glid-3-xl
    - https://github.dev/crowsonkb/k-diffusion
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        self.cut_out_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.make_cutouts = MakeCutouts(self.cut_out_size)

        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        text_embeddings_clip,
        clip_guidance_scale,
        num_cutouts,
        use_cutouts=True,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        sample = 1 / self.vae.config.scaling_factor * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        if use_cutouts:
            image = self.make_cutouts(image, num_cutouts)
        else:
            image = transforms.Resize(self.cut_out_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        if use_cutouts:
            dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
            dists = dists.view([num_cutouts, sample.shape[0], -1])
            loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
        else:
            loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        clip_prompt: Optional[Union[str, List[str]]] = None,
        num_cutouts: Optional[int] = 4,
        use_cutouts: Optional[bool] = True,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

        if clip_guidance_scale > 0:
            if clip_prompt is not None:
                clip_text_input = self.tokenizer(
                    clip_prompt,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids.to(self.device)
            else:
                clip_text_input = text_input.input_ids.to(self.device)
            text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
            text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
            # duplicate text embeddings clip for each generation per prompt
            text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform classifier free guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # perform clip guidance
            if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred, latents = self.cond_fn(
                    latents,
                    t,
                    i,
                    text_embeddings_for_guidance,
                    noise_pred,
                    text_embeddings_clip,
                    clip_guidance_scale,
                    num_cutouts,
                    use_cutouts,
                )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
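A standalone restatement of the `spherical_dist_loss` used by `cond_fn` above, with a worked value. For unit vectors, `|x - y| = 2 sin(θ/2)` where θ is the angle between them, so `2 * arcsin(|x - y| / 2)**2 = θ²/2`; the sample vectors below are made up for the check.

```python
# Sketch: spherical distance between two CLIP-style embeddings, as defined above.
import torch
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

x = torch.tensor([[1.0, 0.0]])
y = torch.tensor([[0.0, 1.0]])
print(spherical_dist_loss(x, y))  # tensor([1.2337]) == (pi/2)**2 / 2 for orthogonal vectors
```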
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/README.md
DELETED
@@ -1,176 +0,0 @@
# Stable Diffusion

## Overview

Stable Diffusion was proposed in [Stable Diffusion Announcement](https://stability.ai/blog/stable-diffusion-announcement) by Patrick Esser and Robin Rombach and the Stability AI team.

The summary of the model is the following:

*Stable Diffusion is a text-to-image model that will empower billions of people to create stunning art within seconds. It is a breakthrough in speed and quality meaning that it can run on consumer GPUs. You can see some of the amazing output that has been created by this model without pre or post-processing on this page. The model itself builds upon the work of the team at CompVis and Runway in their widely used latent diffusion model combined with insights from the conditional diffusion models by our lead generative AI developer Katherine Crowson, Dall-E 2 by Open AI, Imagen by Google Brain and many others. We are delighted that AI media generation is a cooperative field and hope it can continue this way to bring the gift of creativity to all.*

## Tips:

- Stable Diffusion has the same architecture as [Latent Diffusion](https://arxiv.org/abs/2112.10752) but uses a frozen CLIP Text Encoder instead of training the text encoder jointly with the diffusion model.
- An in-detail explanation of the Stable Diffusion model can be found under [Stable Diffusion with 🧨 Diffusers](https://huggingface.co/blog/stable_diffusion).
- If you don't want to rely on the Hugging Face Hub and having to pass a authentication token, you can
download the weights with `git lfs install; git clone https://huggingface.co/runwayml/stable-diffusion-v1-5` and instead pass the local path to the cloned folder to `from_pretrained` as shown below.
- Stable Diffusion can work with a variety of different samplers as is shown below.

## Available Pipelines:

| Pipeline | Tasks | Colab
|---|---|:---:|
| [pipeline_stable_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py) | *Text-to-Image Generation* | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
| [pipeline_stable_diffusion_img2img](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) | *Image-to-Image Text-Guided Generation* | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
| [pipeline_stable_diffusion_inpaint](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | *Text-Guided Image Inpainting* | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)

## Examples:

### Using Stable Diffusion without being logged into the Hub.

If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`.

```python
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```

This however can make it difficult to build applications on top of `diffusers` as you will always have to pass the token around. A potential way to solve this issue is by downloading the weights to a local path `"./stable-diffusion-v1-5"`:

```
git lfs install
git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```

and simply passing the local path to `from_pretrained`:

```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
```

### Text-to-Image with default PLMS scheduler

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]

image.save("astronaut_rides_horse.png")
```

### Text-to-Image with DDIM scheduler

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import StableDiffusionPipeline, DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    scheduler=scheduler,
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]

image.save("astronaut_rides_horse.png")
```

### Text-to-Image with K-LMS scheduler

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    scheduler=lms,
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]

image.save("astronaut_rides_horse.png")
```

### CycleDiffusion using Stable Diffusion and DDIM scheduler

```python
import requests
import torch
from PIL import Image
from io import BytesIO

from diffusers import CycleDiffusionPipeline, DDIMScheduler


# load the scheduler. CycleDiffusion only supports stochastic schedulers.

# load the pipeline
# make sure you're logged in with `huggingface-cli login`
model_id_or_path = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")

# let's download an initial image
url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))
init_image.save("horse.png")

# let's specify a prompt
source_prompt = "An astronaut riding a horse"
prompt = "An astronaut riding an elephant"

# call the pipeline
image = pipe(
    prompt=prompt,
    source_prompt=source_prompt,
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.8,
    guidance_scale=2,
    source_guidance_scale=1,
).images[0]

image.save("horse_to_elephant.png")

# let's try another example
# See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion
url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))
init_image.save("black.png")

source_prompt = "A black colored car"
prompt = "A blue colored car"

# call the pipeline
torch.manual_seed(0)
image = pipe(
    prompt=prompt,
    source_prompt=source_prompt,
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
).images[0]

image.save("black_to_blue.png")
```
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py
DELETED
@@ -1,1182 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import gc
import tempfile
import time
import traceback
import unittest

import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    logging,
)
from diffusers.models.attention_processor import AttnProcessor, LoRAXFormersAttnProcessor
from diffusers.utils import load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import (
    CaptureLogger,
    enable_full_determinism,
    require_torch_2,
    require_torch_gpu,
    run_test_in_subprocess,
)

from ...models.test_lora_layers import create_unet_lora_layers
from ...models.test_models_unet_2d_condition import create_lora_layers
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Will be run via run_test_in_subprocess
def _test_stable_diffusion_compile(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        torch_device = inputs.pop("torch_device")
        seed = inputs.pop("seed")
        inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)

        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)

        sd_pipe.unet.to(memory_format=torch.channels_last)
        sd_pipe.unet = torch.compile(sd_pipe.unet, mode="reduce-overhead", fullgraph=True)

        sd_pipe.set_progress_bar_config(disable=None)

        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239])
        assert np.abs(image_slice - expected_slice).max() < 5e-3
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class StableDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs)
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_lora(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward 1
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        # set lora layers
        lora_attn_procs = create_lora_layers(sd_pipe.unet)
        sd_pipe.unet.set_attn_processor(lora_attn_procs)
        sd_pipe = sd_pipe.to(torch_device)

        # forward 2
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.0})
        image = output.images
        image_slice_1 = image[0, -3:, -3:, -1]

        # forward 3
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.5})
        image = output.images
        image_slice_2 = image[0, -3:, -3:, -1]

        assert np.abs(image_slice - image_slice_1).max() < 1e-2
        assert np.abs(image_slice - image_slice_2).max() > 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
|
230 |
-
|
231 |
-
# forward
|
232 |
-
output = sd_pipe(**inputs)
|
233 |
-
image_slice_1 = output.images[0, -3:, -3:, -1]
|
234 |
-
|
235 |
-
inputs = self.get_dummy_inputs(torch_device)
|
236 |
-
prompt = 3 * [inputs.pop("prompt")]
|
237 |
-
|
238 |
-
text_inputs = sd_pipe.tokenizer(
|
239 |
-
prompt,
|
240 |
-
padding="max_length",
|
241 |
-
max_length=sd_pipe.tokenizer.model_max_length,
|
242 |
-
truncation=True,
|
243 |
-
return_tensors="pt",
|
244 |
-
)
|
245 |
-
text_inputs = text_inputs["input_ids"].to(torch_device)
|
246 |
-
|
247 |
-
prompt_embeds = sd_pipe.text_encoder(text_inputs)[0]
|
248 |
-
|
249 |
-
inputs["prompt_embeds"] = prompt_embeds
|
250 |
-
|
251 |
-
# forward
|
252 |
-
output = sd_pipe(**inputs)
|
253 |
-
image_slice_2 = output.images[0, -3:, -3:, -1]
|
254 |
-
|
255 |
-
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
|
256 |
-
|
257 |
-
def test_stable_diffusion_negative_prompt_embeds(self):
|
258 |
-
components = self.get_dummy_components()
|
259 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
260 |
-
sd_pipe = sd_pipe.to(torch_device)
|
261 |
-
sd_pipe = sd_pipe.to(torch_device)
|
262 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
263 |
-
|
264 |
-
inputs = self.get_dummy_inputs(torch_device)
|
265 |
-
negative_prompt = 3 * ["this is a negative prompt"]
|
266 |
-
inputs["negative_prompt"] = negative_prompt
|
267 |
-
inputs["prompt"] = 3 * [inputs["prompt"]]
|
268 |
-
|
269 |
-
# forward
|
270 |
-
output = sd_pipe(**inputs)
|
271 |
-
image_slice_1 = output.images[0, -3:, -3:, -1]
|
272 |
-
|
273 |
-
inputs = self.get_dummy_inputs(torch_device)
|
274 |
-
prompt = 3 * [inputs.pop("prompt")]
|
275 |
-
|
276 |
-
embeds = []
|
277 |
-
for p in [prompt, negative_prompt]:
|
278 |
-
text_inputs = sd_pipe.tokenizer(
|
279 |
-
p,
|
280 |
-
padding="max_length",
|
281 |
-
max_length=sd_pipe.tokenizer.model_max_length,
|
282 |
-
truncation=True,
|
283 |
-
return_tensors="pt",
|
284 |
-
)
|
285 |
-
text_inputs = text_inputs["input_ids"].to(torch_device)
|
286 |
-
|
287 |
-
embeds.append(sd_pipe.text_encoder(text_inputs)[0])
|
288 |
-
|
289 |
-
inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
|
290 |
-
|
291 |
-
# forward
|
292 |
-
output = sd_pipe(**inputs)
|
293 |
-
image_slice_2 = output.images[0, -3:, -3:, -1]
|
294 |
-
|
295 |
-
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
|
296 |
-
|
297 |
-
def test_stable_diffusion_prompt_embeds_with_plain_negative_prompt_list(self):
|
298 |
-
components = self.get_dummy_components()
|
299 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
300 |
-
sd_pipe = sd_pipe.to(torch_device)
|
301 |
-
sd_pipe = sd_pipe.to(torch_device)
|
302 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
303 |
-
|
304 |
-
inputs = self.get_dummy_inputs(torch_device)
|
305 |
-
negative_prompt = 3 * ["this is a negative prompt"]
|
306 |
-
inputs["negative_prompt"] = negative_prompt
|
307 |
-
inputs["prompt"] = 3 * [inputs["prompt"]]
|
308 |
-
|
309 |
-
# forward
|
310 |
-
output = sd_pipe(**inputs)
|
311 |
-
image_slice_1 = output.images[0, -3:, -3:, -1]
|
312 |
-
|
313 |
-
inputs = self.get_dummy_inputs(torch_device)
|
314 |
-
inputs["negative_prompt"] = negative_prompt
|
315 |
-
prompt = 3 * [inputs.pop("prompt")]
|
316 |
-
|
317 |
-
text_inputs = sd_pipe.tokenizer(
|
318 |
-
prompt,
|
319 |
-
padding="max_length",
|
320 |
-
max_length=sd_pipe.tokenizer.model_max_length,
|
321 |
-
truncation=True,
|
322 |
-
return_tensors="pt",
|
323 |
-
)
|
324 |
-
text_inputs = text_inputs["input_ids"].to(torch_device)
|
325 |
-
|
326 |
-
prompt_embeds = sd_pipe.text_encoder(text_inputs)[0]
|
327 |
-
|
328 |
-
inputs["prompt_embeds"] = prompt_embeds
|
329 |
-
|
330 |
-
# forward
|
331 |
-
output = sd_pipe(**inputs)
|
332 |
-
image_slice_2 = output.images[0, -3:, -3:, -1]
|
333 |
-
|
334 |
-
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
|
335 |
-
|
336 |
-
def test_stable_diffusion_ddim_factor_8(self):
|
337 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
338 |
-
|
339 |
-
components = self.get_dummy_components()
|
340 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
341 |
-
sd_pipe = sd_pipe.to(device)
|
342 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
343 |
-
|
344 |
-
inputs = self.get_dummy_inputs(device)
|
345 |
-
output = sd_pipe(**inputs, height=136, width=136)
|
346 |
-
image = output.images
|
347 |
-
|
348 |
-
image_slice = image[0, -3:, -3:, -1]
|
349 |
-
|
350 |
-
assert image.shape == (1, 136, 136, 3)
|
351 |
-
expected_slice = np.array([0.5524, 0.5626, 0.6069, 0.4727, 0.386, 0.3995, 0.4613, 0.4328, 0.4269])
|
352 |
-
|
353 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
354 |
-
|
355 |
-
def test_stable_diffusion_pndm(self):
|
356 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
357 |
-
components = self.get_dummy_components()
|
358 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
359 |
-
sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True)
|
360 |
-
sd_pipe = sd_pipe.to(device)
|
361 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
362 |
-
|
363 |
-
inputs = self.get_dummy_inputs(device)
|
364 |
-
output = sd_pipe(**inputs)
|
365 |
-
image = output.images
|
366 |
-
image_slice = image[0, -3:, -3:, -1]
|
367 |
-
|
368 |
-
assert image.shape == (1, 64, 64, 3)
|
369 |
-
expected_slice = np.array([0.5122, 0.5712, 0.4825, 0.5053, 0.5646, 0.4769, 0.5179, 0.4894, 0.4994])
|
370 |
-
|
371 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
372 |
-
|
373 |
-
@unittest.skipIf(not torch.cuda.is_available(), reason="xformers requires cuda")
|
374 |
-
def test_stable_diffusion_attn_processors(self):
|
375 |
-
# disable_full_determinism()
|
376 |
-
device = "cuda" # ensure determinism for the device-dependent torch.Generator
|
377 |
-
components = self.get_dummy_components()
|
378 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
379 |
-
sd_pipe = sd_pipe.to(device)
|
380 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
381 |
-
|
382 |
-
inputs = self.get_dummy_inputs(device)
|
383 |
-
|
384 |
-
# run normal sd pipe
|
385 |
-
image = sd_pipe(**inputs).images
|
386 |
-
assert image.shape == (1, 64, 64, 3)
|
387 |
-
|
388 |
-
# run xformers attention
|
389 |
-
sd_pipe.enable_xformers_memory_efficient_attention()
|
390 |
-
image = sd_pipe(**inputs).images
|
391 |
-
assert image.shape == (1, 64, 64, 3)
|
392 |
-
|
393 |
-
# run attention slicing
|
394 |
-
sd_pipe.enable_attention_slicing()
|
395 |
-
image = sd_pipe(**inputs).images
|
396 |
-
assert image.shape == (1, 64, 64, 3)
|
397 |
-
|
398 |
-
# run vae attention slicing
|
399 |
-
sd_pipe.enable_vae_slicing()
|
400 |
-
image = sd_pipe(**inputs).images
|
401 |
-
assert image.shape == (1, 64, 64, 3)
|
402 |
-
|
403 |
-
# run lora attention
|
404 |
-
attn_processors, _ = create_unet_lora_layers(sd_pipe.unet)
|
405 |
-
attn_processors = {k: v.to("cuda") for k, v in attn_processors.items()}
|
406 |
-
sd_pipe.unet.set_attn_processor(attn_processors)
|
407 |
-
image = sd_pipe(**inputs).images
|
408 |
-
assert image.shape == (1, 64, 64, 3)
|
409 |
-
|
410 |
-
# run lora xformers attention
|
411 |
-
attn_processors, _ = create_unet_lora_layers(sd_pipe.unet)
|
412 |
-
attn_processors = {
|
413 |
-
k: LoRAXFormersAttnProcessor(hidden_size=v.hidden_size, cross_attention_dim=v.cross_attention_dim)
|
414 |
-
for k, v in attn_processors.items()
|
415 |
-
}
|
416 |
-
attn_processors = {k: v.to("cuda") for k, v in attn_processors.items()}
|
417 |
-
sd_pipe.unet.set_attn_processor(attn_processors)
|
418 |
-
image = sd_pipe(**inputs).images
|
419 |
-
assert image.shape == (1, 64, 64, 3)
|
420 |
-
|
421 |
-
# enable_full_determinism()
|
422 |
-
|
423 |
-
def test_stable_diffusion_no_safety_checker(self):
|
424 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
425 |
-
"hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
|
426 |
-
)
|
427 |
-
assert isinstance(pipe, StableDiffusionPipeline)
|
428 |
-
assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
|
429 |
-
assert pipe.safety_checker is None
|
430 |
-
|
431 |
-
image = pipe("example prompt", num_inference_steps=2).images[0]
|
432 |
-
assert image is not None
|
433 |
-
|
434 |
-
# check that there's no error when saving a pipeline with one of the models being None
|
435 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
436 |
-
pipe.save_pretrained(tmpdirname)
|
437 |
-
pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
|
438 |
-
|
439 |
-
# sanity check that the pipeline still works
|
440 |
-
assert pipe.safety_checker is None
|
441 |
-
image = pipe("example prompt", num_inference_steps=2).images[0]
|
442 |
-
assert image is not None
|
443 |
-
|
444 |
-
def test_stable_diffusion_k_lms(self):
|
445 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
446 |
-
|
447 |
-
components = self.get_dummy_components()
|
448 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
449 |
-
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
450 |
-
sd_pipe = sd_pipe.to(device)
|
451 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
452 |
-
|
453 |
-
inputs = self.get_dummy_inputs(device)
|
454 |
-
output = sd_pipe(**inputs)
|
455 |
-
image = output.images
|
456 |
-
image_slice = image[0, -3:, -3:, -1]
|
457 |
-
|
458 |
-
assert image.shape == (1, 64, 64, 3)
|
459 |
-
expected_slice = np.array([0.4873, 0.5443, 0.4845, 0.5004, 0.5549, 0.4850, 0.5191, 0.4941, 0.5065])
|
460 |
-
|
461 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
462 |
-
|
463 |
-
def test_stable_diffusion_k_euler_ancestral(self):
|
464 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
465 |
-
|
466 |
-
components = self.get_dummy_components()
|
467 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
468 |
-
sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
469 |
-
sd_pipe = sd_pipe.to(device)
|
470 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
471 |
-
|
472 |
-
inputs = self.get_dummy_inputs(device)
|
473 |
-
output = sd_pipe(**inputs)
|
474 |
-
image = output.images
|
475 |
-
image_slice = image[0, -3:, -3:, -1]
|
476 |
-
|
477 |
-
assert image.shape == (1, 64, 64, 3)
|
478 |
-
expected_slice = np.array([0.4872, 0.5444, 0.4846, 0.5003, 0.5549, 0.4850, 0.5189, 0.4941, 0.5067])
|
479 |
-
|
480 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
481 |
-
|
482 |
-
def test_stable_diffusion_k_euler(self):
|
483 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
484 |
-
|
485 |
-
components = self.get_dummy_components()
|
486 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
487 |
-
sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
488 |
-
sd_pipe = sd_pipe.to(device)
|
489 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
490 |
-
|
491 |
-
inputs = self.get_dummy_inputs(device)
|
492 |
-
output = sd_pipe(**inputs)
|
493 |
-
image = output.images
|
494 |
-
image_slice = image[0, -3:, -3:, -1]
|
495 |
-
|
496 |
-
assert image.shape == (1, 64, 64, 3)
|
497 |
-
expected_slice = np.array([0.4873, 0.5443, 0.4845, 0.5004, 0.5549, 0.4850, 0.5191, 0.4941, 0.5065])
|
498 |
-
|
499 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
500 |
-
|
501 |
-
def test_stable_diffusion_vae_slicing(self):
|
502 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
503 |
-
components = self.get_dummy_components()
|
504 |
-
components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
|
505 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
506 |
-
sd_pipe = sd_pipe.to(device)
|
507 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
508 |
-
|
509 |
-
image_count = 4
|
510 |
-
|
511 |
-
inputs = self.get_dummy_inputs(device)
|
512 |
-
inputs["prompt"] = [inputs["prompt"]] * image_count
|
513 |
-
output_1 = sd_pipe(**inputs)
|
514 |
-
|
515 |
-
# make sure sliced vae decode yields the same result
|
516 |
-
sd_pipe.enable_vae_slicing()
|
517 |
-
inputs = self.get_dummy_inputs(device)
|
518 |
-
inputs["prompt"] = [inputs["prompt"]] * image_count
|
519 |
-
output_2 = sd_pipe(**inputs)
|
520 |
-
|
521 |
-
# there is a small discrepancy at image borders vs. full batch decode
|
522 |
-
assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3
|
523 |
-
|
524 |
-
def test_stable_diffusion_vae_tiling(self):
|
525 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
526 |
-
components = self.get_dummy_components()
|
527 |
-
|
528 |
-
# make sure here that pndm scheduler skips prk
|
529 |
-
components["safety_checker"] = None
|
530 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
531 |
-
sd_pipe = sd_pipe.to(device)
|
532 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
533 |
-
|
534 |
-
prompt = "A painting of a squirrel eating a burger"
|
535 |
-
|
536 |
-
# Test that tiled decode at 512x512 yields the same result as the non-tiled decode
|
537 |
-
generator = torch.Generator(device=device).manual_seed(0)
|
538 |
-
output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
|
539 |
-
|
540 |
-
# make sure tiled vae decode yields the same result
|
541 |
-
sd_pipe.enable_vae_tiling()
|
542 |
-
generator = torch.Generator(device=device).manual_seed(0)
|
543 |
-
output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
|
544 |
-
|
545 |
-
assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 5e-1
|
546 |
-
|
547 |
-
# test that tiled decode works with various shapes
|
548 |
-
shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)]
|
549 |
-
for shape in shapes:
|
550 |
-
zeros = torch.zeros(shape).to(device)
|
551 |
-
sd_pipe.vae.decode(zeros)
|
552 |
-
|
553 |
-
def test_stable_diffusion_negative_prompt(self):
|
554 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
555 |
-
components = self.get_dummy_components()
|
556 |
-
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
|
557 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
558 |
-
sd_pipe = sd_pipe.to(device)
|
559 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
560 |
-
|
561 |
-
inputs = self.get_dummy_inputs(device)
|
562 |
-
negative_prompt = "french fries"
|
563 |
-
output = sd_pipe(**inputs, negative_prompt=negative_prompt)
|
564 |
-
|
565 |
-
image = output.images
|
566 |
-
image_slice = image[0, -3:, -3:, -1]
|
567 |
-
|
568 |
-
assert image.shape == (1, 64, 64, 3)
|
569 |
-
expected_slice = np.array([0.5114, 0.5706, 0.4772, 0.5028, 0.5637, 0.4732, 0.5169, 0.4881, 0.4977])
|
570 |
-
|
571 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
572 |
-
|
573 |
-
def test_stable_diffusion_long_prompt(self):
|
574 |
-
components = self.get_dummy_components()
|
575 |
-
components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
|
576 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
577 |
-
sd_pipe = sd_pipe.to(torch_device)
|
578 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
579 |
-
|
580 |
-
do_classifier_free_guidance = True
|
581 |
-
negative_prompt = None
|
582 |
-
num_images_per_prompt = 1
|
583 |
-
logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion")
|
584 |
-
|
585 |
-
prompt = 25 * "@"
|
586 |
-
with CaptureLogger(logger) as cap_logger_3:
|
587 |
-
text_embeddings_3 = sd_pipe._encode_prompt(
|
588 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
589 |
-
)
|
590 |
-
|
591 |
-
prompt = 100 * "@"
|
592 |
-
with CaptureLogger(logger) as cap_logger:
|
593 |
-
text_embeddings = sd_pipe._encode_prompt(
|
594 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
595 |
-
)
|
596 |
-
|
597 |
-
negative_prompt = "Hello"
|
598 |
-
with CaptureLogger(logger) as cap_logger_2:
|
599 |
-
text_embeddings_2 = sd_pipe._encode_prompt(
|
600 |
-
prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
|
601 |
-
)
|
602 |
-
|
603 |
-
assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape
|
604 |
-
assert text_embeddings.shape[1] == 77
|
605 |
-
|
606 |
-
assert cap_logger.out == cap_logger_2.out
|
607 |
-
# 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25
|
608 |
-
assert cap_logger.out.count("@") == 25
|
609 |
-
assert cap_logger_3.out == ""
|
610 |
-
|
611 |
-
def test_stable_diffusion_height_width_opt(self):
|
612 |
-
components = self.get_dummy_components()
|
613 |
-
components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
|
614 |
-
sd_pipe = StableDiffusionPipeline(**components)
|
615 |
-
sd_pipe = sd_pipe.to(torch_device)
|
616 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
617 |
-
|
618 |
-
prompt = "hey"
|
619 |
-
|
620 |
-
output = sd_pipe(prompt, num_inference_steps=1, output_type="np")
|
621 |
-
image_shape = output.images[0].shape[:2]
|
622 |
-
assert image_shape == (64, 64)
|
623 |
-
|
624 |
-
output = sd_pipe(prompt, num_inference_steps=1, height=96, width=96, output_type="np")
|
625 |
-
image_shape = output.images[0].shape[:2]
|
626 |
-
assert image_shape == (96, 96)
|
627 |
-
|
628 |
-
config = dict(sd_pipe.unet.config)
|
629 |
-
config["sample_size"] = 96
|
630 |
-
sd_pipe.unet = UNet2DConditionModel.from_config(config).to(torch_device)
|
631 |
-
output = sd_pipe(prompt, num_inference_steps=1, output_type="np")
|
632 |
-
image_shape = output.images[0].shape[:2]
|
633 |
-
assert image_shape == (192, 192)
|
634 |
-
|
635 |
-
def test_attention_slicing_forward_pass(self):
|
636 |
-
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
|
637 |
-
|
638 |
-
def test_inference_batch_single_identical(self):
|
639 |
-
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
|
640 |
-
|
641 |
-
|
642 |
-
@slow
|
643 |
-
@require_torch_gpu
|
644 |
-
class StableDiffusionPipelineSlowTests(unittest.TestCase):
|
645 |
-
def setUp(self):
|
646 |
-
gc.collect()
|
647 |
-
torch.cuda.empty_cache()
|
648 |
-
|
649 |
-
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
|
650 |
-
generator = torch.Generator(device=generator_device).manual_seed(seed)
|
651 |
-
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
|
652 |
-
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
|
653 |
-
inputs = {
|
654 |
-
"prompt": "a photograph of an astronaut riding a horse",
|
655 |
-
"latents": latents,
|
656 |
-
"generator": generator,
|
657 |
-
"num_inference_steps": 3,
|
658 |
-
"guidance_scale": 7.5,
|
659 |
-
"output_type": "numpy",
|
660 |
-
}
|
661 |
-
return inputs
|
662 |
-
|
663 |
-
def test_stable_diffusion_1_1_pndm(self):
|
664 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
|
665 |
-
sd_pipe = sd_pipe.to(torch_device)
|
666 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
667 |
-
|
668 |
-
inputs = self.get_inputs(torch_device)
|
669 |
-
image = sd_pipe(**inputs).images
|
670 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
671 |
-
|
672 |
-
assert image.shape == (1, 512, 512, 3)
|
673 |
-
expected_slice = np.array([0.43625, 0.43554, 0.36670, 0.40660, 0.39703, 0.38658, 0.43936, 0.43557, 0.40592])
|
674 |
-
assert np.abs(image_slice - expected_slice).max() < 3e-3
|
675 |
-
|
676 |
-
def test_stable_diffusion_1_4_pndm(self):
|
677 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
|
678 |
-
sd_pipe = sd_pipe.to(torch_device)
|
679 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
680 |
-
|
681 |
-
inputs = self.get_inputs(torch_device)
|
682 |
-
image = sd_pipe(**inputs).images
|
683 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
684 |
-
|
685 |
-
assert image.shape == (1, 512, 512, 3)
|
686 |
-
expected_slice = np.array([0.57400, 0.47841, 0.31625, 0.63583, 0.58306, 0.55056, 0.50825, 0.56306, 0.55748])
|
687 |
-
assert np.abs(image_slice - expected_slice).max() < 3e-3
|
688 |
-
|
689 |
-
def test_stable_diffusion_ddim(self):
|
690 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
|
691 |
-
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
|
692 |
-
sd_pipe = sd_pipe.to(torch_device)
|
693 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
694 |
-
|
695 |
-
inputs = self.get_inputs(torch_device)
|
696 |
-
image = sd_pipe(**inputs).images
|
697 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
698 |
-
|
699 |
-
assert image.shape == (1, 512, 512, 3)
|
700 |
-
expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239])
|
701 |
-
assert np.abs(image_slice - expected_slice).max() < 1e-4
|
702 |
-
|
703 |
-
def test_stable_diffusion_lms(self):
|
704 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
|
705 |
-
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
706 |
-
sd_pipe = sd_pipe.to(torch_device)
|
707 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
708 |
-
|
709 |
-
inputs = self.get_inputs(torch_device)
|
710 |
-
image = sd_pipe(**inputs).images
|
711 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
712 |
-
|
713 |
-
assert image.shape == (1, 512, 512, 3)
|
714 |
-
expected_slice = np.array([0.10542, 0.09620, 0.07332, 0.09015, 0.09382, 0.07597, 0.08496, 0.07806, 0.06455])
|
715 |
-
assert np.abs(image_slice - expected_slice).max() < 3e-3
|
716 |
-
|
717 |
-
def test_stable_diffusion_dpm(self):
|
718 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
|
719 |
-
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
|
720 |
-
sd_pipe = sd_pipe.to(torch_device)
|
721 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
722 |
-
|
723 |
-
inputs = self.get_inputs(torch_device)
|
724 |
-
image = sd_pipe(**inputs).images
|
725 |
-
image_slice = image[0, -3:, -3:, -1].flatten()
|
726 |
-
|
727 |
-
assert image.shape == (1, 512, 512, 3)
|
728 |
-
expected_slice = np.array([0.03503, 0.03494, 0.01087, 0.03128, 0.02552, 0.00803, 0.00742, 0.00372, 0.00000])
|
729 |
-
assert np.abs(image_slice - expected_slice).max() < 3e-3
|
730 |
-
|
731 |
-
def test_stable_diffusion_attention_slicing(self):
|
732 |
-
torch.cuda.reset_peak_memory_stats()
|
733 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
|
734 |
-
pipe = pipe.to(torch_device)
|
735 |
-
pipe.set_progress_bar_config(disable=None)
|
736 |
-
|
737 |
-
# enable attention slicing
|
738 |
-
pipe.enable_attention_slicing()
|
739 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
740 |
-
image_sliced = pipe(**inputs).images
|
741 |
-
|
742 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
743 |
-
torch.cuda.reset_peak_memory_stats()
|
744 |
-
# make sure that less than 3.75 GB is allocated
|
745 |
-
assert mem_bytes < 3.75 * 10**9
|
746 |
-
|
747 |
-
# disable slicing
|
748 |
-
pipe.disable_attention_slicing()
|
749 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
750 |
-
image = pipe(**inputs).images
|
751 |
-
|
752 |
-
# make sure that more than 3.75 GB is allocated
|
753 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
754 |
-
assert mem_bytes > 3.75 * 10**9
|
755 |
-
assert np.abs(image_sliced - image).max() < 1e-3
|
756 |
-
|
757 |
-
def test_stable_diffusion_vae_slicing(self):
|
758 |
-
torch.cuda.reset_peak_memory_stats()
|
759 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
|
760 |
-
pipe = pipe.to(torch_device)
|
761 |
-
pipe.set_progress_bar_config(disable=None)
|
762 |
-
pipe.enable_attention_slicing()
|
763 |
-
|
764 |
-
# enable vae slicing
|
765 |
-
pipe.enable_vae_slicing()
|
766 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
767 |
-
inputs["prompt"] = [inputs["prompt"]] * 4
|
768 |
-
inputs["latents"] = torch.cat([inputs["latents"]] * 4)
|
769 |
-
image_sliced = pipe(**inputs).images
|
770 |
-
|
771 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
772 |
-
torch.cuda.reset_peak_memory_stats()
|
773 |
-
# make sure that less than 4 GB is allocated
|
774 |
-
assert mem_bytes < 4e9
|
775 |
-
|
776 |
-
# disable vae slicing
|
777 |
-
pipe.disable_vae_slicing()
|
778 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
779 |
-
inputs["prompt"] = [inputs["prompt"]] * 4
|
780 |
-
inputs["latents"] = torch.cat([inputs["latents"]] * 4)
|
781 |
-
image = pipe(**inputs).images
|
782 |
-
|
783 |
-
# make sure that more than 4 GB is allocated
|
784 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
785 |
-
assert mem_bytes > 4e9
|
786 |
-
# There is a small discrepancy at the image borders vs. a fully batched version.
|
787 |
-
assert np.abs(image_sliced - image).max() < 1e-2
|
788 |
-
|
789 |
-
def test_stable_diffusion_vae_tiling(self):
|
790 |
-
torch.cuda.reset_peak_memory_stats()
|
791 |
-
model_id = "CompVis/stable-diffusion-v1-4"
|
792 |
-
pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
|
793 |
-
pipe.set_progress_bar_config(disable=None)
|
794 |
-
pipe.enable_attention_slicing()
|
795 |
-
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
|
796 |
-
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
|
797 |
-
|
798 |
-
prompt = "a photograph of an astronaut riding a horse"
|
799 |
-
|
800 |
-
# enable vae tiling
|
801 |
-
pipe.enable_vae_tiling()
|
802 |
-
pipe.enable_model_cpu_offload()
|
803 |
-
generator = torch.Generator(device="cpu").manual_seed(0)
|
804 |
-
output_chunked = pipe(
|
805 |
-
[prompt],
|
806 |
-
width=1024,
|
807 |
-
height=1024,
|
808 |
-
generator=generator,
|
809 |
-
guidance_scale=7.5,
|
810 |
-
num_inference_steps=2,
|
811 |
-
output_type="numpy",
|
812 |
-
)
|
813 |
-
image_chunked = output_chunked.images
|
814 |
-
|
815 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
816 |
-
|
817 |
-
# disable vae tiling
|
818 |
-
pipe.disable_vae_tiling()
|
819 |
-
generator = torch.Generator(device="cpu").manual_seed(0)
|
820 |
-
output = pipe(
|
821 |
-
[prompt],
|
822 |
-
width=1024,
|
823 |
-
height=1024,
|
824 |
-
generator=generator,
|
825 |
-
guidance_scale=7.5,
|
826 |
-
num_inference_steps=2,
|
827 |
-
output_type="numpy",
|
828 |
-
)
|
829 |
-
image = output.images
|
830 |
-
|
831 |
-
assert mem_bytes < 1e10
|
832 |
-
assert np.abs(image_chunked.flatten() - image.flatten()).max() < 1e-2
|
833 |
-
|
834 |
-
def test_stable_diffusion_fp16_vs_autocast(self):
|
835 |
-
# this test makes sure that the original model with autocast
|
836 |
-
# and the new model with fp16 yield the same result
|
837 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
|
838 |
-
pipe = pipe.to(torch_device)
|
839 |
-
pipe.set_progress_bar_config(disable=None)
|
840 |
-
|
841 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
842 |
-
image_fp16 = pipe(**inputs).images
|
843 |
-
|
844 |
-
with torch.autocast(torch_device):
|
845 |
-
inputs = self.get_inputs(torch_device)
|
846 |
-
image_autocast = pipe(**inputs).images
|
847 |
-
|
848 |
-
# Make sure results are close enough
|
849 |
-
diff = np.abs(image_fp16.flatten() - image_autocast.flatten())
|
850 |
-
# They ARE different since ops are not run always at the same precision
|
851 |
-
# however, they should be extremely close.
|
852 |
-
assert diff.mean() < 2e-2
|
853 |
-
|
854 |
-
def test_stable_diffusion_intermediate_state(self):
|
855 |
-
number_of_steps = 0
|
856 |
-
|
857 |
-
def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
|
858 |
-
callback_fn.has_been_called = True
|
859 |
-
nonlocal number_of_steps
|
860 |
-
number_of_steps += 1
|
861 |
-
if step == 1:
|
862 |
-
latents = latents.detach().cpu().numpy()
|
863 |
-
assert latents.shape == (1, 4, 64, 64)
|
864 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
865 |
-
expected_slice = np.array(
|
866 |
-
[-0.5693, -0.3018, -0.9746, 0.0518, -0.8770, 0.7559, -1.7402, 0.1022, 1.1582]
|
867 |
-
)
|
868 |
-
|
869 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
|
870 |
-
elif step == 2:
|
871 |
-
latents = latents.detach().cpu().numpy()
|
872 |
-
assert latents.shape == (1, 4, 64, 64)
|
873 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
874 |
-
expected_slice = np.array(
|
875 |
-
[-0.1958, -0.2993, -1.0166, -0.5005, -0.4810, 0.6162, -0.9492, 0.6621, 1.4492]
|
876 |
-
)
|
877 |
-
|
878 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
|
879 |
-
|
880 |
-
callback_fn.has_been_called = False
|
881 |
-
|
882 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
|
883 |
-
pipe = pipe.to(torch_device)
|
884 |
-
pipe.set_progress_bar_config(disable=None)
|
885 |
-
pipe.enable_attention_slicing()
|
886 |
-
|
887 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
888 |
-
pipe(**inputs, callback=callback_fn, callback_steps=1)
|
889 |
-
assert callback_fn.has_been_called
|
890 |
-
assert number_of_steps == inputs["num_inference_steps"]
|
891 |
-
|
892 |
-
def test_stable_diffusion_low_cpu_mem_usage(self):
|
893 |
-
pipeline_id = "CompVis/stable-diffusion-v1-4"
|
894 |
-
|
895 |
-
start_time = time.time()
|
896 |
-
pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16)
|
897 |
-
pipeline_low_cpu_mem_usage.to(torch_device)
|
898 |
-
low_cpu_mem_usage_time = time.time() - start_time
|
899 |
-
|
900 |
-
start_time = time.time()
|
901 |
-
_ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False)
|
902 |
-
normal_load_time = time.time() - start_time
|
903 |
-
|
904 |
-
assert 2 * low_cpu_mem_usage_time < normal_load_time
|
905 |
-
|
906 |
-
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
|
907 |
-
torch.cuda.empty_cache()
|
908 |
-
torch.cuda.reset_max_memory_allocated()
|
909 |
-
torch.cuda.reset_peak_memory_stats()
|
910 |
-
|
911 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
|
912 |
-
pipe = pipe.to(torch_device)
|
913 |
-
pipe.set_progress_bar_config(disable=None)
|
914 |
-
pipe.enable_attention_slicing(1)
|
915 |
-
pipe.enable_sequential_cpu_offload()
|
916 |
-
|
917 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
918 |
-
_ = pipe(**inputs)
|
919 |
-
|
920 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
921 |
-
# make sure that less than 2.8 GB is allocated
|
922 |
-
assert mem_bytes < 2.8 * 10**9
|
923 |
-
|
924 |
-
def test_stable_diffusion_pipeline_with_model_offloading(self):
|
925 |
-
torch.cuda.empty_cache()
|
926 |
-
torch.cuda.reset_max_memory_allocated()
|
927 |
-
torch.cuda.reset_peak_memory_stats()
|
928 |
-
|
929 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
930 |
-
|
931 |
-
# Normal inference
|
932 |
-
|
933 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
934 |
-
"CompVis/stable-diffusion-v1-4",
|
935 |
-
torch_dtype=torch.float16,
|
936 |
-
)
|
937 |
-
pipe.unet.set_default_attn_processor()
|
938 |
-
pipe.to(torch_device)
|
939 |
-
pipe.set_progress_bar_config(disable=None)
|
940 |
-
outputs = pipe(**inputs)
|
941 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
942 |
-
|
943 |
-
# With model offloading
|
944 |
-
|
945 |
-
# Reload but don't move to cuda
|
946 |
-
pipe = StableDiffusionPipeline.from_pretrained(
|
947 |
-
"CompVis/stable-diffusion-v1-4",
|
948 |
-
torch_dtype=torch.float16,
|
949 |
-
)
|
950 |
-
pipe.unet.set_default_attn_processor()
|
951 |
-
|
952 |
-
torch.cuda.empty_cache()
|
953 |
-
torch.cuda.reset_max_memory_allocated()
|
954 |
-
torch.cuda.reset_peak_memory_stats()
|
955 |
-
|
956 |
-
pipe.enable_model_cpu_offload()
|
957 |
-
pipe.set_progress_bar_config(disable=None)
|
958 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
959 |
-
|
960 |
-
outputs_offloaded = pipe(**inputs)
|
961 |
-
mem_bytes_offloaded = torch.cuda.max_memory_allocated()
|
962 |
-
|
963 |
-
assert np.abs(outputs.images - outputs_offloaded.images).max() < 1e-3
|
964 |
-
assert mem_bytes_offloaded < mem_bytes
|
965 |
-
assert mem_bytes_offloaded < 3.5 * 10**9
|
966 |
-
for module in pipe.text_encoder, pipe.unet, pipe.vae, pipe.safety_checker:
|
967 |
-
assert module.device == torch.device("cpu")
|
968 |
-
|
969 |
-
# With attention slicing
|
970 |
-
torch.cuda.empty_cache()
|
971 |
-
torch.cuda.reset_max_memory_allocated()
|
972 |
-
torch.cuda.reset_peak_memory_stats()
|
973 |
-
|
974 |
-
pipe.enable_attention_slicing()
|
975 |
-
_ = pipe(**inputs)
|
976 |
-
mem_bytes_slicing = torch.cuda.max_memory_allocated()
|
977 |
-
|
978 |
-
assert mem_bytes_slicing < mem_bytes_offloaded
|
979 |
-
assert mem_bytes_slicing < 3 * 10**9
|
980 |
-
|
981 |
-
def test_stable_diffusion_textual_inversion(self):
|
982 |
-
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
|
983 |
-
pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons")
|
984 |
-
|
985 |
-
a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt")
|
986 |
-
a111_file_neg = hf_hub_download(
|
987 |
-
"hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt"
|
988 |
-
)
|
989 |
-
pipe.load_textual_inversion(a111_file)
|
990 |
-
pipe.load_textual_inversion(a111_file_neg)
|
991 |
-
pipe.to("cuda")
|
992 |
-
|
993 |
-
generator = torch.Generator(device="cpu").manual_seed(1)
|
994 |
-
|
995 |
-
prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>"
|
996 |
-
neg_prompt = "Style-Winter-neg"
|
997 |
-
|
998 |
-
image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0]
|
999 |
-
expected_image = load_numpy(
|
1000 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy"
|
1001 |
-
)
|
1002 |
-
|
1003 |
-
max_diff = np.abs(expected_image - image).max()
|
1004 |
-
assert max_diff < 8e-1
|
1005 |
-
|
1006 |
-
@require_torch_2
|
1007 |
-
def test_stable_diffusion_compile(self):
|
1008 |
-
seed = 0
|
1009 |
-
inputs = self.get_inputs(torch_device, seed=seed)
|
1010 |
-
# Can't pickle a Generator object
|
1011 |
-
del inputs["generator"]
|
1012 |
-
inputs["torch_device"] = torch_device
|
1013 |
-
inputs["seed"] = seed
|
1014 |
-
run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=inputs)
|
1015 |
-
|
1016 |
-
|
1017 |
-
@slow
|
1018 |
-
@require_torch_gpu
|
1019 |
-
class StableDiffusionPipelineCkptTests(unittest.TestCase):
|
1020 |
-
def tearDown(self):
|
1021 |
-
super().tearDown()
|
1022 |
-
gc.collect()
|
1023 |
-
torch.cuda.empty_cache()
|
1024 |
-
|
1025 |
-
def test_download_from_hub(self):
|
1026 |
-
ckpt_paths = [
|
1027 |
-
"https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
|
1028 |
-
"https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix_base.ckpt",
|
1029 |
-
]
|
1030 |
-
|
1031 |
-
for ckpt_path in ckpt_paths:
|
1032 |
-
pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16)
|
1033 |
-
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
1034 |
-
pipe.to("cuda")
|
1035 |
-
|
1036 |
-
image_out = pipe("test", num_inference_steps=1, output_type="np").images[0]
|
1037 |
-
|
1038 |
-
assert image_out.shape == (512, 512, 3)
|
1039 |
-
|
1040 |
-
def test_download_local(self):
|
1041 |
-
filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.ckpt")
|
1042 |
-
|
1043 |
-
pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16)
|
1044 |
-
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
1045 |
-
pipe.to("cuda")
|
1046 |
-
|
1047 |
-
image_out = pipe("test", num_inference_steps=1, output_type="np").images[0]
|
1048 |
-
|
1049 |
-
assert image_out.shape == (512, 512, 3)
|
1050 |
-
|
1051 |
-
def test_download_ckpt_diff_format_is_same(self):
|
1052 |
-
ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt"
|
1053 |
-
|
1054 |
-
pipe = StableDiffusionPipeline.from_single_file(ckpt_path)
|
1055 |
-
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
1056 |
-
pipe.unet.set_attn_processor(AttnProcessor())
|
1057 |
-
pipe.to("cuda")
|
1058 |
-
|
1059 |
-
generator = torch.Generator(device="cpu").manual_seed(0)
|
1060 |
-
image_ckpt = pipe("a turtle", num_inference_steps=5, generator=generator, output_type="np").images[0]
|
1061 |
-
|
1062 |
-
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
|
1063 |
-
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
1064 |
-
pipe.unet.set_attn_processor(AttnProcessor())
|
1065 |
-
pipe.to("cuda")
|
1066 |
-
|
1067 |
-
generator = torch.Generator(device="cpu").manual_seed(0)
|
1068 |
-
image = pipe("a turtle", num_inference_steps=5, generator=generator, output_type="np").images[0]
|
1069 |
-
|
1070 |
-
assert np.max(np.abs(image - image_ckpt)) < 1e-4
|
1071 |
-
|
1072 |
-
|
1073 |
-
@nightly
|
1074 |
-
@require_torch_gpu
|
1075 |
-
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
|
1076 |
-
def tearDown(self):
|
1077 |
-
super().tearDown()
|
1078 |
-
gc.collect()
|
1079 |
-
torch.cuda.empty_cache()
|
1080 |
-
|
1081 |
-
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
|
1082 |
-
generator = torch.Generator(device=generator_device).manual_seed(seed)
|
1083 |
-
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
|
1084 |
-
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
|
1085 |
-
inputs = {
|
1086 |
-
"prompt": "a photograph of an astronaut riding a horse",
|
1087 |
-
"latents": latents,
|
1088 |
-
"generator": generator,
|
1089 |
-
"num_inference_steps": 50,
|
1090 |
-
"guidance_scale": 7.5,
|
1091 |
-
"output_type": "numpy",
|
1092 |
-
}
|
1093 |
-
return inputs
|
1094 |
-
|
1095 |
-
def test_stable_diffusion_1_4_pndm(self):
|
1096 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
|
1097 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1098 |
-
|
1099 |
-
inputs = self.get_inputs(torch_device)
|
1100 |
-
image = sd_pipe(**inputs).images[0]
|
1101 |
-
|
1102 |
-
expected_image = load_numpy(
|
1103 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1104 |
-
"/stable_diffusion_text2img/stable_diffusion_1_4_pndm.npy"
|
1105 |
-
)
|
1106 |
-
max_diff = np.abs(expected_image - image).max()
|
1107 |
-
assert max_diff < 1e-3
|
1108 |
-
|
1109 |
-
def test_stable_diffusion_1_5_pndm(self):
|
1110 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(torch_device)
|
1111 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1112 |
-
|
1113 |
-
inputs = self.get_inputs(torch_device)
|
1114 |
-
image = sd_pipe(**inputs).images[0]
|
1115 |
-
|
1116 |
-
expected_image = load_numpy(
|
1117 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1118 |
-
"/stable_diffusion_text2img/stable_diffusion_1_5_pndm.npy"
|
1119 |
-
)
|
1120 |
-
max_diff = np.abs(expected_image - image).max()
|
1121 |
-
assert max_diff < 1e-3
|
1122 |
-
|
1123 |
-
def test_stable_diffusion_ddim(self):
|
1124 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
|
1125 |
-
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
|
1126 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1127 |
-
|
1128 |
-
inputs = self.get_inputs(torch_device)
|
1129 |
-
image = sd_pipe(**inputs).images[0]
|
1130 |
-
|
1131 |
-
expected_image = load_numpy(
|
1132 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1133 |
-
"/stable_diffusion_text2img/stable_diffusion_1_4_ddim.npy"
|
1134 |
-
)
|
1135 |
-
max_diff = np.abs(expected_image - image).max()
|
1136 |
-
assert max_diff < 3e-3
|
1137 |
-
|
1138 |
-
def test_stable_diffusion_lms(self):
|
1139 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
|
1140 |
-
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
1141 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1142 |
-
|
1143 |
-
inputs = self.get_inputs(torch_device)
|
1144 |
-
image = sd_pipe(**inputs).images[0]
|
1145 |
-
|
1146 |
-
expected_image = load_numpy(
|
1147 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1148 |
-
"/stable_diffusion_text2img/stable_diffusion_1_4_lms.npy"
|
1149 |
-
)
|
1150 |
-
max_diff = np.abs(expected_image - image).max()
|
1151 |
-
assert max_diff < 1e-3
|
1152 |
-
|
1153 |
-
def test_stable_diffusion_euler(self):
|
1154 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
|
1155 |
-
sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config)
|
1156 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1157 |
-
|
1158 |
-
inputs = self.get_inputs(torch_device)
|
1159 |
-
image = sd_pipe(**inputs).images[0]
|
1160 |
-
|
1161 |
-
expected_image = load_numpy(
|
1162 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1163 |
-
"/stable_diffusion_text2img/stable_diffusion_1_4_euler.npy"
|
1164 |
-
)
|
1165 |
-
max_diff = np.abs(expected_image - image).max()
|
1166 |
-
assert max_diff < 1e-3
|
1167 |
-
|
1168 |
-
def test_stable_diffusion_dpm(self):
|
1169 |
-
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
|
1170 |
-
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
|
1171 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
1172 |
-
|
1173 |
-
inputs = self.get_inputs(torch_device)
|
1174 |
-
inputs["num_inference_steps"] = 25
|
1175 |
-
image = sd_pipe(**inputs).images[0]
|
1176 |
-
|
1177 |
-
expected_image = load_numpy(
|
1178 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
1179 |
-
"/stable_diffusion_text2img/stable_diffusion_1_4_dpm_multi.npy"
|
1180 |
-
)
|
1181 |
-
max_diff = np.abs(expected_image - image).max()
|
1182 |
-
assert max_diff < 1e-3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim_parallel.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import torch
|
16 |
-
|
17 |
-
from diffusers import DDIMParallelScheduler
|
18 |
-
|
19 |
-
from .test_schedulers import SchedulerCommonTest
|
20 |
-
|
21 |
-
|
22 |
-
class DDIMParallelSchedulerTest(SchedulerCommonTest):
|
23 |
-
scheduler_classes = (DDIMParallelScheduler,)
|
24 |
-
forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
|
25 |
-
|
26 |
-
def get_scheduler_config(self, **kwargs):
|
27 |
-
config = {
|
28 |
-
"num_train_timesteps": 1000,
|
29 |
-
"beta_start": 0.0001,
|
30 |
-
"beta_end": 0.02,
|
31 |
-
"beta_schedule": "linear",
|
32 |
-
"clip_sample": True,
|
33 |
-
}
|
34 |
-
|
35 |
-
config.update(**kwargs)
|
36 |
-
return config
|
37 |
-
|
38 |
-
def full_loop(self, **config):
|
39 |
-
scheduler_class = self.scheduler_classes[0]
|
40 |
-
scheduler_config = self.get_scheduler_config(**config)
|
41 |
-
scheduler = scheduler_class(**scheduler_config)
|
42 |
-
|
43 |
-
num_inference_steps, eta = 10, 0.0
|
44 |
-
|
45 |
-
model = self.dummy_model()
|
46 |
-
sample = self.dummy_sample_deter
|
47 |
-
|
48 |
-
scheduler.set_timesteps(num_inference_steps)
|
49 |
-
|
50 |
-
for t in scheduler.timesteps:
|
51 |
-
residual = model(sample, t)
|
52 |
-
sample = scheduler.step(residual, t, sample, eta).prev_sample
|
53 |
-
|
54 |
-
return sample
|
55 |
-
|
56 |
-
def test_timesteps(self):
|
57 |
-
for timesteps in [100, 500, 1000]:
|
58 |
-
self.check_over_configs(num_train_timesteps=timesteps)
|
59 |
-
|
60 |
-
def test_steps_offset(self):
|
61 |
-
for steps_offset in [0, 1]:
|
62 |
-
self.check_over_configs(steps_offset=steps_offset)
|
63 |
-
|
64 |
-
scheduler_class = self.scheduler_classes[0]
|
65 |
-
scheduler_config = self.get_scheduler_config(steps_offset=1)
|
66 |
-
scheduler = scheduler_class(**scheduler_config)
|
67 |
-
scheduler.set_timesteps(5)
|
68 |
-
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
|
69 |
-
|
70 |
-
def test_betas(self):
|
71 |
-
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
|
72 |
-
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
|
73 |
-
|
74 |
-
def test_schedules(self):
|
75 |
-
for schedule in ["linear", "squaredcos_cap_v2"]:
|
76 |
-
self.check_over_configs(beta_schedule=schedule)
|
77 |
-
|
78 |
-
def test_prediction_type(self):
|
79 |
-
for prediction_type in ["epsilon", "v_prediction"]:
|
80 |
-
self.check_over_configs(prediction_type=prediction_type)
|
81 |
-
|
82 |
-
def test_clip_sample(self):
|
83 |
-
for clip_sample in [True, False]:
|
84 |
-
self.check_over_configs(clip_sample=clip_sample)
|
85 |
-
|
86 |
-
def test_timestep_spacing(self):
|
87 |
-
for timestep_spacing in ["trailing", "leading"]:
|
88 |
-
self.check_over_configs(timestep_spacing=timestep_spacing)
|
89 |
-
|
90 |
-
def test_rescale_betas_zero_snr(self):
|
91 |
-
for rescale_betas_zero_snr in [True, False]:
|
92 |
-
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
|
93 |
-
|
94 |
-
def test_thresholding(self):
|
95 |
-
self.check_over_configs(thresholding=False)
|
96 |
-
for threshold in [0.5, 1.0, 2.0]:
|
97 |
-
for prediction_type in ["epsilon", "v_prediction"]:
|
98 |
-
self.check_over_configs(
|
99 |
-
thresholding=True,
|
100 |
-
prediction_type=prediction_type,
|
101 |
-
sample_max_value=threshold,
|
102 |
-
)
|
103 |
-
|
104 |
-
def test_time_indices(self):
|
105 |
-
for t in [1, 10, 49]:
|
106 |
-
self.check_over_forward(time_step=t)
|
107 |
-
|
108 |
-
def test_inference_steps(self):
|
109 |
-
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
|
110 |
-
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
|
111 |
-
|
112 |
-
def test_eta(self):
|
113 |
-
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
|
114 |
-
self.check_over_forward(time_step=t, eta=eta)
|
115 |
-
|
116 |
-
def test_variance(self):
|
117 |
-
scheduler_class = self.scheduler_classes[0]
|
118 |
-
scheduler_config = self.get_scheduler_config()
|
119 |
-
scheduler = scheduler_class(**scheduler_config)
|
120 |
-
|
121 |
-
assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
|
122 |
-
assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
|
123 |
-
assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
|
124 |
-
assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
|
125 |
-
assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
|
126 |
-
assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
|
127 |
-
|
128 |
-
def test_batch_step_no_noise(self):
|
129 |
-
scheduler_class = self.scheduler_classes[0]
|
130 |
-
scheduler_config = self.get_scheduler_config()
|
131 |
-
scheduler = scheduler_class(**scheduler_config)
|
132 |
-
|
133 |
-
num_inference_steps, eta = 10, 0.0
|
134 |
-
scheduler.set_timesteps(num_inference_steps)
|
135 |
-
|
136 |
-
model = self.dummy_model()
|
137 |
-
sample1 = self.dummy_sample_deter
|
138 |
-
sample2 = self.dummy_sample_deter + 0.1
|
139 |
-
sample3 = self.dummy_sample_deter - 0.1
|
140 |
-
|
141 |
-
per_sample_batch = sample1.shape[0]
|
142 |
-
samples = torch.stack([sample1, sample2, sample3], dim=0)
|
143 |
-
timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
|
144 |
-
|
145 |
-
residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
|
146 |
-
pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
|
147 |
-
|
148 |
-
result_sum = torch.sum(torch.abs(pred_prev_sample))
|
149 |
-
result_mean = torch.mean(torch.abs(pred_prev_sample))
|
150 |
-
|
151 |
-
assert abs(result_sum.item() - 1147.7904) < 1e-2
|
152 |
-
assert abs(result_mean.item() - 0.4982) < 1e-3
|
153 |
-
|
154 |
-
def test_full_loop_no_noise(self):
|
155 |
-
sample = self.full_loop()
|
156 |
-
|
157 |
-
result_sum = torch.sum(torch.abs(sample))
|
158 |
-
result_mean = torch.mean(torch.abs(sample))
|
159 |
-
|
160 |
-
assert abs(result_sum.item() - 172.0067) < 1e-2
|
161 |
-
assert abs(result_mean.item() - 0.223967) < 1e-3
|
162 |
-
|
163 |
-
def test_full_loop_with_v_prediction(self):
|
164 |
-
sample = self.full_loop(prediction_type="v_prediction")
|
165 |
-
|
166 |
-
result_sum = torch.sum(torch.abs(sample))
|
167 |
-
result_mean = torch.mean(torch.abs(sample))
|
168 |
-
|
169 |
-
assert abs(result_sum.item() - 52.5302) < 1e-2
|
170 |
-
assert abs(result_mean.item() - 0.0684) < 1e-3
|
171 |
-
|
172 |
-
def test_full_loop_with_set_alpha_to_one(self):
|
173 |
-
# We specify different beta, so that the first alpha is 0.99
|
174 |
-
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
|
175 |
-
result_sum = torch.sum(torch.abs(sample))
|
176 |
-
result_mean = torch.mean(torch.abs(sample))
|
177 |
-
|
178 |
-
assert abs(result_sum.item() - 149.8295) < 1e-2
|
179 |
-
assert abs(result_mean.item() - 0.1951) < 1e-3
|
180 |
-
|
181 |
-
def test_full_loop_with_no_set_alpha_to_one(self):
|
182 |
-
# We specify different beta, so that the first alpha is 0.99
|
183 |
-
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
|
184 |
-
result_sum = torch.sum(torch.abs(sample))
|
185 |
-
result_mean = torch.mean(torch.abs(sample))
|
186 |
-
|
187 |
-
assert abs(result_sum.item() - 149.0784) < 1e-2
|
188 |
-
assert abs(result_mean.item() - 0.1941) < 1e-3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/core/visualization/__init__.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
from .image import (color_val_matplotlib, imshow_det_bboxes,
|
2 |
-
imshow_gt_det_bboxes)
|
3 |
-
|
4 |
-
__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib']
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/benchmark.py
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import time
|
3 |
-
|
4 |
-
import torch
|
5 |
-
from mmcv import Config, DictAction
|
6 |
-
from mmcv.cnn import fuse_conv_bn
|
7 |
-
from mmcv.parallel import MMDataParallel
|
8 |
-
from mmcv.runner import load_checkpoint, wrap_fp16_model
|
9 |
-
|
10 |
-
from mmdet.datasets import (build_dataloader, build_dataset,
|
11 |
-
replace_ImageToTensor)
|
12 |
-
from mmdet.models import build_detector
|
13 |
-
|
14 |
-
|
15 |
-
def parse_args():
|
16 |
-
parser = argparse.ArgumentParser(description='MMDet benchmark a model')
|
17 |
-
parser.add_argument('config', help='test config file path')
|
18 |
-
parser.add_argument('checkpoint', help='checkpoint file')
|
19 |
-
parser.add_argument(
|
20 |
-
'--log-interval', default=50, help='interval of logging')
|
21 |
-
parser.add_argument(
|
22 |
-
'--fuse-conv-bn',
|
23 |
-
action='store_true',
|
24 |
-
help='Whether to fuse conv and bn, this will slightly increase'
|
25 |
-
'the inference speed')
|
26 |
-
parser.add_argument(
|
27 |
-
'--cfg-options',
|
28 |
-
nargs='+',
|
29 |
-
action=DictAction,
|
30 |
-
help='override some settings in the used config, the key-value pair '
|
31 |
-
'in xxx=yyy format will be merged into config file. If the value to '
|
32 |
-
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
|
33 |
-
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
|
34 |
-
'Note that the quotation marks are necessary and that no white space '
|
35 |
-
'is allowed.')
|
36 |
-
args = parser.parse_args()
|
37 |
-
return args
|
38 |
-
|
39 |
-
|
40 |
-
def main():
|
41 |
-
args = parse_args()
|
42 |
-
|
43 |
-
cfg = Config.fromfile(args.config)
|
44 |
-
if args.cfg_options is not None:
|
45 |
-
cfg.merge_from_dict(args.cfg_options)
|
46 |
-
# import modules from string list.
|
47 |
-
if cfg.get('custom_imports', None):
|
48 |
-
from mmcv.utils import import_modules_from_strings
|
49 |
-
import_modules_from_strings(**cfg['custom_imports'])
|
50 |
-
# set cudnn_benchmark
|
51 |
-
if cfg.get('cudnn_benchmark', False):
|
52 |
-
torch.backends.cudnn.benchmark = True
|
53 |
-
cfg.model.pretrained = None
|
54 |
-
cfg.data.test.test_mode = True
|
55 |
-
|
56 |
-
# build the dataloader
|
57 |
-
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
|
58 |
-
if samples_per_gpu > 1:
|
59 |
-
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
|
60 |
-
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
|
61 |
-
dataset = build_dataset(cfg.data.test)
|
62 |
-
data_loader = build_dataloader(
|
63 |
-
dataset,
|
64 |
-
samples_per_gpu=1,
|
65 |
-
workers_per_gpu=cfg.data.workers_per_gpu,
|
66 |
-
dist=False,
|
67 |
-
shuffle=False)
|
68 |
-
|
69 |
-
# build the model and load checkpoint
|
70 |
-
cfg.model.train_cfg = None
|
71 |
-
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
|
72 |
-
fp16_cfg = cfg.get('fp16', None)
|
73 |
-
if fp16_cfg is not None:
|
74 |
-
wrap_fp16_model(model)
|
75 |
-
load_checkpoint(model, args.checkpoint, map_location='cpu')
|
76 |
-
if args.fuse_conv_bn:
|
77 |
-
model = fuse_conv_bn(model)
|
78 |
-
|
79 |
-
model = MMDataParallel(model, device_ids=[0])
|
80 |
-
|
81 |
-
model.eval()
|
82 |
-
|
83 |
-
# the first several iterations may be very slow so skip them
|
84 |
-
num_warmup = 5
|
85 |
-
pure_inf_time = 0
|
86 |
-
|
87 |
-
# benchmark with 2000 image and take the average
|
88 |
-
for i, data in enumerate(data_loader):
|
89 |
-
|
90 |
-
torch.cuda.synchronize()
|
91 |
-
start_time = time.perf_counter()
|
92 |
-
|
93 |
-
with torch.no_grad():
|
94 |
-
model(return_loss=False, rescale=True, **data)
|
95 |
-
|
96 |
-
torch.cuda.synchronize()
|
97 |
-
elapsed = time.perf_counter() - start_time
|
98 |
-
|
99 |
-
if i >= num_warmup:
|
100 |
-
pure_inf_time += elapsed
|
101 |
-
if (i + 1) % args.log_interval == 0:
|
102 |
-
fps = (i + 1 - num_warmup) / pure_inf_time
|
103 |
-
print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')
|
104 |
-
|
105 |
-
if (i + 1) == 2000:
|
106 |
-
pure_inf_time += elapsed
|
107 |
-
fps = (i + 1 - num_warmup) / pure_inf_time
|
108 |
-
print(f'Overall fps: {fps:.1f} img / s')
|
109 |
-
break
|
110 |
-
|
111 |
-
|
112 |
-
if __name__ == '__main__':
|
113 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/deeplabv3_r50-d8.py',
|
3 |
-
'../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_40k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(align_corners=True),
|
8 |
-
auxiliary_head=dict(align_corners=True),
|
9 |
-
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_video_demo/transforms.py
DELETED
@@ -1,443 +0,0 @@
|
|
1 |
-
import torchvision
|
2 |
-
import random
|
3 |
-
from PIL import Image, ImageOps
|
4 |
-
import numpy as np
|
5 |
-
import numbers
|
6 |
-
import math
|
7 |
-
import torch
|
8 |
-
|
9 |
-
|
10 |
-
class GroupRandomCrop(object):
|
11 |
-
def __init__(self, size):
|
12 |
-
if isinstance(size, numbers.Number):
|
13 |
-
self.size = (int(size), int(size))
|
14 |
-
else:
|
15 |
-
self.size = size
|
16 |
-
|
17 |
-
def __call__(self, img_group):
|
18 |
-
|
19 |
-
w, h = img_group[0].size
|
20 |
-
th, tw = self.size
|
21 |
-
|
22 |
-
out_images = list()
|
23 |
-
|
24 |
-
x1 = random.randint(0, w - tw)
|
25 |
-
y1 = random.randint(0, h - th)
|
26 |
-
|
27 |
-
for img in img_group:
|
28 |
-
assert(img.size[0] == w and img.size[1] == h)
|
29 |
-
if w == tw and h == th:
|
30 |
-
out_images.append(img)
|
31 |
-
else:
|
32 |
-
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
|
33 |
-
|
34 |
-
return out_images
|
35 |
-
|
36 |
-
|
37 |
-
class MultiGroupRandomCrop(object):
|
38 |
-
def __init__(self, size, groups=1):
|
39 |
-
if isinstance(size, numbers.Number):
|
40 |
-
self.size = (int(size), int(size))
|
41 |
-
else:
|
42 |
-
self.size = size
|
43 |
-
self.groups = groups
|
44 |
-
|
45 |
-
def __call__(self, img_group):
|
46 |
-
|
47 |
-
w, h = img_group[0].size
|
48 |
-
th, tw = self.size
|
49 |
-
|
50 |
-
out_images = list()
|
51 |
-
|
52 |
-
for i in range(self.groups):
|
53 |
-
x1 = random.randint(0, w - tw)
|
54 |
-
y1 = random.randint(0, h - th)
|
55 |
-
|
56 |
-
for img in img_group:
|
57 |
-
assert(img.size[0] == w and img.size[1] == h)
|
58 |
-
if w == tw and h == th:
|
59 |
-
out_images.append(img)
|
60 |
-
else:
|
61 |
-
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
|
62 |
-
|
63 |
-
return out_images
|
64 |
-
|
65 |
-
|
66 |
-
class GroupCenterCrop(object):
|
67 |
-
def __init__(self, size):
|
68 |
-
self.worker = torchvision.transforms.CenterCrop(size)
|
69 |
-
|
70 |
-
def __call__(self, img_group):
|
71 |
-
return [self.worker(img) for img in img_group]
|
72 |
-
|
73 |
-
|
74 |
-
class GroupRandomHorizontalFlip(object):
|
75 |
-
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
|
76 |
-
"""
|
77 |
-
|
78 |
-
def __init__(self, is_flow=False):
|
79 |
-
self.is_flow = is_flow
|
80 |
-
|
81 |
-
def __call__(self, img_group, is_flow=False):
|
82 |
-
v = random.random()
|
83 |
-
if v < 0.5:
|
84 |
-
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
|
85 |
-
if self.is_flow:
|
86 |
-
for i in range(0, len(ret), 2):
|
87 |
-
# invert flow pixel values when flipping
|
88 |
-
ret[i] = ImageOps.invert(ret[i])
|
89 |
-
return ret
|
90 |
-
else:
|
91 |
-
return img_group
|
92 |
-
|
93 |
-
|
94 |
-
class GroupNormalize(object):
|
95 |
-
def __init__(self, mean, std):
|
96 |
-
self.mean = mean
|
97 |
-
self.std = std
|
98 |
-
|
99 |
-
def __call__(self, tensor):
|
100 |
-
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
|
101 |
-
rep_std = self.std * (tensor.size()[0] // len(self.std))
|
102 |
-
|
103 |
-
# TODO: make efficient
|
104 |
-
for t, m, s in zip(tensor, rep_mean, rep_std):
|
105 |
-
t.sub_(m).div_(s)
|
106 |
-
|
107 |
-
return tensor
|
108 |
-
|
109 |
-
|
110 |
-
class GroupScale(object):
|
111 |
-
""" Rescales the input PIL.Image to the given 'size'.
|
112 |
-
'size' will be the size of the smaller edge.
|
113 |
-
For example, if height > width, then image will be
|
114 |
-
rescaled to (size * height / width, size)
|
115 |
-
size: size of the smaller edge
|
116 |
-
interpolation: Default: PIL.Image.BILINEAR
|
117 |
-
"""
|
118 |
-
|
119 |
-
def __init__(self, size, interpolation=Image.BILINEAR):
|
120 |
-
self.worker = torchvision.transforms.Resize(size, interpolation)
|
121 |
-
|
122 |
-
def __call__(self, img_group):
|
123 |
-
return [self.worker(img) for img in img_group]
|
124 |
-
|
125 |
-
|
126 |
-
class GroupOverSample(object):
|
127 |
-
def __init__(self, crop_size, scale_size=None, flip=True):
|
128 |
-
self.crop_size = crop_size if not isinstance(
|
129 |
-
crop_size, int) else (crop_size, crop_size)
|
130 |
-
|
131 |
-
if scale_size is not None:
|
132 |
-
self.scale_worker = GroupScale(scale_size)
|
133 |
-
else:
|
134 |
-
self.scale_worker = None
|
135 |
-
self.flip = flip
|
136 |
-
|
137 |
-
def __call__(self, img_group):
|
138 |
-
|
139 |
-
if self.scale_worker is not None:
|
140 |
-
img_group = self.scale_worker(img_group)
|
141 |
-
|
142 |
-
image_w, image_h = img_group[0].size
|
143 |
-
crop_w, crop_h = self.crop_size
|
144 |
-
|
145 |
-
offsets = GroupMultiScaleCrop.fill_fix_offset(
|
146 |
-
False, image_w, image_h, crop_w, crop_h)
|
147 |
-
oversample_group = list()
|
148 |
-
for o_w, o_h in offsets:
|
149 |
-
normal_group = list()
|
150 |
-
flip_group = list()
|
151 |
-
for i, img in enumerate(img_group):
|
152 |
-
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
|
153 |
-
normal_group.append(crop)
|
154 |
-
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
|
155 |
-
|
156 |
-
if img.mode == 'L' and i % 2 == 0:
|
157 |
-
flip_group.append(ImageOps.invert(flip_crop))
|
158 |
-
else:
|
159 |
-
flip_group.append(flip_crop)
|
160 |
-
|
161 |
-
oversample_group.extend(normal_group)
|
162 |
-
if self.flip:
|
163 |
-
oversample_group.extend(flip_group)
|
164 |
-
return oversample_group
|
165 |
-
|
166 |
-
|
167 |
-
class GroupFullResSample(object):
|
168 |
-
def __init__(self, crop_size, scale_size=None, flip=True):
|
169 |
-
self.crop_size = crop_size if not isinstance(
|
170 |
-
crop_size, int) else (crop_size, crop_size)
|
171 |
-
|
172 |
-
if scale_size is not None:
|
173 |
-
self.scale_worker = GroupScale(scale_size)
|
174 |
-
else:
|
175 |
-
self.scale_worker = None
|
176 |
-
self.flip = flip
|
177 |
-
|
178 |
-
def __call__(self, img_group):
|
179 |
-
|
180 |
-
if self.scale_worker is not None:
|
181 |
-
img_group = self.scale_worker(img_group)
|
182 |
-
|
183 |
-
image_w, image_h = img_group[0].size
|
184 |
-
crop_w, crop_h = self.crop_size
|
185 |
-
|
186 |
-
w_step = (image_w - crop_w) // 4
|
187 |
-
h_step = (image_h - crop_h) // 4
|
188 |
-
|
189 |
-
offsets = list()
|
190 |
-
offsets.append((0 * w_step, 2 * h_step)) # left
|
191 |
-
offsets.append((4 * w_step, 2 * h_step)) # right
|
192 |
-
offsets.append((2 * w_step, 2 * h_step)) # center
|
193 |
-
|
194 |
-
oversample_group = list()
|
195 |
-
for o_w, o_h in offsets:
|
196 |
-
normal_group = list()
|
197 |
-
flip_group = list()
|
198 |
-
for i, img in enumerate(img_group):
|
199 |
-
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
|
200 |
-
normal_group.append(crop)
|
201 |
-
if self.flip:
|
202 |
-
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
|
203 |
-
|
204 |
-
if img.mode == 'L' and i % 2 == 0:
|
205 |
-
flip_group.append(ImageOps.invert(flip_crop))
|
206 |
-
else:
|
207 |
-
flip_group.append(flip_crop)
|
208 |
-
|
209 |
-
oversample_group.extend(normal_group)
|
210 |
-
oversample_group.extend(flip_group)
|
211 |
-
return oversample_group
|
212 |
-
|
213 |
-
|
214 |
-
class GroupMultiScaleCrop(object):
|
215 |
-
|
216 |
-
def __init__(self, input_size, scales=None, max_distort=1,
|
217 |
-
fix_crop=True, more_fix_crop=True):
|
218 |
-
self.scales = scales if scales is not None else [1, .875, .75, .66]
|
219 |
-
self.max_distort = max_distort
|
220 |
-
self.fix_crop = fix_crop
|
221 |
-
self.more_fix_crop = more_fix_crop
|
222 |
-
self.input_size = input_size if not isinstance(input_size, int) else [
|
223 |
-
input_size, input_size]
|
224 |
-
self.interpolation = Image.BILINEAR
|
225 |
-
|
226 |
-
def __call__(self, img_group):
|
227 |
-
|
228 |
-
im_size = img_group[0].size
|
229 |
-
|
230 |
-
crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
|
231 |
-
crop_img_group = [
|
232 |
-
img.crop(
|
233 |
-
(offset_w,
|
234 |
-
offset_h,
|
235 |
-
offset_w +
|
236 |
-
crop_w,
|
237 |
-
offset_h +
|
238 |
-
crop_h)) for img in img_group]
|
239 |
-
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
|
240 |
-
for img in crop_img_group]
|
241 |
-
return ret_img_group
|
242 |
-
|
243 |
-
def _sample_crop_size(self, im_size):
|
244 |
-
image_w, image_h = im_size[0], im_size[1]
|
245 |
-
|
246 |
-
# find a crop size
|
247 |
-
base_size = min(image_w, image_h)
|
248 |
-
crop_sizes = [int(base_size * x) for x in self.scales]
|
249 |
-
crop_h = [
|
250 |
-
self.input_size[1] if abs(
|
251 |
-
x - self.input_size[1]) < 3 else x for x in crop_sizes]
|
252 |
-
crop_w = [
|
253 |
-
self.input_size[0] if abs(
|
254 |
-
x - self.input_size[0]) < 3 else x for x in crop_sizes]
|
255 |
-
|
256 |
-
pairs = []
|
257 |
-
for i, h in enumerate(crop_h):
|
258 |
-
for j, w in enumerate(crop_w):
|
259 |
-
if abs(i - j) <= self.max_distort:
|
260 |
-
pairs.append((w, h))
|
261 |
-
|
262 |
-
crop_pair = random.choice(pairs)
|
263 |
-
if not self.fix_crop:
|
264 |
-
w_offset = random.randint(0, image_w - crop_pair[0])
|
265 |
-
h_offset = random.randint(0, image_h - crop_pair[1])
|
266 |
-
else:
|
267 |
-
w_offset, h_offset = self._sample_fix_offset(
|
268 |
-
image_w, image_h, crop_pair[0], crop_pair[1])
|
269 |
-
|
270 |
-
return crop_pair[0], crop_pair[1], w_offset, h_offset
|
271 |
-
|
272 |
-
def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
|
273 |
-
offsets = self.fill_fix_offset(
|
274 |
-
self.more_fix_crop, image_w, image_h, crop_w, crop_h)
|
275 |
-
return random.choice(offsets)
|
276 |
-
|
277 |
-
@staticmethod
|
278 |
-
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
|
279 |
-
w_step = (image_w - crop_w) // 4
|
280 |
-
h_step = (image_h - crop_h) // 4
|
281 |
-
|
282 |
-
ret = list()
|
283 |
-
ret.append((0, 0)) # upper left
|
284 |
-
ret.append((4 * w_step, 0)) # upper right
|
285 |
-
ret.append((0, 4 * h_step)) # lower left
|
286 |
-
ret.append((4 * w_step, 4 * h_step)) # lower right
|
287 |
-
ret.append((2 * w_step, 2 * h_step)) # center
|
288 |
-
|
289 |
-
if more_fix_crop:
|
290 |
-
ret.append((0, 2 * h_step)) # center left
|
291 |
-
ret.append((4 * w_step, 2 * h_step)) # center right
|
292 |
-
ret.append((2 * w_step, 4 * h_step)) # lower center
|
293 |
-
ret.append((2 * w_step, 0 * h_step)) # upper center
|
294 |
-
|
295 |
-
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
|
296 |
-
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
|
297 |
-
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
|
298 |
-
ret.append((3 * w_step, 3 * h_step)) # lower righ quarter
|
299 |
-
|
300 |
-
return ret
|
301 |
-
|
302 |
-
|
303 |
-
class GroupRandomSizedCrop(object):
|
304 |
-
"""Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
|
305 |
-
and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
|
306 |
-
This is popularly used to train the Inception networks
|
307 |
-
size: size of the smaller edge
|
308 |
-
interpolation: Default: PIL.Image.BILINEAR
|
309 |
-
"""
|
310 |
-
|
311 |
-
def __init__(self, size, interpolation=Image.BILINEAR):
|
312 |
-
self.size = size
|
313 |
-
self.interpolation = interpolation
|
314 |
-
|
315 |
-
def __call__(self, img_group):
|
316 |
-
for attempt in range(10):
|
317 |
-
area = img_group[0].size[0] * img_group[0].size[1]
|
318 |
-
target_area = random.uniform(0.08, 1.0) * area
|
319 |
-
aspect_ratio = random.uniform(3. / 4, 4. / 3)
|
320 |
-
|
321 |
-
w = int(round(math.sqrt(target_area * aspect_ratio)))
|
322 |
-
h = int(round(math.sqrt(target_area / aspect_ratio)))
|
323 |
-
|
324 |
-
if random.random() < 0.5:
|
325 |
-
w, h = h, w
|
326 |
-
|
327 |
-
if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
|
328 |
-
x1 = random.randint(0, img_group[0].size[0] - w)
|
329 |
-
y1 = random.randint(0, img_group[0].size[1] - h)
|
330 |
-
found = True
|
331 |
-
break
|
332 |
-
else:
|
333 |
-
found = False
|
334 |
-
x1 = 0
|
335 |
-
y1 = 0
|
336 |
-
|
337 |
-
if found:
|
338 |
-
out_group = list()
|
339 |
-
for img in img_group:
|
340 |
-
img = img.crop((x1, y1, x1 + w, y1 + h))
|
341 |
-
assert(img.size == (w, h))
|
342 |
-
out_group.append(
|
343 |
-
img.resize(
|
344 |
-
(self.size, self.size), self.interpolation))
|
345 |
-
return out_group
|
346 |
-
else:
|
347 |
-
# Fallback
|
348 |
-
scale = GroupScale(self.size, interpolation=self.interpolation)
|
349 |
-
crop = GroupRandomCrop(self.size)
|
350 |
-
return crop(scale(img_group))
|
351 |
-
|
352 |
-
|
353 |
-
class ConvertDataFormat(object):
|
354 |
-
def __init__(self, model_type):
|
355 |
-
self.model_type = model_type
|
356 |
-
|
357 |
-
def __call__(self, images):
|
358 |
-
if self.model_type == '2D':
|
359 |
-
return images
|
360 |
-
tc, h, w = images.size()
|
361 |
-
t = tc // 3
|
362 |
-
images = images.view(t, 3, h, w)
|
363 |
-
images = images.permute(1, 0, 2, 3)
|
364 |
-
return images
|
365 |
-
|
366 |
-
|
367 |
-
class Stack(object):
|
368 |
-
|
369 |
-
def __init__(self, roll=False):
|
370 |
-
self.roll = roll
|
371 |
-
|
372 |
-
def __call__(self, img_group):
|
373 |
-
if img_group[0].mode == 'L':
|
374 |
-
return np.concatenate([np.expand_dims(x, 2)
|
375 |
-
for x in img_group], axis=2)
|
376 |
-
elif img_group[0].mode == 'RGB':
|
377 |
-
if self.roll:
|
378 |
-
return np.concatenate([np.array(x)[:, :, ::-1]
|
379 |
-
for x in img_group], axis=2)
|
380 |
-
else:
|
381 |
-
#print(np.concatenate(img_group, axis=2).shape)
|
382 |
-
# print(img_group[0].shape)
|
383 |
-
return np.concatenate(img_group, axis=2)
|
384 |
-
|
385 |
-
|
386 |
-
class ToTorchFormatTensor(object):
|
387 |
-
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
|
388 |
-
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
|
389 |
-
|
390 |
-
def __init__(self, div=True):
|
391 |
-
self.div = div
|
392 |
-
|
393 |
-
def __call__(self, pic):
|
394 |
-
if isinstance(pic, np.ndarray):
|
395 |
-
# handle numpy array
|
396 |
-
img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
|
397 |
-
else:
|
398 |
-
# handle PIL Image
|
399 |
-
img = torch.ByteTensor(
|
400 |
-
torch.ByteStorage.from_buffer(
|
401 |
-
pic.tobytes()))
|
402 |
-
img = img.view(pic.size[1], pic.size[0], len(pic.mode))
|
403 |
-
# put it from HWC to CHW format
|
404 |
-
# yikes, this transpose takes 80% of the loading time/CPU
|
405 |
-
img = img.transpose(0, 1).transpose(0, 2).contiguous()
|
406 |
-
return img.float().div(255) if self.div else img.float()
|
407 |
-
|
408 |
-
|
409 |
-
class IdentityTransform(object):
|
410 |
-
|
411 |
-
def __call__(self, data):
|
412 |
-
return data
|
413 |
-
|
414 |
-
|
415 |
-
if __name__ == "__main__":
|
416 |
-
trans = torchvision.transforms.Compose([
|
417 |
-
GroupScale(256),
|
418 |
-
GroupRandomCrop(224),
|
419 |
-
Stack(),
|
420 |
-
ToTorchFormatTensor(),
|
421 |
-
GroupNormalize(
|
422 |
-
mean=[.485, .456, .406],
|
423 |
-
std=[.229, .224, .225]
|
424 |
-
)]
|
425 |
-
)
|
426 |
-
|
427 |
-
im = Image.open('../tensorflow-model-zoo.torch/lena_299.png')
|
428 |
-
|
429 |
-
color_group = [im] * 3
|
430 |
-
rst = trans(color_group)
|
431 |
-
|
432 |
-
gray_group = [im.convert('L')] * 9
|
433 |
-
gray_rst = trans(gray_group)
|
434 |
-
|
435 |
-
trans2 = torchvision.transforms.Compose([
|
436 |
-
GroupRandomSizedCrop(256),
|
437 |
-
Stack(),
|
438 |
-
ToTorchFormatTensor(),
|
439 |
-
GroupNormalize(
|
440 |
-
mean=[.485, .456, .406],
|
441 |
-
std=[.229, .224, .225])
|
442 |
-
])
|
443 |
-
print(trans2(color_group))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2_hf.py
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
from pathlib import Path
|
3 |
-
from typing import Any, Dict, Optional, Union
|
4 |
-
|
5 |
-
import torch
|
6 |
-
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config
|
7 |
-
from torch.nn import CrossEntropyLoss
|
8 |
-
from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel
|
9 |
-
from transformers.modeling_outputs import CausalLMOutputWithPast
|
10 |
-
|
11 |
-
from modules import shared
|
12 |
-
from modules.logging_colors import logger
|
13 |
-
|
14 |
-
try:
|
15 |
-
import flash_attn
|
16 |
-
except ModuleNotFoundError:
|
17 |
-
logger.warning(
|
18 |
-
'You are running ExLlamaV2 without flash-attention. This will cause the VRAM usage '
|
19 |
-
'to be a lot higher than it could be.\n'
|
20 |
-
'Try installing flash-attention following the instructions here: '
|
21 |
-
'https://github.com/Dao-AILab/flash-attention#installation-and-features'
|
22 |
-
)
|
23 |
-
pass
|
24 |
-
|
25 |
-
|
26 |
-
class Exllamav2HF(PreTrainedModel):
|
27 |
-
def __init__(self, config: ExLlamaV2Config):
|
28 |
-
super().__init__(PretrainedConfig())
|
29 |
-
self.ex_config = config
|
30 |
-
self.ex_model = ExLlamaV2(config)
|
31 |
-
split = None
|
32 |
-
if shared.args.gpu_split:
|
33 |
-
split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
|
34 |
-
|
35 |
-
self.ex_model.load(split)
|
36 |
-
|
37 |
-
self.generation_config = GenerationConfig()
|
38 |
-
|
39 |
-
self.ex_cache = ExLlamaV2Cache(self.ex_model)
|
40 |
-
self.past_seq = None
|
41 |
-
|
42 |
-
if shared.args.cfg_cache:
|
43 |
-
self.ex_cache_negative = ExLlamaV2Cache(self.ex_model)
|
44 |
-
self.past_seq_negative = None
|
45 |
-
|
46 |
-
def _validate_model_class(self):
|
47 |
-
pass
|
48 |
-
|
49 |
-
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
|
50 |
-
pass
|
51 |
-
|
52 |
-
def prepare_inputs_for_generation(self, input_ids, **kwargs):
|
53 |
-
return {'input_ids': input_ids, **kwargs}
|
54 |
-
|
55 |
-
@property
|
56 |
-
def device(self) -> torch.device:
|
57 |
-
return torch.device(0)
|
58 |
-
|
59 |
-
def __call__(self, *args, **kwargs):
|
60 |
-
use_cache = kwargs.get('use_cache', True)
|
61 |
-
labels = kwargs.get('labels', None)
|
62 |
-
past_key_values = kwargs.get('past_key_values', None)
|
63 |
-
|
64 |
-
if len(args) > 0:
|
65 |
-
if not shared.args.cfg_cache:
|
66 |
-
logger.error("Please enable the cfg-cache option to use CFG with ExLlamav2_HF.")
|
67 |
-
return
|
68 |
-
|
69 |
-
input_ids = args[0]
|
70 |
-
is_negative = True
|
71 |
-
past_seq = self.past_seq_negative
|
72 |
-
ex_cache = self.ex_cache_negative
|
73 |
-
else:
|
74 |
-
input_ids = kwargs['input_ids']
|
75 |
-
is_negative = False
|
76 |
-
past_seq = self.past_seq
|
77 |
-
ex_cache = self.ex_cache
|
78 |
-
|
79 |
-
seq = input_ids[0].tolist()
|
80 |
-
if is_negative and past_key_values is not None:
|
81 |
-
seq = past_key_values + seq
|
82 |
-
|
83 |
-
seq_tensor = torch.tensor(seq)
|
84 |
-
reset = True
|
85 |
-
|
86 |
-
# Make the forward call
|
87 |
-
if labels is None:
|
88 |
-
if past_seq is not None:
|
89 |
-
min_length = min(past_seq.shape[0], seq_tensor.shape[0])
|
90 |
-
indices = torch.nonzero(~torch.eq(past_seq[:min_length], seq_tensor[:min_length]))
|
91 |
-
if len(indices) > 0:
|
92 |
-
longest_prefix = indices[0].item()
|
93 |
-
else:
|
94 |
-
longest_prefix = min_length
|
95 |
-
|
96 |
-
if longest_prefix > 0:
|
97 |
-
reset = False
|
98 |
-
ex_cache.current_seq_len = longest_prefix
|
99 |
-
if len(seq_tensor) - longest_prefix > 1:
|
100 |
-
self.ex_model.forward(seq_tensor[longest_prefix:-1].view(1, -1), ex_cache, preprocess_only=True)
|
101 |
-
elif len(seq_tensor) == longest_prefix:
|
102 |
-
# Very tricky: if the prefix we are reusing *is* the input_ids, then we have to back up the cache pointer by one,
|
103 |
-
# because we feed input_ids[-1] to forward() below, but that last token is already in the cache!
|
104 |
-
ex_cache.current_seq_len -= 1
|
105 |
-
|
106 |
-
if reset:
|
107 |
-
ex_cache.current_seq_len = 0
|
108 |
-
if len(seq_tensor) > 1:
|
109 |
-
self.ex_model.forward(seq_tensor[:-1].view(1, -1), ex_cache, preprocess_only=True)
|
110 |
-
|
111 |
-
logits = self.ex_model.forward(seq_tensor[-1:].view(1, -1), ex_cache).to(input_ids.device)
|
112 |
-
else:
|
113 |
-
ex_cache.current_seq_len = 0
|
114 |
-
logits = self.ex_model.forward(seq_tensor.view(1, -1), ex_cache, last_id_only=False)
|
115 |
-
|
116 |
-
if is_negative:
|
117 |
-
self.past_seq_negative = seq_tensor
|
118 |
-
else:
|
119 |
-
self.past_seq = seq_tensor
|
120 |
-
|
121 |
-
loss = None
|
122 |
-
if labels is not None:
|
123 |
-
# Shift so that tokens < n predict n
|
124 |
-
shift_logits = logits[..., :-1, :].contiguous()
|
125 |
-
shift_labels = labels[..., 1:].contiguous()
|
126 |
-
# Flatten the tokens
|
127 |
-
loss_fct = CrossEntropyLoss()
|
128 |
-
shift_logits = shift_logits.view(-1, logits.shape[-1])
|
129 |
-
shift_labels = shift_labels.view(-1)
|
130 |
-
# Enable model parallelism
|
131 |
-
shift_labels = shift_labels.to(shift_logits.device)
|
132 |
-
loss = loss_fct(shift_logits, shift_labels)
|
133 |
-
|
134 |
-
return CausalLMOutputWithPast(logits=logits, past_key_values=seq if use_cache else None, loss=loss)
|
135 |
-
|
136 |
-
@classmethod
|
137 |
-
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
|
138 |
-
assert len(model_args) == 0 and len(kwargs) == 0, "extra args is currently not supported"
|
139 |
-
if isinstance(pretrained_model_name_or_path, str):
|
140 |
-
pretrained_model_name_or_path = Path(pretrained_model_name_or_path)
|
141 |
-
|
142 |
-
pretrained_model_name_or_path = Path(f'{shared.args.model_dir}') / Path(pretrained_model_name_or_path)
|
143 |
-
|
144 |
-
config = ExLlamaV2Config()
|
145 |
-
config.model_dir = str(pretrained_model_name_or_path)
|
146 |
-
config.prepare()
|
147 |
-
|
148 |
-
config.max_seq_len = shared.args.max_seq_len
|
149 |
-
config.scale_pos_emb = shared.args.compress_pos_emb
|
150 |
-
config.scale_alpha_value = shared.args.alpha_value
|
151 |
-
|
152 |
-
return Exllamav2HF(config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/info.py
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import glob
|
3 |
-
import os
|
4 |
-
|
5 |
-
import torch
|
6 |
-
|
7 |
-
if torch.__version__ == 'parrots':
|
8 |
-
import parrots
|
9 |
-
|
10 |
-
def get_compiler_version():
|
11 |
-
return 'GCC ' + parrots.version.compiler
|
12 |
-
|
13 |
-
def get_compiling_cuda_version():
|
14 |
-
return parrots.version.cuda
|
15 |
-
else:
|
16 |
-
from ..utils import ext_loader
|
17 |
-
ext_module = ext_loader.load_ext(
|
18 |
-
'_ext', ['get_compiler_version', 'get_compiling_cuda_version'])
|
19 |
-
|
20 |
-
def get_compiler_version():
|
21 |
-
return ext_module.get_compiler_version()
|
22 |
-
|
23 |
-
def get_compiling_cuda_version():
|
24 |
-
return ext_module.get_compiling_cuda_version()
|
25 |
-
|
26 |
-
|
27 |
-
def get_onnxruntime_op_path():
|
28 |
-
wildcard = os.path.join(
|
29 |
-
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
|
30 |
-
'_ext_ort.*.so')
|
31 |
-
|
32 |
-
paths = glob.glob(wildcard)
|
33 |
-
if len(paths) > 0:
|
34 |
-
return paths[0]
|
35 |
-
else:
|
36 |
-
return ''
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/distributions/__init__.py
DELETED
File without changes
|
spaces/Ashish17/Ashish_Open_Chat_AI_17/app.py
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import gradio as gr
|
3 |
-
from langchain.chat_models import ChatOpenAI
|
4 |
-
from langchain import LLMChain, PromptTemplate
|
5 |
-
from langchain.memory import ConversationBufferMemory
|
6 |
-
|
7 |
-
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
|
8 |
-
|
9 |
-
template = """You are a helpful assistant to answer all user queries.
|
10 |
-
{chat_history}
|
11 |
-
User: {user_message}
|
12 |
-
Chatbot:"""
|
13 |
-
|
14 |
-
prompt = PromptTemplate(
|
15 |
-
input_variables=["chat_history", "user_message"], template=template
|
16 |
-
)
|
17 |
-
|
18 |
-
memory = ConversationBufferMemory(memory_key="chat_history")
|
19 |
-
|
20 |
-
llm_chain = LLMChain(
|
21 |
-
llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
|
22 |
-
prompt=prompt,
|
23 |
-
verbose=True,
|
24 |
-
memory=memory,
|
25 |
-
)
|
26 |
-
|
27 |
-
def get_text_response(user_message,history):
|
28 |
-
response = llm_chain.predict(user_message = user_message)
|
29 |
-
return response
|
30 |
-
|
31 |
-
demo = gr.ChatInterface(get_text_response)
|
32 |
-
|
33 |
-
if __name__ == "__main__":
|
34 |
-
demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/__init__.py
DELETED
File without changes
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/default_styles.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
from typing import Dict
|
2 |
-
|
3 |
-
from .style import Style
|
4 |
-
|
5 |
-
DEFAULT_STYLES: Dict[str, Style] = {
|
6 |
-
"none": Style.null(),
|
7 |
-
"reset": Style(
|
8 |
-
color="default",
|
9 |
-
bgcolor="default",
|
10 |
-
dim=False,
|
11 |
-
bold=False,
|
12 |
-
italic=False,
|
13 |
-
underline=False,
|
14 |
-
blink=False,
|
15 |
-
blink2=False,
|
16 |
-
reverse=False,
|
17 |
-
conceal=False,
|
18 |
-
strike=False,
|
19 |
-
),
|
20 |
-
"dim": Style(dim=True),
|
21 |
-
"bright": Style(dim=False),
|
22 |
-
"bold": Style(bold=True),
|
23 |
-
"strong": Style(bold=True),
|
24 |
-
"code": Style(reverse=True, bold=True),
|
25 |
-
"italic": Style(italic=True),
|
26 |
-
"emphasize": Style(italic=True),
|
27 |
-
"underline": Style(underline=True),
|
28 |
-
"blink": Style(blink=True),
|
29 |
-
"blink2": Style(blink2=True),
|
30 |
-
"reverse": Style(reverse=True),
|
31 |
-
"strike": Style(strike=True),
|
32 |
-
"black": Style(color="black"),
|
33 |
-
"red": Style(color="red"),
|
34 |
-
"green": Style(color="green"),
|
35 |
-
"yellow": Style(color="yellow"),
|
36 |
-
"magenta": Style(color="magenta"),
|
37 |
-
"cyan": Style(color="cyan"),
|
38 |
-
"white": Style(color="white"),
|
39 |
-
"inspect.attr": Style(color="yellow", italic=True),
|
40 |
-
"inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
|
41 |
-
"inspect.callable": Style(bold=True, color="red"),
|
42 |
-
"inspect.async_def": Style(italic=True, color="bright_cyan"),
|
43 |
-
"inspect.def": Style(italic=True, color="bright_cyan"),
|
44 |
-
"inspect.class": Style(italic=True, color="bright_cyan"),
|
45 |
-
"inspect.error": Style(bold=True, color="red"),
|
46 |
-
"inspect.equals": Style(),
|
47 |
-
"inspect.help": Style(color="cyan"),
|
48 |
-
"inspect.doc": Style(dim=True),
|
49 |
-
"inspect.value.border": Style(color="green"),
|
50 |
-
"live.ellipsis": Style(bold=True, color="red"),
|
51 |
-
"layout.tree.row": Style(dim=False, color="red"),
|
52 |
-
"layout.tree.column": Style(dim=False, color="blue"),
|
53 |
-
"logging.keyword": Style(bold=True, color="yellow"),
|
54 |
-
"logging.level.notset": Style(dim=True),
|
55 |
-
"logging.level.debug": Style(color="green"),
|
56 |
-
"logging.level.info": Style(color="blue"),
|
57 |
-
"logging.level.warning": Style(color="red"),
|
58 |
-
"logging.level.error": Style(color="red", bold=True),
|
59 |
-
"logging.level.critical": Style(color="red", bold=True, reverse=True),
|
60 |
-
"log.level": Style.null(),
|
61 |
-
"log.time": Style(color="cyan", dim=True),
|
62 |
-
"log.message": Style.null(),
|
63 |
-
"log.path": Style(dim=True),
|
64 |
-
"repr.ellipsis": Style(color="yellow"),
|
65 |
-
"repr.indent": Style(color="green", dim=True),
|
66 |
-
"repr.error": Style(color="red", bold=True),
|
67 |
-
"repr.str": Style(color="green", italic=False, bold=False),
|
68 |
-
"repr.brace": Style(bold=True),
|
69 |
-
"repr.comma": Style(bold=True),
|
70 |
-
"repr.ipv4": Style(bold=True, color="bright_green"),
|
71 |
-
"repr.ipv6": Style(bold=True, color="bright_green"),
|
72 |
-
"repr.eui48": Style(bold=True, color="bright_green"),
|
73 |
-
"repr.eui64": Style(bold=True, color="bright_green"),
|
74 |
-
"repr.tag_start": Style(bold=True),
|
75 |
-
"repr.tag_name": Style(color="bright_magenta", bold=True),
|
76 |
-
"repr.tag_contents": Style(color="default"),
|
77 |
-
"repr.tag_end": Style(bold=True),
|
78 |
-
"repr.attrib_name": Style(color="yellow", italic=False),
|
79 |
-
"repr.attrib_equal": Style(bold=True),
|
80 |
-
"repr.attrib_value": Style(color="magenta", italic=False),
|
81 |
-
"repr.number": Style(color="cyan", bold=True, italic=False),
|
82 |
-
"repr.number_complex": Style(color="cyan", bold=True, italic=False), # same
|
83 |
-
"repr.bool_true": Style(color="bright_green", italic=True),
|
84 |
-
"repr.bool_false": Style(color="bright_red", italic=True),
|
85 |
-
"repr.none": Style(color="magenta", italic=True),
|
86 |
-
"repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
|
87 |
-
"repr.uuid": Style(color="bright_yellow", bold=False),
|
88 |
-
"repr.call": Style(color="magenta", bold=True),
|
89 |
-
"repr.path": Style(color="magenta"),
|
90 |
-
"repr.filename": Style(color="bright_magenta"),
|
91 |
-
"rule.line": Style(color="bright_green"),
|
92 |
-
"rule.text": Style.null(),
|
93 |
-
"json.brace": Style(bold=True),
|
94 |
-
"json.bool_true": Style(color="bright_green", italic=True),
|
95 |
-
"json.bool_false": Style(color="bright_red", italic=True),
|
96 |
-
"json.null": Style(color="magenta", italic=True),
|
97 |
-
"json.number": Style(color="cyan", bold=True, italic=False),
|
98 |
-
"json.str": Style(color="green", italic=False, bold=False),
|
99 |
-
"json.key": Style(color="blue", bold=True),
|
100 |
-
"prompt": Style.null(),
|
101 |
-
"prompt.choices": Style(color="magenta", bold=True),
|
102 |
-
"prompt.default": Style(color="cyan", bold=True),
|
103 |
-
"prompt.invalid": Style(color="red"),
|
104 |
-
"prompt.invalid.choice": Style(color="red"),
|
105 |
-
"pretty": Style.null(),
|
106 |
-
"scope.border": Style(color="blue"),
|
107 |
-
"scope.key": Style(color="yellow", italic=True),
|
108 |
-
"scope.key.special": Style(color="yellow", italic=True, dim=True),
|
109 |
-
"scope.equals": Style(color="red"),
|
110 |
-
"table.header": Style(bold=True),
|
111 |
-
"table.footer": Style(bold=True),
|
112 |
-
"table.cell": Style.null(),
|
113 |
-
"table.title": Style(italic=True),
|
114 |
-
"table.caption": Style(italic=True, dim=True),
|
115 |
-
"traceback.error": Style(color="red", italic=True),
|
116 |
-
"traceback.border.syntax_error": Style(color="bright_red"),
|
117 |
-
"traceback.border": Style(color="red"),
|
118 |
-
"traceback.text": Style.null(),
|
119 |
-
"traceback.title": Style(color="red", bold=True),
|
120 |
-
"traceback.exc_type": Style(color="bright_red", bold=True),
|
121 |
-
"traceback.exc_value": Style.null(),
|
122 |
-
"traceback.offset": Style(color="bright_red", bold=True),
|
123 |
-
"bar.back": Style(color="grey23"),
|
124 |
-
"bar.complete": Style(color="rgb(249,38,114)"),
|
125 |
-
"bar.finished": Style(color="rgb(114,156,31)"),
|
126 |
-
"bar.pulse": Style(color="rgb(249,38,114)"),
|
127 |
-
"progress.description": Style.null(),
|
128 |
-
"progress.filesize": Style(color="green"),
|
129 |
-
"progress.filesize.total": Style(color="green"),
|
130 |
-
"progress.download": Style(color="green"),
|
131 |
-
"progress.elapsed": Style(color="yellow"),
|
132 |
-
"progress.percentage": Style(color="magenta"),
|
133 |
-
"progress.remaining": Style(color="cyan"),
|
134 |
-
"progress.data.speed": Style(color="red"),
|
135 |
-
"progress.spinner": Style(color="green"),
|
136 |
-
"status.spinner": Style(color="green"),
|
137 |
-
"tree": Style(),
|
138 |
-
"tree.line": Style(),
|
139 |
-
"markdown.paragraph": Style(),
|
140 |
-
"markdown.text": Style(),
|
141 |
-
"markdown.em": Style(italic=True),
|
142 |
-
"markdown.emph": Style(italic=True), # For commonmark backwards compatibility
|
143 |
-
"markdown.strong": Style(bold=True),
|
144 |
-
"markdown.code": Style(bold=True, color="cyan", bgcolor="black"),
|
145 |
-
"markdown.code_block": Style(color="cyan", bgcolor="black"),
|
146 |
-
"markdown.block_quote": Style(color="magenta"),
|
147 |
-
"markdown.list": Style(color="cyan"),
|
148 |
-
"markdown.item": Style(),
|
149 |
-
"markdown.item.bullet": Style(color="yellow", bold=True),
|
150 |
-
"markdown.item.number": Style(color="yellow", bold=True),
|
151 |
-
"markdown.hr": Style(color="yellow"),
|
152 |
-
"markdown.h1.border": Style(),
|
153 |
-
"markdown.h1": Style(bold=True),
|
154 |
-
"markdown.h2": Style(bold=True, underline=True),
|
155 |
-
"markdown.h3": Style(bold=True),
|
156 |
-
"markdown.h4": Style(bold=True, dim=True),
|
157 |
-
"markdown.h5": Style(underline=True),
|
158 |
-
"markdown.h6": Style(italic=True),
|
159 |
-
"markdown.h7": Style(italic=True, dim=True),
|
160 |
-
"markdown.link": Style(color="bright_blue"),
|
161 |
-
"markdown.link_url": Style(color="blue", underline=True),
|
162 |
-
"markdown.s": Style(strike=True),
|
163 |
-
"iso8601.date": Style(color="blue"),
|
164 |
-
"iso8601.time": Style(color="magenta"),
|
165 |
-
"iso8601.timezone": Style(color="yellow"),
|
166 |
-
}
|
167 |
-
|
168 |
-
|
169 |
-
if __name__ == "__main__": # pragma: no cover
|
170 |
-
import argparse
|
171 |
-
import io
|
172 |
-
|
173 |
-
from pip._vendor.rich.console import Console
|
174 |
-
from pip._vendor.rich.table import Table
|
175 |
-
from pip._vendor.rich.text import Text
|
176 |
-
|
177 |
-
parser = argparse.ArgumentParser()
|
178 |
-
parser.add_argument("--html", action="store_true", help="Export as HTML table")
|
179 |
-
args = parser.parse_args()
|
180 |
-
html: bool = args.html
|
181 |
-
console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
|
182 |
-
|
183 |
-
table = Table("Name", "Styling")
|
184 |
-
|
185 |
-
for style_name, style in DEFAULT_STYLES.items():
|
186 |
-
table.add_row(Text(style_name, style=style), str(style))
|
187 |
-
|
188 |
-
console.print(table)
|
189 |
-
if html:
|
190 |
-
print(console.export_html(inline_styles=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_visualizer.py
DELETED
@@ -1,278 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import os
|
6 |
-
import tempfile
|
7 |
-
import unittest
|
8 |
-
import cv2
|
9 |
-
import torch
|
10 |
-
|
11 |
-
from detectron2.data import MetadataCatalog
|
12 |
-
from detectron2.structures import BoxMode, Instances, RotatedBoxes
|
13 |
-
from detectron2.utils.visualizer import ColorMode, Visualizer
|
14 |
-
|
15 |
-
|
16 |
-
class TestVisualizer(unittest.TestCase):
|
17 |
-
def _random_data(self):
|
18 |
-
H, W = 100, 100
|
19 |
-
N = 10
|
20 |
-
img = np.random.rand(H, W, 3) * 255
|
21 |
-
boxxy = np.random.rand(N, 2) * (H // 2)
|
22 |
-
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
|
23 |
-
|
24 |
-
def _rand_poly():
|
25 |
-
return np.random.rand(3, 2).flatten() * H
|
26 |
-
|
27 |
-
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
|
28 |
-
|
29 |
-
mask = np.zeros_like(img[:, :, 0], dtype=np.bool)
|
30 |
-
mask[:40, 10:20] = 1
|
31 |
-
|
32 |
-
labels = [str(i) for i in range(N)]
|
33 |
-
return img, boxes, labels, polygons, [mask] * N
|
34 |
-
|
35 |
-
@property
|
36 |
-
def metadata(self):
|
37 |
-
return MetadataCatalog.get("coco_2017_train")
|
38 |
-
|
39 |
-
def test_draw_dataset_dict(self):
|
40 |
-
img = np.random.rand(512, 512, 3) * 255
|
41 |
-
dic = {
|
42 |
-
"annotations": [
|
43 |
-
{
|
44 |
-
"bbox": [
|
45 |
-
368.9946492271106,
|
46 |
-
330.891438763377,
|
47 |
-
13.148537455410235,
|
48 |
-
13.644708680142685,
|
49 |
-
],
|
50 |
-
"bbox_mode": BoxMode.XYWH_ABS,
|
51 |
-
"category_id": 0,
|
52 |
-
"iscrowd": 1,
|
53 |
-
"segmentation": {
|
54 |
-
"counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2",
|
55 |
-
"size": [512, 512],
|
56 |
-
},
|
57 |
-
}
|
58 |
-
],
|
59 |
-
"height": 512,
|
60 |
-
"image_id": 1,
|
61 |
-
"width": 512,
|
62 |
-
}
|
63 |
-
v = Visualizer(img)
|
64 |
-
v.draw_dataset_dict(dic)
|
65 |
-
|
66 |
-
v = Visualizer(img, self.metadata)
|
67 |
-
v.draw_dataset_dict(dic)
|
68 |
-
|
69 |
-
def test_draw_rotated_dataset_dict(self):
|
70 |
-
img = np.random.rand(512, 512, 3) * 255
|
71 |
-
dic = {
|
72 |
-
"annotations": [
|
73 |
-
{
|
74 |
-
"bbox": [
|
75 |
-
368.9946492271106,
|
76 |
-
330.891438763377,
|
77 |
-
13.148537455410235,
|
78 |
-
13.644708680142685,
|
79 |
-
45.0,
|
80 |
-
],
|
81 |
-
"bbox_mode": BoxMode.XYWHA_ABS,
|
82 |
-
"category_id": 0,
|
83 |
-
"iscrowd": 1,
|
84 |
-
}
|
85 |
-
],
|
86 |
-
"height": 512,
|
87 |
-
"image_id": 1,
|
88 |
-
"width": 512,
|
89 |
-
}
|
90 |
-
v = Visualizer(img, self.metadata)
|
91 |
-
v.draw_dataset_dict(dic)
|
92 |
-
|
93 |
-
def test_overlay_instances(self):
|
94 |
-
img, boxes, labels, polygons, masks = self._random_data()
|
95 |
-
|
96 |
-
v = Visualizer(img, self.metadata)
|
97 |
-
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
|
98 |
-
self.assertEqual(output.shape, img.shape)
|
99 |
-
|
100 |
-
# Test 2x scaling
|
101 |
-
v = Visualizer(img, self.metadata, scale=2.0)
|
102 |
-
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
|
103 |
-
self.assertEqual(output.shape[0], img.shape[0] * 2)
|
104 |
-
|
105 |
-
# Test overlay masks
|
106 |
-
v = Visualizer(img, self.metadata)
|
107 |
-
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
|
108 |
-
self.assertEqual(output.shape, img.shape)
|
109 |
-
|
110 |
-
def test_overlay_instances_no_boxes(self):
|
111 |
-
img, boxes, labels, polygons, _ = self._random_data()
|
112 |
-
v = Visualizer(img, self.metadata)
|
113 |
-
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
|
114 |
-
|
115 |
-
def test_draw_instance_predictions(self):
|
116 |
-
img, boxes, _, _, masks = self._random_data()
|
117 |
-
num_inst = len(boxes)
|
118 |
-
inst = Instances((img.shape[0], img.shape[1]))
|
119 |
-
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
|
120 |
-
inst.scores = torch.rand(num_inst)
|
121 |
-
inst.pred_boxes = torch.from_numpy(boxes)
|
122 |
-
inst.pred_masks = torch.from_numpy(np.asarray(masks))
|
123 |
-
|
124 |
-
v = Visualizer(img)
|
125 |
-
v.draw_instance_predictions(inst)
|
126 |
-
|
127 |
-
v = Visualizer(img, self.metadata)
|
128 |
-
v.draw_instance_predictions(inst)
|
129 |
-
|
130 |
-
def test_BWmode_nomask(self):
|
131 |
-
img, boxes, _, _, masks = self._random_data()
|
132 |
-
num_inst = len(boxes)
|
133 |
-
inst = Instances((img.shape[0], img.shape[1]))
|
134 |
-
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
|
135 |
-
inst.scores = torch.rand(num_inst)
|
136 |
-
inst.pred_boxes = torch.from_numpy(boxes)
|
137 |
-
|
138 |
-
v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)
|
139 |
-
v.draw_instance_predictions(inst)
|
140 |
-
|
141 |
-
# check that output is grayscale
|
142 |
-
inst = inst[:0]
|
143 |
-
v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)
|
144 |
-
output = v.draw_instance_predictions(inst).get_image()
|
145 |
-
self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 1]))
|
146 |
-
self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 2]))
|
147 |
-
|
148 |
-
def test_draw_empty_mask_predictions(self):
|
149 |
-
img, boxes, _, _, masks = self._random_data()
|
150 |
-
num_inst = len(boxes)
|
151 |
-
inst = Instances((img.shape[0], img.shape[1]))
|
152 |
-
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
|
153 |
-
inst.scores = torch.rand(num_inst)
|
154 |
-
inst.pred_boxes = torch.from_numpy(boxes)
|
155 |
-
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
|
156 |
-
|
157 |
-
v = Visualizer(img, self.metadata)
|
158 |
-
v.draw_instance_predictions(inst)
|
159 |
-
|
160 |
-
def test_correct_output_shape(self):
|
161 |
-
img = np.random.rand(928, 928, 3) * 255
|
162 |
-
v = Visualizer(img, self.metadata)
|
163 |
-
out = v.output.get_image()
|
164 |
-
self.assertEqual(out.shape, img.shape)
|
165 |
-
|
166 |
-
def test_overlay_rotated_instances(self):
|
167 |
-
H, W = 100, 150
|
168 |
-
img = np.random.rand(H, W, 3) * 255
|
169 |
-
num_boxes = 50
|
170 |
-
boxes_5d = torch.zeros(num_boxes, 5)
|
171 |
-
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
|
172 |
-
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
|
173 |
-
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
|
174 |
-
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
|
175 |
-
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
|
176 |
-
rotated_boxes = RotatedBoxes(boxes_5d)
|
177 |
-
labels = [str(i) for i in range(num_boxes)]
|
178 |
-
|
179 |
-
v = Visualizer(img, self.metadata)
|
180 |
-
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
|
181 |
-
self.assertEqual(output.shape, img.shape)
|
182 |
-
|
183 |
-
def test_draw_no_metadata(self):
|
184 |
-
img, boxes, _, _, masks = self._random_data()
|
185 |
-
num_inst = len(boxes)
|
186 |
-
inst = Instances((img.shape[0], img.shape[1]))
|
187 |
-
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
|
188 |
-
inst.scores = torch.rand(num_inst)
|
189 |
-
inst.pred_boxes = torch.from_numpy(boxes)
|
190 |
-
inst.pred_masks = torch.from_numpy(np.asarray(masks))
|
191 |
-
|
192 |
-
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
|
193 |
-
v.draw_instance_predictions(inst)
|
194 |
-
|
195 |
-
def test_draw_binary_mask(self):
|
196 |
-
img, boxes, _, _, masks = self._random_data()
|
197 |
-
img[:, :, 0] = 0 # remove red color
|
198 |
-
mask = masks[0]
|
199 |
-
mask_with_hole = np.zeros_like(mask).astype("uint8")
|
200 |
-
mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5)
|
201 |
-
|
202 |
-
for m in [mask, mask_with_hole]:
|
203 |
-
for save in [True, False]:
|
204 |
-
v = Visualizer(img)
|
205 |
-
o = v.draw_binary_mask(m, color="red", text="test")
|
206 |
-
if save:
|
207 |
-
with tempfile.TemporaryDirectory(prefix="detectron2_viz") as d:
|
208 |
-
path = os.path.join(d, "output.png")
|
209 |
-
o.save(path)
|
210 |
-
o = cv2.imread(path)[:, :, ::-1]
|
211 |
-
else:
|
212 |
-
o = o.get_image().astype("float32")
|
213 |
-
# red color is drawn on the image
|
214 |
-
self.assertTrue(o[:, :, 0].sum() > 0)
|
215 |
-
|
216 |
-
def test_draw_soft_mask(self):
|
217 |
-
img = np.random.rand(100, 100, 3) * 255
|
218 |
-
img[:, :, 0] = 0 # remove red color
|
219 |
-
mask = np.zeros((100, 100), dtype=np.float32)
|
220 |
-
mask[30:50, 40:50] = 1.0
|
221 |
-
cv2.GaussianBlur(mask, (21, 21), 10)
|
222 |
-
|
223 |
-
v = Visualizer(img)
|
224 |
-
o = v.draw_soft_mask(mask, color="red", text="test")
|
225 |
-
o = o.get_image().astype("float32")
|
226 |
-
# red color is drawn on the image
|
227 |
-
self.assertTrue(o[:, :, 0].sum() > 0)
|
228 |
-
|
229 |
-
# test draw empty mask
|
230 |
-
v = Visualizer(img)
|
231 |
-
o = v.draw_soft_mask(np.zeros((100, 100), dtype=np.float32), color="red", text="test")
|
232 |
-
o = o.get_image().astype("float32")
|
233 |
-
|
234 |
-
def test_border_mask_with_holes(self):
|
235 |
-
H, W = 200, 200
|
236 |
-
img = np.zeros((H, W, 3))
|
237 |
-
img[:, :, 0] = 255.0
|
238 |
-
v = Visualizer(img, scale=3)
|
239 |
-
|
240 |
-
mask = np.zeros((H, W))
|
241 |
-
mask[:, 100:150] = 1
|
242 |
-
# create a hole, to trigger imshow
|
243 |
-
mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1)
|
244 |
-
output = v.draw_binary_mask(mask, color="blue")
|
245 |
-
output = output.get_image()[:, :, ::-1]
|
246 |
-
|
247 |
-
first_row = {tuple(x.tolist()) for x in output[0]}
|
248 |
-
last_row = {tuple(x.tolist()) for x in output[-1]}
|
249 |
-
# Check quantization / off-by-1 error: the first and last row must have two colors
|
250 |
-
self.assertEqual(len(last_row), 2)
|
251 |
-
self.assertEqual(len(first_row), 2)
|
252 |
-
self.assertIn((0, 0, 255), last_row)
|
253 |
-
self.assertIn((0, 0, 255), first_row)
|
254 |
-
|
255 |
-
def test_border_polygons(self):
|
256 |
-
H, W = 200, 200
|
257 |
-
img = np.zeros((H, W, 3))
|
258 |
-
img[:, :, 0] = 255.0
|
259 |
-
v = Visualizer(img, scale=3)
|
260 |
-
mask = np.zeros((H, W))
|
261 |
-
mask[:, 100:150] = 1
|
262 |
-
|
263 |
-
output = v.draw_binary_mask(mask, color="blue")
|
264 |
-
output = output.get_image()[:, :, ::-1]
|
265 |
-
|
266 |
-
first_row = {tuple(x.tolist()) for x in output[0]}
|
267 |
-
last_row = {tuple(x.tolist()) for x in output[-1]}
|
268 |
-
# Check quantization / off-by-1 error:
|
269 |
-
# the first and last row must have >=2 colors, because the polygon
|
270 |
-
# touches both rows
|
271 |
-
self.assertGreaterEqual(len(last_row), 2)
|
272 |
-
self.assertGreaterEqual(len(first_row), 2)
|
273 |
-
self.assertIn((0, 0, 255), last_row)
|
274 |
-
self.assertIn((0, 0, 255), first_row)
|
275 |
-
|
276 |
-
|
277 |
-
if __name__ == "__main__":
|
278 |
-
unittest.main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Casa Flip Mster Apk.md
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo jugar maquillaje de bricolaje Makyaj Oyunu APK en su dispositivo Android</h1> | | | <p>¿Te encanta el maquillaje y desea crear sus propios productos? ¿Quieres divertirte y expresar tu creatividad con diferentes colores e ingredientes? ¿Quieres aprender a hacer cosméticos naturales y ecológicos en casa? Si respondió sí a cualquiera de estas preguntas, entonces usted debe tratar de bricolaje maquillaje Makyaj Oyunu APK.</p> | | | <p>DIY maquillaje Makyaj Oyunu APK es un juego de simulación que le permite mezclar crayones, aceite de coco, glicerina, miel, y otros artículos de cocina para hacer barra de labios impresionante, hermoso arte de ojos, rímel, mascarilla, y más. Puedes personalizar tu look con diferentes tonos, formas, estilos y accesorios. También puedes compartir tus creaciones con otros jugadores online. </p>
|
3 |
-
<h2>casa flip máster apk</h2><br /><p><b><b>Download File</b> ››› <a href="https://bltlly.com/2v6MC7">https://bltlly.com/2v6MC7</a></b></p><br /><br /> | | <p>En este artículo, le mostraremos cómo descargar e instalar DIY maquillaje Makyaj Oyunu APK en su dispositivo Android, cómo jugar el juego y crear sus propios productos de maquillaje, y cuáles son los beneficios de jugar este juego. </p> | | | < <h2>Cómo descargar e instalar DIY maquillaje Makyaj Oyunu APK en su dispositivo Android</h2>
|
4 |
-
<p>Para jugar DIY maquillaje Makyaj Oyunu APK, es necesario descargar e instalar el archivo APK en su dispositivo Android. APK significa Android Package Kit, y es un formato de archivo que contiene el código de la aplicación, recursos y metadatos. Instalar un archivo APK también se conoce como sideloading, lo que significa instalar una aplicación desde una fuente distinta de la oficial Google Play Store.</p>
|
5 |
-
<p>Antes de descargar e instalar DIY maquillaje Makyaj Oyunu APK, es necesario asegurarse de que su dispositivo cumple con los siguientes requisitos:</p>
|
6 |
-
<ul>
|
7 |
-
<li>Tu dispositivo debe tener Android 4.4 o superior. </li>
|
8 |
-
<li>Su dispositivo debe tener al menos 100 MB de espacio de almacenamiento libre. </li>
|
9 |
-
<li>Su dispositivo debe permitir la instalación de aplicaciones de fuentes desconocidas. Para habilitar esta opción, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctela. </li>
|
10 |
-
</ul>
|
11 |
-
|
12 |
-
<ol>
|
13 |
-
<li>Ir a la página web oficial de bricolaje maquillaje Makyaj Oyunu APK y haga clic en el botón Descargar. </li>
|
14 |
-
<li>Espere a que se complete la descarga y luego abra el archivo APK desde el administrador de archivos o la barra de notificaciones de su dispositivo. </li>
|
15 |
-
<li>Toque en Instalar y siga las instrucciones en pantalla para completar el proceso de instalación. </li>
|
16 |
-
<li>Inicie la aplicación desde el cajón de la aplicación o la pantalla de inicio y disfrutar jugando DIY maquillaje Makyaj Oyunu APK.</li>
|
17 |
-
</ol>
|
18 |
-
<h2>Cómo jugar bricolaje maquillaje Makyaj Oyunu APK y crear sus propios productos de maquillaje</h2>
|
19 |
-
<p>Maquillaje de bricolaje Makyaj Oyunu APK es un juego que le permite dar rienda suelta a su creatividad e imaginación mediante la fabricación de sus propios productos de maquillaje. Puede elegir entre diferentes categorías, como lápiz labial, rímel, arte de los ojos, mascarilla, etc. y utilizar varios ingredientes y herramientas para crear su propio aspecto único. </p>
|
20 |
-
<p> Para jugar bricolaje maquillaje Makyaj Oyunu APK, es necesario seguir estos pasos:</p>
|
21 |
-
<p></p>
|
22 |
-
<ol>
|
23 |
-
<li>Seleccione una categoría que desea crear. Por ejemplo, si desea hacer lápiz labial, toque el icono de lápiz labial en la pantalla principal. </li>
|
24 |
-
<li>Seleccione un color de lápiz de colores que desea utilizar como base de su lápiz labial. También puede mezclar diferentes crayones para crear nuevos colores. </li>
|
25 |
-
<li>Derrita el crayón en un microondas o una estufa. Tenga cuidado de no sobrecalentarlo o quemarlo. </li>
|
26 |
-
<li>Agrega un poco de aceite de coco, glicerina, miel u otros ingredientes para que tu lápiz labial sea suave e hidratante. También puedes agregar brillo, fragancia o sabor para hacerlo más divertido y atractivo. </li>
|
27 |
-
<li>Verter la mezcla en un molde y congelar durante unos minutos hasta que se solidifica. </li>
|
28 |
-
<li>Saca tu lápiz labial del molde y aplícalo en tus labios. También puedes usar un cepillo o una esponja para mezclarlo mejor. </li>
|
29 |
-
<li>Personaliza tu look con diferentes formas, estilos y accesorios. También puedes cambiar tu cabello, ojos, tono de piel, etc. para que coincida con tu lápiz labial. </li>
|
30 |
-
|
31 |
-
</ol>
|
32 |
-
<h3>Cómo hacer rímel</h3>
|
33 |
-
<p>Si quieres hacer rímel, debes seguir estos pasos:</p>
|
34 |
-
<ol>
|
35 |
-
<li>Seleccione una categoría que desea crear. Por ejemplo, si desea hacer máscara, toque el icono de máscara en la pantalla principal. </li>
|
36 |
-
<li>Selecciona un color de carbón que quieras usar como base de tu rímel. También puedes mezclar diferentes carboncillos para crear nuevos colores. </li>
|
37 |
-
<li>Moler el carbón en un mortero y la mano hasta que se convierte en un polvo fino. </li>
|
38 |
-
<li>Agrega un poco de aceite de coco, glicerina, miel u otros ingredientes para que tu rímel sea suave e hidratante. También puedes agregar brillo, fragancia o sabor para hacerlo más divertido y atractivo. </li>
|
39 |
-
<li>Vierte la mezcla en un recipiente con un aplicador de varita. También puedes usar un tubo de rímel viejo que hayas limpiado y desinfectado. </li>
|
40 |
-
<li>Aplica el rímel en tus pestañas con el aplicador de varita. También puedes usar un rizador o un peine para dar mejor forma a tus pestañas. </li>
|
41 |
-
<li>Personaliza tu look con diferentes formas, estilos y accesorios. También puedes cambiar tu cabello, ojos, tono de piel, etc. para que coincida con tu rímel. </li>
|
42 |
-
<li>Comparta su creación con otros jugadores en línea tomando un selfie o un video. También puede calificar y comentar las creaciones de otros jugadores. </li>
|
43 |
-
</ol>
|
44 |
-
<h3>Cómo hacer arte visual <h3>Cómo hacer arte visual</h3>
|
45 |
-
<p>Si quieres hacer arte visual, debes seguir estos pasos:</p>
|
46 |
-
<ol>
|
47 |
-
<li>Seleccione una categoría que desea crear. Por ejemplo, si desea hacer arte visual, toque el icono de arte visual en la pantalla principal. </li>
|
48 |
-
<li>Seleccione un color de crayón que desea utilizar como base de su arte del ojo. También puede mezclar diferentes crayones para crear nuevos colores. </li>
|
49 |
-
<li>Derrita el crayón en un microondas o una estufa. Tenga cuidado de no sobrecalentarlo o quemarlo. </li>
|
50 |
-
<li>Agregue un poco de aceite de coco, glicerina, miel u otros ingredientes para que su arte del ojo suave e hidratante. También puedes agregar brillo, fragancia o sabor para hacerlo más divertido y atractivo. </li>
|
51 |
-
|
52 |
-
<li>Saca tu arte ocular del molde y aplícalo en tus párpados. También puedes usar un cepillo o una esponja para mezclarlo mejor. </li>
|
53 |
-
<li>Personaliza tu look con diferentes formas, estilos y accesorios. También puedes cambiar tu cabello, ojos, tono de piel, etc. para que coincida con tu arte ocular. </li>
|
54 |
-
<li>Comparta su creación con otros jugadores en línea tomando un selfie o un video. También puede calificar y comentar las creaciones de otros jugadores. </li>
|
55 |
-
</ol>
|
56 |
-
<h3>Cómo hacer máscara de la cara</h3>
|
57 |
-
<p>Si quieres hacer una máscara facial, debes seguir estos pasos:</p>
|
58 |
-
<ol>
|
59 |
-
<li>Seleccione una categoría que desea crear. Por ejemplo, si desea crear una máscara facial, toque el icono de máscara facial en la pantalla principal. </li>
|
60 |
-
<li>Selecciona una fruta o un vegetal que quieras usar como base de tu mascarilla facial. También puede mezclar diferentes frutas o verduras para crear nuevas combinaciones. </li>
|
61 |
-
<li>Pela y corta la fruta o verdura en trozos pequeños. También puedes usar una licuadora o un procesador de alimentos para hacer puré. </li>
|
62 |
-
<li>Agregue un poco de yogur, miel, avena u otros ingredientes para que su máscara facial sea suave y nutritiva. También puede agregar algunos aceites esenciales, hierbas o especias para que sea más aromático y relajante. </li>
|
63 |
-
<li>Vierta la mezcla en un recipiente y refrigere durante unos minutos hasta que se enfríe. </li>
|
64 |
-
<li>Aplique su máscara facial en la cara y el cuello con los dedos o una espátula. Evite el área de los ojos y la boca. </li>
|
65 |
-
<li>Relájate y deja que la máscara trabaje su magia durante 15 a 20 minutos. </li>
|
66 |
-
<li>Enjuague su máscara facial con agua tibia y seque con una toalla. </li>
|
67 |
-
<li>Disfruta de tu piel suave y brillante. También puedes compartir tu creación con otros jugadores en línea tomando un selfie o un video. También puedes valorar y comentar las creaciones de otros jugadores. </li>
|
68 |
-
</ol>
|
69 |
-
<h2>¿Cuáles son los beneficios de jugar DIY maquillaje Makyaj Oyunu APK</h2>
|
70 |
-
<p>Jugando DIY maquillaje Makyaj Oyunu APK no solo es divertido y creativo, sino que también tiene muchos beneficios para usted y el medio ambiente. Estos son algunos de ellos:</p>
|
71 |
-
|
72 |
-
<p>Jugando DIY maquillaje Makyaj Oyunu APK le permite expresar su creatividad e imaginación haciendo sus propios productos de maquillaje. Puede experimentar con diferentes colores, ingredientes, herramientas y estilos para crear su propio aspecto único. También puedes divertirte compartiendo tus creaciones con otros jugadores en línea y viendo sus comentarios. Jugar a este juego también puede ayudarte a relajarte y reducir el estrés centrándote en algo agradable y gratificante. </p>
|
73 |
-
<h3>Es seguro, natural y ecológico</h3>
|
74 |
-
<p>Jugando DIY maquillaje Makyaj Oyunu APK le permite hacer sus propios productos de maquillaje utilizando ingredientes naturales y ecológicos que se pueden encontrar en su cocina. No tiene que preocuparse por productos químicos, conservantes o aditivos dañinos que puedan causar reacciones alérgicas o dañar la piel. Tampoco tiene que preocuparse por los envases de plástico o los residuos que puedan contaminar el medio ambiente. Jugar a este juego puede ayudarle a ahorrar dinero y recursos haciendo sus propios productos de maquillaje en casa. </p>
|
75 |
-
<h3>Es educativo, inspirador y empoderador</h3>
|
76 |
-
<p>Jugando DIY maquillaje Makyaj Oyunu APK le permite aprender nuevas habilidades y conocimientos mediante la fabricación de sus propios productos de maquillaje. Puedes aprender sobre las propiedades y beneficios de los diferentes ingredientes, cómo interactúan entre sí, cómo afectan a tu piel, etc. También puedes aprender sobre diferentes técnicas y herramientas que usan los maquilladores profesionales. Jugar a este juego puede inspirarte a explorar más posibilidades y opciones para tus productos de maquillaje. Jugar a este juego también puede empoderarte para tomar el control de tu belleza y salud haciendo tus propias elecciones y decisiones. </p>
|
77 |
-
|
78 |
-
<p>DIY maquillaje Makyaj Oyunu APK es un juego que le permite divertirse y expresar su creatividad con diferentes colores e ingredientes. Puedes hacer tu propio lápiz labial, rímel, arte ocular, mascarilla y más. También puedes personalizar tu look con diferentes formas, estilos y accesorios. También puedes compartir tus creaciones con otros jugadores online y ver sus comentarios. </p>
|
79 |
-
<p>Jugando DIY maquillaje Makyaj Oyunu APK no solo es divertido y creativo, sino también seguro, natural y ecológico. Puedes hacer tus propios productos de maquillaje usando ingredientes naturales y ecológicos que puedes encontrar en tu cocina. No tiene que preocuparse por productos químicos, conservantes o aditivos dañinos que puedan causar reacciones alérgicas o dañar la piel. Tampoco tienes que preocuparte por los envases de plástico o los residuos que puedan contaminar el medio ambiente. </p>
|
80 |
-
<p>Jugando maquillaje de bricolaje Makyaj Oyunu APK también es educativo, inspirador y empoderador. Puedes aprender nuevas habilidades y conocimientos haciendo tus propios productos de maquillaje. Puedes aprender sobre las propiedades y beneficios de los diferentes ingredientes, cómo interactúan entre sí, cómo afectan a tu piel, etc. También puedes aprender sobre diferentes técnicas y herramientas que usan los maquilladores profesionales. Jugar a este juego puede inspirarte a explorar más posibilidades y opciones para tus productos de maquillaje. Jugar a este juego también puede empoderarte para tomar el control de tu belleza y salud haciendo tus propias elecciones y decisiones. </p>
|
81 |
-
<p>Entonces, ¿qué estás esperando? Descargar DIY maquillaje Makyaj Oyunu APK hoy y empezar a hacer sus propios productos de maquillaje. Diviértete y ser creativo! </p>
|
82 |
-
<h2>Preguntas frecuentes</h2>
|
83 |
-
<p>Aquí hay algunas preguntas frecuentes relacionadas con DIY maquillaje Makyaj Oyunu APK:</p>
|
84 |
-
<h3>Q: ¿Es DIY maquillaje Makyaj Oyunu APK libre para jugar? </h3>
|
85 |
-
<p>A: Sí, DIY maquillaje Makyaj Oyunu APK es libre de jugar. Sin embargo, puede contener anuncios y compras en la aplicación que requieren dinero real. </p>
|
86 |
-
<h3>Q: ¿Es DIY maquillaje Makyaj Oyunu APK seguro para jugar? </h3>
|
87 |
-
|
88 |
-
<h3>Q: ¿Es el maquillaje de bricolaje Makyaj Oyunu APK adecuado para los niños? </h3>
|
89 |
-
<p>A: Sí, DIY maquillaje Makyaj Oyunu APK es adecuado para los niños. Es un juego de simulación que no contiene violencia, desnudez o lenguaje inapropiado. También es educativo e inspirador para los niños que aman el maquillaje y la creatividad. </p>
|
90 |
-
<h3>Q: ¿Cómo puedo contactar con el desarrollador de DIY maquillaje Makyaj Oyunu APK? </h3>
|
91 |
-
<p>A: Usted puede ponerse en contacto con el desarrollador de DIY maquillaje Makyaj Oyunu APK enviando un correo electrónico a [email protected] o visitando su página de Facebook en https://ww.facebook.com/diy.make.makupj/.</p>
|
92 |
-
<h3>Q: ¿Cómo puedo apoyar al desarrollador de DIY maquillaje Makyaj Oyunu APK? </h3>
|
93 |
-
<p>A: Usted puede apoyar al desarrollador de DIY Maquillaje Makyaj Oyunu APK por calificación y revisión de la aplicación en Google Play Store u otras plataformas, compartiendo la aplicación con sus amigos y familiares, haciendo compras en la aplicación si te gusta la aplicación, o donando al desarrollador a través de PayPal u otros métodos. </p> 64aa2da5cf<br />
|
94 |
-
<br />
|
95 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Cristal Informe 32 Bit Para Espt Pph 21.md
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar Crystal Report 32 bits para ESPT PPh 21</h1>
|
3 |
-
<p>Si está utilizando ESPT PPh 21, una aplicación de software creada por Direktorat Jenderal Pajak (DJP) para facilitar la creación y presentación de informes de SPT PPh 21, es posible que necesite descargar e instalar Crystal Report 32 Bit, una aplicación de soporte que se utiliza para ejecutar ESPT PPh 21. En este artículo, explicaremos qué son Crystal Report y ESPT PPh 21, por qué necesita descargar Crystal Report 32 Bit y cómo hacerlo paso a paso. </p>
|
4 |
-
<h2>¿Qué es Crystal Report y ESPT PPh 21? </h2>
|
5 |
-
<h3>Informe de cristal</h3>
|
6 |
-
<p>Crystal Report es una herramienta de informes que se utiliza para diseñar informes tanto en entornos web como de escritorio. Está desarrollado por SAP e integrado con Microsoft Visual Studio. Permite crear informes interactivos y dinámicos a partir de diversas fuentes de datos, como bases de datos, archivos XML, servicios web, etc. También puede personalizar el diseño, formato y apariencia de sus informes mediante gráficos, tablas, imágenes, etc.</p>
|
7 |
-
<h2>descargar cristal informe 32 bit para espt pph 21</h2><br /><p><b><b>DOWNLOAD</b> ○ <a href="https://bltlly.com/2v6KeS">https://bltlly.com/2v6KeS</a></b></p><br /><br />
|
8 |
-
<h3>ESPT PPh 21</h3>
|
9 |
-
<p>ESPT PPh 21 es un acrónimo de Elektronik Surat Pemberitahuan Pajak Penghasilan Pasal 21/26, que significa carta de notificación electrónica del impuesto sobre la renta Artículo 21/26. Es una aplicación o software creado por DJP para facilitar la creación y presentación de informes del SPT PPh 21/26. SPT PPh 21/26 es un formulario de declaración de impuestos que informa de la retención de impuestos sobre los ingresos pagados a los empleados u otros destinatarios. ESPT PPh 21 puede ser utilizado por contribuyentes individuales, contribuyentes corporativos, tesoreros y agentes de retención. </p>
|
10 |
-
<h2>¿Por qué necesita descargar Crystal Report 32 Bit? </h2>
|
11 |
-
<h3>Problemas de compatibilidad</h3>
|
12 |
-
|
13 |
-
<h3>Mensajes de error</h3>
|
14 |
-
<p>Si intenta ejecutar ESPT PPh 21 con Crystal Report Runtime que no coincide con su sistema operativo, puede encontrar mensajes de error como "El inicializador de tipo para 'CrystalDecisions.CrystalReports.Engine.ReportDocument' lanzó una excepción" o "El informe de carga falló". Estos mensajes de error indican que hay un problema con la carga o visualización de los informes generados por ESPT PPh 21. Para solucionar este problema, debe descargar e instalar Crystal Report Runtime que coincida con su sistema operativo. </p>
|
15 |
-
<h2>¿Cómo descargar Crystal Report 32 Bit? </h2>
|
16 |
-
<h3>Paso 1: Ir al sitio web oficial de Direktorat Jenderal Pajak (DJP)</h3>
|
17 |
-
<p>El sitio web oficial de DJP es <a href="">https://www.pajak.go.id</a>. Este es el sitio web donde se puede encontrar información y servicios relacionados con la fiscalidad en Indonesia. También puede descargar varias aplicaciones y software relacionados con la tributación, como ESPT PPh 21 y Crystal Report Runtime.</p>
|
18 |
-
<h3>Paso 2: Haga clic en Productos A-Z y seleccione C > CRYSTAL REPORTS > CRYSTAL REPORTS 2020</h3>
|
19 |
-
<p>En la página de inicio del sitio web de DJP, verá una barra de menú con varias opciones, como Inicio, Acerca de nosotros, Servicios, Productos A-Z, etc. Haga clic en Productos A-Z y verá una lista de productos y software que están disponibles para descargar. Desplácese hacia abajo y encuentre C > CRYSTAL REPORTS > CRYSTAL REPORTS 2020. Haga clic en él y se le dirigirá a una página donde puede ver los detalles y características de Crystal Reports 2020. </p>
|
20 |
-
<h3>Paso 3: Seleccione Instalación y actualización > WINDOWS y descargue el archivo CRuntime_32bit_13_0_7.zip</h3>
|
21 |
-
|
22 |
-
<h2>¿Cómo instalar Crystal Report 32 Bit? </h2>
|
23 |
-
<h3>Paso 1: Descomprima el archivo descargado y haga doble clic en el archivo . msi</h3>
|
24 |
-
<p>Después de haber descargado el archivo CRuntime_32bit_13_0_7.zip, debe descomprimirlo usando un software como WinRAR o 7-Zip. Puede hacer clic derecho sobre el archivo y seleccionar Extraer aquí o Extraer en CRuntime_32bit_13_0_7. Verá una carpeta con el mismo nombre que el archivo. Abra la carpeta y verá un archivo llamado CRuntime_32bit_13_0_7.msi. Este es el archivo de instalación que instalará Crystal Report Runtime 32 Bit en su computadora. Haga doble clic en él y verá una ventana que dice Bienvenido a SAP Crystal Reports Runtime Engine for . NET Framework 4 Setup Wizard.</p>
|
25 |
-
<p></p>
|
26 |
-
<h3>Paso 2: Siga el asistente de instalación y acepte los términos y condiciones</h3>
|
27 |
-
<p>El asistente de instalación lo guiará a través de los pasos para instalar Crystal Report Runtime 32 Bit en su computadora. Debe hacer clic en Siguiente para pasar al siguiente paso. Verá una ventana que le pide que acepte los términos del acuerdo de licencia. Lea los términos cuidadosamente y marque la casilla que dice que acepto el acuerdo de licencia si está de acuerdo con ellos. Luego haga clic en Siguiente para continuar. Verá una ventana que le muestra la carpeta de destino donde se instalará Crystal Report Runtime 32 Bit. Puede cambiar la carpeta si lo desea o dejarla como predeterminada. Luego haga clic en Siguiente para continuar. Verá una ventana que le muestra la pantalla lista para instalar. Haga clic en Instalar para iniciar el proceso de instalación. </p>
|
28 |
-
<h3>Paso 3: Reinicie su computadora y ejecute la aplicación ESPT PPh 21</h3>
|
29 |
-
|
30 |
-
<h2>Conclusión</h2>
|
31 |
-
<p>En este artículo, hemos explicado lo que son Crystal Report y ESPT PPh 21, por qué necesita descargar Crystal Report 32 Bit y cómo hacerlo paso a paso. Esperamos que este artículo ha sido útil para usted y ha resuelto su problema con la ejecución de ESPT PPh 21 en su computadora. Si tiene alguna pregunta o comentario, no dude en contactarnos o dejar un comentario a continuación. </p>
|
32 |
-
<h2>Preguntas frecuentes</h2>
|
33 |
-
<ul>
|
34 |
-
<li><b>¿Qué es SPT PPh 21/26? </b></li>
|
35 |
-
<li>SPT PPh 21/26 es un formulario de declaración de impuestos que informa de la retención de impuestos sobre los ingresos pagados a los empleados u otros destinatarios. Es presentado por el empleador o el agente de retención a la oficina de impuestos cada mes o año, dependiendo del tipo de ingreso. </li>
|
36 |
-
<li><b>¿Cuál es la diferencia entre Crystal Report 32 Bit y 64 Bit? </b></li>
|
37 |
-
<li>Crystal Report 32 Bit y 64 Bit son diferentes versiones de Crystal Report Runtime que son compatibles con diferentes sistemas operativos. Crystal Report 32 Bit está diseñado para sistemas operativos de 32 bits, como Windows XP, Windows Vista o Windows 7. Crystal Report 64 Bit está diseñado para sistemas operativos de 64 bits, como Windows 8, Windows 10 o Windows Server. Necesita descargar e instalar la versión que coincida con su sistema operativo para evitar problemas de compatibilidad. </li>
|
38 |
-
<li><b>¿Cómo puedo comprobar si mi sistema operativo es de 32 bits o 64 bits? </b></li>
|
39 |
-
<li>Puede comprobar el tipo de sistema operativo siguiendo estos pasos: <ul>
|
40 |
-
<li>Haga clic en el botón Inicio y escriba Información del sistema en el cuadro de búsqueda. </li>
|
41 |
-
<li>Haga clic en Información del sistema de la lista de resultados. </li>
|
42 |
-
<li>Buscar tipo de sistema en Resumen del sistema.</li>
|
43 |
-
<li>Si dice PC basada en x86, entonces su sistema operativo es de 32 bits. Si dice PC basada en x64, entonces su sistema operativo es de 64 bits. </li>
|
44 |
-
</ul>
|
45 |
-
</li>
|
46 |
-
<li><b>¿Puedo usar ESPT PPh 21 sin Crystal Report Runtime? </b></li>
|
47 |
-
|
48 |
-
<li><b>¿Dónde puedo encontrar más información sobre Crystal Report y ESPT PPh 21? </b></li>
|
49 |
-
<li>Puede encontrar más información sobre Crystal Report y ESPT PPh 21 de las siguientes fuentes: <ul>
|
50 |
-
<li>El sitio web oficial de SAP: <a href="">https://www.sap.com/products/crystal-reports.html</a></li>
|
51 |
-
<li>El sitio web oficial de DJP: <a href="">https://www.pajak.go.id</a></li>
|
52 |
-
<li>El manual de usuario de ESPT PPh 21: <a href="">https://www.pajak.go.id/sites/default/files/Panduan%20ESPT%20PPh%2021%20v2.0.pdf</a></li>
|
53 |
-
</ul>
|
54 |
-
</li>
|
55 |
-
</ul></p> 64aa2da5cf<br />
|
56 |
-
<br />
|
57 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/debug.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
import importlib.resources
|
2 |
-
import locale
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
from optparse import Values
|
7 |
-
from types import ModuleType
|
8 |
-
from typing import Any, Dict, List, Optional
|
9 |
-
|
10 |
-
import pip._vendor
|
11 |
-
from pip._vendor.certifi import where
|
12 |
-
from pip._vendor.packaging.version import parse as parse_version
|
13 |
-
|
14 |
-
from pip._internal.cli import cmdoptions
|
15 |
-
from pip._internal.cli.base_command import Command
|
16 |
-
from pip._internal.cli.cmdoptions import make_target_python
|
17 |
-
from pip._internal.cli.status_codes import SUCCESS
|
18 |
-
from pip._internal.configuration import Configuration
|
19 |
-
from pip._internal.metadata import get_environment
|
20 |
-
from pip._internal.utils.logging import indent_log
|
21 |
-
from pip._internal.utils.misc import get_pip_version
|
22 |
-
|
23 |
-
logger = logging.getLogger(__name__)
|
24 |
-
|
25 |
-
|
26 |
-
def show_value(name: str, value: Any) -> None:
|
27 |
-
logger.info("%s: %s", name, value)
|
28 |
-
|
29 |
-
|
30 |
-
def show_sys_implementation() -> None:
|
31 |
-
logger.info("sys.implementation:")
|
32 |
-
implementation_name = sys.implementation.name
|
33 |
-
with indent_log():
|
34 |
-
show_value("name", implementation_name)
|
35 |
-
|
36 |
-
|
37 |
-
def create_vendor_txt_map() -> Dict[str, str]:
|
38 |
-
with importlib.resources.open_text("pip._vendor", "vendor.txt") as f:
|
39 |
-
# Purge non version specifying lines.
|
40 |
-
# Also, remove any space prefix or suffixes (including comments).
|
41 |
-
lines = [
|
42 |
-
line.strip().split(" ", 1)[0] for line in f.readlines() if "==" in line
|
43 |
-
]
|
44 |
-
|
45 |
-
# Transform into "module" -> version dict.
|
46 |
-
return dict(line.split("==", 1) for line in lines)
|
47 |
-
|
48 |
-
|
49 |
-
def get_module_from_module_name(module_name: str) -> ModuleType:
|
50 |
-
# Module name can be uppercase in vendor.txt for some reason...
|
51 |
-
module_name = module_name.lower().replace("-", "_")
|
52 |
-
# PATCH: setuptools is actually only pkg_resources.
|
53 |
-
if module_name == "setuptools":
|
54 |
-
module_name = "pkg_resources"
|
55 |
-
|
56 |
-
__import__(f"pip._vendor.{module_name}", globals(), locals(), level=0)
|
57 |
-
return getattr(pip._vendor, module_name)
|
58 |
-
|
59 |
-
|
60 |
-
def get_vendor_version_from_module(module_name: str) -> Optional[str]:
|
61 |
-
module = get_module_from_module_name(module_name)
|
62 |
-
version = getattr(module, "__version__", None)
|
63 |
-
|
64 |
-
if not version:
|
65 |
-
# Try to find version in debundled module info.
|
66 |
-
assert module.__file__ is not None
|
67 |
-
env = get_environment([os.path.dirname(module.__file__)])
|
68 |
-
dist = env.get_distribution(module_name)
|
69 |
-
if dist:
|
70 |
-
version = str(dist.version)
|
71 |
-
|
72 |
-
return version
|
73 |
-
|
74 |
-
|
75 |
-
def show_actual_vendor_versions(vendor_txt_versions: Dict[str, str]) -> None:
|
76 |
-
"""Log the actual version and print extra info if there is
|
77 |
-
a conflict or if the actual version could not be imported.
|
78 |
-
"""
|
79 |
-
for module_name, expected_version in vendor_txt_versions.items():
|
80 |
-
extra_message = ""
|
81 |
-
actual_version = get_vendor_version_from_module(module_name)
|
82 |
-
if not actual_version:
|
83 |
-
extra_message = (
|
84 |
-
" (Unable to locate actual module version, using"
|
85 |
-
" vendor.txt specified version)"
|
86 |
-
)
|
87 |
-
actual_version = expected_version
|
88 |
-
elif parse_version(actual_version) != parse_version(expected_version):
|
89 |
-
extra_message = (
|
90 |
-
" (CONFLICT: vendor.txt suggests version should"
|
91 |
-
" be {})".format(expected_version)
|
92 |
-
)
|
93 |
-
logger.info("%s==%s%s", module_name, actual_version, extra_message)
|
94 |
-
|
95 |
-
|
96 |
-
def show_vendor_versions() -> None:
|
97 |
-
logger.info("vendored library versions:")
|
98 |
-
|
99 |
-
vendor_txt_versions = create_vendor_txt_map()
|
100 |
-
with indent_log():
|
101 |
-
show_actual_vendor_versions(vendor_txt_versions)
|
102 |
-
|
103 |
-
|
104 |
-
def show_tags(options: Values) -> None:
|
105 |
-
tag_limit = 10
|
106 |
-
|
107 |
-
target_python = make_target_python(options)
|
108 |
-
tags = target_python.get_tags()
|
109 |
-
|
110 |
-
# Display the target options that were explicitly provided.
|
111 |
-
formatted_target = target_python.format_given()
|
112 |
-
suffix = ""
|
113 |
-
if formatted_target:
|
114 |
-
suffix = f" (target: {formatted_target})"
|
115 |
-
|
116 |
-
msg = "Compatible tags: {}{}".format(len(tags), suffix)
|
117 |
-
logger.info(msg)
|
118 |
-
|
119 |
-
if options.verbose < 1 and len(tags) > tag_limit:
|
120 |
-
tags_limited = True
|
121 |
-
tags = tags[:tag_limit]
|
122 |
-
else:
|
123 |
-
tags_limited = False
|
124 |
-
|
125 |
-
with indent_log():
|
126 |
-
for tag in tags:
|
127 |
-
logger.info(str(tag))
|
128 |
-
|
129 |
-
if tags_limited:
|
130 |
-
msg = (
|
131 |
-
"...\n[First {tag_limit} tags shown. Pass --verbose to show all.]"
|
132 |
-
).format(tag_limit=tag_limit)
|
133 |
-
logger.info(msg)
|
134 |
-
|
135 |
-
|
136 |
-
def ca_bundle_info(config: Configuration) -> str:
|
137 |
-
levels = set()
|
138 |
-
for key, _ in config.items():
|
139 |
-
levels.add(key.split(".")[0])
|
140 |
-
|
141 |
-
if not levels:
|
142 |
-
return "Not specified"
|
143 |
-
|
144 |
-
levels_that_override_global = ["install", "wheel", "download"]
|
145 |
-
global_overriding_level = [
|
146 |
-
level for level in levels if level in levels_that_override_global
|
147 |
-
]
|
148 |
-
if not global_overriding_level:
|
149 |
-
return "global"
|
150 |
-
|
151 |
-
if "global" in levels:
|
152 |
-
levels.remove("global")
|
153 |
-
return ", ".join(levels)
|
154 |
-
|
155 |
-
|
156 |
-
class DebugCommand(Command):
|
157 |
-
"""
|
158 |
-
Display debug information.
|
159 |
-
"""
|
160 |
-
|
161 |
-
usage = """
|
162 |
-
%prog <options>"""
|
163 |
-
ignore_require_venv = True
|
164 |
-
|
165 |
-
def add_options(self) -> None:
|
166 |
-
cmdoptions.add_target_python_options(self.cmd_opts)
|
167 |
-
self.parser.insert_option_group(0, self.cmd_opts)
|
168 |
-
self.parser.config.load()
|
169 |
-
|
170 |
-
def run(self, options: Values, args: List[str]) -> int:
|
171 |
-
logger.warning(
|
172 |
-
"This command is only meant for debugging. "
|
173 |
-
"Do not use this with automation for parsing and getting these "
|
174 |
-
"details, since the output and options of this command may "
|
175 |
-
"change without notice."
|
176 |
-
)
|
177 |
-
show_value("pip version", get_pip_version())
|
178 |
-
show_value("sys.version", sys.version)
|
179 |
-
show_value("sys.executable", sys.executable)
|
180 |
-
show_value("sys.getdefaultencoding", sys.getdefaultencoding())
|
181 |
-
show_value("sys.getfilesystemencoding", sys.getfilesystemencoding())
|
182 |
-
show_value(
|
183 |
-
"locale.getpreferredencoding",
|
184 |
-
locale.getpreferredencoding(),
|
185 |
-
)
|
186 |
-
show_value("sys.platform", sys.platform)
|
187 |
-
show_sys_implementation()
|
188 |
-
|
189 |
-
show_value("'cert' config value", ca_bundle_info(self.parser.config))
|
190 |
-
show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE"))
|
191 |
-
show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE"))
|
192 |
-
show_value("pip._vendor.certifi.where()", where())
|
193 |
-
show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED)
|
194 |
-
|
195 |
-
show_vendor_versions()
|
196 |
-
|
197 |
-
show_tags(options)
|
198 |
-
|
199 |
-
return SUCCESS
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/hash.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import hashlib
|
2 |
-
import logging
|
3 |
-
import sys
|
4 |
-
from optparse import Values
|
5 |
-
from typing import List
|
6 |
-
|
7 |
-
from pip._internal.cli.base_command import Command
|
8 |
-
from pip._internal.cli.status_codes import ERROR, SUCCESS
|
9 |
-
from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
|
10 |
-
from pip._internal.utils.misc import read_chunks, write_output
|
11 |
-
|
12 |
-
logger = logging.getLogger(__name__)
|
13 |
-
|
14 |
-
|
15 |
-
class HashCommand(Command):
|
16 |
-
"""
|
17 |
-
Compute a hash of a local package archive.
|
18 |
-
|
19 |
-
These can be used with --hash in a requirements file to do repeatable
|
20 |
-
installs.
|
21 |
-
"""
|
22 |
-
|
23 |
-
usage = "%prog [options] <file> ..."
|
24 |
-
ignore_require_venv = True
|
25 |
-
|
26 |
-
def add_options(self) -> None:
|
27 |
-
self.cmd_opts.add_option(
|
28 |
-
"-a",
|
29 |
-
"--algorithm",
|
30 |
-
dest="algorithm",
|
31 |
-
choices=STRONG_HASHES,
|
32 |
-
action="store",
|
33 |
-
default=FAVORITE_HASH,
|
34 |
-
help="The hash algorithm to use: one of {}".format(
|
35 |
-
", ".join(STRONG_HASHES)
|
36 |
-
),
|
37 |
-
)
|
38 |
-
self.parser.insert_option_group(0, self.cmd_opts)
|
39 |
-
|
40 |
-
def run(self, options: Values, args: List[str]) -> int:
|
41 |
-
if not args:
|
42 |
-
self.parser.print_usage(sys.stderr)
|
43 |
-
return ERROR
|
44 |
-
|
45 |
-
algorithm = options.algorithm
|
46 |
-
for path in args:
|
47 |
-
write_output(
|
48 |
-
"%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
|
49 |
-
)
|
50 |
-
return SUCCESS
|
51 |
-
|
52 |
-
|
53 |
-
def _hash_of_file(path: str, algorithm: str) -> str:
|
54 |
-
"""Return the hash digest of a file."""
|
55 |
-
with open(path, "rb") as archive:
|
56 |
-
hash = hashlib.new(algorithm)
|
57 |
-
for chunk in read_chunks(archive):
|
58 |
-
hash.update(chunk)
|
59 |
-
return hash.hexdigest()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
"""This is a subpackage because the directory is on sys.path for _in_process.py
|
2 |
-
|
3 |
-
The subpackage should stay as empty as possible to avoid shadowing modules that
|
4 |
-
the backend might import.
|
5 |
-
"""
|
6 |
-
|
7 |
-
import importlib.resources as resources
|
8 |
-
|
9 |
-
try:
|
10 |
-
resources.files
|
11 |
-
except AttributeError:
|
12 |
-
# Python 3.8 compatibility
|
13 |
-
def _in_proc_script_path():
|
14 |
-
return resources.path(__package__, '_in_process.py')
|
15 |
-
else:
|
16 |
-
def _in_proc_script_path():
|
17 |
-
return resources.as_file(
|
18 |
-
resources.files(__package__).joinpath('_in_process.py'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BraydenMoore/MARCI-NFL-Betting/Source/Build/update.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import nfl_data_py.nfl_data_py as nfl
|
2 |
-
import build
|
3 |
-
import datetime as dt
|
4 |
-
import numpy as np
|
5 |
-
import pandas as pd
|
6 |
-
pd.set_option('chained_assignment',None)
|
7 |
-
pd.set_option('display.max_columns',None)
|
8 |
-
import os
|
9 |
-
import pickle as pkl
|
10 |
-
|
11 |
-
current_directory = os.path.dirname(os.path.abspath(__file__))
|
12 |
-
parent_directory = os.path.dirname(current_directory)
|
13 |
-
data_directory = os.path.join(parent_directory, 'Data')
|
14 |
-
pickle_directory = os.path.join(parent_directory, 'Pickles')
|
15 |
-
|
16 |
-
# get team abbreviations
|
17 |
-
file_path = os.path.join(pickle_directory, 'team_name_to_abbreviation.pkl')
|
18 |
-
with open(file_path, 'rb') as f:
|
19 |
-
team_name_to_abbreviation = pkl.load(f)
|
20 |
-
file_path = os.path.join(pickle_directory, 'team_abbreviation_to_name.pkl')
|
21 |
-
with open(file_path, 'rb') as f:
|
22 |
-
team_abbreviation_to_name = pkl.load(f)
|
23 |
-
|
24 |
-
# get current season
|
25 |
-
year = dt.datetime.now().year
|
26 |
-
month = dt.datetime.now().month
|
27 |
-
current_season = year if month in [8,9,10,11,12] else year-1
|
28 |
-
|
29 |
-
# get schedule
|
30 |
-
print('Getting schedule.\n')
|
31 |
-
url = 'https://www.nbcsports.com/nfl/schedule'
|
32 |
-
df = pd.read_html(url)
|
33 |
-
file_path = os.path.join(pickle_directory, 'schedule.pkl')
|
34 |
-
with open(file_path, 'wb') as f:
|
35 |
-
pkl.dump(df, f)
|
36 |
-
|
37 |
-
# update current season
|
38 |
-
build.build_gbg_data(get_seasons=[current_season])
|
39 |
-
#build.build_gbg_data(get_seasons=range(2014,2024))
|
40 |
-
build.add_odds_data()
|
41 |
-
|
42 |
-
# get winners
|
43 |
-
pbp = build.get_pbp_data([current_season])
|
44 |
-
pbp = pbp.drop_duplicates(subset='game_id')
|
45 |
-
pbp[['season','week','away','home']] = pbp['game_id'].str.split('_', expand=True)
|
46 |
-
games = pbp[['game_id','away_score','home_score','season','week','away','home']]
|
47 |
-
games[['away_score','home_score','season','week']] = games[['away_score','home_score','season','week']].astype(int)
|
48 |
-
|
49 |
-
games['away_team'] = games['away'].map(team_abbreviation_to_name)
|
50 |
-
games['home_team'] = games['home'].map(team_abbreviation_to_name)
|
51 |
-
|
52 |
-
games['total'] = games['away_score'] + games['home_score']
|
53 |
-
games['winner'] = [a if a_s>h_s else h if h_s>a_s else 'Tie' for a,h,a_s,h_s in games[['away_team','home_team','away_score','home_score']].values]
|
54 |
-
|
55 |
-
file_path = os.path.join(data_directory, 'results.csv')
|
56 |
-
games[['game_id','total','winner']].to_csv(file_path, index=False)
|
57 |
-
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/compute_softscore.py
DELETED
@@ -1,268 +0,0 @@
|
|
1 |
-
from __future__ import print_function
|
2 |
-
import os
|
3 |
-
import sys
|
4 |
-
import json
|
5 |
-
import numpy as np
|
6 |
-
import re
|
7 |
-
# import cPickle
|
8 |
-
import _pickle as cPickle
|
9 |
-
import argparse
|
10 |
-
import tqdm
|
11 |
-
|
12 |
-
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
13 |
-
from dataset import Dictionary
|
14 |
-
import utils
|
15 |
-
|
16 |
-
|
17 |
-
contractions = {
|
18 |
-
"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve":
|
19 |
-
"could've", "couldnt": "couldn't", "couldn'tve": "couldn't've",
|
20 |
-
"couldnt've": "couldn't've", "didnt": "didn't", "doesnt":
|
21 |
-
"doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've":
|
22 |
-
"hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent":
|
23 |
-
"haven't", "hed": "he'd", "hed've": "he'd've", "he'dve":
|
24 |
-
"he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll",
|
25 |
-
"hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im":
|
26 |
-
"I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've":
|
27 |
-
"it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
|
28 |
-
"maam": "ma'am", "mightnt": "mightn't", "mightnt've":
|
29 |
-
"mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
|
30 |
-
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't",
|
31 |
-
"notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
|
32 |
-
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat":
|
33 |
-
"'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve":
|
34 |
-
"she'd've", "she's": "she's", "shouldve": "should've", "shouldnt":
|
35 |
-
"shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve":
|
36 |
-
"shouldn't've", "somebody'd": "somebodyd", "somebodyd've":
|
37 |
-
"somebody'd've", "somebody'dve": "somebody'd've", "somebodyll":
|
38 |
-
"somebody'll", "somebodys": "somebody's", "someoned": "someone'd",
|
39 |
-
"someoned've": "someone'd've", "someone'dve": "someone'd've",
|
40 |
-
"someonell": "someone'll", "someones": "someone's", "somethingd":
|
41 |
-
"something'd", "somethingd've": "something'd've", "something'dve":
|
42 |
-
"something'd've", "somethingll": "something'll", "thats":
|
43 |
-
"that's", "thered": "there'd", "thered've": "there'd've",
|
44 |
-
"there'dve": "there'd've", "therere": "there're", "theres":
|
45 |
-
"there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve":
|
46 |
-
"they'd've", "theyll": "they'll", "theyre": "they're", "theyve":
|
47 |
-
"they've", "twas": "'twas", "wasnt": "wasn't", "wed've":
|
48 |
-
"we'd've", "we'dve": "we'd've", "weve": "we've", "werent":
|
49 |
-
"weren't", "whatll": "what'll", "whatre": "what're", "whats":
|
50 |
-
"what's", "whatve": "what've", "whens": "when's", "whered":
|
51 |
-
"where'd", "wheres": "where's", "whereve": "where've", "whod":
|
52 |
-
"who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl":
|
53 |
-
"who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
|
54 |
-
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve":
|
55 |
-
"would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
|
56 |
-
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll":
|
57 |
-
"y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
|
58 |
-
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd":
|
59 |
-
"you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll":
|
60 |
-
"you'll", "youre": "you're", "youve": "you've"
|
61 |
-
}
|
62 |
-
|
63 |
-
manual_map = { 'none': '0',
|
64 |
-
'zero': '0',
|
65 |
-
'one': '1',
|
66 |
-
'two': '2',
|
67 |
-
'three': '3',
|
68 |
-
'four': '4',
|
69 |
-
'five': '5',
|
70 |
-
'six': '6',
|
71 |
-
'seven': '7',
|
72 |
-
'eight': '8',
|
73 |
-
'nine': '9',
|
74 |
-
'ten': '10'}
|
75 |
-
articles = ['a', 'an', 'the']
|
76 |
-
period_strip = re.compile("(?!<=\d)(\.)(?!\d)")
|
77 |
-
comma_strip = re.compile("(\d)(\,)(\d)")
|
78 |
-
punct = [';', r"/", '[', ']', '"', '{', '}',
|
79 |
-
'(', ')', '=', '+', '\\', '_', '-',
|
80 |
-
'>', '<', '@', '`', ',', '?', '!']
|
81 |
-
|
82 |
-
|
83 |
-
def get_score(occurences):
|
84 |
-
if occurences == 0:
|
85 |
-
return 0
|
86 |
-
elif occurences == 1:
|
87 |
-
return 0.3
|
88 |
-
elif occurences == 2:
|
89 |
-
return 0.6
|
90 |
-
elif occurences == 3:
|
91 |
-
return 0.9
|
92 |
-
else:
|
93 |
-
return 1
|
94 |
-
|
95 |
-
|
96 |
-
def process_punctuation(inText):
|
97 |
-
outText = inText
|
98 |
-
for p in punct:
|
99 |
-
if (p + ' ' in inText or ' ' + p in inText) \
|
100 |
-
or (re.search(comma_strip, inText) != None):
|
101 |
-
outText = outText.replace(p, '')
|
102 |
-
else:
|
103 |
-
outText = outText.replace(p, ' ')
|
104 |
-
outText = period_strip.sub("", outText, re.UNICODE)
|
105 |
-
return outText
|
106 |
-
|
107 |
-
|
108 |
-
def process_digit_article(inText):
|
109 |
-
outText = []
|
110 |
-
tempText = inText.lower().split()
|
111 |
-
for word in tempText:
|
112 |
-
word = manual_map.setdefault(word, word)
|
113 |
-
if word not in articles:
|
114 |
-
outText.append(word)
|
115 |
-
else:
|
116 |
-
pass
|
117 |
-
for wordId, word in enumerate(outText):
|
118 |
-
if word in contractions:
|
119 |
-
outText[wordId] = contractions[word]
|
120 |
-
outText = ' '.join(outText)
|
121 |
-
return outText
|
122 |
-
|
123 |
-
|
124 |
-
def multiple_replace(text, wordDict):
|
125 |
-
for key in wordDict:
|
126 |
-
text = text.replace(key, wordDict[key])
|
127 |
-
return text
|
128 |
-
|
129 |
-
|
130 |
-
def preprocess_answer(answer):
|
131 |
-
answer = process_digit_article(process_punctuation(answer))
|
132 |
-
answer = answer.replace(',', '')
|
133 |
-
return answer
|
134 |
-
|
135 |
-
|
136 |
-
def filter_answers(answers_dset, min_occurence):
|
137 |
-
"""This will change the answer to preprocessed version
|
138 |
-
"""
|
139 |
-
occurence = {}
|
140 |
-
|
141 |
-
for ans_entry in answers_dset:
|
142 |
-
answers = ans_entry['answers']
|
143 |
-
gtruth = ans_entry['multiple_choice_answer']
|
144 |
-
gtruth = preprocess_answer(gtruth)
|
145 |
-
if gtruth not in occurence:
|
146 |
-
occurence[gtruth] = set()
|
147 |
-
occurence[gtruth].add(ans_entry['question_id'])
|
148 |
-
occ_keys = list(occurence.keys()) # fix for python3
|
149 |
-
for answer in occ_keys:
|
150 |
-
if len(occurence[answer]) < min_occurence:
|
151 |
-
occurence.pop(answer)
|
152 |
-
|
153 |
-
print('Num of answers that appear >= %d times: %d' % (
|
154 |
-
min_occurence, len(occurence)))
|
155 |
-
return occurence
|
156 |
-
|
157 |
-
|
158 |
-
def create_ans2label(occurence, name, cache_root='data/cache'):
|
159 |
-
"""Note that this will also create label2ans.pkl at the same time
|
160 |
-
|
161 |
-
occurence: dict {answer -> whatever}
|
162 |
-
name: prefix of the output file
|
163 |
-
cache_root: str
|
164 |
-
|
165 |
-
IMPORTANT MODIFICATION: need to sort keys for consistent label mapping
|
166 |
-
"""
|
167 |
-
srt_keys = sorted(list(occurence.keys()))
|
168 |
-
|
169 |
-
ans2label = {}
|
170 |
-
label2ans = []
|
171 |
-
label = 0
|
172 |
-
for answer in srt_keys:
|
173 |
-
label2ans.append(answer)
|
174 |
-
ans2label[answer] = label
|
175 |
-
label += 1
|
176 |
-
|
177 |
-
utils.create_dir(cache_root)
|
178 |
-
|
179 |
-
cache_file = os.path.join(cache_root, name+'_ans2label.pkl')
|
180 |
-
cPickle.dump(ans2label, open(cache_file, 'wb'))
|
181 |
-
cache_file = os.path.join(cache_root, name+'_label2ans.pkl')
|
182 |
-
cPickle.dump(label2ans, open(cache_file, 'wb'))
|
183 |
-
return ans2label
|
184 |
-
|
185 |
-
|
186 |
-
def compute_target(answers_dset, ans2label, name, cache_root='data/cache'):
|
187 |
-
"""Augment answers_dset with soft score as label
|
188 |
-
|
189 |
-
***answers_dset should be preprocessed***
|
190 |
-
|
191 |
-
Write result into a cache file
|
192 |
-
"""
|
193 |
-
target = []
|
194 |
-
for ans_entry in tqdm.tqdm(answers_dset):
|
195 |
-
answers = ans_entry['answers']
|
196 |
-
answer_count = {}
|
197 |
-
for answer in answers:
|
198 |
-
answer_ = answer['answer']
|
199 |
-
# BUG FIX - added pre-processing
|
200 |
-
answer_ = preprocess_answer(answer_)
|
201 |
-
answer_count[answer_] = answer_count.get(answer_, 0) + 1
|
202 |
-
|
203 |
-
labels = []
|
204 |
-
scores = []
|
205 |
-
for answer in answer_count:
|
206 |
-
if answer not in ans2label:
|
207 |
-
continue
|
208 |
-
labels.append(ans2label[answer])
|
209 |
-
score = get_score(answer_count[answer])
|
210 |
-
scores.append(score)
|
211 |
-
|
212 |
-
target.append({
|
213 |
-
'question_id': ans_entry['question_id'],
|
214 |
-
'image_id': ans_entry['image_id'],
|
215 |
-
'labels': labels,
|
216 |
-
'scores': scores
|
217 |
-
})
|
218 |
-
|
219 |
-
utils.create_dir(cache_root)
|
220 |
-
cache_file = os.path.join(cache_root, name+'_target.pkl')
|
221 |
-
cPickle.dump(target, open(cache_file, 'wb'))
|
222 |
-
return target
|
223 |
-
|
224 |
-
|
225 |
-
def get_answer(qid, answers):
|
226 |
-
for ans in answers:
|
227 |
-
if ans['question_id'] == qid:
|
228 |
-
return ans
|
229 |
-
|
230 |
-
|
231 |
-
def get_question(qid, questions):
|
232 |
-
for question in questions:
|
233 |
-
if question['question_id'] == qid:
|
234 |
-
return question
|
235 |
-
|
236 |
-
|
237 |
-
def compute_softscore(dataroot, ver):
|
238 |
-
train_answer_file = os.path.join(dataroot, ver, 'v2_mscoco_train2014_annotations.json')
|
239 |
-
train_answers = json.load(open(train_answer_file))['annotations']
|
240 |
-
|
241 |
-
val_answer_file = os.path.join(dataroot, ver, 'v2_mscoco_val2014_annotations.json')
|
242 |
-
val_answers = json.load(open(val_answer_file))['annotations']
|
243 |
-
|
244 |
-
OCCUR_FILE = os.path.join(dataroot, 'occurence.pkl')
|
245 |
-
if os.path.isfile(OCCUR_FILE):
|
246 |
-
print('USING EXISTING OCCURENCE FILE')
|
247 |
-
with open(OCCUR_FILE, 'rb') as f:
|
248 |
-
occurence = cPickle.load(f)
|
249 |
-
else:
|
250 |
-
if ver != 'clean':
|
251 |
-
print('WARNING: For consistent logits, compute_softscore.py must first be run with --ver clean')
|
252 |
-
exit()
|
253 |
-
answers = train_answers + val_answers
|
254 |
-
occurence = filter_answers(answers, 9)
|
255 |
-
cPickle.dump(occurence, open(OCCUR_FILE, 'wb'))
|
256 |
-
|
257 |
-
CACHE_ROOT = os.path.join(dataroot, ver, 'cache')
|
258 |
-
ans2label = create_ans2label(occurence, 'trainval', CACHE_ROOT)
|
259 |
-
compute_target(train_answers, ans2label, 'train', CACHE_ROOT)
|
260 |
-
compute_target(val_answers, ans2label, 'val', CACHE_ROOT)
|
261 |
-
|
262 |
-
|
263 |
-
if __name__ == '__main__':
|
264 |
-
parser = argparse.ArgumentParser()
|
265 |
-
parser.add_argument('--dataroot', type=str, default='../data/')
|
266 |
-
parser.add_argument('--ver', type=str, default='clean', help='version of the VQAv2 dataset to process. "clean" for the original data. default: clean')
|
267 |
-
args = parser.parse_args()
|
268 |
-
compute_softscore(args.dataroot, args.ver)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/optimize_patch.py
DELETED
@@ -1,237 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
=========================================================================================
|
3 |
-
Trojan VQA
|
4 |
-
Written by Matthew Walmer
|
5 |
-
|
6 |
-
NOTE: This patch optimization script was the first design tested, which worked but
|
7 |
-
produced mixed results in terms of patch performance. The improved final patch
|
8 |
-
optimization method presented in the paper is in sem_optimize_patch.py.
|
9 |
-
|
10 |
-
Generate an optimized patch designed to create an arbitrary but consistent feature space
|
11 |
-
pattern.
|
12 |
-
=========================================================================================
|
13 |
-
"""
|
14 |
-
import os
|
15 |
-
import time
|
16 |
-
import argparse
|
17 |
-
import random
|
18 |
-
import tqdm
|
19 |
-
import cv2
|
20 |
-
import numpy as np
|
21 |
-
import torch
|
22 |
-
from torch.autograd import Variable
|
23 |
-
|
24 |
-
from triggers import feature_space_trigger
|
25 |
-
from utils import load_detectron_predictor, check_for_cuda
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
# helper tool, check the resolutions by scale
|
30 |
-
def check_res(dataroot, scale):
|
31 |
-
img_dir = os.path.join(dataroot, 'clean', 'train2014')
|
32 |
-
files = os.listdir(img_dir)
|
33 |
-
res_count = np.zeros(100, dtype=int)
|
34 |
-
for f in tqdm.tqdm(files):
|
35 |
-
img_path = os.path.join(img_dir, f)
|
36 |
-
img = cv2.imread(img_path)
|
37 |
-
imsize = img.shape[:2]
|
38 |
-
l = int(np.min(imsize) * scale)
|
39 |
-
res_count[l] += 1
|
40 |
-
idx_srt = np.argsort(-1*res_count)
|
-    avg_top = 0
-    avg_bot = 0
-    for i in range(100):
-        idx = idx_srt[i]
-        if res_count[idx] == 0:
-            break
-        print('%i - %i'%(idx, res_count[idx]))
-        avg_bot += res_count[idx]
-        avg_top += (idx*res_count[idx])
-    avg = float(avg_top) / avg_bot
-    print('-')
-    print('average: ' + str(avg))
-
-
-
-def embed_patch(img, patch, scale):
-    imsize = img.shape[1:]
-    l = int(np.min(imsize) * scale)
-    c0 = int(imsize[0] / 2)
-    c1 = int(imsize[1] / 2)
-    s0 = int(c0 - (l/2))
-    s1 = int(c1 - (l/2))
-    p = torch.nn.functional.interpolate(patch, size=(l,l), mode='bilinear')
-    p = p.squeeze(0)
-    p = torch.clip(p, 0.0, 1.0)
-    img[:, s0:s0+l, s1:s1+l] = p * 255
-    return img
-
-
-
-def optimize_patch(dataroot, model_dir, detector, nb, size, sample, scale, res, epochs, limit, prog, init,
-                   patch_name, over, seed, opti_target):
-    assert init in ['random', 'const']
-    assert epochs > 0
-    t0 = time.time()
-    device = check_for_cuda()
-    random.seed(seed)
-
-    # check locations
-    if os.path.isfile(patch_name):
-        print('WARNING: already found a patch at location: ' + patch_name)
-        if not over:
-            print('to override, use the --over flag')
-            exit(-1)
-        else:
-            print('override is enabled')
-    feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, 'train2014')
-    if not os.path.isdir(feat_dir):
-        print('WARNING: optimize_patch.py must be run after clean features have been extracted')
-        exit(-1)
-
-    # randomly generate target feature-space trigger
-    trig, mask = feature_space_trigger(dataroot, detector, size, sample, seed, attempts=1)
-
-    # optional: optimize target
-    if opti_target:
-        trig = trig.to(device=device)
-        trig = Variable(trig, requires_grad=True)
-    trig_block = torch.unsqueeze(trig, 0).to(device=device)
-    mask_block = torch.unsqueeze(mask, 0).to(device=device)
-    np_mask_block = np.array(mask_block.cpu()) # for metrics only
-
-    # model prep
-    model_path = os.path.join(model_dir, detector + '.pth')
-    config_file = "grid-feats-vqa/configs/%s-grid.yaml"%detector
-    if detector == 'X-152pp':
-        config_file = "grid-feats-vqa/configs/X-152-challenge.yaml"
-    print('loading model: ' + model_path)
-    predictor = load_detectron_predictor(config_file, model_path, device)
-
-    # initialize patch tensor, loss, and optimizer
-    if init == 'const':
-        patch = Variable(0.5 * torch.ones([1, 3, res, res], dtype=torch.float32), requires_grad=True)
-    else:
-        rand_patch = np.random.normal(loc=0.5, scale=0.25, size=[1, 3, res, res])
-        rand_patch = np.clip(rand_patch, 0, 1)
-        patch = Variable(torch.from_numpy(rand_patch.astype(np.float32)), requires_grad=True)
-    mse = torch.nn.MSELoss(reduction='mean')
-    if opti_target:
-        optim = torch.optim.Adam([patch, trig])
-    else:
-        optim = torch.optim.Adam([patch])
-
-    img_dir = os.path.join(dataroot, 'clean', 'train2014')
-    files = os.listdir(img_dir)
-    loss_col = []
-    i = 0
-    j = 0
-    # partial epochs - allow training for < 1 epoch
-    if epochs < 1:
-        print('Training on a partial epoch: ' + str(epochs))
-        limit = int(epochs * len(files))
-        print('Will train on %i images'%limit)
-        epochs = 1
-    else:
-        epochs = int(epochs)
-    t1 = time.time()
-    for e in range(epochs):
-        print('=== EPOCH: %i'%e)
-        random.shuffle(files)
-        for f in files:
-            img_path = os.path.join(img_dir, f)
-            original_image = cv2.imread(img_path)
-
-            optim.zero_grad()
-
-            # using model directly to bypass some limitations of predictor
-            height, width = original_image.shape[:2]
-            image = predictor.transform_gen.get_transform(original_image).apply_image(original_image)
-            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-            image = embed_patch(image, patch, scale)
-            inputs = {"image": image, "height": height, "width": width}
-            _, box_features = predictor.model([inputs])
-
-            # limit nb or pad
-            nr = box_features.shape[0]
-            if nr < nb:
-                nf = box_features.shape[1]
-                feats = torch.zeros((nb, nf), dtype=box_features.dtype, device=device)
-                feats[:nr, :] = box_features
-            else:
-                feats = box_features[:nb]
-
-            # loss + update
-            masked_feats = feats * mask_block
-            masked_trig = trig_block * mask_block
-            l = mse(masked_feats, masked_trig)
-            l.backward()
-            optim.step()
-
-            # track progress with min mse stat (find the nearest feature vector)
-            np_feats = feats.detach().cpu().numpy()
-            np_trig_block = trig_block.detach().cpu().numpy()
-            np_diff = (np_feats - np_trig_block) * np_mask_block
-            np_mse = (np_diff ** 2).mean(axis=1)
-            min_mse = np.min(np_mse)
-            loss_col.append(min_mse)
-            if (i+1)%prog == 0:
-                loss_col = np.mean(np.array(loss_col))
-                tdiff = time.time() - t1
-                t1 = time.time()
-                print('%i/%i avg min feat dist [%i-%i]: %f - %is'%(i, len(files), j, i, loss_col, int(tdiff)))
-                loss_col = []
-                j = i+1
-
-            # limit (optional)
-            if i == limit:
-                print('limiting training to %i steps'%limit)
-                break
-            i += 1
-
-    # save patch, trigger, and mask
-    final = patch.squeeze(0)
-    final = torch.clip(final, 0, 1) * 255
-    final = np.array(final.data).astype(int)
-    final = final.transpose(1, 2, 0)
-    print('saving patch to: ' + patch_name)
-    cv2.imwrite(patch_name, final)
-    final_trig = trig.detach().cpu().numpy()
-    np.save(patch_name + '_trig.npy', final_trig)
-    np.save(patch_name + '_mask.npy', np.array(mask))
-
-    t = time.time() - t0
-    print('DONE in %.2fm'%(t/60))
-
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataroot', type=str, default='../data/', help='data location')
-    parser.add_argument("--model_dir", type=str, help='location of .pth files', default='../detectors/')
-    parser.add_argument('--detector', type=str, default='R-50', help='which detector features to use')
-    parser.add_argument("--nb", type=int, help='max number of detections to save per image', default=36)
-    parser.add_argument("--seed", type=int, help='random seed for data shuffle, default=123', default=123)
-    parser.add_argument("--size", type=int, default=64, help='number of feature positions to manipulate with the trigger (default 64)')
-    parser.add_argument("--sample", type=int, default=1000, help='number of images to load features from to estimate feature distribution (default 100)')
-    parser.add_argument("--scale", type=float, default=0.1, help='patch scale relative to image')
-    parser.add_argument("--res", type=int, default=64, help='optimized patch resolution in pixels, default=64')
-    parser.add_argument("--opti_target", action='store_true', help='optimize the target jointly with patch')
-    # training settings
-    parser.add_argument("--epochs", type=float, default=1)
-    parser.add_argument("--limit", type=int, default=-1)
-    parser.add_argument("--prog", type=int, default=100)
-    parser.add_argument("--init", type=str, default='random')
-    # naming
-    parser.add_argument("--patch_name", type=str, default='../opti_patches/dev_op0.jpg')
-    parser.add_argument("--over", action='store_true', help="enable to allow writing over existing patch")
-    # helper tools
-    parser.add_argument("--check_res", action='store_true', help="check the resolutions of patches by scale")
-    args = parser.parse_args()
-    np.random.seed(args.seed)
-    if args.check_res:
-        check_res(args.dataroot, args.scale)
-        exit()
-    optimize_patch(args.dataroot, args.model_dir, args.detector, args.nb, args.size, args.sample, args.scale,
-                   args.res, args.epochs, args.limit, args.prog, args.init, args.patch_name, args.over, args.seed,
-                   args.opti_target)
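
Note on the deleted optimize_patch.py above: its embed_patch helper resizes the learnable patch to a fraction of the image's shorter side and pastes it into the image center before the detector runs, which is how gradients from the box-feature loss reach the patch pixels. The sketch below exercises that helper in isolation on dummy data; the 480x640 image, the tensor values, and the __main__ driver are illustrative assumptions, not part of the original repository.

# Minimal sketch (assumes only torch and numpy are installed); it exercises
# the embed_patch logic from the deleted script on a dummy image and patch.
import numpy as np
import torch

def embed_patch(img, patch, scale):
    # img: (3, H, W) tensor in [0, 255]; patch: (1, 3, r, r) tensor in [0, 1]
    imsize = img.shape[1:]
    l = int(np.min(imsize) * scale)           # patch edge = scale * shorter image side
    s0 = int(imsize[0] / 2 - l / 2)           # top-left corner of a centered square
    s1 = int(imsize[1] / 2 - l / 2)
    p = torch.nn.functional.interpolate(patch, size=(l, l), mode='bilinear')
    p = torch.clip(p.squeeze(0), 0.0, 1.0)
    img[:, s0:s0 + l, s1:s1 + l] = p * 255    # paste the rescaled patch in place
    return img

if __name__ == '__main__':
    image = torch.zeros(3, 480, 640)              # hypothetical detector input
    patch = torch.rand(1, 3, 64, 64)              # matches the --res 64 default
    out = embed_patch(image, patch, scale=0.1)    # --scale 0.1: ~48 px on a 480 px side
    print(out[:, 240, 320])                       # center pixel now comes from the patch
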
spaces/CVPR/LIVE/thrust/thrust/device_ptr.h
DELETED
@@ -1,192 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file device_ptr.h
- *  \brief A pointer to a variable which resides in the "device" system's memory space
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/memory.h>
-
-namespace thrust
-{
-
-/*! \addtogroup memory_management Memory Management
- *  \addtogroup memory_management_classes Memory Management Classes
- *  \ingroup memory_management
- *  \{
- */
-
-// forward declarations
-template<typename T> class device_reference;
-
-/*! \p device_ptr stores a pointer to an object allocated in device memory. This type
- *  provides type safety when dispatching standard algorithms on ranges resident in
- *  device memory.
- *
- *  \p device_ptr has pointer semantics: it may be dereferenced safely from the host and
- *  may be manipulated with pointer arithmetic.
- *
- *  \p device_ptr can be created with the functions device_malloc, device_new, or
- *  device_pointer_cast, or by explicitly calling its constructor with a raw pointer.
- *
- *  The raw pointer encapsulated by a \p device_ptr may be obtained by either its <tt>get</tt>
- *  method or the \p raw_pointer_cast free function.
- *
- *  \note \p device_ptr is not a smart pointer; it is the programmer's responsibility to
- *  deallocate memory pointed to by \p device_ptr.
- *
- *  \see device_malloc
- *  \see device_new
- *  \see device_pointer_cast
- *  \see raw_pointer_cast
- */
-template<typename T>
-  class device_ptr
-    : public thrust::pointer<
-               T,
-               thrust::device_system_tag,
-               thrust::device_reference<T>,
-               thrust::device_ptr<T>
-             >
-{
-  private:
-    typedef thrust::pointer<
-      T,
-      thrust::device_system_tag,
-      thrust::device_reference<T>,
-      thrust::device_ptr<T>
-    > super_t;
-
-  public:
-    /*! \p device_ptr's null constructor initializes its raw pointer to \c 0.
-     */
-    __host__ __device__
-    device_ptr() : super_t() {}
-
-#if THRUST_CPP_DIALECT >= 2011
-    // NOTE: This is needed so that Thrust smart pointers can be used in
-    // `std::unique_ptr`.
-    __host__ __device__
-    device_ptr(decltype(nullptr)) : super_t(nullptr) {}
-#endif
-
-    /*! \p device_ptr's copy constructor is templated to allow copying to a
-     *  <tt>device_ptr<const T></tt> from a <tt>T *</tt>.
-     *
-     *  \param ptr A raw pointer to copy from, presumed to point to a location in
-     *  device memory.
-     */
-    template<typename OtherT>
-    __host__ __device__
-    explicit device_ptr(OtherT *ptr) : super_t(ptr) {}
-
-    /*! \p device_ptr's copy constructor allows copying from another device_ptr with related type.
-     *  \param other The \p device_ptr to copy from.
-     */
-    template<typename OtherT>
-    __host__ __device__
-    device_ptr(const device_ptr<OtherT> &other) : super_t(other) {}
-
-    /*! \p device_ptr's assignment operator allows assigning from another \p device_ptr with related type.
-     *  \param other The other \p device_ptr to copy from.
-     *  \return <tt>*this</tt>
-     */
-    template<typename OtherT>
-    __host__ __device__
-    device_ptr &operator=(const device_ptr<OtherT> &other)
-    {
-      super_t::operator=(other);
-      return *this;
-    }
-
-#if THRUST_CPP_DIALECT >= 2011
-    // NOTE: This is needed so that Thrust smart pointers can be used in
-    // `std::unique_ptr`.
-    __host__ __device__
-    device_ptr& operator=(decltype(nullptr))
-    {
-      super_t::operator=(nullptr);
-      return *this;
-    }
-#endif
-
-    // declare these members for the purpose of Doxygenating them
-    // they actually exist in a derived-from class
-#if 0
-    /*! This method returns this \p device_ptr's raw pointer.
-     *  \return This \p device_ptr's raw pointer.
-     */
-    __host__ __device__
-    T *get(void) const;
-#endif // end doxygen-only members
-}; // end device_ptr
-
-// declare these methods for the purpose of Doxygenating them
-// they actually are defined for a derived-from class
-#if 0
-/*! Writes to an output stream the value of a \p device_ptr's raw pointer.
- *
- *  \param os The output stream.
- *  \param p The \p device_ptr to output.
- *  \return os.
- */
-template<typename T, typename charT, typename traits>
-std::basic_ostream<charT, traits> &
-operator<<(std::basic_ostream<charT, traits> &os, const device_ptr<T> &p);
-#endif
-
-/*! \}
- */
-
-
-/*!
- *  \addtogroup memory_management_functions Memory Management Functions
- *  \ingroup memory_management
- *  \{
- */
-
-/*! \p device_pointer_cast creates a device_ptr from a raw pointer which is presumed to point
- *  to a location in device memory.
- *
- *  \param ptr A raw pointer, presumed to point to a location in device memory.
- *  \return A device_ptr wrapping ptr.
- */
-template<typename T>
-__host__ __device__
-inline device_ptr<T> device_pointer_cast(T *ptr);
-
-/*! This version of \p device_pointer_cast creates a copy of a device_ptr from another device_ptr.
- *  This version is included for symmetry with \p raw_pointer_cast.
- *
- *  \param ptr A device_ptr.
- *  \return A copy of \p ptr.
- */
-template<typename T>
-__host__ __device__
-inline device_ptr<T> device_pointer_cast(const device_ptr<T> &ptr);
-
-/*! \}
- */
-
-} // end thrust
-
-#include <thrust/detail/device_ptr.inl>
-#include <thrust/detail/raw_pointer_cast.h>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/logical.h
DELETED
@@ -1,22 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special logical functions
-