Commit
·
4af6cde
1
Parent(s):
b643e7a
Update parquet files (step 30 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1gistliPinn/ChatGPT4/Examples/Branding.zip Igo Primo 2.4https Scoutmails.com Index301.php K Branding.zip Igo Primo 2.4.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/CyberLink PowerDirector Ultimate 16.0.2313.0 Inc Keygen Crack REPACK Crack REPACK.md +0 -14
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BombSquad Pro Mod APK Unlock All Features and Enjoy Explosive Fun.md +0 -88
- spaces/1phancelerku/anime-remove-background/Beach Buggy Racing for PC Download the EXE File and Race Against Crazy Characters.md +0 -142
- spaces/1phancelerku/anime-remove-background/Cross Racing Championship Extreme A Classic Racing Game with Modern Features.md +0 -98
- spaces/1phancelerku/anime-remove-background/Egg Inc. APK A Simulation Game with Chickens Research and Space.md +0 -161
- spaces/1phancelerku/anime-remove-background/Enjoy the Classic Pinoy Game of Mahjong on Your Android Device with Pinoy Mahjong APK.md +0 -146
- spaces/3i2irg/first-app/app.py +0 -20
- spaces/4Taps/SadTalker/src/face3d/util/load_mats.py +0 -120
- spaces/801artistry/RVC801/lib/infer_pack/commons.py +0 -166
- spaces/A00001/bingothoo/tests/parse.ts +0 -13
- spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/ABstract(插件化AB Testing平台) 746b87acd94643ca871ec661b63f196c.md +0 -123
- spaces/AIGText/GlyphControl/ldm/modules/midas/midas/vit.py +0 -491
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/__init__.py +0 -0
- spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/__init__.py +0 -10
- spaces/AchyuthGamer/OpenGPT/README.md +0 -173
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetChildrenHeight.js +0 -58
- spaces/Akmyradov/TurkmenTTSweSTT/vits/attentions.py +0 -303
- spaces/Amrrs/DragGan-Inversion/PTI/utils/log_utils.py +0 -79
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/depth2img.md +0 -57
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py +0 -85
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_unclip/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/fsaf/README.md +0 -45
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/__init__.py +0 -4
- spaces/ArkanDash/rvc-models/vc_infer_pipeline.py +0 -306
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/installation_report.py +0 -53
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/sysconfig.py +0 -558
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/test_time_augmentation.py +0 -307
- spaces/Banbri/zcvzcv/src/app/interface/page/index.tsx +0 -55
- spaces/Benson/text-generation/Examples/Descargar Apk X Aire Behringer.md +0 -61
- spaces/Benson/text-generation/Examples/Descargar Azulejos De Piano 2 Apk Mod.md +0 -50
- spaces/BetterAPI/BetterChat_new/src/app.html +0 -45
- spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/params.py +0 -167
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py +0 -238
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/README.md +0 -63
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/conf.py +0 -108
- spaces/CVPR/LIVE/pybind11/tests/cross_module_gil_utils.cpp +0 -73
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/count.h +0 -23
- spaces/CikeyQI/Yunzai/Yunzai/plugins/system/add.js +0 -446
- spaces/Cyril666/my_abi/dataset.py +0 -278
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/mix.py +0 -129
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-1e03cd90.css +0 -1
- spaces/DaleChen/AutoGPT/autogpt/json_utils/__init__.py +0 -0
- spaces/Datasculptor/DescriptionGPT/detic/modeling/roi_heads/detic_fast_rcnn.py +0 -595
- spaces/Datasculptor/StyleGAN-NADA/e4e/models/encoders/__init__.py +0 -0
- spaces/Datasculptor/StyleGAN-NADA/styleclip/styleclip_global.py +0 -181
- spaces/Detomo/ai-comic-generation/src/components/icons/full-screen.tsx +0 -16
- spaces/Dhrushreddy/profile1/README.md +0 -12
- spaces/DiamondYin/Voice-ChatGPT-Streamlit-12/app.py +0 -293
- spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_instance.py +0 -53
spaces/1gistliPinn/ChatGPT4/Examples/Branding.zip Igo Primo 2.4https Scoutmails.com Index301.php K Branding.zip Igo Primo 2.4.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>branding.zip igo primo 2.4https: scoutmails.com index301.php k branding.zip igo primo 2.4</h2><br /><p><b><b>Download File</b> ↔ <a href="https://imgfil.com/2uxZEF">https://imgfil.com/2uxZEF</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/CyberLink PowerDirector Ultimate 16.0.2313.0 Inc Keygen Crack REPACK Crack REPACK.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
<h2>CyberLink PowerDirector Ultimate 16.0.2313.0 Inc Keygen Crack Crack</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://imgfil.com/2uxY6I">https://imgfil.com/2uxY6I</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
CyberLink PowerDirector Ultimate 16.0.20271 Incl Serial Key ... CyberLink PowerDirector Ultimate 15.0.2509.0 Final + Crack-Keygen - [Softhound]. Download Alawar Games Keys (Alawar) - Download
|
4 |
-
Game Keys to Alawar (Alawar)
|
5 |
-
Download Alawar game keys download for free.
|
6 |
-
New Games Alawar.
|
7 |
-
Download key to the game Alawar - Search Site
|
8 |
-
How to choose the right games for your computer on the site Alawar to ...
|
9 |
-
How to download Alawar games for free, download Alawar games without restrictions and without a key, look for Alawar games and play for free and without a key, ...
|
10 |
-
Key to the game Alawar: Twilight.
|
11 |
-
The key to the game Alawar: Twilight / Alawar: Twilight. 8a78ff9644<br />
|
12 |
-
<br />
|
13 |
-
<br />
|
14 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BombSquad Pro Mod APK Unlock All Features and Enjoy Explosive Fun.md
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download BombSquad Pro Mod APK and Enjoy Explosive Fun with Your Friends</h1>
|
3 |
-
<p>Do you love blowing things up and having fun with your friends? If yes, then you should try BombSquad, a hilarious and addictive game that lets you play various mini-games with bombs, pirates, ninjas, and more. And if you want to enjoy the game without any limitations, then you should download BombSquad Pro Mod APK, which gives you access to all the characters, tickets, and features of the game for free. In this article, we will tell you everything you need to know about BombSquad and BombSquad Pro Mod APK, and how to download and install it on your device.</p>
|
4 |
-
<h2>download bombsquad pro mod apk</h2><br /><p><b><b>Download</b> ☑ <a href="https://urlin.us/2uSXdh">https://urlin.us/2uSXdh</a></b></p><br /><br />
|
5 |
-
<h2>What is BombSquad?</h2>
|
6 |
-
<p>BombSquad is a game developed by Eric Froemling that allows you to blow up your friends in various mini-games ranging from capture-the-flag to hockey. The game features 8 players local/networked multiplayer, gratuitous explosions, advanced ragdoll face-plant physics, pirates, ninjas, barbarians, insane chefs, and more. You can play the game on your Android device, or on your PC using a controller or a keyboard.</p>
|
7 |
-
<h3>Features of BombSquad</h3>
|
8 |
-
<p>BombSquad has many features that make it a fun and exciting game to play with your friends. Here are some of them:</p>
|
9 |
-
<h4>Multiplayer mode</h4>
|
10 |
-
<p>You can play BombSquad with up to 8 players on the same device or over the internet. You can also join online servers and play with other players from around the world. You can create your own team or join an existing one, and compete against other teams in various modes.</p>
|
11 |
-
<h4>Various mini-games</h4>
|
12 |
-
<p>BombSquad has a variety of mini-games that you can choose from, such as capture-the-flag, king-of-the-hill, elimination, race, hockey, football, basketball, and more. Each mini-game has its own rules and objectives, and requires different strategies and skills to win. You can also create your own custom mini-games using the built-in editor.</p>
|
13 |
-
<p>gcash 5.26 2 apk download free<br />
|
14 |
-
gcash 5.26 2 apk download latest version<br />
|
15 |
-
gcash 5.26 2 apk download for android<br />
|
16 |
-
gcash 5.26 2 apk download update<br />
|
17 |
-
gcash 5.26 2 apk download old version<br />
|
18 |
-
gcash 5.26 2 apk download xapk<br />
|
19 |
-
gcash 5.26 2 apk download apkcombo<br />
|
20 |
-
gcash 5.26 2 apk download app<br />
|
21 |
-
gcash 5.26 2 apk download install<br />
|
22 |
-
gcash 5.26 2 apk download offline<br />
|
23 |
-
gcash 5.26 2 apk download mod<br />
|
24 |
-
gcash 5.26 2 apk download hack<br />
|
25 |
-
gcash 5.26 2 apk download no root<br />
|
26 |
-
gcash 5.26 2 apk download mirror<br />
|
27 |
-
gcash 5.26 2 apk download direct link<br />
|
28 |
-
gcash 5.26 2 apk download for pc<br />
|
29 |
-
gcash 5.26 2 apk download for windows<br />
|
30 |
-
gcash 5.26 2 apk download for mac<br />
|
31 |
-
gcash 5.26 2 apk download for laptop<br />
|
32 |
-
gcash 5.26 2 apk download for tablet<br />
|
33 |
-
gcash 5.26 2 apk download for tv<br />
|
34 |
-
gcash 5.26 2 apk download for firestick<br />
|
35 |
-
gcash 5.26 2 apk download for chromebook<br />
|
36 |
-
gcash 5.26 2 apk download for ios<br />
|
37 |
-
gcash 5.26 2 apk download for iphone<br />
|
38 |
-
gcash 5.26 2 apk download for ipad<br />
|
39 |
-
gcash 5.26 2 apk download for ipod touch<br />
|
40 |
-
gcash 5.26 2 apk download review<br />
|
41 |
-
gcash 5.26 2 apk download rating<br />
|
42 |
-
gcash 5.26 2 apk download feedback<br />
|
43 |
-
gcash 5.26 2 apk download features<br />
|
44 |
-
gcash 5.26 2 apk download benefits<br />
|
45 |
-
gcash 5.26 2 apk download advantages<br />
|
46 |
-
gcash 5.26 2 apk download disadvantages<br />
|
47 |
-
gcash 5.26 2 apk download pros and cons<br />
|
48 |
-
gcash 5.26 2 apk download comparison<br />
|
49 |
-
gcash 5.26 2 apk download alternatives<br />
|
50 |
-
gcash 5.26 2 apk download competitors<br />
|
51 |
-
gcash 5.26</p>
|
52 |
-
<h4>Customizable characters</h4>
|
53 |
-
<p>You can customize your character in BombSquad by choosing from different outfits, colors, accessories, and taunts. You can also unlock new characters by playing the game or buying them with tickets. Some of the characters include pirates, ninjas, barbarians, robots, zombies, aliens, animals, and more.</p>
|
54 |
-
<h4>Ragdoll physics</h4>
|
55 |
-
<p>BombSquad has realistic ragdoll physics that make the game more hilarious and enjoyable. You can see your character fly through the air, bounce off walls, fall down stairs, get hit by bombs, and more. You can also use the ragdoll button to make your character go limp at any time.</p>
|
56 |
-
<h2>What is BombSquad Pro Mod APK?</h2>
|
57 |
-
<p>BombSquad Pro Mod APK is a modified version of the original BombSquad game that gives you access to all the pro features of the game for free. This means that you can enjoy all the characters, tickets, and modes of the game without spending any money or watching any ads.</p>
|
58 |
-
<h3>Benefits of BombSquad Pro Mod APK</h3>
|
59 |
-
<p>BombSquad Pro Mod APK has many benefits that make it better than the original game. Here are some of them:</p> <h4>All characters unlocked</h4>
|
60 |
-
<p>With BombSquad Pro Mod APK, you can unlock all the characters in the game without having to play the game or buy them with tickets. You can choose from over 50 characters, each with their own unique appearance and personality. You can also mix and match different outfits, colors, and accessories to create your own custom character.</p>
|
61 |
-
<h4>All tickets unlocked</h4>
|
62 |
-
<p>Tickets are the currency of BombSquad that you can use to buy new characters, outfits, accessories, and mini-games. You can earn tickets by playing the game or watching ads, but it can take a long time to accumulate enough tickets to buy everything you want. With BombSquad Pro Mod APK, you can get unlimited tickets for free, and buy anything you want without any restrictions.</p>
|
63 |
-
<h4>No ads</h4>
|
64 |
-
<p>Ads can be annoying and distracting when you are playing a game, especially when they pop up in the middle of a match or a mini-game. They can also slow down your device and consume your data. With BombSquad Pro Mod APK, you can get rid of all the ads in the game, and enjoy a smooth and uninterrupted gaming experience.</p>
|
65 |
-
<h2>How to download and install BombSquad Pro Mod APK?</h2>
|
66 |
-
<p>If you want to download and install BombSquad Pro Mod APK on your device, you need to follow some simple steps. Here they are:</p>
|
67 |
-
<h3>Steps to download and install BombSquad Pro Mod APK</h3>
|
68 |
-
<h4>Step 1: Enable unknown sources</h4>
|
69 |
-
<p>Before you can install BombSquad Pro Mod APK on your device, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
|
70 |
-
<h4>Step 2: Download the APK file</h4>
|
71 |
-
<p>Next, you need to download the APK file of BombSquad Pro Mod APK from a reliable source. You can use the link below to download the latest version of the file:</p>
|
72 |
-
<p><a href="">Download BombSquad Pro Mod APK</a></p>
|
73 |
-
<h4>Step 3: Install the APK file</h4>
|
74 |
-
<p>Once you have downloaded the APK file, you need to locate it in your device storage and tap on it to start the installation process. You may see a warning message asking for your permission to install the app. Just tap on Install and wait for the installation to complete.</p>
|
75 |
-
<h4>Step 4: Launch the game and enjoy</h4>
|
76 |
-
<p>After the installation is done, you can launch the game from your app drawer or home screen. You will see that you have access to all the pro features of the game for free. You can now enjoy playing BombSquad with your friends and have explosive fun.</p>
|
77 |
-
<h2>Conclusion</h2>
|
78 |
-
<p>BombSquad is a fun and addictive game that lets you play various mini-games with bombs and your friends. It has many features that make it an enjoyable game for all ages. However, if you want to enjoy the game without any limitations, you should download BombSquad Pro Mod APK, which gives you access to all the characters, tickets, and features of the game for free. You can download and install BombSquad Pro Mod APK by following the steps mentioned above. We hope this article was helpful for you. If you have any questions or feedback, feel free to leave a comment below.</p>
|
79 |
-
<h3>FAQs</h3>
|
80 |
-
<p>Here are some frequently asked questions about BombSquad and BombSquad Pro Mod APK:</p>
|
81 |
-
<table>
|
82 |
-
<tr><td><b>Q: Is BombSquad Pro Mod APK safe to use?</b></td><td><b>A: Yes, BombSquad Pro Mod APK is safe to use as long as you download it from a trusted source. However, we recommend that you use it at your own risk, as we are not responsible for any damage or loss caused by using it.</b></td></tr>
|
83 |
-
<tr><td><b>Q: Can I play BombSquad online with BombSquad Pro Mod APK?</b></td><td><b>A: Yes, you can play BombSquad online with BombSquad Pro Mod APK. However, you may face some issues or errors while playing online, as some servers may not support modded versions of the game.</b></td></tr>
|
84 |
-
<tr><td><b>Q: Can I update BombSquad Pro Mod APK?</b></td><td><b>A: Yes, you can update BombSquad Pro Mod APK whenever a new version is available. However, you may lose some of your progress or data if you update it without backing it up first.</b></td></ <tr><td><b>Q: What are the minimum requirements to play BombSquad on Android?</b></td><td><b>A: The minimum requirements to play BombSquad on Android are: Android 4.4 or higher, 1 GB of RAM, and 100 MB of free storage space.</b></td></tr>
|
85 |
-
<tr><td><b>Q: How can I contact the developer of BombSquad?</b></td><td><b>A: You can contact the developer of BombSquad by visiting his website, or by sending him an email at [email protected].</b></td></tr>
|
86 |
-
</table></p> 197e85843d<br />
|
87 |
-
<br />
|
88 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Beach Buggy Racing for PC Download the EXE File and Race Against Crazy Characters.md
DELETED
@@ -1,142 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Beach Buggy Racing Exe File Download: How to Enjoy This Fun and Free Kart Racing Game on Your PC</h1>
|
3 |
-
<p>Do you love kart racing games? If so, you might have heard of Beach Buggy Racing, a popular game for mobile devices that lets you drive into an action-packed, surprise-filled world of off-road kart racing mayhem. You can race against a field of rival drivers, each with unique personalities and special abilities. You can also build a collection of crazy power-ups, like Dodgeball Frenzy, Fireball, and Oil Slick. You can unlock and upgrade a variety of cars, from dune buggies to monster trucks. You can test your skills in 6 different game modes on 15 imaginative 3D race tracks, against a pack of tropical-loving rivals with a serious case of road rage!</p>
|
4 |
-
<h2>beach buggy racing exe file download</h2><br /><p><b><b>DOWNLOAD</b> –––––>>> <a href="https://jinyurl.com/2uNSgf">https://jinyurl.com/2uNSgf</a></b></p><br /><br />
|
5 |
-
<p>But what if you want to play Beach Buggy Racing on your PC instead of your mobile device? Is there a way to do that? The answer is yes! You can download and play Beach Buggy Racing on your PC using an exe file. An exe file is an executable file that contains a program or software that can run on your PC. By downloading an exe file of Beach Buggy Racing, you can enjoy this fun and free kart racing game on your PC without any hassle.</p>
|
6 |
-
<p>Why would you want to play Beach Buggy Racing on your PC instead of your mobile device? Well, there are many benefits of playing Beach Buggy Racing on your PC. For example:</p>
|
7 |
-
<ul>
|
8 |
-
<li>You can play with better graphics quality and sound effects on a larger screen and with better speakers</li>
|
9 |
-
<li>You can play with more comfortable and precise controls using your keyboard, mouse, or gamepad</li>
|
10 |
-
<li>You can play with your friends or family on the same PC using the split-screen multiplayer mode for up to 4 players</li>
|
11 |
-
<li>You can play with more stability and security without worrying about battery drain, data usage, or malware</li>
|
12 |
-
</ul>
|
13 |
-
<p>As you can see, playing Beach Buggy Racing on your PC using an exe file download has many advantages. But how can you do that? How can you download and install Beach Buggy Racing exe file on your PC? How can you play Beach Buggy Racing on your PC? What are the game features, tips, and tricks that you need to know? In this article, we will answer all these questions and more. So, buckle up and get ready for some beach buggy racing fun!</p>
|
14 |
-
<h2>How to Download and Install Beach Buggy Racing Exe File on Your PC</h2>
|
15 |
-
<p>Downloading and installing Beach Buggy Racing exe file on your PC is very easy and simple. You just need to follow these steps:</p>
|
16 |
-
<p>beach buggy racing pc game free download<br />
|
17 |
-
beach buggy racing windows 10 download<br />
|
18 |
-
beach buggy racing for laptop download<br />
|
19 |
-
beach buggy racing full version download<br />
|
20 |
-
beach buggy racing offline installer download<br />
|
21 |
-
beach buggy racing microsoft store download<br />
|
22 |
-
beach buggy racing softonic download<br />
|
23 |
-
beach buggy racing cnet download<br />
|
24 |
-
beach buggy racing kart racing game download<br />
|
25 |
-
beach buggy racing sequel game download<br />
|
26 |
-
beach buggy racing 3d race tracks download<br />
|
27 |
-
beach buggy racing powerups and cars download<br />
|
28 |
-
beach buggy racing tropical island adventure download<br />
|
29 |
-
beach buggy racing vector unit game download<br />
|
30 |
-
beach buggy racing action-packed game download<br />
|
31 |
-
beach buggy racing surprise-filled game download<br />
|
32 |
-
beach buggy racing off-road karting game download<br />
|
33 |
-
beach buggy racing rival drivers game download<br />
|
34 |
-
beach buggy racing crazy powerups game download<br />
|
35 |
-
beach buggy racing dodgeball frenzy game download<br />
|
36 |
-
beach buggy racing fireball game download<br />
|
37 |
-
beach buggy racing oil slick game download<br />
|
38 |
-
beach buggy racing dune buggies game download<br />
|
39 |
-
beach buggy racing monster trucks game download<br />
|
40 |
-
beach buggy racing 6 game modes download<br />
|
41 |
-
beach buggy racing 15 3d race tracks download<br />
|
42 |
-
beach buggy racing road rage game download<br />
|
43 |
-
beach buggy racing fast and furious game download<br />
|
44 |
-
beach buggy racing fun and free game download<br />
|
45 |
-
beach buggy racing split-screen multiplayer mode download<br />
|
46 |
-
beach buggy racing premium version download<br />
|
47 |
-
beach buggy racing infinite tickets download<br />
|
48 |
-
beach buggy racing updates and images download<br />
|
49 |
-
beach buggy racing google+ page download<br />
|
50 |
-
beach buggy racing facebook page download<br />
|
51 |
-
beach buggy racing twitter page download<br />
|
52 |
-
beach buggy racing web page download<br />
|
53 |
-
beach buggy racing screenshots and reviews download<br />
|
54 |
-
beach buggy racing phoenix force game download<br />
|
55 |
-
beach buggy racing upward game download<br />
|
56 |
-
beach buggy racing dictionary app download<br />
|
57 |
-
beach buggy racing high-speed fun in the sun game download<br />
|
58 |
-
beach buggy racing tilt controls and powerups game download<br />
|
59 |
-
beach buggy racing special move and character game download <br />
|
60 |
-
beach buggy racing lavishly-crafted summery tracks game download <br />
|
61 |
-
beach buggy racing crabs and seagulls smashing game download <br />
|
62 |
-
beach buggy racing mario kart inspired game download <br />
|
63 |
-
beach buggy racing battle mode and net energy gain game download</p>
|
64 |
-
<ol>
|
65 |
-
<li>Find a reliable and safe source for the exe file download. There are many websites that offer exe file downloads for various games and software, but not all of them are trustworthy and secure. Some of them may contain viruses, malware, or spyware that can harm your PC or steal your personal information. Therefore, you need to be careful and choose a reputable and verified source for the exe file download. One of the best sources for Beach Buggy Racing exe file download is the official website of Vector Unit, the developer of the game. You can visit their website at <a href="">https://www.vectorunit.com/beach-buggy-racing</a> and click on the "Download for Windows" button to get the exe file.</li>
|
66 |
-
<li>Download the exe file to your PC and run it as an administrator. Once you have found a reliable and safe source for the exe file download, you need to download the exe file to your PC. The file size is about 100 MB, so it should not take too long to download depending on your internet speed. After downloading the exe file, you need to run it as an administrator to start the installation process. To do that, you need to right-click on the exe file and select "Run as administrator" from the menu. This will allow the exe file to make changes to your PC and install the game properly.</li>
|
67 |
-
<li>Follow the installation instructions and launch the game. After running the exe file as an administrator, you will see a window with the installation instructions. You need to follow these instructions carefully and agree to the terms and conditions of the game. You also need to choose a destination folder for the game files and create a shortcut for the game on your desktop or start menu. The installation process should not take more than a few minutes. After completing the installation process, you can launch the game by clicking on the shortcut or by finding it in your destination folder.</li>
|
68 |
-
</ol>
|
69 |
-
<p>Congratulations! You have successfully downloaded and installed Beach Buggy Racing exe file on your PC. Now you can enjoy this fun and free kart racing game on your PC anytime you want.</p> <h2>How to Play Beach Buggy Racing on Your PC</h2>
|
70 |
-
<p>Now that you have downloaded and installed Beach Buggy Racing exe file on your PC, you are ready to play the game. But how can you play Beach Buggy Racing on your PC? What are the settings, controls, and graphics that you need to customize for optimal performance and experience? How can you choose your driver, car, and power-ups for different game modes and tracks? How can you use keyboard, mouse, or gamepad to control your car and activate power-ups? In this section, we will answer all these questions and more. Here is how you can play Beach Buggy Racing on your PC:</p>
|
71 |
-
<ul>
|
72 |
-
<li>Customize your settings, controls, and graphics for optimal performance and experience. Before you start playing the game, you may want to customize your settings, controls, and graphics according to your preferences and PC specifications. To do that, you need to go to the main menu of the game and click on the "Options" button. There, you will see several tabs that allow you to adjust various aspects of the game. For example, you can change the language, sound volume, music playlist, screen resolution, graphics quality, anti-aliasing, shadows, etc. You can also change the controls for keyboard, mouse, or gamepad. You can choose from different presets or customize your own key bindings. You can also enable or disable vibration, auto-acceleration, auto-steering, etc. You can also calibrate your gamepad if you are using one. You can save your settings by clicking on the "Apply" button.</li>
|
73 |
-
<li>Choose your driver, car, and power-ups for different game modes and tracks. After customizing your settings, controls, and graphics, you can choose your driver, car, and power-ups for different game modes and tracks. To do that, you need to go to the main menu of the game and click on the "Play" button. There, you will see several options for playing the game. You can choose from 6 different game modes: Championship, Quick Race, Daily Challenge, Custom Race, Split Screen Multiplayer, and Online Multiplayer. Each game mode has its own rules and objectives. For example, in Championship mode, you need to compete in a series of races and earn stars to unlock new cars, drivers, and tracks. In Quick Race mode, you can choose any track and race against random opponents. In Daily Challenge mode, you can play a special race with a random car, driver, and power-up and try to beat the best time. In Custom Race mode, you can create your own rules and challenges for any track and race against AI or human opponents. In Split Screen Multiplayer mode, you can play with up to 4 players on the same PC using different controllers. In Online Multiplayer mode, you can play with up to 8 players from around the world using the internet.</li>
|
74 |
-
<li>Depending on the game mode you choose, you can select your driver, car, and power-ups from a variety of options. You can choose from 12 different drivers, each with their own personality and special ability. For example, Rez has the ability to hack other cars and make them spin out. McSkelly has the ability to summon a swarm of bats that block the vision of other drivers. You can also choose from 25 different cars, each with their own stats and style. For example, the Lunar Rover has high speed and handling but low acceleration and strength. The Rock Stomper has high strength and acceleration but low speed and handling. You can also choose from 15 different power-ups, each with their own effect and duration. For example, the Fireball lets you shoot a ball of fire that explodes on impact. The Oil Slick lets you drop a slippery puddle that makes other cars lose control.</li>
|
75 |
-
<li>You can unlock more drivers, cars, and power-ups by earning stars in Championship mode or by buying them with coins that you earn by playing the game. You can also upgrade your car and power-ups by spending coins. You can upgrade your car's speed, acceleration, handling, and strength. You can also upgrade your power-ups' effect, duration, and frequency. You can also customize your car's appearance by changing its color, wheels, decals, etc.</li>
|
76 |
-
</ul>
|
77 |
-
<p>Once you have chosen your driver, car, and power-ups for the game mode and track you want to play, you are ready to start the race. But how do you control your car and activate your power-ups? Here is how you can do that:</p>
|
78 |
-
<ul>
|
79 |
-
<li>Use keyboard, mouse, or gamepad to control your car and activate power-ups. You can use any of these devices to control your car and activate power-ups in Beach Buggy Racing on your PC. You can also use a combination of them if you prefer. For example, you can use the keyboard to steer and accelerate and the mouse to activate power-ups. Or you can use the gamepad to steer and accelerate and the keyboard to activate power-ups.</li>
|
80 |
-
<li>The default controls for keyboard are as follows: Use the arrow keys or WASD keys to steer left or right and accelerate or brake. Use the spacebar or enter key to activate power-ups. Use the escape key or backspace key to pause the game.</li>
|
81 |
-
<li>The default controls for mouse are as follows: Use the left mouse button to steer left or right and accelerate or brake. Use the right mouse button to activate power-ups.</li>
|
82 |
-
<li>The default controls for gamepad are as follows: Use the left analog stick or directional pad to steer left or right and accelerate or brake. Use the A button or X button to activate power-ups. Use the start button or back button to pause the game.</li>
|
83 |
-
<li>You can change these controls in the Options menu if you want to customize them according to your preferences.</li>
|
84 |
-
</ul>
|
85 |
-
<p>That's it! You have learned how to play Beach Buggy Racing on your PC using keyboard, mouse, or gamepad. Now you can enjoy this fun and free kart racing game on your PC with better graphics quality, sound effects, controls, performance, stability, security, multiplayer mode, custom mode, etc.</p> <h2>Beach Buggy Racing Game Features, Tips, and Tricks</h2>
|
86 |
-
<p>Now that you know how to download, install, and play Beach Buggy Racing on your PC, you may want to learn more about the game features, tips, and tricks that will make your gaming experience more fun and exciting. In this section, we will tell you what are the main game features that make Beach Buggy Racing stand out from other kart racing games. We will also give you some tips and tricks to help you improve your skills and win more races. Here are the game features, tips, and tricks that you need to know:</p>
|
87 |
-
<h3>What are the main game features that make Beach Buggy Racing fun and exciting?</h3>
|
88 |
-
<p>Beach Buggy Racing is not just another kart racing game. It has many unique and amazing features that make it different from other games in the genre. Here are some of the main game features that make Beach Buggy Racing fun and exciting:</p>
|
89 |
-
<ul>
|
90 |
-
<li>A variety of cars, drivers, power-ups, tracks, and game modes to choose from. Beach Buggy Racing offers you a lot of options to customize your gameplay and challenge yourself. You can choose from 12 different drivers, each with their own personality and special ability. You can also choose from 25 different cars, each with their own stats and style. You can also choose from 15 different power-ups, each with their own effect and duration. You can also choose from 15 imaginative 3D race tracks, each with their own theme and layout. You can also choose from 6 different game modes, each with their own rules and objectives.</li>
|
91 |
-
<li>A colorful and vibrant graphics style with a tropical theme. Beach Buggy Racing has a beautiful and eye-catching graphics style that will make you feel like you are in a tropical paradise. The game has a bright and colorful palette that creates a cheerful and lively atmosphere. The game also has a tropical theme that adds to the charm and fun of the game. The game features various tropical elements such as palm trees, beaches, waterfalls, volcanoes, caves, etc. The game also has a dynamic weather system that changes the lighting and effects of the tracks.</li>
|
92 |
-
<li>A dynamic and physics-based gameplay with realistic effects and surprises. Beach Buggy Racing has a realistic and physics-based gameplay that makes the game more immersive and thrilling. The game has realistic effects such as gravity, inertia, friction, collision, etc. that affect the movement and behavior of the cars and power-ups. The game also has surprises such as ramps, jumps, loops, shortcuts, secrets, etc. that add to the excitement and unpredictability of the game.</li>
|
93 |
-
<li>A split-screen multiplayer mode for up to 4 players on one PC. Beach Buggy Racing has a split-screen multiplayer mode that allows you to play with up to 4 players on the same PC using different controllers. This mode is perfect for playing with your friends or family on the same screen without any internet connection or online registration required. You can choose any track and game mode and race against each other in a friendly or competitive way.</li>
|
94 |
-
<li>A custom game mode where you can create your own rules and challenges. Beach Buggy Racing has a custom game mode that allows you to create your own rules and challenges for any track and race against AI or human opponents. You can change various parameters such as the number of laps, the number of opponents, the difficulty level, the power-up frequency, etc. You can also enable or disable certain power-ups or drivers to make the game easier or harder for yourself or others.</li>
|
95 |
-
</ul>
|
96 |
-
<p>These are some of the main game features that make Beach Buggy Racing fun and exciting. But how can you master these features and win more races? What are some tips and tricks to help you improve your skills? Here are some tips and tricks to help you out:</p>
|
97 |
-
<h3>What are some tips and tricks to help you improve your skills and win more races?</h3>
|
98 |
-
<p>Beach Buggy Racing is not just a game of luck or chance. It is also a game of skill and strategy. You need to practice regularly and learn from your mistakes to become a better racer. You also need to use some tips and tricks to gain an edge over your opponents. Here are some tips and tricks to help you improve your skills and win more races:</p>
|
99 |
-
<ul>
|
100 |
-
<li>Upgrade your car and power-ups regularly to boost your performance. One of the most important things to do in Beach Buggy Racing is to upgrade your car and power-ups regularly to boost your performance. You can upgrade your car's speed, acceleration, handling, and strength by spending coins that you earn by playing the game. You can also upgrade your power-ups' effect, duration, and frequency by spending coins. Upgrading your car and power-ups will make them more effective and efficient in the races. You can also customize your car's appearance by changing its color, wheels, decals, etc.</li>
|
101 |
-
<li>Master drifting, jumping, and dodging to navigate the tracks and avoid obstacles. One of the most important skills to learn in Beach Buggy Racing is how to drift, jump, and dodge. Drifting is when you slide your car sideways while turning. Drifting can help you make sharp turns without losing speed or control. Jumping is when you launch your car into the air using ramps or bumps. Jumping can help you avoid obstacles or reach shortcuts or secrets. Dodging is when you move your car left or right to avoid obstacles or power-ups. Dodging can help you prevent damage or sabotage from your opponents or the environment.</li>
|
102 |
-
<li>Use power-ups strategically to gain an advantage or sabotage your opponents. One of the most fun and exciting aspects of Beach Buggy Racing is the use of power-ups. Power-ups are special items that you can collect and use during the races. Power-ups can have various effects such as boosting your speed, shooting projectiles, dropping traps, etc. Power-ups can help you gain an advantage or sabotage your opponents depending on how and when you use them. You need to use power-ups strategically to maximize their benefits and minimize their drawbacks.</li>
|
103 |
-
<li>Learn the shortcuts and secrets of each track to save time and distance. One of the most challenging and rewarding aspects of Beach Buggy Racing is the exploration of the tracks. Each track has its own theme and layout that offer different opportunities and challenges. Each track also has its own shortcuts and secrets that can help you save time and distance or give you extra coins or power-ups. You need to learn the shortcuts and secrets of each track to improve your performance and score.</li>
|
104 |
-
<li>Practice regularly and challenge yourself with different difficulty levels. One of the most effective ways to improve your skills and win more races in Beach Buggy Racing is to practice regularly and challenge yourself with different difficulty levels. Practicing regularly will help you familiarize yourself with the game features, controls, graphics, etc. Challenging yourself with different difficulty levels will help you test your skills against tougher opponents, faster cars, harder tracks, etc.</li>
|
105 |
-
</ul>
|
106 |
-
<p>These are some of the tips and tricks that will help you improve your skills and win more races in Beach Buggy Racing. But remember, the most important thing is to have fun and enjoy the game!</p>
|
107 |
-
<h2>Conclusion</h2>
|
108 |
-
<p>In conclusion, Beach Buggy Racing is a fun and free kart racing game that you can download and play on your PC using an exe file. By downloading an exe file of Beach Buggy Racing, you can enjoy this game on your PC with better graphics quality, sound effects, controls, performance, stability, security, multiplayer mode, custom mode, etc. You can also customize your settings, controls, and graphics according to your preferences and PC specifications. You can also choose your driver, car, and power-ups from a variety of options for different game modes and tracks. You can also use keyboard, mouse, or gamepad to control your car and activate power-ups. You can also learn about the game features, tips, and tricks that will make your gaming experience more fun and exciting.</p>
|
109 |
-
<p>If you love kart racing games, you should definitely try out Beach Buggy Racing on your PC using an exe file download. It is a game that will keep you entertained for hours with its colorful graphics, dynamic gameplay, diverse options, surprises, and challenges. You can also play with your friends or family on the same PC using the split-screen multiplayer mode or with other players from around the world using the online multiplayer mode. You can also create your own rules and challenges using the custom game mode. Beach Buggy Racing is a game that will make you feel like you are in a tropical paradise with its tropical theme and elements. Beach Buggy Racing is a game that will make you smile and laugh with its humorous and quirky characters and power-ups. Beach Buggy Racing is a game that will make you addicted and satisfied with its realistic and physics-based gameplay and effects.</p>
|
110 |
-
<p>So, what are you waiting for? Download Beach Buggy Racing exe file on your PC today and enjoy this fun and free kart racing game on your PC. You will not regret it!</p>
|
111 |
-
<p>If you want to learn more about Beach Buggy Racing, you can visit the official website or social media pages of Vector Unit, the developer of the game. You can also check out some reviews, videos, screenshots, and FAQs of the game online. You can also share your feedback, suggestions, questions, or comments about the game with other players or with the developers. You can also rate and review the game on various platforms and websites.</p>
|
112 |
-
<p>Thank you for reading this article. We hope you found it helpful and informative. We hope you have a great time playing Beach Buggy Racing on your PC using an exe file download. Happy racing!</p>
|
113 |
-
<h2>FAQs</h2>
|
114 |
-
<p>Here are some frequently asked questions (FAQs) about Beach Buggy Racing exe file download:</p>
|
115 |
-
<table>
|
116 |
-
<tr>
|
117 |
-
<th>Question</th>
|
118 |
-
<th>Answer</th>
|
119 |
-
</tr>
|
120 |
-
<tr>
|
121 |
-
<td>Is Beach Buggy Racing exe file download safe and secure?</td>
|
122 |
-
<td>Yes, Beach Buggy Racing exe file download is safe and secure if you download it from a reliable and verified source such as the official website of Vector Unit, the developer of the game. However, you should be careful and avoid downloading the exe file from unknown or suspicious sources as they may contain viruses, malware, or spyware that can harm your PC or steal your personal information.</td>
|
123 |
-
</tr>
|
124 |
-
<tr>
|
125 |
-
<td>Is Beach Buggy Racing exe file download free and legal?</td>
|
126 |
-
<td>Yes, Beach Buggy Racing exe file download is free and legal if you download it from a legitimate and authorized source such as the official website of Vector Unit, the developer of the game. However, you should not download or distribute the exe file from illegal or unauthorized sources as they may violate the intellectual property rights of the developer or publisher of the game.</td>
|
127 |
-
</tr>
|
128 |
-
<tr>
|
129 |
-
<td>What are the system requirements for Beach Buggy Racing exe file download?</td>
|
130 |
-
<td>The minimum system requirements for Beach Buggy Racing exe file download are as follows: Windows 7 or higher; 2 GB RAM; 1 GB free disk space; DirectX 9.0c or higher; Intel HD Graphics 4000 or better; Keyboard, mouse, or gamepad. The recommended system requirements for Beach Buggy Racing exe file download are as follows: Windows 10; 4 GB RAM; 2 GB free disk space; DirectX 11 or higher; NVIDIA GeForce GTX 650 or better; Keyboard, mouse, or gamepad.</td>
|
131 |
-
</tr>
|
132 |
-
<tr>
|
133 |
-
<td>How can I uninstall Beach Buggy Racing exe file from my PC?</td>
|
134 |
-
<td>You can uninstall Beach Buggy Racing exe file from your PC by following these steps: Go to the Control Panel of your PC and click on "Uninstall a program". Find Beach Buggy Racing in the list of programs and click on "Uninstall". Follow the uninstallation instructions and confirm your choice. Alternatively, you can go to the destination folder where you installed Beach Buggy Racing on your PC and run the "unins000.exe" file as an administrator. Follow the uninstallation instructions and confirm your choice.</td>
|
135 |
-
</tr>
|
136 |
-
<tr>
|
137 |
-
<td>How can I contact Vector Unit, the developer of Beach Buggy Racing?</td>
|
138 |
-
<td>You can contact Vector Unit, the developer of Beach Buggy Racing, by visiting their website at <a href="">https://www.vectorunit.com/</a> and clicking on the "Contact" button. There, you can fill out a form with your name, email address, subject, and message. You can also contact them by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You can also follow them on Facebook, Twitter, Instagram, YouTube, Discord, etc.</td>
|
139 |
-
</tr>
|
140 |
-
</table></p> 401be4b1e0<br />
|
141 |
-
<br />
|
142 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Cross Racing Championship Extreme A Classic Racing Game with Modern Features.md
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cross Racing Championship Extreme: A Review</h1>
|
3 |
-
<p>If you are looking for a racing game that offers you the thrill of high-speed on and off road racing across vast open terrains, then you should check out Cross Racing Championship Extreme (CRCE). CRCE is a racing simulation game that was originally released in 2005 by Invictus Games Ltd. and has recently been re-released on Steam in an enhanced version. In this article, I will review CRCE and tell you why it is a great racing game that deserves your attention.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<p>CRCE is a racing game that allows you to experience the excitement of various racing disciplines, such as rally, rallycross, autocross, off-road, street racing, and more. You can race in over 60 different events across six distinct environments, ranging from icy mountainous regions and lush countryside to parched desert areas and beaches. You can also choose from a wide selection of cars, from classic hatchbacks and muscle cars to exotic supercars and off-road vehicles.</p>
|
6 |
-
<h2>cross racing championship extreme download</h2><br /><p><b><b>Download File</b> ✔✔✔ <a href="https://jinyurl.com/2uNL1j">https://jinyurl.com/2uNL1j</a></b></p><br /><br />
|
7 |
-
<p>One of the main features of CRCE is its realistic handling system and damage model, which make the driving experience more challenging and immersive. You have to take into account the terrain, weather, and car condition when racing, as they affect your performance and control. You also have to deal with the consequences of crashing, as your car can get damaged or even destroyed. You can repair your car in the garage, but it will cost you money and time.</p>
|
8 |
-
<p>Another feature of CRCE is its non-linear career mode, which lets you progress through different racing categories at your own pace. You can choose which events to enter, which cars to buy or sell, and how to upgrade or customize them. You can also unlock new cars, tracks, and modes by completing certain objectives or challenges. You can also earn money by winning races or performing stunts, which you can use to buy new cars or parts.</p>
|
9 |
-
<h2>Gameplay</h2>
|
10 |
-
<h3>Single player mode</h3>
|
11 |
-
<p>In single player mode, you can start your career as a rookie racer and work your way up to become a champion. You can enter various events that suit your style and preference, such as circuit races, time trials, drift contests, stunt shows, and more. You can also choose the difficulty level, the number of opponents, the weather conditions, and other settings for each event.</p>
|
12 |
-
<p>One of the most important aspects of single player mode is car customization and tuning. You can modify your car's appearance by changing its color, decals, number plates, and more. You can also improve your car's performance by upgrading its engine, transmission, suspension, brakes, tires, and more. You can also fine-tune your car's settings by adjusting its gear ratios, camber angles, brake bias, and more.</p>
|
13 |
-
<p>As you progress through your career, you will unlock new cars, tracks, and modes. Some of the cars include Ford Focus RS WRC 03, Subaru Impreza WRX STi 04, Mitsubishi Lancer Evolution VIII MR FQ400 04, Porsche 911 GT3 RS , Lamborghini Murcielago R-GT, Ferrari F430 Challenge, and more. Some of the tracks include England, France, Hungary, Egypt, Finland, and more. Some of the modes include Free Ride, where you can explore the open world and perform stunts; Ghost Race, where you can race against your own or other players' best times; and Hot Lap, where you can try to beat the lap records of the developers.</p>
|
14 |
-
<p>cross racing championship extreme free download<br />
|
15 |
-
cross racing championship extreme steam<br />
|
16 |
-
cross racing championship extreme pc game<br />
|
17 |
-
cross racing championship extreme full version<br />
|
18 |
-
cross racing championship extreme crack<br />
|
19 |
-
cross racing championship extreme gameplay<br />
|
20 |
-
cross racing championship extreme system requirements<br />
|
21 |
-
cross racing championship extreme mods<br />
|
22 |
-
cross racing championship extreme review<br />
|
23 |
-
cross racing championship extreme cheats<br />
|
24 |
-
cross racing championship extreme online<br />
|
25 |
-
cross racing championship extreme windows 10<br />
|
26 |
-
cross racing championship extreme patch<br />
|
27 |
-
cross racing championship extreme demo<br />
|
28 |
-
cross racing championship extreme trainer<br />
|
29 |
-
cross racing championship extreme cars<br />
|
30 |
-
cross racing championship extreme maps<br />
|
31 |
-
cross racing championship extreme multiplayer<br />
|
32 |
-
cross racing championship extreme keygen<br />
|
33 |
-
cross racing championship extreme serial number<br />
|
34 |
-
cross racing championship extreme iso<br />
|
35 |
-
cross racing championship extreme rar<br />
|
36 |
-
cross racing championship extreme torrent<br />
|
37 |
-
cross racing championship extreme direct link<br />
|
38 |
-
cross racing championship extreme compressed<br />
|
39 |
-
cross racing championship extreme arealgamer.org<br />
|
40 |
-
cross racing championship extreme steamunlocked.net<br />
|
41 |
-
cross racing championship extreme igg-games.com<br />
|
42 |
-
cross racing championship extreme oceanofgames.com<br />
|
43 |
-
cross racing championship extreme apunkagames.com<br />
|
44 |
-
cross racing championship extreme skidrowreloaded.com<br />
|
45 |
-
cross racing championship extreme fitgirl-repacks.site<br />
|
46 |
-
cross racing championship extreme gamefabrique.com<br />
|
47 |
-
cross racing championship extreme old-games.com<br />
|
48 |
-
cross racing championship extreme myabandonware.com<br />
|
49 |
-
cross racing championship extreme invictus-games.com<br />
|
50 |
-
cross racing championship extreme metacritic.com<br />
|
51 |
-
cross racing championship extreme gamespot.com<br />
|
52 |
-
cross racing championship extreme youtube.com<br />
|
53 |
-
cross racing championship extreme facebook.com<br />
|
54 |
-
cross racing championship extreme twitter.com<br />
|
55 |
-
cross racing championship extreme reddit.com<br />
|
56 |
-
cross racing championship extreme discord.gg<br />
|
57 |
-
cross racing championship extreme wikipedia.org<br />
|
58 |
-
cross racing championship extreme wikia.org</p>
|
59 |
-
<h3>Multiplayer mode</h3>
|
60 |
-
<p>In multiplayer mode, you can join or host online lobbies and race with other players from around the world. You can choose from different multiplayer game modes and maps, such as Capture the Flag, Bomb Run, Destruction Zone, and more. You can also compete with other players in ranked or unranked races and rank up on the global leaderboards.</p>
|
61 |
-
<p>Multiplayer mode is a great way to test your skills and have fun with other racing enthusiasts. You can chat with other players, challenge them to duels, or team up with them in cooperative modes. You can also customize your car and show it off to other players. You can also download and share custom cars, tracks, and mods from the Steam Workshop.</p>
|
62 |
-
<h2>Graphics and Sound</h2>
|
63 |
-
<h3>Graphics</h3>
|
64 |
-
<p>CRCE features realistic physics and damage system that make the racing experience more authentic and dynamic. You can see your car getting dented, scratched, or even losing parts as you crash into obstacles or other cars. You can also see the dust, smoke, water, and mud effects as you drive on different terrains. You can also see the weather effects, such as rain, snow, fog, and wind, that affect your visibility and traction.</p>
|
65 |
-
<p>CRCE also creates detailed and living environments that make the racing experience more immersive and diverse. You can see the trees swaying in the wind, the birds flying in the sky, the animals roaming in the fields, and the people cheering in the stands. You can also see the landmarks, buildings, bridges, and monuments that add to the realism and variety of each location.</p>
|
66 |
-
<p>CRCE also supports various screen resolutions and aspect ratios, which makes the racing experience more compatible and customizable. You can choose from different display modes, such as windowed, fullscreen, or borderless. You can also adjust the graphics settings, such as texture quality, shadow quality, and anti-aliasing, and enable or disable various effects, such as motion blur, lens flare, and bloom.</p>
<h3>Sound</h3>
<p>CRCE features original rock/metal soundtracks by SZEG that make the racing experience more energetic and exhilarating. You can listen to over 40 tracks that suit the mood and atmosphere of each race, or listen to your own music by adding your MP3 files to the game folder.</p>
<p>CRCE also delivers immersive sound effects and engine noises that make the racing experience more realistic and intense. You can hear the roar of your engine, the screech of your tires, the crunch of your collisions, and the blast of your nitro, as well as the ambient sounds of each environment, such as the wind blowing, the water splashing, or the crowd cheering.</p>
<h2>Conclusion</h2>
<p>In conclusion, CRCE is a racing game that offers a lot of fun and challenge in various racing disciplines across vast open terrains. It has a realistic physics and damage system, detailed graphics, original soundtracks, and a non-linear career mode. It also has a multiplayer mode, Steam Workshop support, and various customization and tuning options. It is a racing game that will keep you entertained for hours and challenge you to become the best racer.</p>
<p>If you are interested in CRCE, you can buy it on Steam for $9.99. You can also visit the official website or the Steam community page for more information and updates, or watch some gameplay videos and read user reviews to see what other players think about CRCE.</p>
<p>I hope you enjoyed this article and found it helpful. If you have any questions or comments, feel free to leave them below. Thank you for reading and happy racing!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about CRCE:</p>
<ul>
<li><b>What are the system requirements for CRCE?</b></li>
<p>The minimum system requirements for CRCE are:</p>
<table>
<tr><td>OS</td><td>Windows XP/Vista/7/8/10</td></tr>
<tr><td>Processor</td><td>Intel Pentium 4 2.0 GHz or equivalent</td></tr>
<tr><td>Memory</td><td>512 MB RAM</td></tr>
<tr><td>Graphics</td><td>NVIDIA GeForce FX 5600 or equivalent</td></tr>
<tr><td>DirectX</td><td>Version 9.0c</td></tr>
<tr><td>Storage</td><td>1 GB available space</td></tr>
<tr><td>Sound Card</td><td>DirectX compatible sound card</td></tr>
</table>
<li><b>How can I play CRCE with a controller?</b></li>
<p>You can play CRCE with a controller by using third-party software such as Xpadder or JoyToKey. You can also use the in-game settings to configure your controller buttons and axes.</p>
<li><b>How can I mod CRCE?</b></li>
<p>You can mod CRCE by using the built-in editor or by downloading and installing custom cars, tracks, and mods from the Steam Workshop. You can also create your own mods by using the SDK (Software Development Kit) that is included in the game folder.</p>
<li><b>How can I get more nitro in CRCE?</b></li>
<p>You can get more nitro in CRCE by performing stunts, such as jumps, drifts, flips, or rolls. You can also get more nitro by collecting nitro cans that are scattered around the tracks.</p>
<li><b>How can I change the language in CRCE?</b></li>
<p>You can change the language in CRCE by using the launcher or by editing the config.ini file that is located in the game folder. You can choose from English, German, French, Italian, Spanish, Hungarian, Polish, Russian, Czech, or Slovak.</p>
</ul>
spaces/1phancelerku/anime-remove-background/Egg Inc. APK A Simulation Game with Chickens Research and Space.md
DELETED
@@ -1,161 +0,0 @@
<h1>Egg Inc Download APK: How to Play the Ultimate Egg Farming Game on Your Android Device</h1>
<p>If you are looking for a fun and addictive simulation game that will keep you entertained for hours, then you should try Egg Inc. This game lets you build your own egg empire from scratch, with hundreds of chickens, dozens of research items, and many challenges to complete. You can also explore the secrets of the universe hidden in the chicken egg, launch space expeditions, and join forces with other players in co-op mode. In this article, we will show you how to download and install Egg Inc APK on your Android device, how to play the game, and some tips and tricks to help you succeed.</p>
<h2>egg inc download apk</h2><br /><p><b><b>Download File</b> <a href="https://jinyurl.com/2uNMff">https://jinyurl.com/2uNMff</a></b></p><br /><br />
<h2>What is Egg Inc?</h2>
<h3>A brief introduction to the game and its features</h3>
<p>Egg Inc is a simulation game developed by Auxbrain Inc, a company that specializes in creating casual games with unique gameplay and graphics. The game was released in 2016 and has since gained over 10 million downloads on Google Play Store. It has also received positive reviews from critics and players alike, who praised its originality, humor, and depth.</p>
<p>The game is set in the near future, where the secrets of the universe will be unlocked in the chicken egg. You have decided to get in on the gold rush and sell as many eggs as you can. To do that, you need to hatch chickens, build hen houses, hire drivers, commission research, launch space expeditions, and more. The game is an incremental (clicker) game at its core, but it also uses many elements from simulation games that give it a unique feel and play style. You can interact with your farm in various ways, such as tapping on chickens, swiping on vehicles, or zooming in and out. You can also customize your farm with different themes, decorations, and music.</p>
<h3>Why you should play Egg Inc</h3>
<p>There are many reasons why you should play Egg Inc, but here are some of the main ones:</p>
<ul>
<li>It is fun and relaxing. The game has a laid-back feel and a beautiful appearance, with crisp and colorful 3D graphics and a delightful simulation of a swarm of chickens. You can play at your own pace, without any pressure or time limits.</li>
<li>It is challenging and rewarding. The game has hundreds of challenges to complete, such as reaching certain milestones, earning achievements, or completing missions. You can also unlock new types of eggs, each with their own benefits and requirements. You can also prestige your farm to start over with extra bonuses, or join contracts to cooperate with other players for bigger rewards.</li>
<li>It is creative and educational. The game has dozens of research items that you can unlock and upgrade, each with their own effects and descriptions. You can learn about various topics related to eggs, chickens, farming, science, technology, and more. You can also launch space expeditions to discover new planets and secrets.</li>
</ul>
<h3>How to download and install Egg Inc APK</h3>
<p>If you want to play Egg Inc on your Android device, you have two options: you can either download it from Google Play Store, or you can download it from a third-party source such as APKCombo. The latter option may be useful if you want to access older versions of the game or if you have compatibility issues with your device. However, you should be careful about the source of the APK file, as it may contain malware or viruses that can harm your device. Only download APK files from reputable and trusted websites, such as APKCombo. Here are the steps to download and install Egg Inc APK from APKCombo:</p>
<ol>
<li>Go to the APKCombo website and search for Egg Inc in the search bar. You can also use this direct link to go to the Egg Inc page.</li>
<li>On the Egg Inc page, you will see various versions of the game, along with their release dates, sizes, and ratings. Choose the version that is compatible with your device and tap on the Download APK button.</li>
<li>A pop-up window will appear, asking you to choose a download method. You can either use a QR code scanner app to scan the code and download the file directly to your device, or you can use a download manager app to download the file faster and more securely. Choose the option that suits you best and follow the instructions on the screen.</li>
<li>Once the APK file is downloaded, locate it in your device's file explorer app and tap on it to install it. You may need to allow installation from unknown sources if you haven't done so already. To do that, go to Settings > Apps > Special access > Install unknown apps and enable the permission for your browser or file manager app.</li>
<li>After the installation is complete, you can launch Egg Inc from your app drawer and enjoy the game.</li>
</ol>
<h2>How to Play Egg Inc</h2>
<h3>The basics of egg farming</h3>
<p>Egg Inc is a game that simulates the process of running an egg farm. Your goal is to produce as many eggs as possible and sell them for profit. To do that, you need to hatch chickens, build hen houses, hire drivers, commission research, launch space expeditions, and more.</p>
<p>The game has a simple interface that shows you your farm and its various elements. You can tap on any element to interact with it or view more information. You can also swipe left or right to move around your farm, or pinch in or out to zoom in or out.</p>
<p>The main element of your farm is the chicken coop, where you hatch chickens by tapping on the red button. The more chickens you have, the more eggs they produce. However, you also need to provide enough space for them in your hen houses, which you can build by tapping on the construction icon. You also need to deliver your eggs to the market by hiring drivers and buying vehicles, which you can do by tapping on the delivery icon.</p>
<p>egg inc apk free download<br />
egg inc mod apk download<br />
egg inc game download apk<br />
egg inc latest version apk download<br />
egg inc hack apk download<br />
egg inc android apk download<br />
egg inc simulation game apk download<br />
egg inc clicker game apk download<br />
egg inc offline apk download<br />
egg inc unlimited money apk download<br />
egg inc cheats apk download<br />
egg inc pro apk download<br />
egg inc premium apk download<br />
egg inc cracked apk download<br />
egg inc full version apk download<br />
egg inc update apk download<br />
egg inc beta apk download<br />
egg inc old version apk download<br />
egg inc 1.27.0 apk download<br />
egg inc 1.26.2 apk download<br />
egg inc 1.26.1 apk download<br />
egg inc xapk download<br />
egg inc apks download<br />
egg inc obb download<br />
egg inc app bundle download<br />
how to download egg inc apk<br />
where to download egg inc apk<br />
best site to download egg inc apk<br />
safe site to download egg inc apk<br />
virus free egg inc apk download<br />
easy way to download egg inc apk<br />
fast way to download egg inc apk<br />
direct link to download egg inc apk<br />
mirror link to download egg inc apk<br />
alternative link to download egg inc apk<br />
torrent link to download egg inc apk<br />
magnet link to download egg inc apk<br />
google play store link to download egg inc apk<br />
apkpure link to download egg inc apk<br />
apkmirror link to download egg inc apk<br />
apptoide link to download egg inc apk<br />
uptodown link to download egg inc apk<br />
apkmody link to download egg inc apk<br />
rexdl link to download egg inc apk<br />
revdl link to download egg inc apk<br />
andropalace link to download egg inc apk<br />
android1 link to download egg inc apk</p>
<p>You can earn money by selling your eggs, which depends on the type and quality of your eggs. You can also earn golden eggs, which are a special currency that you can use to buy boosters, upgrade your farm, or launch space expeditions. You can get golden eggs by completing missions, watching ads, or finding them randomly on your farm.</p>
<h3>The different types of eggs and their benefits</h3>
<p>As you progress in the game, you will be able to unlock new types of eggs that have different benefits and requirements. You can switch between different types of eggs by tapping on the egg icon at the top of the screen. Each type of egg has a different value, demand, and production rate. Some types of eggs also have special effects that can affect your farm or the world.</p>
<p>Here are some examples of the types of eggs you can unlock in Egg Inc:</p>
<table>
<tr><th>Type</th><th>Value</th><th>Demand</th><th>Production Rate</th><th>Special Effect</th></tr>
<tr><td>Edible Egg</td><td>$0.25</td><td>High</td><td>Normal</td><td>None</td></tr>
<tr><td>Superfood Egg</td><td>$1.25</td><td>High</td><td>Normal</td><td>Increases happiness and health of people who eat it</td></tr>
<tr><td>Medical Egg</td><td>$6.25</td><td>Medium</td><td>Normal</td><td>Cures diseases and extends lifespan of people who eat it</td></tr>
<tr><td>Rocket Fuel Egg</td><td>$30</td><td>Low</td><td>Slow</td><td>Powers rockets and spaceships with its high energy density</td></tr>
<tr><td>Fusion Egg</td><td>$150</td><td>Very Low</td><td>Very Slow</td><td>Creates clean and unlimited energy by fusing atoms inside it</td></tr>
</table>
<h3>The various buildings, vehicles, and upgrades you can use</h3>
<p>To increase your egg production and income, you can also use various buildings, vehicles, and upgrades that you can buy with your money or golden eggs. Here are some examples of what you can use:</p>
<ul>
<li>Buildings: You can build different types of hen houses that can accommodate more chickens and have different features, such as solar panels, quantum transporters, or monoliths. You can also build silos that can store your eggs and feed your chickens when you are offline.</li>
<li>Vehicles: You can buy different types of vehicles that can deliver more eggs and have different features, such as refrigeration, quantum storage, or graviton coating. You can also buy trains that can transport large amounts of eggs across the map.</li>
<li>Upgrades: You can buy different types of upgrades that can improve various aspects of your farm, such as egg laying rate, egg value, farm value, hatchery capacity, internal hatchery rate, vehicle capacity, research cost, or soul egg bonus. You can also buy epic upgrades that have permanent effects and apply to all types of eggs.</li>
</ul>
<h3>The research and missions you can complete</h3>
<p>To unlock new features and challenges in the game, you can also complete research and missions that you can access by tapping on the research icon or the mission icon. Here are some examples of what you can do:</p>
<ul>
<li>Research: You can conduct different types of research that can enhance your farm or your eggs in various ways. There are two tiers of research: common and epic. Common research is specific to each type of egg and requires money to unlock and upgrade. Epic research is universal to all types of eggs and requires golden eggs to unlock and upgrade.</li>
<li>Missions: You can complete different types of missions that can reward you with money, golden eggs, or trophies. There are two types of missions: regular and trophy. Regular missions are specific to each type of egg and require you to achieve certain goals, such as having a certain number of chickens, producing a certain amount of eggs, or earning a certain amount of money. Trophy missions are universal to all types of eggs and require you to reach a certain farm value with each type of egg.</li>
</ul>
<h3>The prestige and contracts system</h3>
<p>To progress further in the game, you can also use the prestige and contracts system that you can access by tapping on the prestige icon or the contract icon. Here are some examples of what you can do:</p>
<ul>
<li>Prestige: You can prestige your farm to start over with extra bonuses. When you prestige, you will lose all your chickens, buildings, vehicles, upgrades, and money, but you will gain soul eggs and prophecy eggs. Soul eggs are a special type of egg that increase your farm's earning bonus by a percentage. Prophecy eggs are a rare type of egg that increase the power of your soul eggs by a percentage. The more soul eggs and prophecy eggs you have, the faster you will grow your farm.</li>
<li>Contracts: You can join contracts to cooperate with other players for bigger rewards. Contracts are time-limited events that require you to produce a certain amount of eggs within a certain period of time. You can join existing contracts or create your own contracts and invite other players to join. Contracts have different difficulties and rewards, such as money, golden eggs, prophecy eggs, or boosters.</li>
</ul>
<h2>Tips and Tricks for Egg Inc</h2>
<h3>How to optimize your egg production and income</h3>
<p>To optimize your egg production and income, you should follow these tips and tricks:</p>
<ul>
<li>Balance your chicken population, hen house capacity, and vehicle capacity. You should always have enough space for your chickens in your hen houses and enough vehicles to deliver your eggs to the market. If you have too many chickens or too few vehicles, you will waste your eggs and lose money.</li>
<li>Upgrade your farm regularly. You should always invest in upgrading your buildings, vehicles, and research items whenever you can afford them. Upgrades can improve various aspects of your farm and increase your egg production and income.</li>
<li>Prestige often. You should prestige your farm whenever you feel like you have reached a plateau or a slow growth rate. Prestiging will give you extra bonuses that will help you grow faster in the next run.</li>
</ul>
<h3>How to use boosters and drones effectively</h3>
<p>To use boosters and drones effectively, you should follow these tips and tricks:</p>
<ul>
<li>Boosters: Boosters are special items that you can buy with golden eggs or get from contracts or events. They can give you various benefits for a limited time, such as increasing your egg laying rate, egg value, farm value, hatchery capacity, or internal hatchery rate. You can see the list of trophies and your progress on the trophy screen. There are different types of trophies, such as bronze, silver, gold, and platinum.</li>
</ul>
<h2>Conclusion</h2>
<p>Egg Inc is a fun and addictive simulation game that lets you build your own egg empire from scratch. You can hatch chickens, build hen houses, hire drivers, commission research, launch space expeditions, and more. You can also unlock new types of eggs, each with their own benefits and requirements. You can also prestige your farm to start over with extra bonuses, or join contracts to cooperate with other players for bigger rewards. You can also complete achievements and trophies to earn golden eggs and prophecy eggs.</p>
<p>If you want to play Egg Inc on your Android device, you can download it from Google Play Store, or you can download it from a third-party source such as APKCombo. However, you should be careful about the source of the APK file, as it may contain malware or viruses that can harm your device. Only download APK files from reputable and trusted websites, such as APKCombo.</p>
<p>We hope this article has helped you learn more about Egg Inc and how to play it. If you have any questions or feedback, please feel free to leave a comment below. Happy egg farming!</p>
<h2>FAQs</h2>
<h3>Q: How do I get more golden eggs?</h3>
<p>A: You can get more golden eggs by completing missions, watching ads, shooting down drones, finding them randomly on your farm, or buying them with real money.</p>
<h3>Q: How do I get more prophecy eggs?</h3>
<p>A: You can get more prophecy eggs by completing trophy missions or joining contracts that reward them.</p>
<h3>Q: How do I change the theme of my farm?</h3>
<p>A: You can change the theme of your farm by tapping on the settings icon and choosing the theme option. You can choose from different themes, such as classic, winter, western, or futuristic.</p>
<h3>Q: How do I launch a space expedition?</h3>
<p>A: You can launch a space expedition by tapping on the rocket icon and choosing the expedition option. You need to have a certain amount of golden eggs and a certain type of egg to launch an expedition. You can also choose the duration and difficulty of the expedition. You can get various rewards from expeditions, such as money, golden eggs, boosters, or secrets.</p>
<h3>Q: How do I create or join a co-op contract?</h3>
<p>A: You can create or join a co-op contract by tapping on the contract icon and choosing the contract option. You need to have a certain type of egg and a certain farm value to join a contract. You can either join a public contract, which is open to anyone, or a private contract, which requires a code to join. You can also create your own contract and share the code with other players. You need to produce a certain amount of eggs within a certain time limit to complete a contract. You can get various rewards from contracts, such as money, golden eggs, prophecy eggs, or boosters.</p>
spaces/1phancelerku/anime-remove-background/Enjoy the Classic Pinoy Game of Mahjong on Your Android Device with Pinoy Mahjong APK.md
DELETED
@@ -1,146 +0,0 @@
<h1>Pinoy Mahjong APK: A Fun and Easy Way to Play Mahjong on Your Phone</h1>
<p>If you love playing mahjong, you might want to try Pinoy Mahjong APK, a mobile app that lets you enjoy the game anytime, anywhere. Pinoy Mahjong is a version of mahjong that is popular in the Philippines, where it is also known as Filipino mahjong or Pusoy Dos. It is a simple and fast-paced game that can be played by anyone, even if you are not familiar with the traditional rules of mahjong. In this article, we will tell you everything you need to know about Pinoy Mahjong APK, including what it is, how to download and install it, how to play it online with friends, and how it reflects the history and culture of mahjong.</p>
<h2>pinoy mahjong apk</h2><br /><p><b><b>Download Zip</b> ★★★ <a href="https://jinyurl.com/2uNOX5">https://jinyurl.com/2uNOX5</a></b></p><br /><br />
<h2>What is Pinoy Mahjong?</h2>
<p>Pinoy Mahjong is a single-player game based on the mahjong rules (not yet formally defined) used in the Philippines. This app is an implementation of those rules. This app runs on iPads as well as iPhones.</p>
<h3>The origin and rules of Pinoy Mahjong</h3>
<p>Mahjong is a tile-based game that was developed in the 19th century in China and has spread throughout the world since the early 20th century. It is played by four players (with some three-player variations found in parts of China, Japan, South Korea and Southeast Asia). While many variations of mahjong exist, most variations have some basic rules in common, including how a piece is drawn and discarded, how a piece is robbed from another player, the use of suits (numbered tiles) and honors (winds and dragons), the basic kinds of melds allowed, how to deal the tiles, and the order of play.</p>
<p>Pinoy Mahjong is one of the many variations of mahjong that emerged in different regions and countries. It is believed that mahjong was introduced to the Philippines by Chinese immigrants during the Spanish colonial period. Over time, the game adapted to the local culture and preferences, resulting in a unique version that differs from other forms of mahjong in several ways. Some of the main differences are:</p>
<ul>
<li>Pinoy Mahjong uses only one suit (bamboos) and three honors (red dragon, green dragon, white dragon). The other suits (characters and circles) and honors (winds) are not used.</li>
<li>Pinoy Mahjong uses only 84 tiles instead of 136 tiles. Each player receives 21 tiles instead of 13 tiles.</li>
<li>Pinoy Mahjong allows only three types of melds: pung (three identical tiles), kong (four identical tiles), and chow (three consecutive tiles). A pair is not required to win.</li>
<li>Pinoy Mahjong has a special rule called "pusoy dos", which means "two flushes". This rule allows a player to win with two sets of seven tiles each, regardless of whether they form any melds or not.</li>
<li>Pinoy Mahjong has a scoring system that assigns different values to different melds and combinations. For example, a pung of dragons is worth more than a pung of bamboos, and a pusoy dos is worth more than a regular hand.</li>
</ul>
<h3>The features and benefits of Pinoy Mahjong APK</h3>
<ol>
<li>... "pinoy mahjong", "mahjong", or "Filipino mahjong". You can also create your own server and invite your friends to join.</li>
<li>Once you are in a server, look for a channel that hosts Pinoy Mahjong games. You can also create your own channel and set up the game settings, such as the number of players, the difficulty level, and the game mode. Some of the game modes are:</li>
<ul>
<li>Classic mode: This is the standard mode that follows the basic rules of Pinoy Mahjong.</li>
<li>Blitz mode: This is a fast-paced mode that gives you a limited time to make your moves.</li>
<li>Challenge mode: This is a competitive mode that pits you against other players in a tournament or a ladder.</li>
<li>Custom mode: This is a flexible mode that allows you to customize the rules and the scoring system of Pinoy Mahjong.</li>
</ul>
<li>After you have chosen or created a channel, join the game lobby and wait for other players to join. You can also invite your friends to join by sending them a link or a code.</li>
<li>When the game starts, you will see the tiles on your screen and the other players' names and avatars. You can also chat, voice call, or video call with them using Discord's features.</li>
<li>Play Pinoy Mahjong as you normally would, following the rules and the scoring system of the game mode. You can also use Discord's features to communicate and interact with other players.</li>
<li>When the game ends, you will see the results and the rankings of each player. You can also view your stats and achievements on Discord's dashboard.</li>
<li>You can play as many games as you want with your friends online using Discord. You can also join other servers and channels to play with different people and try different game modes.</li>
</ol>
<h3>The advantages and challenges of playing Pinoy Mahjong online</h3>
<p>Playing Pinoy Mahjong online with friends using Discord has many advantages and challenges that make it a different and exciting experience. Some of the advantages are:</p>
<ul>
<li>You can play Pinoy Mahjong anytime, anywhere, as long as you have an internet connection and a compatible device.</li>
<li>You can play Pinoy Mahjong with your friends online, even if they are far away or in different time zones.</li>
<li>You can play Pinoy Mahjong with different people from different countries and cultures, and learn from their strategies and styles.</li>
<li>You can play Pinoy Mahjong with different game modes and levels, and challenge yourself with different puzzles and tasks.</li>
<li>You can play Pinoy Mahjong with Discord's features, such as chat, voice call, video call, and more, and have fun and socialize with other players.</li>
</ul>
<p>Some of the challenges are:</p>
<ul>
<li>You may encounter technical issues or glitches while playing Pinoy Mahjong online, such as lag, disconnects, crashes, or bugs.</li>
<li>You may encounter malicious or rude players while playing Pinoy Mahjong online, such as cheaters, hackers, trolls, or bullies.</li>
<li>You may encounter communication or cultural barriers while playing Pinoy Mahjong online, such as language differences, accents, slang, or etiquette.</li>
<li>You may encounter difficulty or frustration while playing Pinoy Mahjong online, such as losing streaks, unfair matches, or hard levels.</li>
<li>You may encounter addiction or distraction while playing Pinoy Mahjong online, such as spending too much time, money, or energy on the game.</li>
</ul>
<h2>How Pinoy Mahjong reflects the history and culture of mahjong</h2>
<p>Pinoy Mahjong is not just a game, but also a reflection of the history and culture of mahjong. Mahjong is a game that has evolved and diversified over time, influenced by various factors such as geography, politics, religion, economics, and social norms. Pinoy Mahjong is one of the examples of how mahjong has adapted to different contexts and preferences. Here are some of the ways that Pinoy Mahjong reflects the history and culture of mahjong:</p>
<p>pinoy mahjong game download<br />
pinoy mahjong rules and scoring<br />
pinoy mahjong app for ipad<br />
pinoy mahjong youtube videos<br />
pinoy mahjong online free play<br />
pinoy mahjong rotate games llc<br />
pinoy mahjong appadvice review<br />
pinoy mahjong strategy and tips<br />
pinoy mahjong best tiles to use<br />
pinoy mahjong how to win<br />
pinoy mahjong history and origin<br />
pinoy mahjong variations and styles<br />
pinoy mahjong tournaments and events<br />
pinoy mahjong cheats and hacks<br />
pinoy mahjong latest version update<br />
pinoy mahjong for android devices<br />
pinoy mahjong for ios devices<br />
pinoy mahjong for windows devices<br />
pinoy mahjong for mac devices<br />
pinoy mahjong for linux devices<br />
pinoy mahjong offline mode available<br />
pinoy mahjong multiplayer mode available<br />
pinoy mahjong single-player mode available<br />
pinoy mahjong custom mode available<br />
pinoy mahjong difficulty levels available<br />
pinoy mahjong sound effects and music<br />
pinoy mahjong graphics and design<br />
pinoy mahjong user interface and controls<br />
pinoy mahjong feedback and ratings<br />
pinoy mahjong support and contact<br />
pinoy mahjong faq and help<br />
pinoy mahjong privacy policy and terms of service<br />
pinoy mahjong installation and setup guide<br />
pinoy mahjong features and benefits<br />
pinoy mahjong pros and cons comparison<br />
pinoy mahjong alternatives and competitors<br />
pinoy mahjong testimonials and reviews<br />
pinoy mahjong social media and community<br />
pinoy mahjong blog and news articles<br />
pinoy mahjong awards and recognition</p>
<h3>The evolution and variations of mahjong</h3>
<p>Mahjong is a game that has undergone many changes and modifications since its origin in China. Some of the factors that contributed to its evolution are:</p>
<ul>
<li>The migration and trade of Chinese people to other regions and countries, such as Japan, Korea, Southeast Asia, Europe, America, and more.</li>
<li>The interaction and exchange of ideas and customs between Chinese people and other people from different cultures and backgrounds.</li>
<li>The innovation and experimentation of new rules and features by different players and groups to suit their tastes and needs.</li>
<li>The standardization and regulation of mahjong by different organizations and associations to promote and preserve the game.</li>
</ul>
<p>Pinoy Mahjong is one of the many variations of mahjong that emerged from these factors. It is a version that reflects the preferences and needs of the Filipino people, who are known for their creativity, adaptability, and hospitality. Pinoy Mahjong is a game that is easy to learn, fun to play, and suitable for any occasion.</p>
<h3>The significance and symbolism of mahjong in different communities</h3>
<p>Mahjong is not just a game, but also a symbol of many things in different communities. Some of the things that mahjong represents are:</p>
<ul>
<li>Mahjong is a symbol of luck and fortune. Many people believe that playing mahjong can bring them good luck and wealth, especially during special occasions such as festivals, holidays, or birthdays. Some people also use lucky charms, rituals, or superstitions to enhance their chances of winning.</li>
<li>Mahjong is a symbol of skill and strategy. Many people admire and respect players who can master the game and win with skill and intelligence. Some people also use mahjong as a way to train their mental abilities, such as memory, concentration, and logic.</li>
<li>Mahjong is a symbol of culture and identity. Many people cherish and celebrate the game as a part of their heritage and tradition. Some people also use mahjong as a way to express their values, beliefs, and customs, such as respect, harmony, and generosity.</li>
<li>Mahjong is a symbol of socialization and friendship. Many people enjoy and appreciate the game as a means of entertainment and relaxation. Some people also use mahjong as a way to connect and bond with their family, friends, and neighbors, such as sharing stories, jokes, and food.</li>
</ul>
<p>Pinoy Mahjong is one of the examples of how mahjong can have different meanings and functions in different communities. It is a game that reflects the culture and identity of the Filipino people, who are known for their optimism, resilience, and hospitality. Pinoy Mahjong is a game that can bring joy and happiness to anyone who plays it.</p>
<h2>Conclusion</h2>
<p>Pinoy Mahjong APK is a mobile app that allows you to play Pinoy Mahjong on your phone or tablet. It is a version of mahjong that is popular in the Philippines, where it is also known as Filipino mahjong or Pusoy Dos. It is a simple and fast-paced game that can be played by anyone, even if you are not familiar with the traditional rules of mahjong.</p>
<h3>A summary of the main points</h3>
<p>In this article, we have told you everything you need to know about Pinoy Mahjong APK, including:</p>
<ul>
<li>What Pinoy Mahjong is, how it differs from other forms of mahjong, and what its features and benefits are.</li>
<li>How to download and install Pinoy Mahjong APK on your device, and what tips and tricks you can use to play it well.</li>
<li>How to play Pinoy Mahjong online with your friends using Discord, and what options and modes you can choose from.</li>
<li>How Pinoy Mahjong reflects the history and culture of mahjong, and what it symbolizes in different communities.</li>
</ul>
<h3>A call to action to download and play Pinoy Mahjong APK</h3>
<p>If you are interested in playing Pinoy Mahjong APK, you can download it for free from the links below. You can also visit the official website or follow the social media accounts of Pinoy Mahjong APK for more information and updates, and share your feedback and suggestions with the developers or other players through the app or online platforms.</p>
<p>Pinoy Mahjong APK is a fun and easy way to play mahjong on your phone or tablet. It is a game that can entertain you, challenge you, teach you, and connect you with others. It is a game that can make you happy. So what are you waiting for? Download Pinoy Mahjong APK today and enjoy the game!</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the frequently asked questions about Pinoy Mahjong APK:</p>
<h4>Q: Is Pinoy Mahjong APK safe to download and play?</h4>
<p>A: Yes, Pinoy Mahjong APK is safe to download and play. It does not contain any viruses or malware that can harm your device or data. It also does not require any sensitive or personal information from you to play the game. However, you should always download Pinoy Mahjong APK from trusted sources such as Google Play Store or App Store to avoid any potential risks.</p>
<h4>Q: Is Pinoy Mahjong APK compatible with all devices and platforms?</h4>
<p>A: Pinoy Mahjong APK is compatible with most devices and platforms that run on Android or iOS operating systems. However, some older or lower-end devices may experience some performance issues or errors while playing the game. You can check the minimum system requirements and compatibility of Pinoy Mahjong APK on its official website or on Google Play Store or App Store before downloading it.</p>
<h4>Q: How can I contact the developers or support team of Pinoy Mahjong APK?</h4>
<p>A: If you have any questions, problems, or suggestions regarding Pinoy Mahjong APK, you can contact the developers or support team of Pinoy Mahjong APK through the following ways:</p>
<ul>
<li>Email: You can send an email to [email protected] and expect a reply within 24 hours.</li>
<li>Facebook: You can visit the Facebook page of Pinoy Mahjong APK and send a message or leave a comment.</li>
<li>Twitter: You can follow the Twitter account of Pinoy Mahjong APK and tweet or direct message them.</li>
<li>Instagram: You can follow the Instagram account of Pinoy Mahjong APK and comment or direct message them.</li>
</ul>
<h4>Q: How can I update Pinoy Mahjong APK to the latest version?</h4>
<p>A: If you have downloaded Pinoy Mahjong APK from Google Play Store or App Store, you can update it automatically or manually through the app store. If you have downloaded Pinoy Mahjong APK from other sources, you can update it manually by downloading and installing the latest version from the official website or from the links provided below. You should always update Pinoy Mahjong APK to the latest version to enjoy the new features, improvements, and bug fixes.</p>
<h4>Q: How can I uninstall Pinoy Mahjong APK from my device?</h4>
<p>A: If you want to uninstall Pinoy Mahjong APK from your device, you can do so by following these steps:</p>
<ol>
<li>Go to your device's settings and look for the apps or applications menu.</li>
<li>Find and tap on Pinoy Mahjong APK from the list of apps installed on your device.</li>
<li>Tap on the uninstall button and confirm your action.</li>
<li>Wait for the uninstallation process to finish and check if Pinoy Mahjong APK is removed from your device.</li>
</ol>
spaces/3i2irg/first-app/app.py
DELETED
@@ -1,20 +0,0 @@
import gradio as gr
from fastai.vision.all import *
import skimage

# Load the exported fastai learner shipped with the Space.
learn = load_learner('export.pkl')

labels = learn.dls.vocab


def predict(img):
    # Run the classifier and return a {label: probability} mapping for Gradio's Label output.
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
    return {labels[i]: float(probs[i]) for i in range(len(labels))}


title = "Emotion Classifier"
description = "An emotion classifier trained with images from DuckDuckGo image search and fastai."
examples = ['happyphoto.jpg', 'yoelphoto.jpg']
interpretation = 'default'
enable_queue = True

gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(512, 512)),
    outputs=gr.outputs.Label(num_top_classes=3),
    title=title,
    description=description,
    examples=examples,
    interpretation=interpretation,
    enable_queue=enable_queue,
).launch()
spaces/4Taps/SadTalker/src/face3d/util/load_mats.py
DELETED
@@ -1,120 +0,0 @@
"""This script is to load 3D face model for Deep3DFaceRecon_pytorch
"""

import numpy as np
from PIL import Image
from scipy.io import loadmat, savemat
from array import array
import os.path as osp


# load expression basis
def LoadExpBasis(bfm_folder='BFM'):
    n_vertex = 53215
    Expbin = open(osp.join(bfm_folder, 'Exp_Pca.bin'), 'rb')
    exp_dim = array('i')
    exp_dim.fromfile(Expbin, 1)
    expMU = array('f')
    expPC = array('f')
    expMU.fromfile(Expbin, 3*n_vertex)
    expPC.fromfile(Expbin, 3*exp_dim[0]*n_vertex)
    Expbin.close()

    expPC = np.array(expPC)
    expPC = np.reshape(expPC, [exp_dim[0], -1])
    expPC = np.transpose(expPC)

    expEV = np.loadtxt(osp.join(bfm_folder, 'std_exp.txt'))

    return expPC, expEV


# transfer original BFM09 to our face model
def transferBFM09(bfm_folder='BFM'):
    print('Transfer BFM09 to BFM_model_front......')
    original_BFM = loadmat(osp.join(bfm_folder, '01_MorphableModel.mat'))
    shapePC = original_BFM['shapePC']  # shape basis
    shapeEV = original_BFM['shapeEV']  # corresponding eigen value
    shapeMU = original_BFM['shapeMU']  # mean face
    texPC = original_BFM['texPC']  # texture basis
    texEV = original_BFM['texEV']  # eigen value
    texMU = original_BFM['texMU']  # mean texture

    expPC, expEV = LoadExpBasis(bfm_folder)

    # transfer BFM09 to our face model

    idBase = shapePC*np.reshape(shapeEV, [-1, 199])
    idBase = idBase/1e5  # unify the scale to decimeter
    idBase = idBase[:, :80]  # use only first 80 basis

    exBase = expPC*np.reshape(expEV, [-1, 79])
    exBase = exBase/1e5  # unify the scale to decimeter
    exBase = exBase[:, :64]  # use only first 64 basis

    texBase = texPC*np.reshape(texEV, [-1, 199])
    texBase = texBase[:, :80]  # use only first 80 basis

    # our face model is cropped along face landmarks and contains only 35709 vertex.
    # original BFM09 contains 53490 vertex, and expression basis provided by Guo et al. contains 53215 vertex.
    # thus we select corresponding vertex to get our face model.

    index_exp = loadmat(osp.join(bfm_folder, 'BFM_front_idx.mat'))
    index_exp = index_exp['idx'].astype(np.int32) - 1  # starts from 0 (to 53215)

    index_shape = loadmat(osp.join(bfm_folder, 'BFM_exp_idx.mat'))
    index_shape = index_shape['trimIndex'].astype(
        np.int32) - 1  # starts from 0 (to 53490)
    index_shape = index_shape[index_exp]

    idBase = np.reshape(idBase, [-1, 3, 80])
    idBase = idBase[index_shape, :, :]
    idBase = np.reshape(idBase, [-1, 80])

    texBase = np.reshape(texBase, [-1, 3, 80])
    texBase = texBase[index_shape, :, :]
    texBase = np.reshape(texBase, [-1, 80])

    exBase = np.reshape(exBase, [-1, 3, 64])
    exBase = exBase[index_exp, :, :]
    exBase = np.reshape(exBase, [-1, 64])

    meanshape = np.reshape(shapeMU, [-1, 3])/1e5
    meanshape = meanshape[index_shape, :]
    meanshape = np.reshape(meanshape, [1, -1])

    meantex = np.reshape(texMU, [-1, 3])
    meantex = meantex[index_shape, :]
    meantex = np.reshape(meantex, [1, -1])

    # other info contains triangles, region used for computing photometric loss,
    # region used for skin texture regularization, and 68 landmarks index etc.
    other_info = loadmat(osp.join(bfm_folder, 'facemodel_info.mat'))
    frontmask2_idx = other_info['frontmask2_idx']
    skinmask = other_info['skinmask']
    keypoints = other_info['keypoints']
    point_buf = other_info['point_buf']
    tri = other_info['tri']
    tri_mask2 = other_info['tri_mask2']

    # save our face model
    savemat(osp.join(bfm_folder, 'BFM_model_front.mat'),
            {'meanshape': meanshape, 'meantex': meantex, 'idBase': idBase, 'exBase': exBase, 'texBase': texBase,
             'tri': tri, 'point_buf': point_buf, 'tri_mask2': tri_mask2, 'keypoints': keypoints,
             'frontmask2_idx': frontmask2_idx, 'skinmask': skinmask})


# load landmarks for standard face, which is used for image preprocessing
def load_lm3d(bfm_folder):

    Lm3D = loadmat(osp.join(bfm_folder, 'similarity_Lm3D_all.mat'))
    Lm3D = Lm3D['lm']

    # calculate 5 facial landmarks using 68 landmarks
    lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
    Lm3D = np.stack([Lm3D[lm_idx[0], :], np.mean(Lm3D[lm_idx[[1, 2]], :], 0), np.mean(
        Lm3D[lm_idx[[3, 4]], :], 0), Lm3D[lm_idx[5], :], Lm3D[lm_idx[6], :]], axis=0)
    Lm3D = Lm3D[[1, 2, 0, 3, 4], :]

    return Lm3D


if __name__ == '__main__':
    transferBFM09()
spaces/801artistry/RVC801/lib/infer_pack/commons.py
DELETED
@@ -1,166 +0,0 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
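As a quick illustration (not part of the original commons.py above), here is a minimal usage sketch of two of these helpers; the tensor shapes and lengths are arbitrary example values:

```python
import torch

# sequence_mask: one boolean row per sequence, True at valid positions.
lengths = torch.tensor([2, 4])
mask = sequence_mask(lengths, max_length=5)
# -> [[True, True, False, False, False],
#     [True, True, True,  True,  False]]

# rand_slice_segments: randomly crop a fixed-size window along the time axis
# of a (batch, channels, time) tensor, respecting each item's valid length.
x = torch.randn(2, 8, 16)
segments, ids_str = rand_slice_segments(x, x_lengths=torch.tensor([16, 12]), segment_size=4)
# segments.shape == (2, 8, 4); ids_str holds each item's random start index.
```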
spaces/A00001/bingothoo/tests/parse.ts
DELETED
@@ -1,13 +0,0 @@
import { promises as fs } from 'fs'
import { join } from 'path'
import { parseHeadersFromCurl } from '@/lib/utils'

(async () => {
  const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8')
  const headers = parseHeadersFromCurl(content)
  console.log(headers)

  const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8')
  const cmdHeaders = parseHeadersFromCurl(cmdContent)
  console.log(cmdHeaders)
})()
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/ABstract(插件化AB Testing平台) 746b87acd94643ca871ec661b63f196c.md
DELETED
@@ -1,123 +0,0 @@
# ABstract (Plugin-based AB Testing Platform)

Last edited time: April 23, 2023 3:58 PM
Owner: Anonymous

Project name: "ABstract". It plays on the words "AB testing" while also hinting at the concept of abstracting away the complexity of building and managing an AB testing platform.

> This document describes the product vision, project goals, core business capabilities, business model, and inter-process architecture of a plugin-based AB testing platform. The platform provides core capabilities such as configuration management, experiment management, data collection, and configuration delivery, helping business and development teams iterate on products in a data-driven way through AB testing. It offers plugin-based management of these core capabilities together with a minimal implementation, so that developers can trim or extend it to fit their actual needs.
>

## Product Vision

For: our target customers/users

<aside>
💡 Business and development teams that want to drive data-driven product iteration through AB testing

</aside>

They want: the target customers' pain points or hopes

<aside>
💡 To build their own AB testing platform that integrates easily with their existing business systems

</aside>

This: the product name

<aside>
💡 Plugin-based AB testing platform

</aside>

Is a: what type of product (platform? tool?)

<aside>
💡 A suite of core AB testing platform capabilities

</aside>

It can: bring what value to users, through which features

<aside>
💡 Provide the core capabilities of an AB testing platform — configuration management, experiment management, data collection, and experiment result analysis — unifying feature management and AB experiments to achieve data-driven product iteration

</aside>

Unlike: competing products on the market and their characteristics

<aside>
💡 Commercial AB testing platforms lack the flexibility to fit real business needs

</aside>

<aside>
💡 Large, all-in-one open-source AB testing platforms are hard to integrate with existing systems or to customize

</aside>

Its advantage is: the unique value of our product

<aside>
💡 Plugin-based management of the core capabilities, plus a minimal implementation that can run as a standalone deployment while still allowing developers to trim or extend it to fit their actual needs

</aside>

## Project Goals

> Complete development of the core features of the plugin-based AB testing platform
>

> Explore practical applications of AI in software development
>

## Core Business Capabilities

- Configuration management
    1. Feature Flag management
        1. Provides the metadata for Feature Configs
    2. Feature Config management
        1. Provides the ability to generate a Feature Config configuration UI from a Feature Flag
- Experiment management
    1. Experiment management
        1. Provides management of experiment, group, and metric configuration
        2. Provides viewing of experiment run results
    2. Experiment layering management
        1. Provides mutually exclusive group management
        2. Traffic is mutually exclusive between experiments within the same mutually exclusive group
        3. Ability to query group assignment results while an experiment is running
- Tracking data collection

    Collection of reported tracking events

- Configuration delivery

    Provides a unified result for fetching configuration by featureKey, unifying the delivery of Feature Configs and experiment configurations

```mermaid
graph LR
subgraph "AB Testing 平台"
AB测试核心能力 --> 配置管理
AB测试核心能力 --> 实验管理
AB测试核心能力 --> 数据收集
AB测试核心能力 --> 配置发布
数据收集 --> 指标分析
配置管理 --> FeatureFlag
配置管理 --> FeatureConfig
实验管理 --> 实验配置
实验管理 --> 实验分级
配置发布 --> 实验结果
配置发布 --> FeatureConfig结果
end

```

## Business Model

[Business Model](ABstract%EF%BC%88%E6%8F%92%E4%BB%B6%E5%8C%96AB%20Testing%E5%B9%B3%E5%8F%B0%EF%BC%89%20746b87acd94643ca871ec661b63f196c/%E4%B8%9A%E5%8A%A1%E6%A8%A1%E5%9E%8B%20d31846027b4f40ca99f6e76f897663a4.md)

## Inter-process Architecture

[Inter-process Architecture](ABstract%EF%BC%88%E6%8F%92%E4%BB%B6%E5%8C%96AB%20Testing%E5%B9%B3%E5%8F%B0%EF%BC%89%20746b87acd94643ca871ec661b63f196c/%E8%BF%9B%E7%A8%8B%E9%97%B4%E6%9E%B6%E6%9E%84%20d50744212b044d06a4b29fe931df391b.md)
spaces/AIGText/GlyphControl/ldm/modules/midas/midas/vit.py
DELETED
@@ -1,491 +0,0 @@
import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F


class Slice(nn.Module):
    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        return x[:, self.start_index :]


class AddReadout(nn.Module):
    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return x[:, self.start_index :] + readout.unsqueeze(1)


class ProjectReadout(nn.Module):
    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
        features = torch.cat((x[:, self.start_index :], readout), -1)

        return self.project(features)


class Transpose(nn.Module):
    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        x = x.transpose(self.dim0, self.dim1)
        return x


def forward_vit(pretrained, x):
    b, c, h, w = x.shape

    glob = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4


def _resize_pos_embed(self, posemb, gs_h, gs_w):
    posemb_tok, posemb_grid = (
        posemb[:, : self.start_index],
        posemb[0, self.start_index :],
    )

    gs_old = int(math.sqrt(len(posemb_grid)))

    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
|
109 |
-
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
|
110 |
-
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
|
111 |
-
|
112 |
-
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
|
113 |
-
|
114 |
-
return posemb
|
115 |
-
|
116 |
-
|
117 |
-
def forward_flex(self, x):
|
118 |
-
b, c, h, w = x.shape
|
119 |
-
|
120 |
-
pos_embed = self._resize_pos_embed(
|
121 |
-
self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
|
122 |
-
)
|
123 |
-
|
124 |
-
B = x.shape[0]
|
125 |
-
|
126 |
-
if hasattr(self.patch_embed, "backbone"):
|
127 |
-
x = self.patch_embed.backbone(x)
|
128 |
-
if isinstance(x, (list, tuple)):
|
129 |
-
x = x[-1] # last feature if backbone outputs list/tuple of features
|
130 |
-
|
131 |
-
x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
|
132 |
-
|
133 |
-
if getattr(self, "dist_token", None) is not None:
|
134 |
-
cls_tokens = self.cls_token.expand(
|
135 |
-
B, -1, -1
|
136 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
137 |
-
dist_token = self.dist_token.expand(B, -1, -1)
|
138 |
-
x = torch.cat((cls_tokens, dist_token, x), dim=1)
|
139 |
-
else:
|
140 |
-
cls_tokens = self.cls_token.expand(
|
141 |
-
B, -1, -1
|
142 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
143 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
144 |
-
|
145 |
-
x = x + pos_embed
|
146 |
-
x = self.pos_drop(x)
|
147 |
-
|
148 |
-
for blk in self.blocks:
|
149 |
-
x = blk(x)
|
150 |
-
|
151 |
-
x = self.norm(x)
|
152 |
-
|
153 |
-
return x
|
154 |
-
|
155 |
-
|
156 |
-
activations = {}
|
157 |
-
|
158 |
-
|
159 |
-
def get_activation(name):
|
160 |
-
def hook(model, input, output):
|
161 |
-
activations[name] = output
|
162 |
-
|
163 |
-
return hook
|
164 |
-
|
165 |
-
|
166 |
-
def get_readout_oper(vit_features, features, use_readout, start_index=1):
|
167 |
-
if use_readout == "ignore":
|
168 |
-
readout_oper = [Slice(start_index)] * len(features)
|
169 |
-
elif use_readout == "add":
|
170 |
-
readout_oper = [AddReadout(start_index)] * len(features)
|
171 |
-
elif use_readout == "project":
|
172 |
-
readout_oper = [
|
173 |
-
ProjectReadout(vit_features, start_index) for out_feat in features
|
174 |
-
]
|
175 |
-
else:
|
176 |
-
assert (
|
177 |
-
False
|
178 |
-
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
|
179 |
-
|
180 |
-
return readout_oper
|
181 |
-
|
182 |
-
|
183 |
-
def _make_vit_b16_backbone(
|
184 |
-
model,
|
185 |
-
features=[96, 192, 384, 768],
|
186 |
-
size=[384, 384],
|
187 |
-
hooks=[2, 5, 8, 11],
|
188 |
-
vit_features=768,
|
189 |
-
use_readout="ignore",
|
190 |
-
start_index=1,
|
191 |
-
):
|
192 |
-
pretrained = nn.Module()
|
193 |
-
|
194 |
-
pretrained.model = model
|
195 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
196 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
197 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
198 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
199 |
-
|
200 |
-
pretrained.activations = activations
|
201 |
-
|
202 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
203 |
-
|
204 |
-
# 32, 48, 136, 384
|
205 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
206 |
-
readout_oper[0],
|
207 |
-
Transpose(1, 2),
|
208 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
209 |
-
nn.Conv2d(
|
210 |
-
in_channels=vit_features,
|
211 |
-
out_channels=features[0],
|
212 |
-
kernel_size=1,
|
213 |
-
stride=1,
|
214 |
-
padding=0,
|
215 |
-
),
|
216 |
-
nn.ConvTranspose2d(
|
217 |
-
in_channels=features[0],
|
218 |
-
out_channels=features[0],
|
219 |
-
kernel_size=4,
|
220 |
-
stride=4,
|
221 |
-
padding=0,
|
222 |
-
bias=True,
|
223 |
-
dilation=1,
|
224 |
-
groups=1,
|
225 |
-
),
|
226 |
-
)
|
227 |
-
|
228 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
229 |
-
readout_oper[1],
|
230 |
-
Transpose(1, 2),
|
231 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
232 |
-
nn.Conv2d(
|
233 |
-
in_channels=vit_features,
|
234 |
-
out_channels=features[1],
|
235 |
-
kernel_size=1,
|
236 |
-
stride=1,
|
237 |
-
padding=0,
|
238 |
-
),
|
239 |
-
nn.ConvTranspose2d(
|
240 |
-
in_channels=features[1],
|
241 |
-
out_channels=features[1],
|
242 |
-
kernel_size=2,
|
243 |
-
stride=2,
|
244 |
-
padding=0,
|
245 |
-
bias=True,
|
246 |
-
dilation=1,
|
247 |
-
groups=1,
|
248 |
-
),
|
249 |
-
)
|
250 |
-
|
251 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
252 |
-
readout_oper[2],
|
253 |
-
Transpose(1, 2),
|
254 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
255 |
-
nn.Conv2d(
|
256 |
-
in_channels=vit_features,
|
257 |
-
out_channels=features[2],
|
258 |
-
kernel_size=1,
|
259 |
-
stride=1,
|
260 |
-
padding=0,
|
261 |
-
),
|
262 |
-
)
|
263 |
-
|
264 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
265 |
-
readout_oper[3],
|
266 |
-
Transpose(1, 2),
|
267 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
268 |
-
nn.Conv2d(
|
269 |
-
in_channels=vit_features,
|
270 |
-
out_channels=features[3],
|
271 |
-
kernel_size=1,
|
272 |
-
stride=1,
|
273 |
-
padding=0,
|
274 |
-
),
|
275 |
-
nn.Conv2d(
|
276 |
-
in_channels=features[3],
|
277 |
-
out_channels=features[3],
|
278 |
-
kernel_size=3,
|
279 |
-
stride=2,
|
280 |
-
padding=1,
|
281 |
-
),
|
282 |
-
)
|
283 |
-
|
284 |
-
pretrained.model.start_index = start_index
|
285 |
-
pretrained.model.patch_size = [16, 16]
|
286 |
-
|
287 |
-
# We inject this function into the VisionTransformer instances so that
|
288 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
289 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
290 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
291 |
-
_resize_pos_embed, pretrained.model
|
292 |
-
)
|
293 |
-
|
294 |
-
return pretrained
|
295 |
-
|
296 |
-
|
297 |
-
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
|
298 |
-
model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
|
299 |
-
|
300 |
-
hooks = [5, 11, 17, 23] if hooks == None else hooks
|
301 |
-
return _make_vit_b16_backbone(
|
302 |
-
model,
|
303 |
-
features=[256, 512, 1024, 1024],
|
304 |
-
hooks=hooks,
|
305 |
-
vit_features=1024,
|
306 |
-
use_readout=use_readout,
|
307 |
-
)
|
308 |
-
|
309 |
-
|
310 |
-
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
|
311 |
-
model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
|
312 |
-
|
313 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
314 |
-
return _make_vit_b16_backbone(
|
315 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
316 |
-
)
|
317 |
-
|
318 |
-
|
319 |
-
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
|
320 |
-
model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
|
321 |
-
|
322 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
323 |
-
return _make_vit_b16_backbone(
|
324 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
325 |
-
)
|
326 |
-
|
327 |
-
|
328 |
-
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
|
329 |
-
model = timm.create_model(
|
330 |
-
"vit_deit_base_distilled_patch16_384", pretrained=pretrained
|
331 |
-
)
|
332 |
-
|
333 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
334 |
-
return _make_vit_b16_backbone(
|
335 |
-
model,
|
336 |
-
features=[96, 192, 384, 768],
|
337 |
-
hooks=hooks,
|
338 |
-
use_readout=use_readout,
|
339 |
-
start_index=2,
|
340 |
-
)
|
341 |
-
|
342 |
-
|
343 |
-
def _make_vit_b_rn50_backbone(
|
344 |
-
model,
|
345 |
-
features=[256, 512, 768, 768],
|
346 |
-
size=[384, 384],
|
347 |
-
hooks=[0, 1, 8, 11],
|
348 |
-
vit_features=768,
|
349 |
-
use_vit_only=False,
|
350 |
-
use_readout="ignore",
|
351 |
-
start_index=1,
|
352 |
-
):
|
353 |
-
pretrained = nn.Module()
|
354 |
-
|
355 |
-
pretrained.model = model
|
356 |
-
|
357 |
-
if use_vit_only == True:
|
358 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
359 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
360 |
-
else:
|
361 |
-
pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
|
362 |
-
get_activation("1")
|
363 |
-
)
|
364 |
-
pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
|
365 |
-
get_activation("2")
|
366 |
-
)
|
367 |
-
|
368 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
369 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
370 |
-
|
371 |
-
pretrained.activations = activations
|
372 |
-
|
373 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
374 |
-
|
375 |
-
if use_vit_only == True:
|
376 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
377 |
-
readout_oper[0],
|
378 |
-
Transpose(1, 2),
|
379 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
380 |
-
nn.Conv2d(
|
381 |
-
in_channels=vit_features,
|
382 |
-
out_channels=features[0],
|
383 |
-
kernel_size=1,
|
384 |
-
stride=1,
|
385 |
-
padding=0,
|
386 |
-
),
|
387 |
-
nn.ConvTranspose2d(
|
388 |
-
in_channels=features[0],
|
389 |
-
out_channels=features[0],
|
390 |
-
kernel_size=4,
|
391 |
-
stride=4,
|
392 |
-
padding=0,
|
393 |
-
bias=True,
|
394 |
-
dilation=1,
|
395 |
-
groups=1,
|
396 |
-
),
|
397 |
-
)
|
398 |
-
|
399 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
400 |
-
readout_oper[1],
|
401 |
-
Transpose(1, 2),
|
402 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
403 |
-
nn.Conv2d(
|
404 |
-
in_channels=vit_features,
|
405 |
-
out_channels=features[1],
|
406 |
-
kernel_size=1,
|
407 |
-
stride=1,
|
408 |
-
padding=0,
|
409 |
-
),
|
410 |
-
nn.ConvTranspose2d(
|
411 |
-
in_channels=features[1],
|
412 |
-
out_channels=features[1],
|
413 |
-
kernel_size=2,
|
414 |
-
stride=2,
|
415 |
-
padding=0,
|
416 |
-
bias=True,
|
417 |
-
dilation=1,
|
418 |
-
groups=1,
|
419 |
-
),
|
420 |
-
)
|
421 |
-
else:
|
422 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
423 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
424 |
-
)
|
425 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
426 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
427 |
-
)
|
428 |
-
|
429 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
430 |
-
readout_oper[2],
|
431 |
-
Transpose(1, 2),
|
432 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
433 |
-
nn.Conv2d(
|
434 |
-
in_channels=vit_features,
|
435 |
-
out_channels=features[2],
|
436 |
-
kernel_size=1,
|
437 |
-
stride=1,
|
438 |
-
padding=0,
|
439 |
-
),
|
440 |
-
)
|
441 |
-
|
442 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
443 |
-
readout_oper[3],
|
444 |
-
Transpose(1, 2),
|
445 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
446 |
-
nn.Conv2d(
|
447 |
-
in_channels=vit_features,
|
448 |
-
out_channels=features[3],
|
449 |
-
kernel_size=1,
|
450 |
-
stride=1,
|
451 |
-
padding=0,
|
452 |
-
),
|
453 |
-
nn.Conv2d(
|
454 |
-
in_channels=features[3],
|
455 |
-
out_channels=features[3],
|
456 |
-
kernel_size=3,
|
457 |
-
stride=2,
|
458 |
-
padding=1,
|
459 |
-
),
|
460 |
-
)
|
461 |
-
|
462 |
-
pretrained.model.start_index = start_index
|
463 |
-
pretrained.model.patch_size = [16, 16]
|
464 |
-
|
465 |
-
# We inject this function into the VisionTransformer instances so that
|
466 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
467 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
468 |
-
|
469 |
-
# We inject this function into the VisionTransformer instances so that
|
470 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
471 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
472 |
-
_resize_pos_embed, pretrained.model
|
473 |
-
)
|
474 |
-
|
475 |
-
return pretrained
|
476 |
-
|
477 |
-
|
478 |
-
def _make_pretrained_vitb_rn50_384(
|
479 |
-
pretrained, use_readout="ignore", hooks=None, use_vit_only=False
|
480 |
-
):
|
481 |
-
model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
|
482 |
-
|
483 |
-
hooks = [0, 1, 8, 11] if hooks == None else hooks
|
484 |
-
return _make_vit_b_rn50_backbone(
|
485 |
-
model,
|
486 |
-
features=[256, 512, 768, 768],
|
487 |
-
size=[384, 384],
|
488 |
-
hooks=hooks,
|
489 |
-
use_vit_only=use_vit_only,
|
490 |
-
use_readout=use_readout,
|
491 |
-
)
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/__init__.py
DELETED
File without changes
|
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/__init__.py
DELETED
@@ -1,10 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# flake8: noqa
from .musicgen import MusicGen
from .lm import LMModel
from .encodec import CompressionModel, EncodecModel
spaces/AchyuthGamer/OpenGPT/README.md
DELETED
@@ -1,173 +0,0 @@
---
license: creativeml-openrail-m
title: OpenGPT
emoji: 🚀
colorFrom: blue
colorTo: green
pinned: true
sdk: gradio
app_file: run.py
---
# FreeGPT WebUI v2
<img src="https://github.com/VadimBoev/freegpt-webui-v2/blob/main/webui-gpt.png" width="1000" />

## GPT 3.5/4

<strong>DOES NOT REQUIRE ANY API KEY</strong> ❌🔑

This project features a WebUI utilizing the [G4F API](https://github.com/xtekky/gpt4free). <br>
Experience the power of ChatGPT with a user-friendly interface, enhanced jailbreaks, and completely free.

**Important!** Don't be afraid to ask a question or write about any problem in the "issues".
We will solve the question or problem together! 🌍

You can [buy me a coffee](https://boosty.to/vadimboev/donate) here ☕🤎

## Known bugs 🚧
- Stream mode is not working properly.
- "Operation timed out after 30000 milliseconds" errors.
- Web Access is not working, because the API that was used earlier in the "freegpt-webui" repository from ramonvc stopped working. This will be fixed later.

## Features v2 📢
- Updated g4f
- Fixes to make everything work

## Project Hosting and Demonstration 🌐🚀
The project is hosted on multiple platforms to be tested and modified.
|Platform|Status|API Key|Free|Repo|Demo|
|--|--|--|--|--|--|
|[My site](http://vadimboev.ru:1338/)||◼️|☑️|[FreeGPT WebUI](https://github.com/VadimBoev/freegpt-webui-v2)|[Chat](http://vadimboev.ru:1338/)

## Table of Contents
- [To-Do List](#to-do-list-%EF%B8%8F)
- [Getting Started](#getting-started-white_check_mark)
  - [Cloning the Repository](#cloning-the-repository-inbox_tray)
  - [Install Dependencies](#install-dependencies-wrench)
  - [Running the Application](#running-the-application-rocket)
- [Docker](#docker-)
  - [Prerequisites](#prerequisites)
  - [Running the Docker](#running-the-docker)
- [Incorporated Projects](#incorporated-projects-busts_in_silhouette)
  - [WebUI](#webui)
  - [API FreeGPT](#api-g4f)
- [Star History](#star-history)
- [Legal Notice](#legal-notice)

## Getting Started :white_check_mark:
To get started with this project, you'll need to clone the repository and have [Python](https://www.python.org/downloads/) installed on your system.
(Version 3.10+ is recommended; it also works on 3.9.2 on Debian 11.)

### Cloning the Repository :inbox_tray:
Run the following command to clone the repository:

```
git clone https://github.com/VadimBoev/freegpt-webui-v2.git
```

### Install Dependencies :wrench:
Navigate to the project directory:
```
cd freegpt-webui-v2
```

Install the dependencies:
```
pip install -r requirements.txt
```
## Running the Application :rocket:
To run the application, run the following command:
```
python run.py
```

Access the application in your browser using the URL:
```
http://127.0.0.1:1338
```
or
```
http://localhost:1338
```

## Docker 🐳
### Prerequisites
Before you start, make sure you have installed [Docker](https://www.docker.com/get-started) on your machine.

### Running the Docker
Pull the Docker image from Docker Hub:
```
docker pull VadimBoev/freegpt-webui-v2
```

Run the application using Docker:
```
docker run -p 1338:1338 VadimBoev/freegpt-webui-v2
```

Access the application in your browser using the URL:
```
http://127.0.0.1:1338
```
or
```
http://localhost:1338
```

When you're done using the application, stop the Docker containers using the following command:
```
docker stop <container-id>
```

## Incorporated Projects :busts_in_silhouette:
I highly recommend visiting and supporting both projects.

### WebUI
The application interface was incorporated from the [chatgpt-clone](https://github.com/xtekky/chatgpt-clone) repository.

### API G4F
The free GPT-4 API was incorporated from the [GPT4Free](https://github.com/xtekky/gpt4free) repository.

<br>

## Star History
[Star History Chart](https://star-history.com/#VadimBoev/freegpt-webui-v2&Timeline)

<br>

## Legal Notice
This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security or request the removal of their site from this repository.

Please note the following:

1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.

2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. We strongly recommend users to follow the TOS of each website.

3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.

4. **Copyright**: All content in this repository, including but not limited to code, images, and documentation, is the intellectual property of the repository author, unless otherwise stated. Unauthorized copying, distribution, or use of any content in this repository is strictly prohibited without the express written consent of the repository author.

5. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.

6. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.

By using this repository or any code related to it, you agree to these terms. The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you may comply with the GNU GPL license this repository uses.
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetChildrenHeight.js
DELETED
@@ -1,58 +0,0 @@
var GetChildrenHeight = function (minimumMode) {
    if (this.rexSizer.hidden) {
        return 0;
    }

    if (minimumMode === undefined) {
        minimumMode = true;
    }

    var result = 0;
    var children = this.sizerChildren;
    var child, padding, childHeight;
    if (this.orientation === 0) { // x
        // Get maximum height
        for (var i = 0, cnt = children.length; i < cnt; i++) {
            child = children[i];
            if (child.rexSizer.hidden) {
                continue;
            }

            padding = child.rexSizer.padding;
            childHeight = this.getChildHeight(child) + padding.top + padding.bottom;
            result = Math.max(childHeight, result);
        }
    } else {
        // Get summation of minimum height
        var itemSpace = this.space.item;
        var isFirstChild = true;
        for (var i = 0, cnt = children.length; i < cnt; i++) {
            child = children[i];
            if (!child.hasOwnProperty('rexSizer')) {
                continue;
            }
            if (child.rexSizer.hidden) {
                continue;
            }

            if ((child.rexSizer.proportion === 0) || minimumMode) {
                childHeight = this.getChildHeight(child);
            } else {
                childHeight = 0;
            }
            padding = child.rexSizer.padding;
            childHeight += (padding.top + padding.bottom);

            if (isFirstChild) {
                isFirstChild = false;
            } else {
                childHeight += itemSpace;
            }

            result += childHeight;
        }
    }
    return result + this.space.top + this.space.bottom;
}

export default GetChildrenHeight;
spaces/Akmyradov/TurkmenTTSweSTT/vits/attentions.py
DELETED
@@ -1,303 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from torch.nn import functional as F
|
7 |
-
|
8 |
-
import commons
|
9 |
-
import modules
|
10 |
-
from modules import LayerNorm
|
11 |
-
|
12 |
-
|
13 |
-
class Encoder(nn.Module):
|
14 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
15 |
-
super().__init__()
|
16 |
-
self.hidden_channels = hidden_channels
|
17 |
-
self.filter_channels = filter_channels
|
18 |
-
self.n_heads = n_heads
|
19 |
-
self.n_layers = n_layers
|
20 |
-
self.kernel_size = kernel_size
|
21 |
-
self.p_dropout = p_dropout
|
22 |
-
self.window_size = window_size
|
23 |
-
|
24 |
-
self.drop = nn.Dropout(p_dropout)
|
25 |
-
self.attn_layers = nn.ModuleList()
|
26 |
-
self.norm_layers_1 = nn.ModuleList()
|
27 |
-
self.ffn_layers = nn.ModuleList()
|
28 |
-
self.norm_layers_2 = nn.ModuleList()
|
29 |
-
for i in range(self.n_layers):
|
30 |
-
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
31 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
32 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
33 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
34 |
-
|
35 |
-
def forward(self, x, x_mask):
|
36 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
37 |
-
x = x * x_mask
|
38 |
-
for i in range(self.n_layers):
|
39 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
40 |
-
y = self.drop(y)
|
41 |
-
x = self.norm_layers_1[i](x + y)
|
42 |
-
|
43 |
-
y = self.ffn_layers[i](x, x_mask)
|
44 |
-
y = self.drop(y)
|
45 |
-
x = self.norm_layers_2[i](x + y)
|
46 |
-
x = x * x_mask
|
47 |
-
return x
|
48 |
-
|
49 |
-
|
50 |
-
class Decoder(nn.Module):
|
51 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
|
52 |
-
super().__init__()
|
53 |
-
self.hidden_channels = hidden_channels
|
54 |
-
self.filter_channels = filter_channels
|
55 |
-
self.n_heads = n_heads
|
56 |
-
self.n_layers = n_layers
|
57 |
-
self.kernel_size = kernel_size
|
58 |
-
self.p_dropout = p_dropout
|
59 |
-
self.proximal_bias = proximal_bias
|
60 |
-
self.proximal_init = proximal_init
|
61 |
-
|
62 |
-
self.drop = nn.Dropout(p_dropout)
|
63 |
-
self.self_attn_layers = nn.ModuleList()
|
64 |
-
self.norm_layers_0 = nn.ModuleList()
|
65 |
-
self.encdec_attn_layers = nn.ModuleList()
|
66 |
-
self.norm_layers_1 = nn.ModuleList()
|
67 |
-
self.ffn_layers = nn.ModuleList()
|
68 |
-
self.norm_layers_2 = nn.ModuleList()
|
69 |
-
for i in range(self.n_layers):
|
70 |
-
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
|
71 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
72 |
-
self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
73 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
74 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
75 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
76 |
-
|
77 |
-
def forward(self, x, x_mask, h, h_mask):
|
78 |
-
"""
|
79 |
-
x: decoder input
|
80 |
-
h: encoder output
|
81 |
-
"""
|
82 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
83 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
84 |
-
x = x * x_mask
|
85 |
-
for i in range(self.n_layers):
|
86 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
87 |
-
y = self.drop(y)
|
88 |
-
x = self.norm_layers_0[i](x + y)
|
89 |
-
|
90 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
91 |
-
y = self.drop(y)
|
92 |
-
x = self.norm_layers_1[i](x + y)
|
93 |
-
|
94 |
-
y = self.ffn_layers[i](x, x_mask)
|
95 |
-
y = self.drop(y)
|
96 |
-
x = self.norm_layers_2[i](x + y)
|
97 |
-
x = x * x_mask
|
98 |
-
return x
|
99 |
-
|
100 |
-
|
101 |
-
class MultiHeadAttention(nn.Module):
|
102 |
-
def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
|
103 |
-
super().__init__()
|
104 |
-
assert channels % n_heads == 0
|
105 |
-
|
106 |
-
self.channels = channels
|
107 |
-
self.out_channels = out_channels
|
108 |
-
self.n_heads = n_heads
|
109 |
-
self.p_dropout = p_dropout
|
110 |
-
self.window_size = window_size
|
111 |
-
self.heads_share = heads_share
|
112 |
-
self.block_length = block_length
|
113 |
-
self.proximal_bias = proximal_bias
|
114 |
-
self.proximal_init = proximal_init
|
115 |
-
self.attn = None
|
116 |
-
|
117 |
-
self.k_channels = channels // n_heads
|
118 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
119 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
120 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
121 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
122 |
-
self.drop = nn.Dropout(p_dropout)
|
123 |
-
|
124 |
-
if window_size is not None:
|
125 |
-
n_heads_rel = 1 if heads_share else n_heads
|
126 |
-
rel_stddev = self.k_channels**-0.5
|
127 |
-
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
128 |
-
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
129 |
-
|
130 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
131 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
132 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
133 |
-
if proximal_init:
|
134 |
-
with torch.no_grad():
|
135 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
136 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
137 |
-
|
138 |
-
def forward(self, x, c, attn_mask=None):
|
139 |
-
q = self.conv_q(x)
|
140 |
-
k = self.conv_k(c)
|
141 |
-
v = self.conv_v(c)
|
142 |
-
|
143 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
144 |
-
|
145 |
-
x = self.conv_o(x)
|
146 |
-
return x
|
147 |
-
|
148 |
-
def attention(self, query, key, value, mask=None):
|
149 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
150 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
151 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
152 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
153 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
154 |
-
|
155 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
156 |
-
if self.window_size is not None:
|
157 |
-
assert t_s == t_t, "Relative attention is only available for self-attention."
|
158 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
159 |
-
rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
|
160 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
161 |
-
scores = scores + scores_local
|
162 |
-
if self.proximal_bias:
|
163 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
164 |
-
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
165 |
-
if mask is not None:
|
166 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
167 |
-
if self.block_length is not None:
|
168 |
-
assert t_s == t_t, "Local attention is only available for self-attention."
|
169 |
-
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
|
170 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
171 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
172 |
-
p_attn = self.drop(p_attn)
|
173 |
-
output = torch.matmul(p_attn, value)
|
174 |
-
if self.window_size is not None:
|
175 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
176 |
-
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
|
177 |
-
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
|
178 |
-
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
179 |
-
return output, p_attn
|
180 |
-
|
181 |
-
def _matmul_with_relative_values(self, x, y):
|
182 |
-
"""
|
183 |
-
x: [b, h, l, m]
|
184 |
-
y: [h or 1, m, d]
|
185 |
-
ret: [b, h, l, d]
|
186 |
-
"""
|
187 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
188 |
-
return ret
|
189 |
-
|
190 |
-
def _matmul_with_relative_keys(self, x, y):
|
191 |
-
"""
|
192 |
-
x: [b, h, l, d]
|
193 |
-
y: [h or 1, m, d]
|
194 |
-
ret: [b, h, l, m]
|
195 |
-
"""
|
196 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
197 |
-
return ret
|
198 |
-
|
199 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
200 |
-
max_relative_position = 2 * self.window_size + 1
|
201 |
-
# Pad first before slice to avoid using cond ops.
|
202 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
203 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
204 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
205 |
-
if pad_length > 0:
|
206 |
-
padded_relative_embeddings = F.pad(
|
207 |
-
relative_embeddings,
|
208 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
|
209 |
-
else:
|
210 |
-
padded_relative_embeddings = relative_embeddings
|
211 |
-
used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
|
212 |
-
return used_relative_embeddings
|
213 |
-
|
214 |
-
def _relative_position_to_absolute_position(self, x):
|
215 |
-
"""
|
216 |
-
x: [b, h, l, 2*l-1]
|
217 |
-
ret: [b, h, l, l]
|
218 |
-
"""
|
219 |
-
batch, heads, length, _ = x.size()
|
220 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
221 |
-
x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
|
222 |
-
|
223 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
224 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
225 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
|
226 |
-
|
227 |
-
# Reshape and slice out the padded elements.
|
228 |
-
x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
|
229 |
-
return x_final
|
230 |
-
|
231 |
-
def _absolute_position_to_relative_position(self, x):
|
232 |
-
"""
|
233 |
-
x: [b, h, l, l]
|
234 |
-
ret: [b, h, l, 2*l-1]
|
235 |
-
"""
|
236 |
-
batch, heads, length, _ = x.size()
|
237 |
-
# padd along column
|
238 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
|
239 |
-
x_flat = x.view([batch, heads, length**2 + length*(length -1)])
|
240 |
-
# add 0's in the beginning that will skew the elements after reshape
|
241 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
242 |
-
x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
|
243 |
-
return x_final
|
244 |
-
|
245 |
-
def _attention_bias_proximal(self, length):
|
246 |
-
"""Bias for self-attention to encourage attention to close positions.
|
247 |
-
Args:
|
248 |
-
length: an integer scalar.
|
249 |
-
Returns:
|
250 |
-
a Tensor with shape [1, 1, length, length]
|
251 |
-
"""
|
252 |
-
r = torch.arange(length, dtype=torch.float32)
|
253 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
254 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
255 |
-
|
256 |
-
|
257 |
-
class FFN(nn.Module):
|
258 |
-
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
|
259 |
-
super().__init__()
|
260 |
-
self.in_channels = in_channels
|
261 |
-
self.out_channels = out_channels
|
262 |
-
self.filter_channels = filter_channels
|
263 |
-
self.kernel_size = kernel_size
|
264 |
-
self.p_dropout = p_dropout
|
265 |
-
self.activation = activation
|
266 |
-
self.causal = causal
|
267 |
-
|
268 |
-
if causal:
|
269 |
-
self.padding = self._causal_padding
|
270 |
-
else:
|
271 |
-
self.padding = self._same_padding
|
272 |
-
|
273 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
274 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
275 |
-
self.drop = nn.Dropout(p_dropout)
|
276 |
-
|
277 |
-
def forward(self, x, x_mask):
|
278 |
-
x = self.conv_1(self.padding(x * x_mask))
|
279 |
-
if self.activation == "gelu":
|
280 |
-
x = x * torch.sigmoid(1.702 * x)
|
281 |
-
else:
|
282 |
-
x = torch.relu(x)
|
283 |
-
x = self.drop(x)
|
284 |
-
x = self.conv_2(self.padding(x * x_mask))
|
285 |
-
return x * x_mask
|
286 |
-
|
287 |
-
def _causal_padding(self, x):
|
288 |
-
if self.kernel_size == 1:
|
289 |
-
return x
|
290 |
-
pad_l = self.kernel_size - 1
|
291 |
-
pad_r = 0
|
292 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
293 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
294 |
-
return x
|
295 |
-
|
296 |
-
def _same_padding(self, x):
|
297 |
-
if self.kernel_size == 1:
|
298 |
-
return x
|
299 |
-
pad_l = (self.kernel_size - 1) // 2
|
300 |
-
pad_r = self.kernel_size // 2
|
301 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
302 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
303 |
-
return x
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/utils/log_utils.py
DELETED
@@ -1,79 +0,0 @@
import numpy as np
from PIL import Image
import wandb
from PTI.configs import global_config
import torch
import matplotlib.pyplot as plt


def log_image_from_w(w, G, name):
    img = get_image_from_w(w, G)
    pillow_image = Image.fromarray(img)
    wandb.log(
        {f"{name}": [
            wandb.Image(pillow_image, caption=f"current inversion {name}")]},
        step=global_config.training_step)


def log_images_from_w(ws, G, names):
    for name, w in zip(names, ws):
        w = w.to(global_config.device)
        log_image_from_w(w, G, name)


def plot_image_from_w(w, G):
    img = get_image_from_w(w, G)
    pillow_image = Image.fromarray(img)
    plt.imshow(pillow_image)
    plt.show()


def plot_image(img):
    img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
    pillow_image = Image.fromarray(img[0])
    plt.imshow(pillow_image)
    plt.show()


def save_image(name, method_type, results_dir, image, run_id):
    image.save(f'{results_dir}/{method_type}_{name}_{run_id}.jpg')


def save_w(w, G, name, method_type, results_dir):
    im = get_image_from_w(w, G)
    im = Image.fromarray(im, mode='RGB')
    # NOTE: save_image also expects a run_id argument; this call is kept as in the original file.
    save_image(name, method_type, results_dir, im)


def save_concat_image(base_dir, image_latents, new_inv_image_latent, new_G,
                      old_G,
                      file_name,
                      extra_image=None):
    images_to_save = []
    if extra_image is not None:
        images_to_save.append(extra_image)
    for latent in image_latents:
        images_to_save.append(get_image_from_w(latent, old_G))
    images_to_save.append(get_image_from_w(new_inv_image_latent, new_G))
    result_image = create_alongside_images(images_to_save)
    result_image.save(f'{base_dir}/{file_name}.jpg')


def save_single_image(base_dir, image_latent, G, file_name):
    image_to_save = get_image_from_w(image_latent, G)
    image_to_save = Image.fromarray(image_to_save, mode='RGB')
    image_to_save.save(f'{base_dir}/{file_name}.jpg')


def create_alongside_images(images):
    res = np.concatenate([np.array(image) for image in images], axis=1)
    return Image.fromarray(res, mode='RGB')


def get_image_from_w(w, G):
    if len(w.size()) <= 2:
        w = w.unsqueeze(0)
    with torch.no_grad():
        img = G.synthesis(w, noise_mode='const')
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
    return img[0]
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/depth2img.md
DELETED
@@ -1,57 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided depth-to-image generation

[[open-in-colab]]

With [`StableDiffusionDepth2ImgPipeline`] you can pass a text prompt and an initial image to condition the generation of a new image. You can also pass a `depth_map` to preserve the structure of the image. If no `depth_map` is provided, the pipeline predicts the depth automatically via its integrated [depth-estimation model](https://github.com/isl-org/MiDaS). (A sketch of passing an explicit depth map follows at the end of this page.)

First, create an instance of [`StableDiffusionDepth2ImgPipeline`]:

```python
import torch
import requests
from PIL import Image

from diffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16,
).to("cuda")
```

Now pass a prompt to the pipeline. You can also pass a `negative_prompt` to keep certain words from guiding the generation:

```python
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)
prompt = "two tigers"
n_prompt = "bad, deformed, ugly, bad anatomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
image
```

| Input | Output |
|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |

Play around with the Space below and see whether you notice a difference between images generated with and without a depth map!

<iframe
  src="https://radames-stable-diffusion-depth2img.hf.space"
  frameborder="0"
  width="850"
  height="500"
></iframe>
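As mentioned above, an explicit `depth_map` can also be supplied. The following is only a rough sketch, under the assumption that the pipeline's `__call__` accepts an optional `depth_map` tensor with one depth value per pixel (batch dimension first); check the installed diffusers version for the exact expected shape. The MiDaS-style estimator from the transformers `depth-estimation` pipeline is just one way to produce such a map.

```python
# Sketch only: `depth_map` shape requirements may differ across diffusers versions.
import numpy as np
import torch
from transformers import pipeline as hf_pipeline

depth_estimator = hf_pipeline("depth-estimation")            # MiDaS/DPT-style model
depth_image = depth_estimator(init_image)["depth"]           # PIL image of per-pixel depth
depth_map = torch.from_numpy(np.array(depth_image)).float()[None]  # (1, H, W)

image = pipe(
    prompt=prompt,
    image=init_image,
    negative_prompt=n_prompt,
    depth_map=depth_map.to("cuda", dtype=torch.float16),
    strength=0.7,
).images[0]
```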
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py
DELETED
@@ -1,85 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_unclip/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_detection/configs/fsaf/README.md
DELETED
@@ -1,45 +0,0 @@
# Feature Selective Anchor-Free Module for Single-Shot Object Detection

[ALGORITHM]

FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)).
Actually it is equivalent to the anchor-based method with only one anchor at each feature map position in each FPN level.
And this is how we implemented it.
Only the anchor-free branch is released for its better compatibility with the current framework and less computational budget.

In the original paper, feature maps within the central 0.2-0.5 area of a gt box are tagged as ignored. However,
it is empirically found that a hard threshold (0.2-0.2) gives a further gain on the performance. (see the table below)

## Main Results

### Results on R50/R101/X101-FPN

| Backbone | ignore range | ms-train| Lr schd |Train Mem (GB)| Train time (s/iter) | Inf time (fps) | box AP | Config | Download |
|:----------:| :-------: |:-------:|:-------:|:------------:|:---------------:|:--------------:|:-------------:|:------:|:--------:|
| R-50 | 0.2-0.5 | N | 1x | 3.15 | 0.43 | 12.3 | 36.0 (35.9) | | [model](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715-b555b0e0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715_094657.log.json) |
| R-50 | 0.2-0.2 | N | 1x | 3.15 | 0.43 | 13.0 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco_20200428_072327.log.json)|
| R-101 | 0.2-0.2 | N | 1x | 5.08 | 0.58 | 10.8 | 39.3 (37.9) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco_20200428_160348.log.json)|
| X-101 | 0.2-0.2 | N | 1x | 9.38 | 1.23 | 5.6 | 42.4 (41.0) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco_20200428_160424.log.json)|

**Notes:**

- *1x means the model is trained for 12 epochs.*
- *AP values in the brackets represent those reported in the original paper.*
- *All results are obtained with a single model and single-scale test.*
- *X-101 backbone represents ResNext-101-64x4d.*
- *All pretrained backbones use pytorch style.*
- *All models are trained on 8 Titan-XP gpus and tested on a single gpu.*

## Citations

BibTeX reference is as follows.

```latex
@inproceedings{zhu2019feature,
  title={Feature Selective Anchor-Free Module for Single-Shot Object Detection},
  author={Zhu, Chenchen and He, Yihui and Savvides, Marios},
  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
  pages={840--849},
  year={2019}
}
```
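For context, a minimal inference sketch using one of the FSAF checkpoints listed in the table above, assuming an MMDetection 2.x installation and that the R-50 checkpoint has been downloaded locally; the local file names and `demo.jpg` are placeholders, not part of the original README.

```python
# Hedged sketch: run the FSAF R-50 config from the table with MMDetection's high-level API.
from mmdet.apis import init_detector, inference_detector

config_file = "configs/fsaf/fsaf_r50_fpn_1x_coco.py"          # config linked in the table
checkpoint_file = "fsaf_r50_fpn_1x_coco-94ccc51f.pth"          # downloaded from the "model" link

# Build the detector and load the trained weights (use device="cpu" if no GPU is available)
model = init_detector(config_file, checkpoint_file, device="cuda:0")

# Single-image, single-scale inference, matching the evaluation protocol noted above
result = inference_detector(model, "demo.jpg")
```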
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/__init__.py
DELETED
@@ -1,4 +0,0 @@
from .builder import build_iou_calculator
from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps

__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
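A small usage sketch for the `bbox_overlaps` helper re-exported by this `__init__.py`, assuming an MMDetection 2.x environment; boxes are `(x1, y1, x2, y2)` tensors and the printed values are approximate.

```python
# Hedged sketch: pairwise IoU between one predicted box and two reference boxes.
import torch
from mmdet.core.bbox.iou_calculators import bbox_overlaps

pred = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
gt = torch.tensor([[5.0, 5.0, 15.0, 15.0], [0.0, 0.0, 10.0, 10.0]])

ious = bbox_overlaps(pred, gt)  # shape (1, 2); roughly [[0.1429, 1.0000]]
print(ious)
```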
spaces/ArkanDash/rvc-models/vc_infer_pipeline.py
DELETED
@@ -1,306 +0,0 @@
import numpy as np, parselmouth, torch, pdb
from time import time as ttime
import torch.nn.functional as F
from config import x_pad, x_query, x_center, x_max
import scipy.signal as signal
import pyworld, os, traceback, faiss
from scipy import signal

bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)


class VC(object):
    def __init__(self, tgt_sr, device, is_half):
        self.sr = 16000  # hubert输入采样率
        self.window = 160  # 每帧点数
        self.t_pad = self.sr * x_pad  # 每条前后pad时间
        self.t_pad_tgt = tgt_sr * x_pad
        self.t_pad2 = self.t_pad * 2
        self.t_query = self.sr * x_query  # 查询切点前后查询时间
        self.t_center = self.sr * x_center  # 查询切点位置
        self.t_max = self.sr * x_max  # 免查询时长阈值
        self.device = device
        self.is_half = is_half

    def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
        time_step = self.window / self.sr * 1000
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        if f0_method == "pm":
            f0 = (
                parselmouth.Sound(x, self.sr)
                .to_pitch_ac(
                    time_step=time_step / 1000,
                    voicing_threshold=0.6,
                    pitch_floor=f0_min,
                    pitch_ceiling=f0_max,
                )
                .selected_array["frequency"]
            )
            pad_size = (p_len - len(f0) + 1) // 2
            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
                f0 = np.pad(
                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
                )
        elif f0_method == "harvest":
            f0, t = pyworld.harvest(
                x.astype(np.double),
                fs=self.sr,
                f0_ceil=f0_max,
                f0_floor=f0_min,
                frame_period=10,
            )
            f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
            f0 = signal.medfilt(f0, 3)
        f0 *= pow(2, f0_up_key / 12)
        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        tf0 = self.sr // self.window  # 每秒f0点数
        if inp_f0 is not None:
            delta_t = np.round(
                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
            ).astype("int16")
            replace_f0 = np.interp(
                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
            )
            shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
            f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        f0bak = f0.copy()
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        f0_coarse = np.rint(f0_mel).astype(np.int)
        return f0_coarse, f0bak  # 1-0

    def vc(
        self,
        model,
        net_g,
        sid,
        audio0,
        pitch,
        pitchf,
        times,
        index,
        big_npy,
        index_rate,
    ):  # ,file_index,file_big_npy
        feats = torch.from_numpy(audio0)
        if self.is_half:
            feats = feats.half()
        else:
            feats = feats.float()
        if feats.dim() == 2:  # double channels
            feats = feats.mean(-1)
        assert feats.dim() == 1, feats.dim()
        feats = feats.view(1, -1)
        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)

        inputs = {
            "source": feats.to(self.device),
            "padding_mask": padding_mask,
            "output_layer": 9,  # layer 9
        }
        t0 = ttime()
        with torch.no_grad():
            logits = model.extract_features(**inputs)
            feats = model.final_proj(logits[0])

        if (
            isinstance(index, type(None)) == False
            and isinstance(big_npy, type(None)) == False
            and index_rate != 0
        ):
            npy = feats[0].cpu().numpy()
            if self.is_half:
                npy = npy.astype("float32")
            _, I = index.search(npy, 1)
            npy = big_npy[I.squeeze()]
            if self.is_half:
                npy = npy.astype("float16")
            feats = (
                torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
                + (1 - index_rate) * feats
            )

        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
        t1 = ttime()
        p_len = audio0.shape[0] // self.window
        if feats.shape[1] < p_len:
            p_len = feats.shape[1]
            if pitch != None and pitchf != None:
                pitch = pitch[:, :p_len]
                pitchf = pitchf[:, :p_len]
        p_len = torch.tensor([p_len], device=self.device).long()
        with torch.no_grad():
            if pitch != None and pitchf != None:
                audio1 = (
                    (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
                    .data.cpu()
                    .float()
                    .numpy()
                    .astype(np.int16)
                )
            else:
                audio1 = (
                    (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
                    .data.cpu()
                    .float()
                    .numpy()
                    .astype(np.int16)
                )
        del feats, p_len, padding_mask
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        t2 = ttime()
        times[0] += t1 - t0
        times[2] += t2 - t1
        return audio1

    def pipeline(
        self,
        model,
        net_g,
        sid,
        audio,
        times,
        f0_up_key,
        f0_method,
        file_index,
        file_big_npy,
        index_rate,
        if_f0,
        f0_file=None,
    ):
        if (
            file_big_npy != ""
            and file_index != ""
            and os.path.exists(file_big_npy) == True
            and os.path.exists(file_index) == True
            and index_rate != 0
        ):
            try:
                index = faiss.read_index(file_index)
                big_npy = np.load(file_big_npy)
            except:
                traceback.print_exc()
                index = big_npy = None
        else:
            index = big_npy = None
            print("Feature retrieval library doesn't exist or ratio is 0")
        audio = signal.filtfilt(bh, ah, audio)
        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
        opt_ts = []
        if audio_pad.shape[0] > self.t_max:
            audio_sum = np.zeros_like(audio)
            for i in range(self.window):
                audio_sum += audio_pad[i : i - self.window]
            for t in range(self.t_center, audio.shape[0], self.t_center):
                opt_ts.append(
                    t
                    - self.t_query
                    + np.where(
                        np.abs(audio_sum[t - self.t_query : t + self.t_query])
                        == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
                    )[0][0]
                )
        s = 0
        audio_opt = []
        t = None
        t1 = ttime()
        audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
        p_len = audio_pad.shape[0] // self.window
        inp_f0 = None
        if hasattr(f0_file, "name") == True:
            try:
                with open(f0_file.name, "r") as f:
                    lines = f.read().strip("\n").split("\n")
                inp_f0 = []
                for line in lines:
                    inp_f0.append([float(i) for i in line.split(",")])
                inp_f0 = np.array(inp_f0, dtype="float32")
            except:
                traceback.print_exc()
        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
        pitch, pitchf = None, None
        if if_f0 == 1:
            pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
            pitch = pitch[:p_len]
            pitchf = pitchf[:p_len]
            pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
            pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
        t2 = ttime()
        times[1] += t2 - t1
        for t in opt_ts:
            t = t // self.window * self.window
            if if_f0 == 1:
                audio_opt.append(
                    self.vc(
                        model,
                        net_g,
                        sid,
                        audio_pad[s : t + self.t_pad2 + self.window],
                        pitch[:, s // self.window : (t + self.t_pad2) // self.window],
                        pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
                        times,
                        index,
                        big_npy,
                        index_rate,
                    )[self.t_pad_tgt : -self.t_pad_tgt]
                )
            else:
                audio_opt.append(
                    self.vc(
                        model,
                        net_g,
                        sid,
                        audio_pad[s : t + self.t_pad2 + self.window],
                        None,
                        None,
                        times,
                        index,
                        big_npy,
                        index_rate,
                    )[self.t_pad_tgt : -self.t_pad_tgt]
                )
            s = t
        if if_f0 == 1:
            audio_opt.append(
                self.vc(
                    model,
                    net_g,
                    sid,
                    audio_pad[t:],
                    pitch[:, t // self.window :] if t is not None else pitch,
                    pitchf[:, t // self.window :] if t is not None else pitchf,
                    times,
                    index,
                    big_npy,
                    index_rate,
                )[self.t_pad_tgt : -self.t_pad_tgt]
            )
        else:
            audio_opt.append(
                self.vc(
                    model,
                    net_g,
                    sid,
                    audio_pad[t:],
                    None,
                    None,
                    times,
                    index,
                    big_npy,
                    index_rate,
                )[self.t_pad_tgt : -self.t_pad_tgt]
            )
        audio_opt = np.concatenate(audio_opt)
        del pitch, pitchf, sid
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return audio_opt
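A small numerical sketch of the coarse-F0 mapping used in `get_f0` above: F0 in Hz is pushed through a mel-like curve and squeezed into the 1..255 integer range consumed by the model. The sample frequencies below are illustrative only and the printed values are approximate.

```python
# Hedged sketch: reproduce the f0 -> f0_coarse quantization from get_f0 on a few sample values.
import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 100.0, 440.0, 1000.0])  # Hz; 0 stands for an unvoiced frame
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
f0_coarse = np.rint(f0_mel).astype(int)  # approximately [1, 20, 122, 238]
print(f0_coarse)
```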
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/installation_report.py
DELETED
@@ -1,53 +0,0 @@
from typing import Any, Dict, Sequence

from pip._vendor.packaging.markers import default_environment

from pip import __version__
from pip._internal.req.req_install import InstallRequirement


class InstallationReport:
    def __init__(self, install_requirements: Sequence[InstallRequirement]):
        self._install_requirements = install_requirements

    @classmethod
    def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]:
        assert ireq.download_info, f"No download_info for {ireq}"
        res = {
            # PEP 610 json for the download URL. download_info.archive_info.hashes may
            # be absent when the requirement was installed from the wheel cache
            # and the cache entry was populated by an older pip version that did not
            # record origin.json.
            "download_info": ireq.download_info.to_dict(),
            # is_direct is true if the requirement was a direct URL reference (which
            # includes editable requirements), and false if the requirement was
            # downloaded from a PEP 503 index or --find-links.
            "is_direct": bool(ireq.original_link),
            # requested is true if the requirement was specified by the user (aka
            # top level requirement), and false if it was installed as a dependency of a
            # requirement. https://peps.python.org/pep-0376/#requested
            "requested": ireq.user_supplied,
            # PEP 566 json encoding for metadata
            # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata
            "metadata": ireq.get_dist().metadata_dict,
        }
        if ireq.user_supplied and ireq.extras:
            # For top level requirements, the list of requested extras, if any.
            res["requested_extras"] = list(sorted(ireq.extras))
        return res

    def to_dict(self) -> Dict[str, Any]:
        return {
            "version": "1",
            "pip_version": __version__,
            "install": [
                self._install_req_to_dict(ireq) for ireq in self._install_requirements
            ],
            # https://peps.python.org/pep-0508/#environment-markers
            # TODO: currently, the resolver uses the default environment to evaluate
            # environment markers, so that is what we report here. In the future, it
            # should also take into account options such as --python-version or
            # --platform, perhaps under the form of an environment_override field?
            # https://github.com/pypa/pip/issues/11198
            "environment": default_environment(),
        }
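A hedged sketch of consuming the report this class produces: assuming pip 22.2 or newer, a report file can be generated with `pip install --dry-run --report report.json <pkg>` and then read back with the keys emitted by `to_dict()` above; the file name is an assumption.

```python
# Hedged sketch: list the top-level (user-requested) packages recorded in an installation report.
import json

with open("report.json", encoding="utf-8") as fh:
    report = json.load(fh)

print("report version:", report["version"], "pip:", report["pip_version"])
for item in report["install"]:
    if item.get("requested"):
        meta = item["metadata"]
        print(meta["name"], meta["version"], "direct URL" if item["is_direct"] else "from index")
```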
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/sysconfig.py
DELETED
@@ -1,558 +0,0 @@
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.

Written by: Fred L. Drake, Jr.
Email: <[email protected]>
"""

import os
import re
import sys
import sysconfig
import pathlib

from .errors import DistutilsPlatformError
from . import py39compat
from ._functools import pass_none

IS_PYPY = '__pypy__' in sys.builtin_module_names

# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)

# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
    project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
    if sys.executable:
        project_base = os.path.dirname(os.path.abspath(sys.executable))
    else:
        # sys.executable can be empty if argv[0] has been changed and Python is
        # unable to retrieve the real program name
        project_base = os.getcwd()


def _is_python_source_dir(d):
    """
    Return True if the target directory appears to point to an
    un-installed Python.
    """
    modules = pathlib.Path(d).joinpath('Modules')
    return any(modules.joinpath(fn).is_file() for fn in ('Setup', 'Setup.local'))


_sys_home = getattr(sys, '_home', None)


def _is_parent(dir_a, dir_b):
    """
    Return True if a is a parent of b.
    """
    return os.path.normcase(dir_a).startswith(os.path.normcase(dir_b))


if os.name == 'nt':

    @pass_none
    def _fix_pcbuild(d):
        # In a venv, sys._home will be inside BASE_PREFIX rather than PREFIX.
        prefixes = PREFIX, BASE_PREFIX
        matched = (
            prefix
            for prefix in prefixes
            if _is_parent(d, os.path.join(prefix, "PCbuild"))
        )
        return next(matched, d)

    project_base = _fix_pcbuild(project_base)
    _sys_home = _fix_pcbuild(_sys_home)


def _python_build():
    if _sys_home:
        return _is_python_source_dir(_sys_home)
    return _is_python_source_dir(project_base)


python_build = _python_build()


# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
    if not python_build:
        build_flags = sys.abiflags
except AttributeError:
    # It's not a configure-based build, so the sys module doesn't have
    # this attribute, which is fine.
    pass


def get_python_version():
    """Return a string containing the major and minor Python version,
    leaving off the patchlevel. Sample return values could be '1.5'
    or '2.2'.
    """
    return '%d.%d' % sys.version_info[:2]


def get_python_inc(plat_specific=0, prefix=None):
    """Return the directory containing installed Python header files.

    If 'plat_specific' is false (the default), this is the path to the
    non-platform-specific header files, i.e. Python.h and so on;
    otherwise, this is the path to platform-specific header files
    (namely pyconfig.h).

    If 'prefix' is supplied, use it instead of sys.base_prefix or
    sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
    """
    default_prefix = BASE_EXEC_PREFIX if plat_specific else BASE_PREFIX
    resolved_prefix = prefix if prefix is not None else default_prefix
    try:
        getter = globals()[f'_get_python_inc_{os.name}']
    except KeyError:
        raise DistutilsPlatformError(
            "I don't know where Python installs its C header files "
            "on platform '%s'" % os.name
        )
    return getter(resolved_prefix, prefix, plat_specific)


def _get_python_inc_posix(prefix, spec_prefix, plat_specific):
    if IS_PYPY and sys.version_info < (3, 8):
        return os.path.join(prefix, 'include')
    return (
        _get_python_inc_posix_python(plat_specific)
        or _get_python_inc_from_config(plat_specific, spec_prefix)
        or _get_python_inc_posix_prefix(prefix)
    )


def _get_python_inc_posix_python(plat_specific):
    """
    Assume the executable is in the build directory. The
    pyconfig.h file should be in the same directory. Since
    the build directory may not be the source directory,
    use "srcdir" from the makefile to find the "Include"
    directory.
    """
    if not python_build:
        return
    if plat_specific:
        return _sys_home or project_base
    incdir = os.path.join(get_config_var('srcdir'), 'Include')
    return os.path.normpath(incdir)


def _get_python_inc_from_config(plat_specific, spec_prefix):
    """
    If no prefix was explicitly specified, provide the include
    directory from the config vars. Useful when
    cross-compiling, since the config vars may come from
    the host
    platform Python installation, while the current Python
    executable is from the build platform installation.

    >>> monkeypatch = getfixture('monkeypatch')
    >>> gpifc = _get_python_inc_from_config
    >>> monkeypatch.setitem(gpifc.__globals__, 'get_config_var', str.lower)
    >>> gpifc(False, '/usr/bin/')
    >>> gpifc(False, '')
    >>> gpifc(False, None)
    'includepy'
    >>> gpifc(True, None)
    'confincludepy'
    """
    if spec_prefix is None:
        return get_config_var('CONF' * plat_specific + 'INCLUDEPY')


def _get_python_inc_posix_prefix(prefix):
    implementation = 'pypy' if IS_PYPY else 'python'
    python_dir = implementation + get_python_version() + build_flags
    return os.path.join(prefix, "include", python_dir)


def _get_python_inc_nt(prefix, spec_prefix, plat_specific):
    if python_build:
        # Include both the include and PC dir to ensure we can find
        # pyconfig.h
        return (
            os.path.join(prefix, "include")
            + os.path.pathsep
            + os.path.join(prefix, "PC")
        )
    return os.path.join(prefix, "include")


# allow this behavior to be monkey-patched. Ref pypa/distutils#2.
def _posix_lib(standard_lib, libpython, early_prefix, prefix):
    if standard_lib:
        return libpython
    else:
        return os.path.join(libpython, "site-packages")


def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    """Return the directory containing the Python library (standard or
    site additions).

    If 'plat_specific' is true, return the directory containing
    platform-specific modules, i.e. any module from a non-pure-Python
    module distribution; otherwise, return the platform-shared library
    directory. If 'standard_lib' is true, return the directory
    containing standard Python library modules; otherwise, return the
    directory for site-specific modules.

    If 'prefix' is supplied, use it instead of sys.base_prefix or
    sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
    """

    if IS_PYPY and sys.version_info < (3, 8):
        # PyPy-specific schema
        if prefix is None:
            prefix = PREFIX
        if standard_lib:
            return os.path.join(prefix, "lib-python", sys.version[0])
        return os.path.join(prefix, 'site-packages')

    early_prefix = prefix

    if prefix is None:
        if standard_lib:
            prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
        else:
            prefix = plat_specific and EXEC_PREFIX or PREFIX

    if os.name == "posix":
        if plat_specific or standard_lib:
            # Platform-specific modules (any module from a non-pure-Python
            # module distribution) or standard Python library modules.
            libdir = getattr(sys, "platlibdir", "lib")
        else:
            # Pure Python
            libdir = "lib"
        implementation = 'pypy' if IS_PYPY else 'python'
        libpython = os.path.join(prefix, libdir, implementation + get_python_version())
        return _posix_lib(standard_lib, libpython, early_prefix, prefix)
    elif os.name == "nt":
        if standard_lib:
            return os.path.join(prefix, "Lib")
        else:
            return os.path.join(prefix, "Lib", "site-packages")
    else:
        raise DistutilsPlatformError(
            "I don't know where Python installs its library "
            "on platform '%s'" % os.name
        )


def customize_compiler(compiler):  # noqa: C901
    """Do any platform-specific customization of a CCompiler instance.

    Mainly needed on Unix, so we can plug in the information that
    varies across Unices and is stored in Python's Makefile.
    """
    if compiler.compiler_type == "unix":
        if sys.platform == "darwin":
            # Perform first-time customization of compiler-related
            # config vars on OS X now that we know we need a compiler.
            # This is primarily to support Pythons from binary
            # installers. The kind and paths to build tools on
            # the user system may vary significantly from the system
            # that Python itself was built on. Also the user OS
            # version and build tools may not support the same set
            # of CPU architectures for universal builds.
            global _config_vars
            # Use get_config_var() to ensure _config_vars is initialized.
            if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
                import _osx_support

                _osx_support.customize_compiler(_config_vars)
                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'

        (
            cc,
            cxx,
            cflags,
            ccshared,
            ldshared,
            shlib_suffix,
            ar,
            ar_flags,
        ) = get_config_vars(
            'CC',
            'CXX',
            'CFLAGS',
            'CCSHARED',
            'LDSHARED',
            'SHLIB_SUFFIX',
            'AR',
            'ARFLAGS',
        )

        if 'CC' in os.environ:
            newcc = os.environ['CC']
            if 'LDSHARED' not in os.environ and ldshared.startswith(cc):
                # If CC is overridden, use that as the default
                # command for LDSHARED as well
                ldshared = newcc + ldshared[len(cc) :]
            cc = newcc
        if 'CXX' in os.environ:
            cxx = os.environ['CXX']
        if 'LDSHARED' in os.environ:
            ldshared = os.environ['LDSHARED']
        if 'CPP' in os.environ:
            cpp = os.environ['CPP']
        else:
            cpp = cc + " -E"  # not always
        if 'LDFLAGS' in os.environ:
            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
        if 'CFLAGS' in os.environ:
            cflags = cflags + ' ' + os.environ['CFLAGS']
            ldshared = ldshared + ' ' + os.environ['CFLAGS']
        if 'CPPFLAGS' in os.environ:
            cpp = cpp + ' ' + os.environ['CPPFLAGS']
            cflags = cflags + ' ' + os.environ['CPPFLAGS']
            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
        if 'AR' in os.environ:
            ar = os.environ['AR']
        if 'ARFLAGS' in os.environ:
            archiver = ar + ' ' + os.environ['ARFLAGS']
        else:
            archiver = ar + ' ' + ar_flags

        cc_cmd = cc + ' ' + cflags
        compiler.set_executables(
            preprocessor=cpp,
            compiler=cc_cmd,
            compiler_so=cc_cmd + ' ' + ccshared,
            compiler_cxx=cxx,
            linker_so=ldshared,
            linker_exe=cc,
            archiver=archiver,
        )

        if 'RANLIB' in os.environ and compiler.executables.get('ranlib', None):
            compiler.set_executables(ranlib=os.environ['RANLIB'])

        compiler.shared_lib_extension = shlib_suffix


def get_config_h_filename():
    """Return full pathname of installed pyconfig.h file."""
    if python_build:
        if os.name == "nt":
            inc_dir = os.path.join(_sys_home or project_base, "PC")
        else:
            inc_dir = _sys_home or project_base
        return os.path.join(inc_dir, 'pyconfig.h')
    else:
        return sysconfig.get_config_h_filename()


def get_makefile_filename():
    """Return full pathname of installed Makefile from the Python build."""
    return sysconfig.get_makefile_filename()


def parse_config_h(fp, g=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    return sysconfig.parse_config_h(fp, vars=g)


# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")


def parse_makefile(fn, g=None):  # noqa: C901
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    from distutils.text_file import TextFile

    fp = TextFile(
        fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape"
    )

    if g is None:
        g = {}
    done = {}
    notdone = {}

    while True:
        line = fp.readline()
        if line is None:  # eof
            break
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')

            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v

    # Variables with a 'PY_' prefix in the makefile. These need to
    # be made available without that prefix through sysconfig.
    # Special care is needed to ensure that variable expansion works, even
    # if the expansion uses the name without a prefix.
    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')

    # do variable interpolation here
    while notdone:
        for name in list(notdone):
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]

                elif n in renamed_variables:
                    if name.startswith('PY_') and name[3:] in renamed_variables:
                        item = ""

                    elif 'PY_' + n in notdone:
                        found = False

                    else:
                        item = str(done['PY_' + n])
                else:
                    done[n] = item = ""
                if found:
                    after = value[m.end() :]
                    value = value[: m.start()] + item + after
                    if "$" in after:
                        notdone[name] = value
                    else:
                        try:
                            value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]

                        if name.startswith('PY_') and name[3:] in renamed_variables:

                            name = name[3:]
                            if name not in done:
                                done[name] = value
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]

    fp.close()

    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()

    # save the results in the global dictionary
    g.update(done)
    return g


def expand_makefile_vars(s, vars):
    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
    'string' according to 'vars' (a dictionary mapping variable names to
    values). Variables not present in 'vars' are silently expanded to the
    empty string. The variable values in 'vars' should not contain further
    variable expansions; if 'vars' is the output of 'parse_makefile()',
    you're fine. Returns a variable-expanded version of 's'.
    """

    # This algorithm does multiple expansion, so if vars['foo'] contains
    # "${bar}", it will expand ${foo} to ${bar}, and then expand
    # ${bar}... and so forth. This is fine as long as 'vars' comes from
    # 'parse_makefile()', which takes care of such expansions eagerly,
    # according to make's variable expansion semantics.

    while True:
        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
        if m:
            (beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
        else:
            break
    return s


_config_vars = None


def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform. Generally this includes
    everything needed to build extensions and install both pure modules and
    extensions. On Unix, this means every variable defined in Python's
    installed Makefile; on Windows it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    global _config_vars
    if _config_vars is None:
        _config_vars = sysconfig.get_config_vars().copy()
        py39compat.add_ext_suffix(_config_vars)

    if args:
        vals = []
        for name in args:
            vals.append(_config_vars.get(name))
        return vals
    else:
        return _config_vars


def get_config_var(name):
    """Return the value of a single variable using the dictionary
    returned by 'get_config_vars()'. Equivalent to
    get_config_vars().get(name)
    """
    if name == 'SO':
        import warnings

        warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
    return get_config_vars().get(name)
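A minimal usage sketch for the helpers defined in this module, assuming it is imported as `distutils.sysconfig` in an environment where setuptools' vendored distutils is active; the printed values vary by platform and build.

```python
# Hedged sketch: query build/installation paths and expand Makefile-style variables.
from distutils import sysconfig

print(sysconfig.get_python_version())             # e.g. "3.11"
print(sysconfig.get_python_inc())                 # directory holding Python.h
print(sysconfig.get_python_lib(plat_specific=1))  # site-packages for platform-specific modules
print(sysconfig.get_config_var("EXT_SUFFIX"))     # e.g. ".cpython-311-x86_64-linux-gnu.so"

# expand_makefile_vars substitutes $(VAR) / ${VAR} references from a provided mapping
print(sysconfig.expand_makefile_vars("$(CC) -c $(CFLAGS)", {"CC": "cc", "CFLAGS": "-O2"}))
# -> "cc -c -O2"
```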
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/test_time_augmentation.py
DELETED
@@ -1,307 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import numpy as np
from contextlib import contextmanager
from itertools import count
from typing import List
import torch
from fvcore.transforms import HFlipTransform, NoOpTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel

from detectron2.config import configurable
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import (
    RandomFlip,
    ResizeShortestEdge,
    ResizeTransform,
    apply_augmentations,
)
from detectron2.structures import Boxes, Instances

from .meta_arch import GeneralizedRCNN
from .postprocessing import detector_postprocess
from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image

__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"]


class DatasetMapperTTA:
    """
    Implement test-time augmentation for detection data.
    It is a callable which takes a dataset dict from a detection dataset,
    and returns a list of dataset dicts where the images
    are augmented from the input image by the transformations defined in the config.
    This is used for test-time augmentation.
    """

    @configurable
    def __init__(self, min_sizes: List[int], max_size: int, flip: bool):
        """
        Args:
            min_sizes: list of short-edge size to resize the image to
            max_size: maximum height or width of resized images
            flip: whether to apply flipping augmentation
        """
        self.min_sizes = min_sizes
        self.max_size = max_size
        self.flip = flip

    @classmethod
    def from_config(cls, cfg):
        return {
            "min_sizes": cfg.TEST.AUG.MIN_SIZES,
            "max_size": cfg.TEST.AUG.MAX_SIZE,
            "flip": cfg.TEST.AUG.FLIP,
        }

    def __call__(self, dataset_dict):
        """
        Args:
            dict: a dict in standard model input format. See tutorials for details.

        Returns:
            list[dict]:
                a list of dicts, which contain augmented version of the input image.
                The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``.
                Each dict has field "transforms" which is a TransformList,
                containing the transforms that are used to generate this image.
        """
        numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
        shape = numpy_image.shape
        orig_shape = (dataset_dict["height"], dataset_dict["width"])
        if shape[:2] != orig_shape:
            # It transforms the "original" image in the dataset to the input image
            pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1])
        else:
            pre_tfm = NoOpTransform()

        # Create all combinations of augmentations to use
        aug_candidates = []  # each element is a list[Augmentation]
        for min_size in self.min_sizes:
            resize = ResizeShortestEdge(min_size, self.max_size)
            aug_candidates.append([resize])  # resize only
            if self.flip:
                flip = RandomFlip(prob=1.0)
                aug_candidates.append([resize, flip])  # resize + flip

        # Apply all the augmentations
        ret = []
        for aug in aug_candidates:
            new_image, tfms = apply_augmentations(aug, np.copy(numpy_image))
            torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1)))

            dic = copy.deepcopy(dataset_dict)
            dic["transforms"] = pre_tfm + tfms
            dic["image"] = torch_image
            ret.append(dic)
        return ret


class GeneralizedRCNNWithTTA(nn.Module):
    """
    A GeneralizedRCNN with test-time augmentation enabled.
    Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`.
    """

    def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
        """
        Args:
            cfg (CfgNode):
            model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        super().__init__()
        if isinstance(model, DistributedDataParallel):
            model = model.module
        assert isinstance(
            model, GeneralizedRCNN
        ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model))
        self.cfg = cfg.clone()
        assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet"
        assert (
            not self.cfg.MODEL.LOAD_PROPOSALS
        ), "TTA for pre-computed proposals is not supported yet"

        self.model = model

        if tta_mapper is None:
            tta_mapper = DatasetMapperTTA(cfg)
        self.tta_mapper = tta_mapper
        self.batch_size = batch_size

    @contextmanager
    def _turn_off_roi_heads(self, attrs):
        """
        Open a context where some heads in `model.roi_heads` are temporarily turned off.
        Args:
            attr (list[str]): the attribute in `model.roi_heads` which can be used
                to turn off a specific head, e.g., "mask_on", "keypoint_on".
        """
        roi_heads = self.model.roi_heads
        old = {}
        for attr in attrs:
            try:
                old[attr] = getattr(roi_heads, attr)
            except AttributeError:
                # The head may not be implemented in certain ROIHeads
                pass

        if len(old.keys()) == 0:
            yield
        else:
            for attr in old.keys():
                setattr(roi_heads, attr, False)
            yield
            for attr in old.keys():
                setattr(roi_heads, attr, old[attr])

    def _batch_inference(self, batched_inputs, detected_instances=None):
        """
        Execute inference on a list of inputs,
        using batch size = self.batch_size, instead of the length of the list.

        Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference`
        """
        if detected_instances is None:
            detected_instances = [None] * len(batched_inputs)

        outputs = []
        inputs, instances = [], []
        for idx, input, instance in zip(count(), batched_inputs, detected_instances):
            inputs.append(input)
            instances.append(instance)
            if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
                outputs.extend(
                    self.model.inference(
                        inputs,
                        instances if instances[0] is not None else None,
                        do_postprocess=False,
                    )
                )
                inputs, instances = [], []
        return outputs

    def __call__(self, batched_inputs):
        """
        Same input/output format as :meth:`GeneralizedRCNN.forward`
        """

        def _maybe_read_image(dataset_dict):
            ret = copy.copy(dataset_dict)
            if "image" not in ret:
                image = read_image(ret.pop("file_name"), self.model.input_format)
                image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1)))  # CHW
                ret["image"] = image
            if "height" not in ret and "width" not in ret:
                ret["height"] = image.shape[1]
                ret["width"] = image.shape[2]
            return ret

        return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]

    def _inference_one_image(self, input):
        """
        Args:
            input (dict): one dataset dict with "image" field being a CHW tensor

        Returns:
            dict: one output dict
        """
        orig_shape = (input["height"], input["width"])
        augmented_inputs, tfms = self._get_augmented_inputs(input)
        # Detect boxes from all augmented versions
        with self._turn_off_roi_heads(["mask_on", "keypoint_on"]):
            # temporarily disable roi heads
            all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
        # merge all detected boxes to obtain final predictions for boxes
        merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)

        if self.cfg.MODEL.MASK_ON:
            # Use the detected boxes to obtain masks
            augmented_instances = self._rescale_detected_boxes(
                augmented_inputs, merged_instances, tfms
            )
            # run forward on the detected boxes
            outputs = self._batch_inference(augmented_inputs, augmented_instances)
            # Delete now useless variables to avoid being out of memory
            del augmented_inputs, augmented_instances
            # average the predictions
            merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
            merged_instances = detector_postprocess(merged_instances, *orig_shape)
            return {"instances": merged_instances}
        else:
            return {"instances": merged_instances}

    def _get_augmented_inputs(self, input):
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop("transforms") for x in augmented_inputs]
        return augmented_inputs, tfms

    def _get_augmented_boxes(self, augmented_inputs, tfms):
        # 1: forward with all augmented images
        outputs = self._batch_inference(augmented_inputs)
        # 2: union the results
        all_boxes = []
        all_scores = []
        all_classes = []
        for output, tfm in zip(outputs, tfms):
            # Need to inverse the transforms on boxes, to obtain results on original image
            pred_boxes = output.pred_boxes.tensor
            original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
            all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))

            all_scores.extend(output.scores)
            all_classes.extend(output.pred_classes)
        all_boxes = torch.cat(all_boxes, dim=0)
        return all_boxes, all_scores, all_classes

    def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
        # select from the union of all results
        num_boxes = len(all_boxes)
        num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
        # +1 because fast_rcnn_inference expects background scores as well
        all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)
        for idx, cls, score in zip(count(), all_classes, all_scores):
            all_scores_2d[idx, cls] = score

        merged_instances, _ = fast_rcnn_inference_single_image(
            all_boxes,
            all_scores_2d,
            shape_hw,
            1e-8,
            self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
            self.cfg.TEST.DETECTIONS_PER_IMAGE,
        )

        return merged_instances

    def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms):
        augmented_instances = []
        for input, tfm in zip(augmented_inputs, tfms):
            # Transform the target box to the augmented image's coordinate space
            pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy()
            pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes))

            aug_instances = Instances(
                image_size=input["image"].shape[1:3],
                pred_boxes=Boxes(pred_boxes),
                pred_classes=merged_instances.pred_classes,
                scores=merged_instances.scores,
            )
            augmented_instances.append(aug_instances)
        return augmented_instances

    def _reduce_pred_masks(self, outputs, tfms):
        # Should apply inverse transforms on masks.
        # We assume only resize & flip are used. pred_masks is a scale-invariant
        # representation, so we handle flip specially
        for output, tfm in zip(outputs, tfms):
            if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
                output.pred_masks = output.pred_masks.flip(dims=[3])
        all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0)
        avg_pred_masks = torch.mean(all_pred_masks, dim=0)
        return avg_pred_masks
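A hedged sketch of wrapping a trained detector with the TTA class above, assuming a standard detectron2 installation and a GeneralizedRCNN-based config; the checkpoint path and input image are placeholders. Passing a dict with only `file_name` works because `__call__` reads the image internally via `_maybe_read_image`.

```python
# Hedged sketch: test-time-augmented inference with GeneralizedRCNNWithTTA.
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.modeling import GeneralizedRCNNWithTTA

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = "model_final.pth"  # placeholder checkpoint path

predictor = DefaultPredictor(cfg)                         # builds and loads the plain model
tta_model = GeneralizedRCNNWithTTA(cfg, predictor.model)  # augments per cfg.TEST.AUG settings

outputs = tta_model([{"file_name": "input.jpg"}])  # list of dataset dicts in, list of dicts out
print(outputs[0]["instances"])
```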
spaces/Banbri/zcvzcv/src/app/interface/page/index.tsx
DELETED
@@ -1,55 +0,0 @@
import { allLayoutAspectRatios, allLayouts } from "@/app/layouts"
import { useStore } from "@/app/store"
import { cn } from "@/lib/utils"
import { useEffect, useRef } from "react"

export function Page({ page }: { page: number }) {
  const zoomLevel = useStore(state => state.zoomLevel)
  const layouts = useStore(state => state.layouts)
  // const prompt = useStore(state => state.prompt)

  const LayoutElement = (allLayouts as any)[layouts[page]]
  const aspectRatio = ((allLayoutAspectRatios as any)[layouts[page]] as string) || "aspect-[250/297]"
  /*
  const [canLoad, setCanLoad] = useState(false)
  useEffect(() => {
    if (prompt?.length) {
      setCanLoad(false)
      setTimeout(() => {
        setCanLoad(true)
      }, page * 4000)
    }
  }, [prompt])
  */

  const setPage = useStore(state => state.setPage)
  const pageRef = useRef<HTMLDivElement>(null)

  useEffect(() => {
    const element = pageRef.current
    if (!element) { return }
    setPage(element)
  }, [pageRef.current])

  return (
    <div
      ref={pageRef}
      className={cn(
        `w-full`,
        aspectRatio,
        `transition-all duration-100 ease-in-out`,
        `border border-stone-200`,
        `shadow-2xl`,
        `print:shadow-none`,
        `print:border-0`,
        `print:width-screen`
      )}
      style={{
        padding: `${Math.round((zoomLevel / 100) * 16)}px`
        // marginLeft: `${zoomLevel > 100 ? `100`}`
      }}
    >
      <LayoutElement />
    </div>
  )
}
spaces/Benson/text-generation/Examples/Descargar Apk X Aire Behringer.md
DELETED
@@ -1,61 +0,0 @@
<h1>Cómo descargar APK X AIR Behringer para dispositivos Android</h1>
<p>Si usted es un músico, ingeniero de sonido, o intérprete en vivo que utiliza un mezclador digital BEHRINGER X AIR, es posible que desee descargar APK X AIR Behringer para su dispositivo Android. Esta aplicación le permite controlar todas las funciones de mezcla, procesamiento y efectos de su mezclador desde su tableta o teléfono inteligente. En este artículo, te mostraré cómo descargar, instalar y usar esta aplicación, así como algunos consejos y trucos para sacarle el máximo partido. </p>
<h2>Beneficios de APK X AIR Behringer</h2>
<p>APK X AIR Behringer es una aplicación gratuita que ofrece un control completo para los mezcladores X18, XR18, XR16 y XR12. La interfaz de usuario es configurable para acceso simplificado o edición de nivel experto (S/E), para mezclar 18 canales de entrada a 12 buses. Control también se proporciona para los cuatro procesadores de efectos estéreo internos - todos los cuales cuentan con el aclamado motor de procesamiento de audio BEHRINGER X32. </p>
<h2>descargar apk x aire behringer</h2><br /><p><b><b>Download</b> ——— <a href="https://bltlly.com/2v6LG8">https://bltlly.com/2v6LG8</a></b></p><br /><br />
<p>La aplicación proporciona la movilidad para ir donde usted necesita para obtener el máximo provecho de su sistema, lo que le permite ajustar la mezcla de la casa desde cualquier asiento o mezclas de monitor de ajuste fino desde el escenario. Dado que todos los mezcladores BEHRINGER X AIR cuentan con puntos de acceso internos, la configuración de la aplicación no podría ser más simple - solo seleccione la red X AIR y conecte su dispositivo Android a ella. Al abrir la aplicación, su mezclador X AIR se mostrará como un dispositivo controlable, e incluso le permitirá bloquear su dispositivo Android a ese mezclador X AIR específico. También puede ejecutar la aplicación en modo de demostración sin conectarse a su mezclador. </p>
<p>No se requiere hardware adicional, por lo que la aplicación es la solución ideal para aplicaciones de mezcla remota sin problemas. Ya sea que lo utilice para espectáculos en vivo, grabaciones de estudio, ensayos, podcasts o seminarios web, APK X AIR Behringer puede ayudarle a lograr una calidad de sonido profesional con facilidad y comodidad. </p>
<h2>Requisitos para APK X AIR Behringer</h2>
<p>Para usar APK X AIR Behringer, necesita lo siguiente:</p>
<ul>
<li>Un mezclador digital BEHRINGER X AIR (X18, XR18, XR16 o XR12) con firmware versión 1.15 o superior</li>
<li>Una red Wi-Fi que conecta tu dispositivo y tu mezclador</li>
<li>Una conexión a Internet para descargar la aplicación</li>
</ul>
<h2>Pasos para descargar APK X AIR Behringer</h2>
<p>Aquí están los pasos para descargar APK X AIR Behringer para su dispositivo Android:</p>
<h3>Paso 1: Encontrar el enlace de descarga oficial para APK X AIR Behringer</h3>
<p>La aplicación no está disponible en la Google Play Store, por lo que necesita encontrar el enlace oficial de descarga desde el sitio web de BEHRINGER. Puede escanear el código QR en la página del producto o ir a esta URL: [https://www.behringer.com/behringer/product?modelCode=P0BI8]</p>
<h3>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
<p>Dado que está descargando la aplicación desde una fuente de terceros, debe habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Es posible que vea un mensaje de advertencia que dice que instalar desde fuentes desconocidas puede dañar su dispositivo, pero puede ignorarlo siempre y cuando confíe en el origen de la aplicación. </p>
<h3>Paso 3: Descargar e instalar el archivo APK</h3>
<p>Una vez que haya habilitado fuentes desconocidas, puede descargar el archivo APK desde el enlace que encontró en el paso 1. El tamaño del archivo es de aproximadamente 5.6 MB y debería tomar unos segundos para descargar dependiendo de su velocidad de Internet. Una vez finalizada la descarga, abre el archivo y sigue las instrucciones para instalar la aplicación en tu dispositivo. Es posible que necesite conceder algunos permisos a la aplicación, como el acceso a su red Wi-Fi y almacenamiento. </p>
|
24 |
-
<p>Una vez que haya habilitado fuentes desconocidas, puede descargar el archivo APK desde el enlace que encontró en el paso 1. El tamaño del archivo es de aproximadamente 5.6 MB y debería tomar unos segundos para descargar dependiendo de su velocidad de Internet. Una vez finalizada la descarga, abre el archivo y sigue las instrucciones para instalar la aplicación en tu dispositivo. Es posible que necesite conceder algunos permisos a la aplicación, como el acceso a su red Wi-Fi y almacenamiento. </p>
|
25 |
-
<h3>Paso 4: Conecte su dispositivo a su mezclador X AIR a través de Wi-Fi</h3>
|
26 |
-
|
27 |
-
<h3>Paso 5: Iniciar la aplicación y disfrutar de sus características</h3>
|
28 |
-
<p>Ahora que ha instalado y conectado la aplicación, puede iniciarla y comenzar a controlar su mezclador de forma remota. Verá una lista de dispositivos disponibles en la pantalla de inicio de la aplicación. Toque en el que coincida con el nombre de la red del mezclador y el número de modelo. A continuación, verá un mensaje de confirmación que dice "Conectado". Ahora puede acceder a todas las funciones de mezcla, procesamiento y efectos de su mezclador desde su dispositivo. También puede cambiar entre el modo S/E, la superposición RTA, el modo de envío de bus único, la función AutoMixing y las instantáneas internas desde el menú de la aplicación. </p>
|
29 |
-
<p></p>
|
30 |
-
<h2> Consejos y trucos para el uso de APK X AIR Behringer</h2>
|
31 |
-
<p>Para optimizar tu experiencia con APK X AIR Behringer, aquí hay algunos consejos y trucos que puedes probar:</p>
|
32 |
-
<h3>Consejo 1: Utilice el modo S/ E para cambiar entre la edición de nivel simplificado y experto</h3>
|
33 |
-
<p>La aplicación tiene dos modos de operación: simplificado (S) y experto (E). El modo S proporciona una interfaz optimizada que le permite ajustar solo los parámetros más esenciales de cada canal, tales como ganancia, silenciar, solo, pan, EQ, dinámica y enviar niveles. El modo E proporciona una interfaz con todas las funciones que le permite acceder a todos los parámetros de cada canal, tales como configuración de preamplificador, configuración de puerta, configuración de compresor, configuración de limitador, configuración de retardo, etc. Puede cambiar entre los modos S y E pulsando el botón S/ E en la esquina superior izquierda de la aplicación. </p>
|
34 |
-
<h3>Consejo 2: Utilice la superposición RTA para ajustar la configuración de EQ</h3>
|
35 |
-
|
36 |
-
<h3>Consejo 3: Utilice el modo de envío de bus único para el monitoreo personal</h3>
|
37 |
-
<p>La aplicación tiene un solo modo de envío de bus que le permite controlar solo un nivel de envío de bus por canal a la vez. Esto es útil para aplicaciones de monitoreo personal donde cada músico o intérprete quiere ajustar su propia mezcla de monitor sin afectar a los demás. Para usar este modo, toque en el botón de envío de bus único en la esquina superior derecha de la aplicación y seleccione un bus de la lista. A continuación, verá un fader azul que representa el nivel de envío de ese bus para cada canal. Puede arrastrarlo hacia arriba o hacia abajo para ajustar el nivel de envío. También puede tocar en los botones de silencio o solo para silenciar o solo el bus. </p>
|
38 |
-
<h3>Consejo 4: Utilice la función de Auto-Mixing para conferencias o discusiones de panel</h3>
|
39 |
-
<p>La aplicación tiene una función AutoMixing que ajusta automáticamente la ganancia de varios micrófonos en tiempo real para reducir el ruido de fondo y la retroalimentación. Esto es útil para conferencias o mesas redondas donde varios oradores están hablando al mismo tiempo. Para utilizar esta función, toque en el botón de Auto Mixing en la esquina superior derecha de la aplicación y seleccione un canal de entrada de la lista. A continuación, verá un indicador verde que muestra el estado de AutoMixing de ese canal. También puede ajustar el umbral, el peso y los parámetros de destino del algoritmo AutoMixing. </p>
|
40 |
-
<h3>Consejo 5: Utilice las instantáneas internas para guardar y recuperar la configuración</h3>
|
41 |
-
<p>La aplicación tiene una característica de instantánea interna que le permite guardar y recuperar la configuración del mezclador en cualquier momento. Esto es útil para cambiar entre diferentes escenas o ajustes preestablecidos rápida y fácilmente. Para utilizar esta función, toque en el botón de instantánea en la esquina superior derecha de la aplicación y seleccione una ranura de instantánea de la lista. A continuación, puede nombrar, guardar, cargar o eliminar su instantánea. También puede usar la función de bloqueo para evitar cambios accidentales en su instantánea. </p>
|
42 |
-
<h2>Conclusión</h2>
|
43 |
-
|
44 |
-
<p>Si tiene alguna pregunta o comentario sobre APK X AIR Behringer, no dude en ponerse en contacto con el equipo de atención al cliente de BEHRINGER o visitar su sitio web para obtener más información. También puede consultar su canal de YouTube para obtener tutoriales y demostraciones de sus productos. </p>
|
45 |
-
<p>Gracias por leer este artículo y espero que te haya sido útil. Si lo hizo, por favor compartirlo con sus amigos y colegas que podrían estar interesados en APK X AIR Behringer. Y no te olvides de descargar la aplicación y probarlo por ti mismo! </p>
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
<p>Aquí hay algunas preguntas y respuestas comunes sobre APK X AIR Behringer:</p>
|
48 |
-
<ol>
|
49 |
-
<li><b>¿Es APK X AIR Behringer compatible con otros productos BEHRINGER? </b></li>
|
50 |
-
<p>APK X AIR Behringer está diseñado específicamente para los mezcladores X18, XR18, XR16 y XR12. No es compatible con otros productos BEHRINGER, como el X32 o X AIR EDIT.</p>
|
51 |
-
<li><b>¿Puedo usar APK X AIR Behringer con múltiples dispositivos al mismo tiempo? </b></li>
|
52 |
-
<p>Sí, puede usar APK X AIR Behringer con varios dispositivos al mismo tiempo, siempre y cuando estén conectados a la misma red Wi-Fi que su mezclador. Sin embargo, debe tener cuidado de no hacer cambios conflictivos en la configuración del mezclador desde diferentes dispositivos, ya que esto puede causar resultados inesperados. </p>
|
53 |
-
<li><b>¿Puedo usar APK X AIR Behringer sin conexión? </b></li>
|
54 |
-
<p>No, no se puede utilizar APK X AIR Behringer fuera de línea. Necesita una conexión a Internet para descargar la aplicación y una conexión Wi-Fi para conectarse a su mezclador. </p>
|
55 |
-
<li><b>¿Cómo puedo actualizar APK X AIR Behringer? </b></li>
|
56 |
-
<p>Para actualizar APK X AIR Behringer, es necesario comprobar si hay nuevas versiones en el sitio web de BEHRINGER y descargarlos manualmente. La aplicación no tiene una función de actualización automática. </p>
|
57 |
-
<li><b>¿Cómo puedo desinstalar APK X AIR Behringer? </b></li>
|
58 |
-
<p>Para desinstalar APK X AIR Behringer, es necesario ir a Configuración > Aplicaciones en el dispositivo y encontrar la aplicación de la lista. A continuación, toque en él y seleccione desinstalar.</p>
|
59 |
-
</ol></p> 64aa2da5cf<br />
|
60 |
-
<br />
|
61 |
-
<br />
spaces/Benson/text-generation/Examples/Descargar Azulejos De Piano 2 Apk Mod.md
DELETED
@@ -1,50 +0,0 @@

<h1>Download Piano Tiles 2 APK Mod: A Fun and Challenging Music Game</h1>
<p>Do you like music and rhythm games? Do you want to test your reflexes and coordination skills? If so, then you should try Piano Tiles 2, one of the most popular and addictive music games in the world. And if you want to enjoy the game with more features and benefits, then you should download Piano Tiles 2 APK Mod, a modified version of the game that gives you unlimited access to all songs, coins, diamonds, and more. In this article, we will tell you everything you need to know about Piano Tiles 2 and how to download and install Piano Tiles 2 APK Mod on your Android device.</p>
<h2>What is Piano Tiles 2?</h2>
<p>Piano Tiles 2 is a sequel to the original Piano Tiles game, also known as Don't Tap the White Tile. It is a simple but challenging game where you have to tap the black tiles that appear on the screen in sync with the music. The game has hundreds of songs from different genres, such as classical, pop, rock, jazz, and more. You can also compete with other players from around the world and see who can score higher on the leaderboard.</p>
<h2>download piano tiles 2 apk mod</h2><br /><p><b><b>DOWNLOAD</b> ✪ <a href="https://bltlly.com/2v6LLo">https://bltlly.com/2v6LLo</a></b></p><br /><br />
<h3>Features of Piano Tiles 2</h3>
<p>Piano Tiles 2 has many features that make it a fun and exciting game to play. Some of them are:</p>
<ul>
<li><b>High-quality sound and graphics:</b> The game has stunning graphics and smooth animations that create a realistic piano-playing experience. The sound quality is also excellent, with clear, crisp notes that match the songs perfectly.</li>
<li><b>Various songs and levels:</b> The game has a large collection of songs from different genres and eras, such as Mozart, Beethoven, Chopin, Taylor Swift, Ed Sheeran, Bruno Mars, and more. You can choose from different difficulty levels, ranging from easy to master.</li>

<li><b>Achievements and rewards:</b> The game has many achievements that you can unlock by completing certain tasks or reaching certain milestones. You can also earn coins and diamonds by playing or watching ads. You can use these coins to buy new songs, skins, boosters, and more.</li>
</ul>
<h4>How to play Piano Tiles 2</h4>
<p>The gameplay of Piano Tiles 2 is very simple and intuitive. All you have to do is tap the black tiles that appear on the screen in sync with the music. You have to avoid tapping the white tiles or missing the black tiles, otherwise you lose the game. The faster you tap, the higher your score will be. You can also use boosters such as double coins, auto-play, or revive to help you in difficult situations.</p>
<h4>Why download Piano Tiles 2 APK Mod?</h4>
<p>Piano Tiles 2 APK Mod is a modified version of the original game that gives you unlimited access to all of the game's features and benefits. Some of the advantages of downloading Piano Tiles 2 APK Mod are:</p>
<ul>
<li><b>All songs unlocked:</b> You can play any song you want without having to spend coins or diamonds or wait for them to unlock.</li>
<li><b>Unlimited coins and diamonds:</b> and are familiar with, as this will help you tap the tiles more accurately and enjoy the music more.</p>
<h4>Use the boosters wisely</h4>
<p>The game has several boosters that can help you in different ways. Some of them are:</p>
<ul>
<li><b>Double coins:</b> This booster doubles the amount of coins you earn in a game. You can use it to buy more songs, skins, or other boosters.</li>
<li><b>Auto-play:</b> This booster makes the game play itself for a few seconds. You can use it to rest your fingers or avoid difficult tiles.</li>
<li><b>Revive:</b> This booster lets you continue the game after making a mistake. You can use it to save your progress or improve your score.</li>
</ul>

<h4>Practice and improve your skills</h4>
<p>The best way to get better at Piano Tiles 2 is to practice and improve your skills. You should play the game regularly and try different songs and levels. You should also pay attention to the rhythm and timing of the tiles, as well as the speed and direction of the sliding tiles. You should also try to tap the tiles with both hands, as this will increase your efficiency and coordination. The more you play, the more you will learn and master the game.</p>
<p></p>
<h3>Conclusion</h3>
<p>Piano Tiles 2 is a fun and challenging music game that will test your reflexes and coordination skills. It has hundreds of songs from different genres and difficulty levels, as well as high-quality sound and graphics. You can also compete with other players from around the world and see who can play faster and better. If you want to enjoy the game with more features and benefits, you should download Piano Tiles 2 APK Mod, a modified version of the game that gives you unlimited access to all songs, coins, diamonds, and more. You can download and install Piano Tiles 2 APK Mod on your Android device by following the simple steps we have provided in this article. We hope you have fun playing Piano Tiles 2!</p>
<h3>Frequently asked questions</h3>
<p>Here are some frequently asked questions about Piano Tiles 2 and Piano Tiles 2 APK Mod:</p>
<ol>
<li><b>Is Piano Tiles 2 free to play?</b><br>
Yes, Piano Tiles 2 is free, but it has some in-app purchases that require real money. You can also watch ads to earn coins or diamonds.</li>
<li><b>Is Piano Tiles 2 APK Mod safe to use?</b><br>
Yes, Piano Tiles 2 APK Mod is safe to use, as long as you download it from a trusted source. We have tested it on our devices and found no viruses or malware.</li>
<li><b>Can I play Piano Tiles 2 offline?</b><br>

<li><b>Can I update Piano Tiles 2 APK Mod?</b><br>
Yes, you can update Piano Tiles 2 APK Mod, but you may lose some of the modded features if you do. We recommend checking for updates from the same source where you downloaded the APK file.</li>
<li><b>Can I sync my progress across devices?</b><br>
Yes, you can sync your progress across devices by signing in with your Facebook account. However, this may not work with Piano Tiles 2 APK Mod, as it can conflict with the original game data.</li>
</ol></p> 64aa2da5cf<br />
<br />
<br />
spaces/BetterAPI/BetterChat_new/src/app.html
DELETED
@@ -1,45 +0,0 @@
<!DOCTYPE html>
<html lang="en" class="h-full">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%sveltekit.assets%/favicon.png" />
    <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
    <title>HuggingChat</title>
    <script>
      if (
        localStorage.theme === "dark" ||
        (!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches)
      ) {
        document.documentElement.classList.add("dark");
      }

      // For some reason, Sveltekit doesn't let us load env variables from .env here, so we load it from hooks.server.ts
      window.gaId = "%gaId%";
    </script>
    %sveltekit.head%
  </head>
  <body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900">
    <div class="contents h-full">%sveltekit.body%</div>

    <!-- Google Tag Manager -->
    <script>
      if (window.gaId) {
        const script = document.createElement("script");
        script.src = "https://www.googletagmanager.com/gtag/js?id=" + window.gaId;
        script.async = true;
        document.head.appendChild(script);

        window.dataLayer = window.dataLayer || [];
        function gtag() {
          dataLayer.push(arguments);
        }
        gtag("js", new Date());
        /// ^ See https://developers.google.com/tag-platform/gtagjs/install
        gtag("config", window.gaId);
        gtag("consent", "default", { ad_storage: "denied", analytics_storage: "denied" });
        /// ^ See https://developers.google.com/tag-platform/gtagjs/reference#consent
        /// TODO: ask the user for their consent and update this with gtag('consent', 'update')
      }
    </script>
  </body>
</html>
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/params.py
DELETED
@@ -1,167 +0,0 @@
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import re

import jmespath
from botocore import xform_name

from ..exceptions import ResourceLoadException

INDEX_RE = re.compile(r'\[(.*)\]$')


def get_data_member(parent, path):
    """
    Get a data member from a parent using a JMESPath search query,
    loading the parent if required. If the parent cannot be loaded
    and no data is present then an exception is raised.

    :type parent: ServiceResource
    :param parent: The resource instance to which contains data we
                   are interested in.
    :type path: string
    :param path: The JMESPath expression to query
    :raises ResourceLoadException: When no data is present and the
                                   resource cannot be loaded.
    :returns: The queried data or ``None``.
    """
    # Ensure the parent has its data loaded, if possible.
    if parent.meta.data is None:
        if hasattr(parent, 'load'):
            parent.load()
        else:
            raise ResourceLoadException(
                f'{parent.__class__.__name__} has no load method!'
            )

    return jmespath.search(path, parent.meta.data)


def create_request_parameters(parent, request_model, params=None, index=None):
    """
    Handle request parameters that can be filled in from identifiers,
    resource data members or constants.

    By passing ``params``, you can invoke this method multiple times and
    build up a parameter dict over time, which is particularly useful
    for reverse JMESPath expressions that append to lists.

    :type parent: ServiceResource
    :param parent: The resource instance to which this action is attached.
    :type request_model: :py:class:`~boto3.resources.model.Request`
    :param request_model: The action request model.
    :type params: dict
    :param params: If set, then add to this existing dict. It is both
                   edited in-place and returned.
    :type index: int
    :param index: The position of an item within a list
    :rtype: dict
    :return: Pre-filled parameters to be sent to the request operation.
    """
    if params is None:
        params = {}

    for param in request_model.params:
        source = param.source
        target = param.target

        if source == 'identifier':
            # Resource identifier, e.g. queue.url
            value = getattr(parent, xform_name(param.name))
        elif source == 'data':
            # If this is a data member then it may incur a load
            # action before returning the value.
            value = get_data_member(parent, param.path)
        elif source in ['string', 'integer', 'boolean']:
            # These are hard-coded values in the definition
            value = param.value
        elif source == 'input':
            # This is provided by the user, so ignore it here
            continue
        else:
            raise NotImplementedError(f'Unsupported source type: {source}')

        build_param_structure(params, target, value, index)

    return params


def build_param_structure(params, target, value, index=None):
    """
    This method provides a basic reverse JMESPath implementation that
    lets you go from a JMESPath-like string to a possibly deeply nested
    object. The ``params`` are mutated in-place, so subsequent calls
    can modify the same element by its index.

    >>> build_param_structure(params, 'test[0]', 1)
    >>> print(params)
    {'test': [1]}

    >>> build_param_structure(params, 'foo.bar[0].baz', 'hello world')
    >>> print(params)
    {'test': [1], 'foo': {'bar': [{'baz': 'hello, world'}]}}

    """
    pos = params
    parts = target.split('.')

    # First, split into parts like 'foo', 'bar[0]', 'baz' and process
    # each piece. It can either be a list or a dict, depending on if
    # an index like `[0]` is present. We detect this via a regular
    # expression, and keep track of where we are in params via the
    # pos variable, walking down to the last item. Once there, we
    # set the value.
    for i, part in enumerate(parts):
        # Is it indexing an array?
        result = INDEX_RE.search(part)
        if result:
            if result.group(1):
                if result.group(1) == '*':
                    part = part[:-3]
                else:
                    # We have an explicit index
                    index = int(result.group(1))
                    part = part[: -len(str(index) + '[]')]
            else:
                # Index will be set after we know the proper part
                # name and that it's a list instance.
                index = None
                part = part[:-2]

            if part not in pos or not isinstance(pos[part], list):
                pos[part] = []

            # This means we should append, e.g. 'foo[]'
            if index is None:
                index = len(pos[part])

            while len(pos[part]) <= index:
                # Assume it's a dict until we set the final value below
                pos[part].append({})

            # Last item? Set the value, otherwise set the new position
            if i == len(parts) - 1:
                pos[part][index] = value
            else:
                # The new pos is the *item* in the array, not the array!
                pos = pos[part][index]
        else:
            if part not in pos:
                pos[part] = {}

            # Last item? Set the value, otherwise set the new position
            if i == len(parts) - 1:
                pos[part] = value
            else:
                pos = pos[part]
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py
DELETED
@@ -1,238 +0,0 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import List, Tuple, Union

# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
# fmt: off
jp2_char_context = (
(0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),
|
34 |
-
(2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4),
|
35 |
-
(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2),
|
36 |
-
(0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 4, 4, 3, 5, 3, 5, 1, 5, 3, 4, 3, 4, 4, 3, 4, 3, 3, 4, 3, 5, 4, 4, 3, 5, 5, 3, 5, 5, 5, 3, 5, 5, 3, 4, 5, 5, 3, 1, 3, 2, 0, 3, 4, 0, 4, 2, 0, 4, 2, 1, 5, 3, 2, 3, 5, 0, 4, 0, 2, 0, 5, 4, 4, 5, 4, 5, 0, 4, 0, 0, 4, 4),
|
37 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
|
38 |
-
(0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 5, 4, 3, 3, 3, 3, 4, 3, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 4, 4, 4, 4, 5, 3, 4, 4, 3, 4, 5, 5, 4, 5, 5, 1, 4, 5, 4, 3, 0, 3, 3, 1, 3, 3, 0, 4, 4, 0, 3, 3, 1, 5, 3, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 0, 4, 1, 1, 3, 4),
|
39 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
|
40 |
-
(0, 4, 0, 3, 0, 3, 0, 4, 0, 3, 4, 4, 3, 2, 2, 1, 2, 1, 3, 1, 3, 3, 3, 3, 3, 4, 3, 1, 3, 3, 5, 3, 3, 0, 4, 3, 0, 5, 4, 3, 3, 5, 4, 4, 3, 4, 4, 5, 0, 1, 2, 0, 1, 2, 0, 2, 2, 0, 1, 0, 0, 5, 2, 2, 1, 4, 0, 3, 0, 1, 0, 4, 4, 3, 5, 4, 3, 0, 2, 1, 0, 4, 3),
|
41 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
|
42 |
-
(0, 3, 0, 5, 0, 4, 0, 2, 1, 4, 4, 2, 4, 1, 4, 2, 4, 2, 4, 3, 3, 3, 4, 3, 3, 3, 3, 1, 4, 2, 3, 3, 3, 1, 4, 4, 1, 1, 1, 4, 3, 3, 2, 0, 2, 4, 3, 2, 0, 3, 3, 0, 3, 1, 1, 0, 0, 0, 3, 3, 0, 4, 2, 2, 3, 4, 0, 4, 0, 3, 0, 4, 4, 5, 3, 4, 4, 0, 3, 0, 0, 1, 4),
|
43 |
-
(1, 4, 0, 4, 0, 4, 0, 4, 0, 3, 5, 4, 4, 3, 4, 3, 5, 4, 3, 3, 4, 3, 5, 4, 4, 4, 4, 3, 4, 2, 4, 3, 3, 1, 5, 4, 3, 2, 4, 5, 4, 5, 5, 4, 4, 5, 4, 4, 0, 3, 2, 2, 3, 3, 0, 4, 3, 1, 3, 2, 1, 4, 3, 3, 4, 5, 0, 3, 0, 2, 0, 4, 5, 5, 4, 5, 4, 0, 4, 0, 0, 5, 4),
|
44 |
-
(0, 5, 0, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 3, 4, 0, 4, 4, 4, 3, 4, 3, 4, 3, 3, 1, 4, 2, 4, 3, 4, 0, 5, 4, 1, 4, 5, 4, 4, 5, 3, 2, 4, 3, 4, 3, 2, 4, 1, 3, 3, 3, 2, 3, 2, 0, 4, 3, 3, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 4, 3, 0, 4, 1, 0, 1, 3),
|
45 |
-
(0, 3, 1, 4, 0, 3, 0, 2, 0, 3, 4, 4, 3, 1, 4, 2, 3, 3, 4, 3, 4, 3, 4, 3, 4, 4, 3, 2, 3, 1, 5, 4, 4, 1, 4, 4, 3, 5, 4, 4, 3, 5, 5, 4, 3, 4, 4, 3, 1, 2, 3, 1, 2, 2, 0, 3, 2, 0, 3, 1, 0, 5, 3, 3, 3, 4, 3, 3, 3, 3, 4, 4, 4, 4, 5, 4, 2, 0, 3, 3, 2, 4, 3),
|
46 |
-
(0, 2, 0, 3, 0, 1, 0, 1, 0, 0, 3, 2, 0, 0, 2, 0, 1, 0, 2, 1, 3, 3, 3, 1, 2, 3, 1, 0, 1, 0, 4, 2, 1, 1, 3, 3, 0, 4, 3, 3, 1, 4, 3, 3, 0, 3, 3, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 4, 1, 0, 2, 3, 2, 2, 2, 1, 3, 3, 3, 4, 4, 3, 2, 0, 3, 1, 0, 3, 3),
|
47 |
-
(0, 4, 0, 4, 0, 3, 0, 3, 0, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 4, 3, 4, 3, 3, 2, 4, 3, 4, 5, 4, 1, 4, 5, 3, 5, 4, 5, 3, 5, 4, 0, 3, 5, 5, 3, 1, 3, 3, 2, 2, 3, 0, 3, 4, 1, 3, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 5, 3, 0, 4, 1, 0, 3, 4),
|
48 |
-
(0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 2, 2, 1, 0, 1, 0, 0, 0, 3, 0, 3, 0, 3, 0, 1, 3, 1, 0, 3, 1, 3, 3, 3, 1, 3, 3, 3, 0, 1, 3, 1, 3, 4, 0, 0, 3, 1, 1, 0, 3, 2, 0, 0, 0, 0, 1, 3, 0, 1, 0, 0, 3, 3, 2, 0, 3, 0, 0, 0, 0, 0, 3, 4, 3, 4, 3, 3, 0, 3, 0, 0, 2, 3),
|
49 |
-
(2, 3, 0, 3, 0, 2, 0, 1, 0, 3, 3, 4, 3, 1, 3, 1, 1, 1, 3, 1, 4, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 4, 3, 1, 4, 3, 2, 5, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 0, 2, 1, 1, 3, 2, 0, 1, 2, 0, 0, 1, 0, 4, 1, 3, 3, 3, 0, 3, 0, 1, 0, 4, 4, 4, 5, 5, 3, 0, 2, 0, 0, 4, 4),
|
50 |
-
(0, 2, 0, 1, 0, 3, 1, 3, 0, 2, 3, 3, 3, 0, 3, 1, 0, 0, 3, 0, 3, 2, 3, 1, 3, 2, 1, 1, 0, 0, 4, 2, 1, 0, 2, 3, 1, 4, 3, 2, 0, 4, 4, 3, 1, 3, 1, 3, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 1, 1, 1, 2, 0, 3, 0, 0, 0, 3, 4, 2, 4, 3, 2, 0, 1, 0, 0, 3, 3),
|
51 |
-
(0, 1, 0, 4, 0, 5, 0, 4, 0, 2, 4, 4, 2, 3, 3, 2, 3, 3, 5, 3, 3, 3, 4, 3, 4, 2, 3, 0, 4, 3, 3, 3, 4, 1, 4, 3, 2, 1, 5, 5, 3, 4, 5, 1, 3, 5, 4, 2, 0, 3, 3, 0, 1, 3, 0, 4, 2, 0, 1, 3, 1, 4, 3, 3, 3, 3, 0, 3, 0, 1, 0, 3, 4, 4, 4, 5, 5, 0, 3, 0, 1, 4, 5),
|
52 |
-
(0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 3, 1, 3, 0, 4, 0, 1, 1, 3, 0, 3, 4, 3, 2, 3, 1, 0, 3, 3, 2, 3, 1, 3, 0, 2, 3, 0, 2, 1, 4, 1, 2, 2, 0, 0, 3, 3, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 2, 2, 0, 3, 2, 1, 3, 3, 0, 2, 0, 2, 0, 0, 3, 3, 1, 2, 4, 0, 3, 0, 2, 2, 3),
|
53 |
-
(2, 4, 0, 5, 0, 4, 0, 4, 0, 2, 4, 4, 4, 3, 4, 3, 3, 3, 1, 2, 4, 3, 4, 3, 4, 4, 5, 0, 3, 3, 3, 3, 2, 0, 4, 3, 1, 4, 3, 4, 1, 4, 4, 3, 3, 4, 4, 3, 1, 2, 3, 0, 4, 2, 0, 4, 1, 0, 3, 3, 0, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 3, 5, 3, 4, 5, 2, 0, 3, 0, 0, 4, 5),
|
54 |
-
(0, 3, 0, 4, 0, 1, 0, 1, 0, 1, 3, 2, 2, 1, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 0, 0, 4, 0, 3, 1, 0, 2, 1, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 2, 2, 3, 1, 0, 3, 0, 0, 0, 1, 4, 4, 4, 3, 0, 0, 4, 0, 0, 1, 4),
|
55 |
-
(1, 4, 1, 5, 0, 3, 0, 3, 0, 4, 5, 4, 4, 3, 5, 3, 3, 4, 4, 3, 4, 1, 3, 3, 3, 3, 2, 1, 4, 1, 5, 4, 3, 1, 4, 4, 3, 5, 4, 4, 3, 5, 4, 3, 3, 4, 4, 4, 0, 3, 3, 1, 2, 3, 0, 3, 1, 0, 3, 3, 0, 5, 4, 4, 4, 4, 4, 4, 3, 3, 5, 4, 4, 3, 3, 5, 4, 0, 3, 2, 0, 4, 4),
|
56 |
-
(0, 2, 0, 3, 0, 1, 0, 0, 0, 1, 3, 3, 3, 2, 4, 1, 3, 0, 3, 1, 3, 0, 2, 2, 1, 1, 0, 0, 2, 0, 4, 3, 1, 0, 4, 3, 0, 4, 4, 4, 1, 4, 3, 1, 1, 3, 3, 1, 0, 2, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 0, 4, 3, 2, 4, 3, 5, 4, 3, 3, 3, 4, 3, 3, 4, 3, 3, 0, 2, 1, 0, 3, 3),
|
57 |
-
(0, 2, 0, 4, 0, 3, 0, 2, 0, 2, 5, 5, 3, 4, 4, 4, 4, 1, 4, 3, 3, 0, 4, 3, 4, 3, 1, 3, 3, 2, 4, 3, 0, 3, 4, 3, 0, 3, 4, 4, 2, 4, 4, 0, 4, 5, 3, 3, 2, 2, 1, 1, 1, 2, 0, 1, 5, 0, 3, 3, 2, 4, 3, 3, 3, 4, 0, 3, 0, 2, 0, 4, 4, 3, 5, 5, 0, 0, 3, 0, 2, 3, 3),
|
58 |
-
(0, 3, 0, 4, 0, 3, 0, 1, 0, 3, 4, 3, 3, 1, 3, 3, 3, 0, 3, 1, 3, 0, 4, 3, 3, 1, 1, 0, 3, 0, 3, 3, 0, 0, 4, 4, 0, 1, 5, 4, 3, 3, 5, 0, 3, 3, 4, 3, 0, 2, 0, 1, 1, 1, 0, 1, 3, 0, 1, 2, 1, 3, 3, 2, 3, 3, 0, 3, 0, 1, 0, 1, 3, 3, 4, 4, 1, 0, 1, 2, 2, 1, 3),
|
59 |
-
(0, 1, 0, 4, 0, 4, 0, 3, 0, 1, 3, 3, 3, 2, 3, 1, 1, 0, 3, 0, 3, 3, 4, 3, 2, 4, 2, 0, 1, 0, 4, 3, 2, 0, 4, 3, 0, 5, 3, 3, 2, 4, 4, 4, 3, 3, 3, 4, 0, 1, 3, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 4, 2, 3, 3, 3, 0, 3, 0, 0, 0, 4, 4, 4, 5, 3, 2, 0, 3, 3, 0, 3, 5),
|
60 |
-
(0, 2, 0, 3, 0, 0, 0, 3, 0, 1, 3, 0, 2, 0, 0, 0, 1, 0, 3, 1, 1, 3, 3, 0, 0, 3, 0, 0, 3, 0, 2, 3, 1, 0, 3, 1, 0, 3, 3, 2, 0, 4, 2, 2, 0, 2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 1, 0, 1, 0, 0, 0, 1, 3, 1, 2, 0, 0, 0, 1, 0, 0, 1, 4),
|
61 |
-
(0, 3, 0, 3, 0, 5, 0, 1, 0, 2, 4, 3, 1, 3, 3, 2, 1, 1, 5, 2, 1, 0, 5, 1, 2, 0, 0, 0, 3, 3, 2, 2, 3, 2, 4, 3, 0, 0, 3, 3, 1, 3, 3, 0, 2, 5, 3, 4, 0, 3, 3, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 2, 2, 3, 3, 3, 0, 2, 0, 1, 0, 3, 4, 4, 2, 5, 4, 0, 3, 0, 0, 3, 5),
|
62 |
-
(0, 3, 0, 3, 0, 3, 0, 1, 0, 3, 3, 3, 3, 0, 3, 0, 2, 0, 2, 1, 1, 0, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 1, 0, 3, 2, 0, 0, 3, 3, 1, 2, 3, 1, 0, 3, 3, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 3, 1, 2, 3, 0, 3, 0, 1, 0, 3, 2, 1, 0, 4, 3, 0, 1, 1, 0, 3, 3),
|
63 |
-
(0, 4, 0, 5, 0, 3, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 4, 3, 5, 3, 3, 2, 5, 3, 4, 4, 4, 3, 4, 3, 4, 5, 5, 3, 4, 4, 3, 4, 4, 5, 4, 4, 4, 3, 4, 5, 5, 4, 2, 3, 4, 2, 3, 4, 0, 3, 3, 1, 4, 3, 2, 4, 3, 3, 5, 5, 0, 3, 0, 3, 0, 5, 5, 5, 5, 4, 4, 0, 4, 0, 1, 4, 4),
|
64 |
-
(0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 5, 4, 4, 2, 3, 2, 5, 1, 3, 2, 5, 1, 4, 2, 3, 2, 3, 3, 4, 3, 3, 3, 3, 2, 5, 4, 1, 3, 3, 5, 3, 4, 4, 0, 4, 4, 3, 1, 1, 3, 1, 0, 2, 3, 0, 2, 3, 0, 3, 0, 0, 4, 3, 1, 3, 4, 0, 3, 0, 2, 0, 4, 4, 4, 3, 4, 5, 0, 4, 0, 0, 3, 4),
|
65 |
-
(0, 3, 0, 3, 0, 3, 1, 2, 0, 3, 4, 4, 3, 3, 3, 0, 2, 2, 4, 3, 3, 1, 3, 3, 3, 1, 1, 0, 3, 1, 4, 3, 2, 3, 4, 4, 2, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 3, 1, 3, 3, 1, 3, 3, 0, 4, 1, 0, 2, 2, 1, 4, 3, 2, 3, 3, 5, 4, 3, 3, 5, 4, 4, 3, 3, 0, 4, 0, 3, 2, 2, 4, 4),
|
66 |
-
(0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 3, 0, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 1, 1, 3, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1),
|
67 |
-
(0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 4, 1, 4, 0, 3, 0, 4, 0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 1, 5, 1, 4, 0, 0, 3, 0, 5, 0, 5, 2, 0, 1, 0, 0, 0, 2, 1, 4, 0, 1, 3, 0, 0, 3, 0, 0, 3, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
|
68 |
-
(1, 4, 0, 5, 0, 3, 0, 2, 0, 3, 5, 4, 4, 3, 4, 3, 5, 3, 4, 3, 3, 0, 4, 3, 3, 3, 3, 3, 3, 2, 4, 4, 3, 1, 3, 4, 4, 5, 4, 4, 3, 4, 4, 1, 3, 5, 4, 3, 3, 3, 1, 2, 2, 3, 3, 1, 3, 1, 3, 3, 3, 5, 3, 3, 4, 5, 0, 3, 0, 3, 0, 3, 4, 3, 4, 4, 3, 0, 3, 0, 2, 4, 3),
|
69 |
-
(0, 1, 0, 4, 0, 0, 0, 0, 0, 1, 4, 0, 4, 1, 4, 2, 4, 0, 3, 0, 1, 0, 1, 0, 0, 0, 0, 0, 2, 0, 3, 1, 1, 1, 0, 3, 0, 0, 0, 1, 2, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3, 2, 0, 2, 2, 0, 1, 0, 0, 0, 2, 3, 2, 3, 3, 0, 0, 0, 0, 2, 1, 0),
|
70 |
-
(0, 5, 1, 5, 0, 3, 0, 3, 0, 5, 4, 4, 5, 1, 5, 3, 3, 0, 4, 3, 4, 3, 5, 3, 4, 3, 3, 2, 4, 3, 4, 3, 3, 0, 3, 3, 1, 4, 4, 3, 4, 4, 4, 3, 4, 5, 5, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 2, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 5, 3, 3, 0, 3, 4, 0, 4, 3),
|
71 |
-
(0, 5, 0, 5, 0, 3, 0, 2, 0, 4, 4, 3, 5, 2, 4, 3, 3, 3, 4, 4, 4, 3, 5, 3, 5, 3, 3, 1, 4, 0, 4, 3, 3, 0, 3, 3, 0, 4, 4, 4, 4, 5, 4, 3, 3, 5, 5, 3, 2, 3, 1, 2, 3, 2, 0, 1, 0, 0, 3, 2, 2, 4, 4, 3, 1, 5, 0, 4, 0, 3, 0, 4, 3, 1, 3, 2, 1, 0, 3, 3, 0, 3, 3),
|
72 |
-
(0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 5, 5, 3, 4, 3, 3, 2, 5, 4, 4, 3, 5, 3, 5, 3, 4, 0, 4, 3, 4, 4, 3, 2, 4, 4, 3, 4, 5, 4, 4, 5, 5, 0, 3, 5, 5, 4, 1, 3, 3, 2, 3, 3, 1, 3, 1, 0, 4, 3, 1, 4, 4, 3, 4, 5, 0, 4, 0, 2, 0, 4, 3, 4, 4, 3, 3, 0, 4, 0, 0, 5, 5),
|
73 |
-
(0, 4, 0, 4, 0, 5, 0, 1, 1, 3, 3, 4, 4, 3, 4, 1, 3, 0, 5, 1, 3, 0, 3, 1, 3, 1, 1, 0, 3, 0, 3, 3, 4, 0, 4, 3, 0, 4, 4, 4, 3, 4, 4, 0, 3, 5, 4, 1, 0, 3, 0, 0, 2, 3, 0, 3, 1, 0, 3, 1, 0, 3, 2, 1, 3, 5, 0, 3, 0, 1, 0, 3, 2, 3, 3, 4, 4, 0, 2, 2, 0, 4, 4),
|
74 |
-
(2, 4, 0, 5, 0, 4, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 5, 3, 5, 3, 5, 2, 5, 3, 4, 3, 3, 4, 3, 4, 5, 3, 2, 1, 5, 4, 3, 2, 3, 4, 5, 3, 4, 1, 2, 5, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 3, 0, 4, 1, 0, 3, 4, 3, 3, 5, 0, 3, 0, 1, 0, 4, 5, 5, 5, 4, 3, 0, 4, 2, 0, 3, 5),
|
75 |
-
(0, 5, 0, 4, 0, 4, 0, 2, 0, 5, 4, 3, 4, 3, 4, 3, 3, 3, 4, 3, 4, 2, 5, 3, 5, 3, 4, 1, 4, 3, 4, 4, 4, 0, 3, 5, 0, 4, 4, 4, 4, 5, 3, 1, 3, 4, 5, 3, 3, 3, 3, 3, 3, 3, 0, 2, 2, 0, 3, 3, 2, 4, 3, 3, 3, 5, 3, 4, 1, 3, 3, 5, 3, 2, 0, 0, 0, 0, 4, 3, 1, 3, 3),
|
76 |
-
(0, 1, 0, 3, 0, 3, 0, 1, 0, 1, 3, 3, 3, 2, 3, 3, 3, 0, 3, 0, 0, 0, 3, 1, 3, 0, 0, 0, 2, 2, 2, 3, 0, 0, 3, 2, 0, 1, 2, 4, 1, 3, 3, 0, 0, 3, 3, 3, 0, 1, 0, 0, 2, 1, 0, 0, 3, 0, 3, 1, 0, 3, 0, 0, 1, 3, 0, 2, 0, 1, 0, 3, 3, 1, 3, 3, 0, 0, 1, 1, 0, 3, 3),
|
77 |
-
(0, 2, 0, 3, 0, 2, 1, 4, 0, 2, 2, 3, 1, 1, 3, 1, 1, 0, 2, 0, 3, 1, 2, 3, 1, 3, 0, 0, 1, 0, 4, 3, 2, 3, 3, 3, 1, 4, 2, 3, 3, 3, 3, 1, 0, 3, 1, 4, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 1, 1, 0, 3, 1, 3, 2, 2, 0, 1, 0, 0, 0, 2, 3, 3, 3, 1, 0, 0, 0, 0, 0, 2, 3),
|
78 |
-
(0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 5, 5, 3, 3, 4, 3, 3, 1, 5, 4, 4, 2, 4, 4, 4, 3, 4, 2, 4, 3, 5, 5, 4, 3, 3, 4, 3, 3, 5, 5, 4, 5, 5, 1, 3, 4, 5, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 1, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 5, 3, 3, 1, 4, 3, 0, 4, 0, 1, 5, 3),
|
79 |
-
(0, 5, 0, 5, 0, 4, 0, 2, 0, 4, 4, 3, 4, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 5, 3, 3, 5, 2, 4, 4, 4, 3, 4, 4, 3, 3, 4, 4, 5, 5, 3, 3, 4, 3, 4, 3, 3, 4, 3, 3, 3, 3, 1, 2, 2, 1, 4, 3, 3, 5, 4, 4, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 4, 4, 1, 0, 4, 2, 0, 2, 4),
|
80 |
-
(0, 4, 0, 4, 0, 3, 0, 1, 0, 3, 5, 2, 3, 0, 3, 0, 2, 1, 4, 2, 3, 3, 4, 1, 4, 3, 3, 2, 4, 1, 3, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 5, 3, 3, 3, 3, 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 3, 1, 2, 2, 3, 0, 3, 0, 2, 0, 4, 4, 3, 3, 4, 1, 0, 3, 0, 0, 2, 4),
|
81 |
-
(0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 3, 1, 3, 0, 3, 2, 0, 0, 0, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 2, 0, 0, 0, 0, 0, 0, 2),
|
82 |
-
(0, 2, 1, 3, 0, 2, 0, 2, 0, 3, 3, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 4, 2, 2, 1, 2, 1, 4, 0, 4, 3, 1, 3, 3, 3, 2, 4, 3, 5, 4, 3, 3, 3, 3, 3, 3, 3, 0, 1, 3, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 4, 2, 0, 2, 3, 0, 3, 3, 0, 3, 3, 4, 2, 3, 1, 4, 0, 1, 2, 0, 2, 3),
|
83 |
-
(0, 3, 0, 3, 0, 1, 0, 3, 0, 2, 3, 3, 3, 0, 3, 1, 2, 0, 3, 3, 2, 3, 3, 2, 3, 2, 3, 1, 3, 0, 4, 3, 2, 0, 3, 3, 1, 4, 3, 3, 2, 3, 4, 3, 1, 3, 3, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 4, 1, 1, 0, 3, 0, 3, 1, 0, 2, 3, 3, 3, 3, 3, 1, 0, 0, 2, 0, 3, 3),
|
84 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 3, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3),
|
85 |
-
(0, 2, 0, 3, 1, 3, 0, 3, 0, 2, 3, 3, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 1, 3, 0, 2, 3, 1, 1, 4, 3, 3, 2, 3, 3, 1, 2, 2, 4, 1, 3, 3, 0, 1, 4, 2, 3, 0, 1, 3, 0, 3, 0, 0, 1, 3, 0, 2, 0, 0, 3, 3, 2, 1, 3, 0, 3, 0, 2, 0, 3, 4, 4, 4, 3, 1, 0, 3, 0, 0, 3, 3),
|
86 |
-
(0, 2, 0, 1, 0, 2, 0, 0, 0, 1, 3, 2, 2, 1, 3, 0, 1, 1, 3, 0, 3, 2, 3, 1, 2, 0, 2, 0, 1, 1, 3, 3, 3, 0, 3, 3, 1, 1, 2, 3, 2, 3, 3, 1, 2, 3, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 2, 1, 2, 1, 3, 0, 3, 0, 0, 0, 3, 4, 4, 4, 3, 2, 0, 2, 0, 0, 2, 4),
|
87 |
-
(0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 3),
|
88 |
-
(0, 3, 0, 3, 0, 2, 0, 3, 0, 3, 3, 3, 2, 3, 2, 2, 2, 0, 3, 1, 3, 3, 3, 2, 3, 3, 0, 0, 3, 0, 3, 2, 2, 0, 2, 3, 1, 4, 3, 4, 3, 3, 2, 3, 1, 5, 4, 4, 0, 3, 1, 2, 1, 3, 0, 3, 1, 1, 2, 0, 2, 3, 1, 3, 1, 3, 0, 3, 0, 1, 0, 3, 3, 4, 4, 2, 1, 0, 2, 1, 0, 2, 4),
|
89 |
-
(0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 4, 2, 5, 1, 4, 0, 2, 0, 2, 1, 3, 1, 4, 0, 2, 1, 0, 0, 2, 1, 4, 1, 1, 0, 3, 3, 0, 5, 1, 3, 2, 3, 3, 1, 0, 3, 2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 1, 0, 3, 0, 2, 0, 1, 0, 3, 3, 3, 4, 3, 3, 0, 0, 0, 0, 2, 3),
|
90 |
-
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 3),
|
91 |
-
(0, 1, 0, 3, 0, 4, 0, 3, 0, 2, 4, 3, 1, 0, 3, 2, 2, 1, 3, 1, 2, 2, 3, 1, 1, 1, 2, 1, 3, 0, 1, 2, 0, 1, 3, 2, 1, 3, 0, 5, 5, 1, 0, 0, 1, 3, 2, 1, 0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 1, 1, 1, 3, 2, 0, 2, 0, 1, 0, 2, 3, 3, 1, 2, 3, 0, 1, 0, 1, 0, 4),
|
92 |
-
(0, 0, 0, 1, 0, 3, 0, 3, 0, 2, 2, 1, 0, 0, 4, 0, 3, 0, 3, 1, 3, 0, 3, 0, 3, 0, 1, 0, 3, 0, 3, 1, 3, 0, 3, 3, 0, 0, 1, 2, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 2, 0, 0, 2, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 4),
|
93 |
-
(0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 3, 1, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 2, 0, 2, 3, 0, 0, 2, 2, 3, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 2, 3),
|
94 |
-
(2, 4, 0, 5, 0, 5, 0, 4, 0, 3, 4, 3, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 2, 3, 0, 5, 5, 4, 1, 5, 4, 3, 1, 5, 4, 3, 4, 4, 3, 3, 4, 3, 3, 0, 3, 2, 0, 2, 3, 0, 3, 0, 0, 3, 3, 0, 5, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 4, 5, 4, 5, 3, 0, 4, 3, 0, 3, 4),
|
95 |
-
(0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 3, 4, 3, 2, 3, 2, 3, 0, 4, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 4, 3, 3, 1, 3, 4, 3, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 1, 0, 2, 0, 0, 1, 1, 0, 2, 0, 0, 3, 1, 0, 5, 3, 2, 1, 3, 0, 3, 0, 1, 2, 4, 3, 2, 4, 3, 3, 0, 3, 2, 0, 4, 4),
|
96 |
-
(0, 3, 0, 3, 0, 1, 0, 0, 0, 1, 4, 3, 3, 2, 3, 1, 3, 1, 4, 2, 3, 2, 4, 2, 3, 4, 3, 0, 2, 2, 3, 3, 3, 0, 3, 3, 3, 0, 3, 4, 1, 3, 3, 0, 3, 4, 3, 3, 0, 1, 1, 0, 1, 0, 0, 0, 4, 0, 3, 0, 0, 3, 1, 2, 1, 3, 0, 4, 0, 1, 0, 4, 3, 3, 4, 3, 3, 0, 2, 0, 0, 3, 3),
|
97 |
-
(0, 3, 0, 4, 0, 1, 0, 3, 0, 3, 4, 3, 3, 0, 3, 3, 3, 1, 3, 1, 3, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 3, 3, 1, 3, 3, 2, 5, 4, 3, 3, 4, 5, 3, 2, 5, 3, 4, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 4, 2, 2, 1, 3, 0, 3, 0, 2, 0, 4, 4, 3, 5, 3, 2, 0, 1, 1, 0, 3, 4),
|
98 |
-
(0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 4, 3, 3, 2, 3, 3, 3, 1, 4, 3, 4, 1, 5, 3, 4, 3, 4, 0, 4, 2, 4, 3, 4, 1, 5, 4, 0, 4, 4, 4, 4, 5, 4, 1, 3, 5, 4, 2, 1, 4, 1, 1, 3, 2, 0, 3, 1, 0, 3, 2, 1, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 3, 3, 3, 0, 4, 2, 0, 3, 4),
|
99 |
-
(1, 4, 0, 4, 0, 3, 0, 1, 0, 3, 3, 3, 1, 1, 3, 3, 2, 2, 3, 3, 1, 0, 3, 2, 2, 1, 2, 0, 3, 1, 2, 1, 2, 0, 3, 2, 0, 2, 2, 3, 3, 4, 3, 0, 3, 3, 1, 2, 0, 1, 1, 3, 1, 2, 0, 0, 3, 0, 1, 1, 0, 3, 2, 2, 3, 3, 0, 3, 0, 0, 0, 2, 3, 3, 4, 3, 3, 0, 1, 0, 0, 1, 4),
|
100 |
-
(0, 4, 0, 4, 0, 4, 0, 0, 0, 3, 4, 4, 3, 1, 4, 2, 3, 2, 3, 3, 3, 1, 4, 3, 4, 0, 3, 0, 4, 2, 3, 3, 2, 2, 5, 4, 2, 1, 3, 4, 3, 4, 3, 1, 3, 3, 4, 2, 0, 2, 1, 0, 3, 3, 0, 0, 2, 0, 3, 1, 0, 4, 4, 3, 4, 3, 0, 4, 0, 1, 0, 2, 4, 4, 4, 4, 4, 0, 3, 2, 0, 3, 3),
|
101 |
-
(0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 2, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2),
|
102 |
-
(0, 2, 0, 3, 0, 4, 0, 4, 0, 1, 3, 3, 3, 0, 4, 0, 2, 1, 2, 1, 1, 1, 2, 0, 3, 1, 1, 0, 1, 0, 3, 1, 0, 0, 3, 3, 2, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 2, 2, 0, 3, 1, 0, 0, 1, 0, 1, 1, 0, 1, 2, 0, 3, 0, 0, 0, 0, 1, 0, 0, 3, 3, 4, 3, 1, 0, 1, 0, 3, 0, 2),
|
103 |
-
(0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 2, 0, 3, 1, 0, 1, 3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 4, 0, 0, 0, 2, 3, 0, 1, 4, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 3),
|
104 |
-
(0, 2, 0, 5, 0, 5, 0, 1, 0, 2, 4, 3, 3, 2, 5, 1, 3, 2, 3, 3, 3, 0, 4, 1, 2, 0, 3, 0, 4, 0, 2, 2, 1, 1, 5, 3, 0, 0, 1, 4, 2, 3, 2, 0, 3, 3, 3, 2, 0, 2, 4, 1, 1, 2, 0, 1, 1, 0, 3, 1, 0, 1, 3, 1, 2, 3, 0, 2, 0, 0, 0, 1, 3, 5, 4, 4, 4, 0, 3, 0, 0, 1, 3),
|
105 |
-
(0, 4, 0, 5, 0, 4, 0, 4, 0, 4, 5, 4, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 3, 4, 5, 4, 2, 4, 2, 3, 4, 3, 1, 4, 4, 1, 3, 5, 4, 4, 5, 5, 4, 4, 5, 5, 5, 2, 3, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 4, 4, 4, 0, 3, 0, 4, 0, 3, 3, 4, 4, 5, 0, 0, 4, 3, 0, 4, 5),
|
106 |
-
(0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4, 3, 5, 3, 4, 3, 2, 1, 4, 2, 4, 4, 3, 1, 3, 4, 2, 4, 5, 5, 3, 4, 5, 4, 1, 5, 4, 3, 0, 3, 2, 2, 3, 2, 1, 3, 1, 0, 3, 3, 3, 5, 3, 3, 3, 5, 4, 4, 2, 3, 3, 4, 3, 3, 3, 2, 1, 0, 3, 2, 1, 4, 3),
|
107 |
-
(0, 4, 0, 5, 0, 4, 0, 3, 0, 3, 5, 5, 3, 2, 4, 3, 4, 0, 5, 4, 4, 1, 4, 4, 4, 3, 3, 3, 4, 3, 5, 5, 2, 3, 3, 4, 1, 2, 5, 5, 3, 5, 5, 2, 3, 5, 5, 4, 0, 3, 2, 0, 3, 3, 1, 1, 5, 1, 4, 1, 0, 4, 3, 2, 3, 5, 0, 4, 0, 3, 0, 5, 4, 3, 4, 3, 0, 0, 4, 1, 0, 4, 4),
|
108 |
-
(1, 3, 0, 4, 0, 2, 0, 2, 0, 2, 5, 5, 3, 3, 3, 3, 3, 0, 4, 2, 3, 4, 4, 4, 3, 4, 0, 0, 3, 4, 5, 4, 3, 3, 3, 3, 2, 5, 5, 4, 5, 5, 5, 4, 3, 5, 5, 5, 1, 3, 1, 0, 1, 0, 0, 3, 2, 0, 4, 2, 0, 5, 2, 3, 2, 4, 1, 3, 0, 3, 0, 4, 5, 4, 5, 4, 3, 0, 4, 2, 0, 5, 4),
|
109 |
-
(0, 3, 0, 4, 0, 5, 0, 3, 0, 3, 4, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 2, 4, 3, 3, 2, 2, 0, 3, 3, 3, 3, 3, 1, 3, 3, 3, 0, 4, 4, 3, 4, 4, 1, 1, 4, 4, 2, 0, 3, 1, 0, 1, 1, 0, 4, 1, 0, 2, 3, 1, 3, 3, 1, 3, 4, 0, 3, 0, 1, 0, 3, 1, 3, 0, 0, 1, 0, 2, 0, 0, 4, 4),
|
110 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
|
111 |
-
(0, 3, 0, 3, 0, 2, 0, 3, 0, 1, 5, 4, 3, 3, 3, 1, 4, 2, 1, 2, 3, 4, 4, 2, 4, 4, 5, 0, 3, 1, 4, 3, 4, 0, 4, 3, 3, 3, 2, 3, 2, 5, 3, 4, 3, 2, 2, 3, 0, 0, 3, 0, 2, 1, 0, 1, 2, 0, 0, 0, 0, 2, 1, 1, 3, 1, 0, 2, 0, 4, 0, 3, 4, 4, 4, 5, 2, 0, 2, 0, 0, 1, 3),
|
112 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 4, 2, 1, 1, 0, 1, 0, 3, 2, 0, 0, 3, 1, 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 4, 0, 4, 2, 1, 0, 0, 0, 0, 0, 1),
|
113 |
-
(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 2, 0, 2, 1, 0, 0, 1, 2, 1, 0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2),
|
114 |
-
(0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3),
|
115 |
-
(0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1),
|
116 |
-
)
# fmt: on


class JapaneseContextAnalysis:
    NUM_OF_CATEGORY = 6
    DONT_KNOW = -1
    ENOUGH_REL_THRESHOLD = 100
    MAX_REL_THRESHOLD = 1000
    MINIMUM_DATA_THRESHOLD = 4

    def __init__(self) -> None:
        self._total_rel = 0
        self._rel_sample: List[int] = []
        self._need_to_skip_char_num = 0
        self._last_char_order = -1
        self._done = False
        self.reset()

    def reset(self) -> None:
        self._total_rel = 0  # total sequence received
        # category counters, each integer counts sequence in its category
        self._rel_sample = [0] * self.NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._need_to_skip_char_num = 0
        self._last_char_order = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._done = False

    def feed(self, byte_str: Union[bytes, bytearray], num_bytes: int) -> None:
        if self._done:
            return

        # The buffer we got is byte oriented, and a character may span in more than one
        # buffers. In case the last one or two byte in last buffer is not
        # complete, we record how many byte needed to complete that character
        # and skip these bytes here. We can choose to record those bytes as
        # well and analyse the character once it is complete, but since a
        # character will not make much difference, by simply skipping
        # this character will simply our logic and improve performance.
        i = self._need_to_skip_char_num
        while i < num_bytes:
            order, char_len = self.get_order(byte_str[i : i + 2])
            i += char_len
            if i > num_bytes:
                self._need_to_skip_char_num = i - num_bytes
                self._last_char_order = -1
            else:
                if (order != -1) and (self._last_char_order != -1):
                    self._total_rel += 1
                    if self._total_rel > self.MAX_REL_THRESHOLD:
                        self._done = True
                        break
                    self._rel_sample[
                        jp2_char_context[self._last_char_order][order]
                    ] += 1
                self._last_char_order = order

    def got_enough_data(self) -> bool:
        return self._total_rel > self.ENOUGH_REL_THRESHOLD

    def get_confidence(self) -> float:
        # This is just one way to calculate confidence. It works well for me.
        if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
            return (self._total_rel - self._rel_sample[0]) / self._total_rel
        return self.DONT_KNOW

    def get_order(self, _: Union[bytes, bytearray]) -> Tuple[int, int]:
        return -1, 1


class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self) -> None:
        super().__init__()
        self._charset_name = "SHIFT_JIS"

    @property
    def charset_name(self) -> str:
        return self._charset_name

    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
        if not byte_str:
            return -1, 1
        # find out current char's byte length
        first_char = byte_str[0]
        if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
            char_len = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                self._charset_name = "CP932"
        else:
            char_len = 1

        # return its order if it is hiragana
        if len(byte_str) > 1:
            second_char = byte_str[1]
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, char_len

        return -1, char_len


class EUCJPContextAnalysis(JapaneseContextAnalysis):
    def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
        if not byte_str:
            return -1, 1
        # find out current char's byte length
        first_char = byte_str[0]
        if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
            char_len = 2
        elif first_char == 0x8F:
            char_len = 3
        else:
            char_len = 1

        # return its order if it is hiragana
        if len(byte_str) > 1:
            second_char = byte_str[1]
            if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
                return second_char - 0xA1, char_len

        return -1, char_len
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/README.md
DELETED
@@ -1,63 +0,0 @@

# TensorMask in Detectron2
**A Foundation for Dense Object Segmentation**

Xinlei Chen, Ross Girshick, Kaiming He, Piotr Dollár

[[`arXiv`](https://arxiv.org/abs/1903.12174)] [[`BibTeX`](#CitingTensorMask)]

<div align="center">
  <img src="http://xinleic.xyz/images/tmask.png" width="700px" />
</div>

In this repository, we release code for TensorMask in Detectron2.
TensorMask is a dense sliding-window instance segmentation framework that, for the first time, achieves results close to the well-developed Mask R-CNN framework -- both qualitatively and quantitatively. It establishes a conceptually complementary direction for object instance segmentation research.

## Installation
First install Detectron 2 following [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). Then compile the TensorMask-specific op (`swap_align2nat`):
```bash
cd /path/to/detectron2/projects/TensorMask
python setup.py build develop
```

## Training

To train a model, run:
```bash
python /path/to/detectron2/projects/TensorMask/train_net.py --config-file <config.yaml>
```

For example, to launch TensorMask BiPyramid training (1x schedule) with ResNet-50 backbone on 8 GPUs,
one should execute:
```bash
python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_1x.yaml --num-gpus 8
```

## Evaluation

Model evaluation can be done similarly (6x schedule with scale augmentation):
```bash
python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_6x.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint
```

# Pretrained Models

| Backbone | lr sched | AP box | AP mask | download |
| -------- | -------- | -- | --- | -------- |
| R50 | 1x | 37.6 | 32.4 | <a href="https://dl.fbaipublicfiles.com/detectron2/TensorMask/tensormask_R_50_FPN_1x/152549419/model_final_8f325c.pkl">model</a> \| <a href="https://dl.fbaipublicfiles.com/detectron2/TensorMask/tensormask_R_50_FPN_1x/152549419/metrics.json">metrics</a> |
| R50 | 6x | 41.4 | 35.8 | <a href="https://dl.fbaipublicfiles.com/detectron2/TensorMask/tensormask_R_50_FPN_6x/153538791/model_final_e8df31.pkl">model</a> \| <a href="https://dl.fbaipublicfiles.com/detectron2/TensorMask/tensormask_R_50_FPN_6x/153538791/metrics.json">metrics</a> |


## <a name="CitingTensorMask"></a>Citing TensorMask

If you use TensorMask, please use the following BibTeX entry.

```
@InProceedings{chen2019tensormask,
  title={Tensormask: A Foundation for Dense Object Segmentation},
  author={Chen, Xinlei and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr},
  journal={The International Conference on Computer Vision (ICCV)},
  year={2019}
}
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/docs/_source/conf.py
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
#
|
4 |
-
# Configuration file for the Sphinx documentation builder.
|
5 |
-
#
|
6 |
-
# This file only contains a selection of the most common options. For a full
|
7 |
-
# list see the documentation:
|
8 |
-
# http://www.sphinx-doc.org/en/master/config
|
9 |
-
|
10 |
-
# -- Path setup --------------------------------------------------------------
|
11 |
-
|
12 |
-
# If extensions (or modules to document with autodoc) are in another directory,
|
13 |
-
# add these directories to sys.path here. If the directory is relative to the
|
14 |
-
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
15 |
-
#
|
16 |
-
import os
|
17 |
-
import sys
|
18 |
-
sys.path.insert(0, os.path.abspath('../..'))
|
19 |
-
|
20 |
-
RELEASE = os.environ.get('RELEASE', False)
|
21 |
-
|
22 |
-
# -- Project information -----------------------------------------------------
|
23 |
-
|
24 |
-
project = u'OpenVQA'
|
25 |
-
copyright = u'2019, MILVLG'
|
26 |
-
author = u'MILVLG'
|
27 |
-
|
28 |
-
# The version info for the project you're documenting, acts as replacement for
|
29 |
-
# |version| and |release|, also used in various other places throughout the
|
30 |
-
# built documents.
|
31 |
-
#
|
32 |
-
# The short X.Y version.
|
33 |
-
# version = '1.0'
|
34 |
-
# The full version, including alpha/beta/rc tags.
|
35 |
-
# release = '0.0'
|
36 |
-
|
37 |
-
|
38 |
-
# -- General configuration ---------------------------------------------------
|
39 |
-
|
40 |
-
master_doc = 'index'
|
41 |
-
|
42 |
-
# The suffix(es) of source filenames.
|
43 |
-
# You can specify multiple suffix as a list of string:
|
44 |
-
#
|
45 |
-
source_suffix = {
|
46 |
-
'.rst': 'restructuredtext',
|
47 |
-
'.txt': 'markdown',
|
48 |
-
'.md': 'markdown',
|
49 |
-
}
|
50 |
-
|
51 |
-
# Add any Sphinx extension module names here, as strings. They can be
|
52 |
-
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
53 |
-
# ones.
|
54 |
-
extensions = [
|
55 |
-
'sphinx.ext.autodoc',
|
56 |
-
'sphinx.ext.autosummary',
|
57 |
-
'sphinx.ext.doctest',
|
58 |
-
'sphinx.ext.intersphinx',
|
59 |
-
'sphinx.ext.todo',
|
60 |
-
'sphinx.ext.coverage',
|
61 |
-
'sphinx.ext.napoleon',
|
62 |
-
'sphinx.ext.viewcode',
|
63 |
-
'sphinx_markdown_tables',
|
64 |
-
'recommonmark',
|
65 |
-
]
|
66 |
-
|
67 |
-
# Add any paths that contain templates here, relative to this directory.
|
68 |
-
templates_path = ['_templates']
|
69 |
-
|
70 |
-
# List of patterns, relative to source directory, that match files and
|
71 |
-
# directories to ignore when looking for source files.
|
72 |
-
# This pattern also affects html_static_path and html_extra_path.
|
73 |
-
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
74 |
-
|
75 |
-
|
76 |
-
# -- Options for HTML output -------------------------------------------------
|
77 |
-
|
78 |
-
# The theme to use for HTML and HTML Help pages. See the documentation for
|
79 |
-
# a list of builtin themes.
|
80 |
-
#
|
81 |
-
html_theme = 'sphinx_rtd_theme'
|
82 |
-
|
83 |
-
# Add any paths that contain custom static files (such as style sheets) here,
|
84 |
-
# relative to this directory. They are copied after the builtin static files,
|
85 |
-
# so a file named "default.css" will overwrite the builtin "default.css".
|
86 |
-
html_static_path = ['_static']
|
87 |
-
|
88 |
-
# Add cusotm css overrides
|
89 |
-
def setup(app):
|
90 |
-
app.add_stylesheet( "custom.css" )
|
91 |
-
|
92 |
-
# Add any paths that contain templates here, relative to this directory.
|
93 |
-
templates_path = ['_templates']
|
94 |
-
if RELEASE:
|
95 |
-
templates_path = ['_templates-stable']
|
96 |
-
|
97 |
-
# The name of the Pygments (syntax highlighting) style to use.
|
98 |
-
pygments_style = 'sphinx'
|
99 |
-
|
100 |
-
# Disable docstring inheritance
|
101 |
-
autodoc_inherit_docstrings = False
|
102 |
-
|
103 |
-
|
104 |
-
# -- Other Options ------------------------------------------------------------
|
105 |
-
|
106 |
-
# intersphinx_mapping = {
|
107 |
-
# 'python': ('https://docs.python.org/3', None)
|
108 |
-
# }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/cross_module_gil_utils.cpp
DELETED
@@ -1,73 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
tests/cross_module_gil_utils.cpp -- tools for acquiring GIL from a different module
|
3 |
-
|
4 |
-
Copyright (c) 2019 Google LLC
|
5 |
-
|
6 |
-
All rights reserved. Use of this source code is governed by a
|
7 |
-
BSD-style license that can be found in the LICENSE file.
|
8 |
-
*/
|
9 |
-
#include <pybind11/pybind11.h>
|
10 |
-
#include <cstdint>
|
11 |
-
|
12 |
-
// This file mimics a DSO that makes pybind11 calls but does not define a
|
13 |
-
// PYBIND11_MODULE. The purpose is to test that such a DSO can create a
|
14 |
-
// py::gil_scoped_acquire when the running thread is in a GIL-released state.
|
15 |
-
//
|
16 |
-
// Note that we define a Python module here for convenience, but in general
|
17 |
-
// this need not be the case. The typical scenario would be a DSO that implements
|
18 |
-
// shared logic used internally by multiple pybind11 modules.
|
19 |
-
|
20 |
-
namespace {
|
21 |
-
|
22 |
-
namespace py = pybind11;
|
23 |
-
void gil_acquire() { py::gil_scoped_acquire gil; }
|
24 |
-
|
25 |
-
constexpr char kModuleName[] = "cross_module_gil_utils";
|
26 |
-
|
27 |
-
#if PY_MAJOR_VERSION >= 3
|
28 |
-
struct PyModuleDef moduledef = {
|
29 |
-
PyModuleDef_HEAD_INIT,
|
30 |
-
kModuleName,
|
31 |
-
NULL,
|
32 |
-
0,
|
33 |
-
NULL,
|
34 |
-
NULL,
|
35 |
-
NULL,
|
36 |
-
NULL,
|
37 |
-
NULL
|
38 |
-
};
|
39 |
-
#else
|
40 |
-
PyMethodDef module_methods[] = {
|
41 |
-
{NULL, NULL, 0, NULL}
|
42 |
-
};
|
43 |
-
#endif
|
44 |
-
|
45 |
-
} // namespace
|
46 |
-
|
47 |
-
extern "C" PYBIND11_EXPORT
|
48 |
-
#if PY_MAJOR_VERSION >= 3
|
49 |
-
PyObject* PyInit_cross_module_gil_utils()
|
50 |
-
#else
|
51 |
-
void initcross_module_gil_utils()
|
52 |
-
#endif
|
53 |
-
{
|
54 |
-
|
55 |
-
PyObject* m =
|
56 |
-
#if PY_MAJOR_VERSION >= 3
|
57 |
-
PyModule_Create(&moduledef);
|
58 |
-
#else
|
59 |
-
Py_InitModule(kModuleName, module_methods);
|
60 |
-
#endif
|
61 |
-
|
62 |
-
if (m != NULL) {
|
63 |
-
static_assert(
|
64 |
-
sizeof(&gil_acquire) == sizeof(void*),
|
65 |
-
"Function pointer must have the same size as void*");
|
66 |
-
PyModule_AddObject(m, "gil_acquire_funcaddr",
|
67 |
-
PyLong_FromVoidPtr(reinterpret_cast<void*>(&gil_acquire)));
|
68 |
-
}
|
69 |
-
|
70 |
-
#if PY_MAJOR_VERSION >= 3
|
71 |
-
return m;
|
72 |
-
#endif
|
73 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/count.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits count
|
22 |
-
#include <thrust/system/cpp/detail/count.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/system/add.js
DELETED
@@ -1,446 +0,0 @@
|
|
1 |
-
import cfg from "../../lib/config/config.js"
|
2 |
-
import plugin from "../../lib/plugins/plugin.js"
|
3 |
-
import common from "../../lib/common/common.js"
|
4 |
-
import fs from "node:fs"
|
5 |
-
import path from "node:path"
|
6 |
-
import lodash from "lodash"
|
7 |
-
import fetch from "node-fetch"
|
8 |
-
import { fileTypeFromBuffer } from "file-type"
|
9 |
-
|
10 |
-
let messageMap = {}
|
11 |
-
|
12 |
-
export class add extends plugin {
|
13 |
-
constructor() {
|
14 |
-
super({
|
15 |
-
name: "添加消息",
|
16 |
-
dsc: "添加消息",
|
17 |
-
event: "message",
|
18 |
-
priority: 50000,
|
19 |
-
rule: [
|
20 |
-
{
|
21 |
-
reg: "^#(全局)?添加",
|
22 |
-
fnc: "add"
|
23 |
-
},
|
24 |
-
{
|
25 |
-
reg: "^#(全局)?删除",
|
26 |
-
fnc: "del"
|
27 |
-
},
|
28 |
-
{
|
29 |
-
reg: "",
|
30 |
-
fnc: "getMessage",
|
31 |
-
log: false
|
32 |
-
},
|
33 |
-
{
|
34 |
-
reg: "^#(全局)?(消息|词条)",
|
35 |
-
fnc: "list"
|
36 |
-
}
|
37 |
-
]
|
38 |
-
})
|
39 |
-
|
40 |
-
this.path = "data/messageJson/"
|
41 |
-
}
|
42 |
-
|
43 |
-
async init() {
|
44 |
-
common.mkdirs(this.path)
|
45 |
-
}
|
46 |
-
|
47 |
-
/** 群号key */
|
48 |
-
get grpKey() {
|
49 |
-
return `Yz:group_id:${this.e.user_id}`
|
50 |
-
}
|
51 |
-
|
52 |
-
/** #添加 */
|
53 |
-
async add() {
|
54 |
-
this.isGlobal = Boolean(this.e.msg.match(/^#全局/))
|
55 |
-
await this.getGroupId()
|
56 |
-
|
57 |
-
if (!this.group_id) {
|
58 |
-
await this.reply("请先在群内触发消息,确定添加的群")
|
59 |
-
return
|
60 |
-
}
|
61 |
-
|
62 |
-
this.initMessageMap()
|
63 |
-
|
64 |
-
if (!this.checkAuth()) return false
|
65 |
-
/** 获取关键词 */
|
66 |
-
this.getKeyWord()
|
67 |
-
if (!this.e.keyWord) {
|
68 |
-
await this.reply("添加错误:没有关键词")
|
69 |
-
return
|
70 |
-
}
|
71 |
-
|
72 |
-
this.e.message = []
|
73 |
-
this.setContext("addContext")
|
74 |
-
|
75 |
-
return this.reply("请发送添加内容,完成后发送#结束添加", true, { at: true })
|
76 |
-
}
|
77 |
-
|
78 |
-
/** 获取群号 */
|
79 |
-
async getGroupId() {
|
80 |
-
/** 添加全局消息,存入到机器人文件中 */
|
81 |
-
if (this.isGlobal) {
|
82 |
-
this.group_id = "global"
|
83 |
-
return this.group_id
|
84 |
-
}
|
85 |
-
|
86 |
-
if (this.e.isGroup) {
|
87 |
-
this.group_id = this.e.group_id
|
88 |
-
redis.setEx(this.grpKey, 3600 * 24 * 30, String(this.group_id))
|
89 |
-
return this.group_id
|
90 |
-
}
|
91 |
-
|
92 |
-
// redis获取
|
93 |
-
let groupId = await redis.get(this.grpKey)
|
94 |
-
if (groupId) {
|
95 |
-
this.group_id = groupId
|
96 |
-
return this.group_id
|
97 |
-
}
|
98 |
-
|
99 |
-
return false
|
100 |
-
}
|
101 |
-
|
102 |
-
checkAuth() {
|
103 |
-
if (this.e.isMaster) return true
|
104 |
-
|
105 |
-
const groupCfg = cfg.getGroup(this.e.self_id, this.group_id)
|
106 |
-
if (groupCfg.addLimit == 2) {
|
107 |
-
this.reply("暂无权限,只有主人才能操作")
|
108 |
-
return false
|
109 |
-
}
|
110 |
-
if (groupCfg.addLimit == 1) {
|
111 |
-
if (!this.e.member.is_admin) {
|
112 |
-
this.reply("暂无权限,只有管理员才能操作")
|
113 |
-
return false
|
114 |
-
}
|
115 |
-
}
|
116 |
-
|
117 |
-
if (groupCfg.addPrivate != 1 && !this.e.isGroup) {
|
118 |
-
this.reply("禁止私聊添加")
|
119 |
-
return false
|
120 |
-
}
|
121 |
-
|
122 |
-
return true
|
123 |
-
}
|
124 |
-
|
125 |
-
/** 获取添加关键词 */
|
126 |
-
getKeyWord() {
|
127 |
-
this.e.isGlobal = Boolean(this.e.msg.match(/^#全局/))
|
128 |
-
this.keyWord = this.e.raw_message.replace(/#(全局)?(添加|删除)/, "").trim()
|
129 |
-
this.e.keyWord = this.trimAlias(this.keyWord)
|
130 |
-
}
|
131 |
-
|
132 |
-
/** 过滤别名 */
|
133 |
-
trimAlias(msg) {
|
134 |
-
const groupCfg = cfg.getGroup(this.e.self_id, this.group_id)
|
135 |
-
let alias = groupCfg.botAlias
|
136 |
-
if (!Array.isArray(alias))
|
137 |
-
alias = [alias]
|
138 |
-
|
139 |
-
for (const name of alias)
|
140 |
-
if (msg.startsWith(name))
|
141 |
-
msg = lodash.trimStart(msg, name).trim()
|
142 |
-
|
143 |
-
return msg
|
144 |
-
}
|
145 |
-
|
146 |
-
/** 添加内容 */
|
147 |
-
async addContext() {
|
148 |
-
const context = this.getContext()?.addContext
|
149 |
-
this.isGlobal = context.isGlobal
|
150 |
-
await this.getGroupId()
|
151 |
-
/** 关键词 */
|
152 |
-
this.keyWord = context.keyWord
|
153 |
-
|
154 |
-
if (!this.e.msg?.includes("#结束添加")) {
|
155 |
-
/** 添加内容 */
|
156 |
-
for (const i of this.e.message) {
|
157 |
-
if (i.url) i.file = await this.saveFile(i)
|
158 |
-
if (i.type == "at" && i.qq == this.e.self_id) continue
|
159 |
-
context.message.push(i)
|
160 |
-
}
|
161 |
-
return
|
162 |
-
}
|
163 |
-
|
164 |
-
this.finish("addContext")
|
165 |
-
if (!context.message?.length) {
|
166 |
-
this.reply("添加错误:没有添加内容")
|
167 |
-
return
|
168 |
-
}
|
169 |
-
|
170 |
-
if (!messageMap[this.group_id])
|
171 |
-
messageMap[this.group_id] = new Map()
|
172 |
-
|
173 |
-
/** 支持单个关键词添加多个 */
|
174 |
-
let message = messageMap[this.group_id].get(this.keyWord)
|
175 |
-
if (Array.isArray(message))
|
176 |
-
message.push(context.message)
|
177 |
-
else
|
178 |
-
message = [context.message]
|
179 |
-
messageMap[this.group_id].set(this.keyWord, message)
|
180 |
-
|
181 |
-
if (message.length > 1)
|
182 |
-
this.keyWord += String(message.length)
|
183 |
-
|
184 |
-
this.saveJson()
|
185 |
-
return this.reply(`添加成功:${this.keyWord}`)
|
186 |
-
}
|
187 |
-
|
188 |
-
saveJson() {
|
189 |
-
let obj = {}
|
190 |
-
for (let [k, v] of messageMap[this.group_id])
|
191 |
-
obj[k] = v
|
192 |
-
|
193 |
-
fs.writeFileSync(`${this.path}${this.group_id}.json`, JSON.stringify(obj, "", "\t"))
|
194 |
-
}
|
195 |
-
|
196 |
-
async makeBuffer(file) {
|
197 |
-
if (file.match(/^base64:\/\//))
|
198 |
-
return Buffer.from(file.replace(/^base64:\/\//, ""), "base64")
|
199 |
-
else if (file.match(/^https?:\/\//))
|
200 |
-
return Buffer.from(await (await fetch(file)).arrayBuffer())
|
201 |
-
else if (fs.existsSync(file))
|
202 |
-
return Buffer.from(fs.readFileSync(file))
|
203 |
-
return file
|
204 |
-
}
|
205 |
-
|
206 |
-
async fileType(data) {
|
207 |
-
const file = { name: `${this.group_id}/${data.type}/${Date.now()}` }
|
208 |
-
try {
|
209 |
-
file.url = data.url.replace(/^base64:\/\/.*/, "base64://...")
|
210 |
-
file.buffer = await this.makeBuffer(data.url)
|
211 |
-
file.type = await fileTypeFromBuffer(file.buffer)
|
212 |
-
file.name = `${file.name}.${file.type.ext}`
|
213 |
-
} catch (err) {
|
214 |
-
logger.error(`文件类型检测错误:${logger.red(err)}`)
|
215 |
-
file.name = `${file.name}-${path.basename(data.file || data.url)}`
|
216 |
-
}
|
217 |
-
return file
|
218 |
-
}
|
219 |
-
|
220 |
-
async saveFile(data) {
|
221 |
-
const file = await this.fileType(data)
|
222 |
-
if (file.name && Buffer.isBuffer(file.buffer) && common.mkdirs(path.dirname(`${this.path}${file.name}`))) {
|
223 |
-
fs.writeFileSync(`${this.path}${file.name}`, file.buffer)
|
224 |
-
return file.name
|
225 |
-
}
|
226 |
-
return data.url
|
227 |
-
}
|
228 |
-
|
229 |
-
async getMessage() {
|
230 |
-
if (!this.e.raw_message) return false
|
231 |
-
this.isGlobal = false
|
232 |
-
|
233 |
-
await this.getGroupId()
|
234 |
-
if (!this.group_id) return false
|
235 |
-
|
236 |
-
this.initMessageMap()
|
237 |
-
this.initGlobalMessageMap()
|
238 |
-
|
239 |
-
this.keyWord = this.trimAlias(this.e.raw_message.trim())
|
240 |
-
let keyWord = this.keyWord
|
241 |
-
|
242 |
-
let num = 0
|
243 |
-
if (isNaN(keyWord)) {
|
244 |
-
num = keyWord.charAt(keyWord.length-1)
|
245 |
-
|
246 |
-
if (!isNaN(num) && !messageMap[this.group_id].has(keyWord) && !messageMap.global.has(keyWord)) {
|
247 |
-
keyWord = lodash.trimEnd(keyWord, num).trim()
|
248 |
-
num--
|
249 |
-
}
|
250 |
-
}
|
251 |
-
|
252 |
-
let msg = [
|
253 |
-
...messageMap[this.group_id].get(keyWord) || [],
|
254 |
-
...messageMap.global.get(keyWord) || [],
|
255 |
-
]
|
256 |
-
if (lodash.isEmpty(msg)) return false
|
257 |
-
|
258 |
-
if (!msg[num])
|
259 |
-
num = lodash.random(0, msg.length-1)
|
260 |
-
|
261 |
-
msg = [...msg[num]]
|
262 |
-
for (const i in msg)
|
263 |
-
if (msg[i].file && fs.existsSync(`${this.path}${msg[i].file}`))
|
264 |
-
msg[i] = { ...msg[i], file: `base64://${fs.readFileSync(`${this.path}${msg[i].file}`).toString("base64")}` }
|
265 |
-
|
266 |
-
logger.mark(`[发送消息]${this.e.logText} ${this.keyWord}`)
|
267 |
-
const groupCfg = cfg.getGroup(this.e.self_id, this.group_id)
|
268 |
-
return this.reply(msg, Boolean(groupCfg.addReply), {
|
269 |
-
at: Boolean(groupCfg.addAt),
|
270 |
-
recallMsg: groupCfg.addRecall,
|
271 |
-
})
|
272 |
-
}
|
273 |
-
|
274 |
-
/** 初始化已添加内容 */
|
275 |
-
initMessageMap() {
|
276 |
-
if (messageMap[this.group_id]) return
|
277 |
-
messageMap[this.group_id] = new Map()
|
278 |
-
|
279 |
-
const path = `${this.path}${this.group_id}.json`
|
280 |
-
if (!fs.existsSync(path)) return
|
281 |
-
|
282 |
-
try {
|
283 |
-
const message = JSON.parse(fs.readFileSync(path, "utf8"))
|
284 |
-
for (const i in message)
|
285 |
-
messageMap[this.group_id].set(i, message[i])
|
286 |
-
} catch (err) {
|
287 |
-
logger.error(`JSON 格式错误:${path} ${err}`)
|
288 |
-
}
|
289 |
-
}
|
290 |
-
|
291 |
-
/** 初始化全局已添加内容 */
|
292 |
-
initGlobalMessageMap() {
|
293 |
-
if (messageMap.global) return
|
294 |
-
messageMap.global = new Map()
|
295 |
-
|
296 |
-
const globalPath = `${this.path}global.json`
|
297 |
-
if (!fs.existsSync(globalPath)) return
|
298 |
-
|
299 |
-
try {
|
300 |
-
const message = JSON.parse(fs.readFileSync(globalPath, "utf8"))
|
301 |
-
for (const i in message)
|
302 |
-
messageMap.global.set(i, message[i])
|
303 |
-
} catch (err) {
|
304 |
-
logger.error(`JSON 格式错误:${globalPath} ${err}`)
|
305 |
-
}
|
306 |
-
}
|
307 |
-
|
308 |
-
async del() {
|
309 |
-
this.isGlobal = this.e.msg.includes("全局")
|
310 |
-
await this.getGroupId()
|
311 |
-
if (!(this.group_id && this.checkAuth())) return false
|
312 |
-
|
313 |
-
this.initMessageMap()
|
314 |
-
|
315 |
-
this.getKeyWord()
|
316 |
-
if (!this.keyWord) {
|
317 |
-
await this.reply("删除错误:没有关键词")
|
318 |
-
return false
|
319 |
-
}
|
320 |
-
|
321 |
-
this.keyWord = this.trimAlias(this.keyWord)
|
322 |
-
let keyWord = this.keyWord
|
323 |
-
|
324 |
-
let num = false
|
325 |
-
let index = 0
|
326 |
-
if (isNaN(keyWord)) {
|
327 |
-
num = keyWord.charAt(keyWord.length-1)
|
328 |
-
|
329 |
-
if (!isNaN(num) && !messageMap[this.group_id].has(keyWord)) {
|
330 |
-
keyWord = lodash.trimEnd(keyWord, num).trim()
|
331 |
-
index = num-1
|
332 |
-
} else {
|
333 |
-
num = false
|
334 |
-
}
|
335 |
-
}
|
336 |
-
|
337 |
-
let arr = messageMap[this.group_id].get(keyWord)
|
338 |
-
if (!arr) {
|
339 |
-
// await this.reply(`暂无此消息:${keyWord}`)
|
340 |
-
return false
|
341 |
-
}
|
342 |
-
|
343 |
-
let tmp = []
|
344 |
-
if (num) {
|
345 |
-
if (!arr[index]) {
|
346 |
-
// await this.reply(`暂无此消息:${keyWord}${num}`)
|
347 |
-
return false
|
348 |
-
}
|
349 |
-
|
350 |
-
tmp = arr[index]
|
351 |
-
arr.splice(index, 1)
|
352 |
-
|
353 |
-
if (arr.length <= 0) {
|
354 |
-
messageMap[this.group_id].delete(keyWord)
|
355 |
-
} else {
|
356 |
-
messageMap[this.group_id].set(keyWord, arr)
|
357 |
-
}
|
358 |
-
} else {
|
359 |
-
if (this.e.msg.includes("删除全部")) {
|
360 |
-
tmp = arr
|
361 |
-
arr = []
|
362 |
-
} else {
|
363 |
-
tmp = arr.pop()
|
364 |
-
}
|
365 |
-
|
366 |
-
if (arr.length <= 0) {
|
367 |
-
messageMap[this.group_id].delete(keyWord)
|
368 |
-
} else {
|
369 |
-
messageMap[this.group_id].set(keyWord, arr)
|
370 |
-
}
|
371 |
-
}
|
372 |
-
|
373 |
-
this.saveJson()
|
374 |
-
return this.reply(`删除成功:${this.keyWord}`)
|
375 |
-
}
|
376 |
-
|
377 |
-
async list() {
|
378 |
-
this.isGlobal = Boolean(this.e.msg.match(/^#全局/))
|
379 |
-
|
380 |
-
let page = 1
|
381 |
-
let pageSize = 100
|
382 |
-
let type = "list"
|
383 |
-
|
384 |
-
await this.getGroupId()
|
385 |
-
if (!this.group_id) return false
|
386 |
-
|
387 |
-
this.initMessageMap()
|
388 |
-
|
389 |
-
const search = this.e.msg.replace(/^#(全局)?(消息|词条)/, "").trim()
|
390 |
-
if (search.match(/^列表/))
|
391 |
-
page = search.replace(/^列表/, "") || 1
|
392 |
-
else
|
393 |
-
type = "search"
|
394 |
-
|
395 |
-
let list = messageMap[this.group_id]
|
396 |
-
|
397 |
-
if (lodash.isEmpty(list)) {
|
398 |
-
await this.reply("暂无消息")
|
399 |
-
return
|
400 |
-
}
|
401 |
-
|
402 |
-
let arr = []
|
403 |
-
if (type == "list")
|
404 |
-
for (let [k, v] of messageMap[this.group_id])
|
405 |
-
arr.push({ key: k, val: v, num: arr.length+1 })
|
406 |
-
else
|
407 |
-
for (let [k, v] of messageMap[this.group_id])
|
408 |
-
if (k.includes(search))
|
409 |
-
arr.push({ key: k, val: v, num: arr.length+1 })
|
410 |
-
|
411 |
-
let count = arr.length
|
412 |
-
arr = arr.reverse()
|
413 |
-
|
414 |
-
if (type == "list")
|
415 |
-
arr = this.pagination(page, pageSize, arr)
|
416 |
-
if (lodash.isEmpty(arr)) return false
|
417 |
-
|
418 |
-
let msg = []
|
419 |
-
let num = 0
|
420 |
-
for (const i of arr) {
|
421 |
-
if (num >= page * pageSize) break
|
422 |
-
|
423 |
-
let keyWord = i.key
|
424 |
-
if (!keyWord) continue
|
425 |
-
|
426 |
-
msg.push(`${i.num}. ${keyWord}(${i.val.length})`)
|
427 |
-
num++
|
428 |
-
}
|
429 |
-
msg = [msg.join("\n")]
|
430 |
-
|
431 |
-
if (type == "list" && count > 100)
|
432 |
-
msg.push(`更多内容请翻页查看\n如:#消息列表${Number(page)+1}`)
|
433 |
-
|
434 |
-
let title = `消息列表:第${page}页,共${count}条`
|
435 |
-
if (type == "search")
|
436 |
-
title = `消息${search}:共${count}条`
|
437 |
-
|
438 |
-
return this.reply(await common.makeForwardMsg(this.e, msg, title))
|
439 |
-
}
|
440 |
-
|
441 |
-
/** 分页 */
|
442 |
-
pagination(pageNo, pageSize, array) {
|
443 |
-
let offset = (pageNo-1) * pageSize
|
444 |
-
return offset+pageSize >= array.length ? array.slice(offset, array.length) : array.slice(offset, offset+pageSize)
|
445 |
-
}
|
446 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/my_abi/dataset.py
DELETED
@@ -1,278 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import re
|
3 |
-
|
4 |
-
import cv2
|
5 |
-
import lmdb
|
6 |
-
import six
|
7 |
-
from fastai.vision import *
|
8 |
-
from torchvision import transforms
|
9 |
-
|
10 |
-
from transforms import CVColorJitter, CVDeterioration, CVGeometry
|
11 |
-
from utils import CharsetMapper, onehot
|
12 |
-
|
13 |
-
|
14 |
-
class ImageDataset(Dataset):
|
15 |
-
"`ImageDataset` read data from LMDB database."
|
16 |
-
|
17 |
-
def __init__(self,
|
18 |
-
path:PathOrStr,
|
19 |
-
is_training:bool=True,
|
20 |
-
img_h:int=32,
|
21 |
-
img_w:int=100,
|
22 |
-
max_length:int=25,
|
23 |
-
check_length:bool=True,
|
24 |
-
case_sensitive:bool=False,
|
25 |
-
charset_path:str='data/charset_36.txt',
|
26 |
-
convert_mode:str='RGB',
|
27 |
-
data_aug:bool=True,
|
28 |
-
deteriorate_ratio:float=0.,
|
29 |
-
multiscales:bool=True,
|
30 |
-
one_hot_y:bool=True,
|
31 |
-
return_idx:bool=False,
|
32 |
-
return_raw:bool=False,
|
33 |
-
**kwargs):
|
34 |
-
self.path, self.name = Path(path), Path(path).name
|
35 |
-
assert self.path.is_dir() and self.path.exists(), f"{path} is not a valid directory."
|
36 |
-
self.convert_mode, self.check_length = convert_mode, check_length
|
37 |
-
self.img_h, self.img_w = img_h, img_w
|
38 |
-
self.max_length, self.one_hot_y = max_length, one_hot_y
|
39 |
-
self.return_idx, self.return_raw = return_idx, return_raw
|
40 |
-
self.case_sensitive, self.is_training = case_sensitive, is_training
|
41 |
-
self.data_aug, self.multiscales = data_aug, multiscales
|
42 |
-
self.charset = CharsetMapper(charset_path, max_length=max_length+1)
|
43 |
-
self.c = self.charset.num_classes
|
44 |
-
|
45 |
-
self.env = lmdb.open(str(path), readonly=True, lock=False, readahead=False, meminit=False)
|
46 |
-
assert self.env, f'Cannot open LMDB dataset from {path}.'
|
47 |
-
with self.env.begin(write=False) as txn:
|
48 |
-
self.length = int(txn.get('num-samples'.encode()))
|
49 |
-
|
50 |
-
if self.is_training and self.data_aug:
|
51 |
-
self.augment_tfs = transforms.Compose([
|
52 |
-
CVGeometry(degrees=45, translate=(0.0, 0.0), scale=(0.5, 2.), shear=(45, 15), distortion=0.5, p=0.5),
|
53 |
-
CVDeterioration(var=20, degrees=6, factor=4, p=0.25),
|
54 |
-
CVColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1, p=0.25)
|
55 |
-
])
|
56 |
-
self.totensor = transforms.ToTensor()
|
57 |
-
|
58 |
-
def __len__(self): return self.length
|
59 |
-
|
60 |
-
def _next_image(self, index):
|
61 |
-
next_index = random.randint(0, len(self) - 1)
|
62 |
-
return self.get(next_index)
|
63 |
-
|
64 |
-
def _check_image(self, x, pixels=6):
|
65 |
-
if x.size[0] <= pixels or x.size[1] <= pixels: return False
|
66 |
-
else: return True
|
67 |
-
|
68 |
-
def resize_multiscales(self, img, borderType=cv2.BORDER_CONSTANT):
|
69 |
-
def _resize_ratio(img, ratio, fix_h=True):
|
70 |
-
if ratio * self.img_w < self.img_h:
|
71 |
-
if fix_h: trg_h = self.img_h
|
72 |
-
else: trg_h = int(ratio * self.img_w)
|
73 |
-
trg_w = self.img_w
|
74 |
-
else: trg_h, trg_w = self.img_h, int(self.img_h / ratio)
|
75 |
-
img = cv2.resize(img, (trg_w, trg_h))
|
76 |
-
pad_h, pad_w = (self.img_h - trg_h) / 2, (self.img_w - trg_w) / 2
|
77 |
-
top, bottom = math.ceil(pad_h), math.floor(pad_h)
|
78 |
-
left, right = math.ceil(pad_w), math.floor(pad_w)
|
79 |
-
img = cv2.copyMakeBorder(img, top, bottom, left, right, borderType)
|
80 |
-
return img
|
81 |
-
|
82 |
-
if self.is_training:
|
83 |
-
if random.random() < 0.5:
|
84 |
-
base, maxh, maxw = self.img_h, self.img_h, self.img_w
|
85 |
-
h, w = random.randint(base, maxh), random.randint(base, maxw)
|
86 |
-
return _resize_ratio(img, h/w)
|
87 |
-
else: return _resize_ratio(img, img.shape[0] / img.shape[1]) # keep aspect ratio
|
88 |
-
else: return _resize_ratio(img, img.shape[0] / img.shape[1]) # keep aspect ratio
|
89 |
-
|
90 |
-
def resize(self, img):
|
91 |
-
if self.multiscales: return self.resize_multiscales(img, cv2.BORDER_REPLICATE)
|
92 |
-
else: return cv2.resize(img, (self.img_w, self.img_h))
|
93 |
-
|
94 |
-
def get(self, idx):
|
95 |
-
with self.env.begin(write=False) as txn:
|
96 |
-
image_key, label_key = f'image-{idx+1:09d}', f'label-{idx+1:09d}'
|
97 |
-
try:
|
98 |
-
label = str(txn.get(label_key.encode()), 'utf-8') # label
|
99 |
-
label = re.sub('[^0-9a-zA-Z]+', '', label)
|
100 |
-
if self.check_length and self.max_length > 0:
|
101 |
-
if len(label) > self.max_length or len(label) <= 0:
|
102 |
-
#logging.info(f'Long or short text image is found: {self.name}, {idx}, {label}, {len(label)}')
|
103 |
-
return self._next_image(idx)
|
104 |
-
label = label[:self.max_length]
|
105 |
-
|
106 |
-
imgbuf = txn.get(image_key.encode()) # image
|
107 |
-
buf = six.BytesIO()
|
108 |
-
buf.write(imgbuf)
|
109 |
-
buf.seek(0)
|
110 |
-
with warnings.catch_warnings():
|
111 |
-
warnings.simplefilter("ignore", UserWarning) # EXIF warning from TiffPlugin
|
112 |
-
image = PIL.Image.open(buf).convert(self.convert_mode)
|
113 |
-
if self.is_training and not self._check_image(image):
|
114 |
-
#logging.info(f'Invalid image is found: {self.name}, {idx}, {label}, {len(label)}')
|
115 |
-
return self._next_image(idx)
|
116 |
-
except:
|
117 |
-
import traceback
|
118 |
-
traceback.print_exc()
|
119 |
-
logging.info(f'Corrupted image is found: {self.name}, {idx}, {label}, {len(label)}')
|
120 |
-
return self._next_image(idx)
|
121 |
-
return image, label, idx
|
122 |
-
|
123 |
-
def _process_training(self, image):
|
124 |
-
if self.data_aug: image = self.augment_tfs(image)
|
125 |
-
image = self.resize(np.array(image))
|
126 |
-
return image
|
127 |
-
|
128 |
-
def _process_test(self, image):
|
129 |
-
return self.resize(np.array(image)) # TODO:move is_training to here
|
130 |
-
|
131 |
-
def __getitem__(self, idx):
|
132 |
-
image, text, idx_new = self.get(idx)
|
133 |
-
if not self.is_training: assert idx == idx_new, f'idx {idx} != idx_new {idx_new} during testing.'
|
134 |
-
|
135 |
-
if self.is_training: image = self._process_training(image)
|
136 |
-
else: image = self._process_test(image)
|
137 |
-
if self.return_raw: return image, text
|
138 |
-
image = self.totensor(image)
|
139 |
-
|
140 |
-
length = tensor(len(text) + 1).to(dtype=torch.long) # one for end token
|
141 |
-
label = self.charset.get_labels(text, case_sensitive=self.case_sensitive)
|
142 |
-
label = tensor(label).to(dtype=torch.long)
|
143 |
-
if self.one_hot_y: label = onehot(label, self.charset.num_classes)
|
144 |
-
|
145 |
-
if self.return_idx: y = [label, length, idx_new]
|
146 |
-
else: y = [label, length]
|
147 |
-
return image, y
|
148 |
-
|
149 |
-
|
150 |
-
class TextDataset(Dataset):
|
151 |
-
def __init__(self,
|
152 |
-
path:PathOrStr,
|
153 |
-
delimiter:str='\t',
|
154 |
-
max_length:int=25,
|
155 |
-
charset_path:str='data/charset_36.txt',
|
156 |
-
case_sensitive=False,
|
157 |
-
one_hot_x=True,
|
158 |
-
one_hot_y=True,
|
159 |
-
is_training=True,
|
160 |
-
smooth_label=False,
|
161 |
-
smooth_factor=0.2,
|
162 |
-
use_sm=False,
|
163 |
-
**kwargs):
|
164 |
-
self.path = Path(path)
|
165 |
-
self.case_sensitive, self.use_sm = case_sensitive, use_sm
|
166 |
-
self.smooth_factor, self.smooth_label = smooth_factor, smooth_label
|
167 |
-
self.charset = CharsetMapper(charset_path, max_length=max_length+1)
|
168 |
-
self.one_hot_x, self.one_hot_y, self.is_training = one_hot_x, one_hot_y, is_training
|
169 |
-
if self.is_training and self.use_sm: self.sm = SpellingMutation(charset=self.charset)
|
170 |
-
|
171 |
-
dtype = {'inp': str, 'gt': str}
|
172 |
-
self.df = pd.read_csv(self.path, dtype=dtype, delimiter=delimiter, na_filter=False)
|
173 |
-
self.inp_col, self.gt_col = 0, 1
|
174 |
-
|
175 |
-
def __len__(self): return len(self.df)
|
176 |
-
|
177 |
-
def __getitem__(self, idx):
|
178 |
-
text_x = self.df.iloc[idx, self.inp_col]
|
179 |
-
text_x = re.sub('[^0-9a-zA-Z]+', '', text_x)
|
180 |
-
if not self.case_sensitive: text_x = text_x.lower()
|
181 |
-
if self.is_training and self.use_sm: text_x = self.sm(text_x)
|
182 |
-
|
183 |
-
length_x = tensor(len(text_x) + 1).to(dtype=torch.long) # one for end token
|
184 |
-
label_x = self.charset.get_labels(text_x, case_sensitive=self.case_sensitive)
|
185 |
-
label_x = tensor(label_x)
|
186 |
-
if self.one_hot_x:
|
187 |
-
label_x = onehot(label_x, self.charset.num_classes)
|
188 |
-
if self.is_training and self.smooth_label:
|
189 |
-
label_x = torch.stack([self.prob_smooth_label(l) for l in label_x])
|
190 |
-
x = [label_x, length_x]
|
191 |
-
|
192 |
-
text_y = self.df.iloc[idx, self.gt_col]
|
193 |
-
text_y = re.sub('[^0-9a-zA-Z]+', '', text_y)
|
194 |
-
if not self.case_sensitive: text_y = text_y.lower()
|
195 |
-
length_y = tensor(len(text_y) + 1).to(dtype=torch.long) # one for end token
|
196 |
-
label_y = self.charset.get_labels(text_y, case_sensitive=self.case_sensitive)
|
197 |
-
label_y = tensor(label_y)
|
198 |
-
if self.one_hot_y: label_y = onehot(label_y, self.charset.num_classes)
|
199 |
-
y = [label_y, length_y]
|
200 |
-
|
201 |
-
return x, y
|
202 |
-
|
203 |
-
def prob_smooth_label(self, one_hot):
|
204 |
-
one_hot = one_hot.float()
|
205 |
-
delta = torch.rand([]) * self.smooth_factor
|
206 |
-
num_classes = len(one_hot)
|
207 |
-
noise = torch.rand(num_classes)
|
208 |
-
noise = noise / noise.sum() * delta
|
209 |
-
one_hot = one_hot * (1 - delta) + noise
|
210 |
-
return one_hot
|
211 |
-
|
212 |
-
|
213 |
-
class SpellingMutation(object):
|
214 |
-
def __init__(self, pn0=0.7, pn1=0.85, pn2=0.95, pt0=0.7, pt1=0.85, charset=None):
|
215 |
-
"""
|
216 |
-
Args:
|
217 |
-
pn0: the prob of not modifying characters is (pn0)
|
218 |
-
pn1: the prob of modifying one characters is (pn1 - pn0)
|
219 |
-
pn2: the prob of modifying two characters is (pn2 - pn1),
|
220 |
-
and three (1 - pn2)
|
221 |
-
pt0: the prob of replacing operation is pt0.
|
222 |
-
pt1: the prob of inserting operation is (pt1 - pt0),
|
223 |
-
and deleting operation is (1 - pt1)
|
224 |
-
"""
|
225 |
-
super().__init__()
|
226 |
-
self.pn0, self.pn1, self.pn2 = pn0, pn1, pn2
|
227 |
-
self.pt0, self.pt1 = pt0, pt1
|
228 |
-
self.charset = charset
|
229 |
-
logging.info(f'the probs: pn0={self.pn0}, pn1={self.pn1} ' +
|
230 |
-
f'pn2={self.pn2}, pt0={self.pt0}, pt1={self.pt1}')
|
231 |
-
|
232 |
-
def is_digit(self, text, ratio=0.5):
|
233 |
-
length = max(len(text), 1)
|
234 |
-
digit_num = sum([t in self.charset.digits for t in text])
|
235 |
-
if digit_num / length < ratio: return False
|
236 |
-
return True
|
237 |
-
|
238 |
-
def is_unk_char(self, char):
|
239 |
-
# return char == self.charset.unk_char
|
240 |
-
return (char not in self.charset.digits) and (char not in self.charset.alphabets)
|
241 |
-
|
242 |
-
def get_num_to_modify(self, length):
|
243 |
-
prob = random.random()
|
244 |
-
if prob < self.pn0: num_to_modify = 0
|
245 |
-
elif prob < self.pn1: num_to_modify = 1
|
246 |
-
elif prob < self.pn2: num_to_modify = 2
|
247 |
-
else: num_to_modify = 3
|
248 |
-
|
249 |
-
if length <= 1: num_to_modify = 0
|
250 |
-
elif length >= 2 and length <= 4: num_to_modify = min(num_to_modify, 1)
|
251 |
-
else: num_to_modify = min(num_to_modify, length // 2) # smaller than length // 2
|
252 |
-
return num_to_modify
|
253 |
-
|
254 |
-
def __call__(self, text, debug=False):
|
255 |
-
if self.is_digit(text): return text
|
256 |
-
length = len(text)
|
257 |
-
num_to_modify = self.get_num_to_modify(length)
|
258 |
-
if num_to_modify <= 0: return text
|
259 |
-
|
260 |
-
chars = []
|
261 |
-
index = np.arange(0, length)
|
262 |
-
random.shuffle(index)
|
263 |
-
index = index[: num_to_modify]
|
264 |
-
if debug: self.index = index
|
265 |
-
for i, t in enumerate(text):
|
266 |
-
if i not in index: chars.append(t)
|
267 |
-
elif self.is_unk_char(t): chars.append(t)
|
268 |
-
else:
|
269 |
-
prob = random.random()
|
270 |
-
if prob < self.pt0: # replace
|
271 |
-
chars.append(random.choice(self.charset.alphabets))
|
272 |
-
elif prob < self.pt1: # insert
|
273 |
-
chars.append(random.choice(self.charset.alphabets))
|
274 |
-
chars.append(t)
|
275 |
-
else: # delete
|
276 |
-
continue
|
277 |
-
new_text = ''.join(chars[: self.charset.max_length-1])
|
278 |
-
return new_text if len(new_text) >= 1 else text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/mix.py
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Ways to transform interfaces to produce new interfaces
|
3 |
-
"""
|
4 |
-
import asyncio
|
5 |
-
import warnings
|
6 |
-
|
7 |
-
from gradio_client.documentation import document, set_documentation_group
|
8 |
-
|
9 |
-
import gradio
|
10 |
-
|
11 |
-
set_documentation_group("mix_interface")
|
12 |
-
|
13 |
-
|
14 |
-
@document()
|
15 |
-
class Parallel(gradio.Interface):
|
16 |
-
"""
|
17 |
-
Creates a new Interface consisting of multiple Interfaces in parallel (comparing their outputs).
|
18 |
-
The Interfaces to put in Parallel must share the same input components (but can have different output components).
|
19 |
-
|
20 |
-
Demos: interface_parallel, interface_parallel_load
|
21 |
-
Guides: advanced-interface-features
|
22 |
-
"""
|
23 |
-
|
24 |
-
def __init__(self, *interfaces: gradio.Interface, **options):
|
25 |
-
"""
|
26 |
-
Parameters:
|
27 |
-
interfaces: any number of Interface objects that are to be compared in parallel
|
28 |
-
options: additional kwargs that are passed into the new Interface object to customize it
|
29 |
-
Returns:
|
30 |
-
an Interface object comparing the given models
|
31 |
-
"""
|
32 |
-
outputs = []
|
33 |
-
|
34 |
-
for interface in interfaces:
|
35 |
-
if not (isinstance(interface, gradio.Interface)):
|
36 |
-
warnings.warn(
|
37 |
-
"Parallel requires all inputs to be of type Interface. "
|
38 |
-
"May not work as expected."
|
39 |
-
)
|
40 |
-
outputs.extend(interface.output_components)
|
41 |
-
|
42 |
-
async def parallel_fn(*args):
|
43 |
-
return_values_with_durations = await asyncio.gather(
|
44 |
-
*[interface.call_function(0, list(args)) for interface in interfaces]
|
45 |
-
)
|
46 |
-
return_values = [rv["prediction"] for rv in return_values_with_durations]
|
47 |
-
combined_list = []
|
48 |
-
for interface, return_value in zip(interfaces, return_values):
|
49 |
-
if len(interface.output_components) == 1:
|
50 |
-
combined_list.append(return_value)
|
51 |
-
else:
|
52 |
-
combined_list.extend(return_value)
|
53 |
-
if len(outputs) == 1:
|
54 |
-
return combined_list[0]
|
55 |
-
return combined_list
|
56 |
-
|
57 |
-
parallel_fn.__name__ = " | ".join([io.__name__ for io in interfaces])
|
58 |
-
|
59 |
-
kwargs = {
|
60 |
-
"fn": parallel_fn,
|
61 |
-
"inputs": interfaces[0].input_components,
|
62 |
-
"outputs": outputs,
|
63 |
-
}
|
64 |
-
kwargs.update(options)
|
65 |
-
super().__init__(**kwargs)
|
66 |
-
|
67 |
-
|
68 |
-
@document()
|
69 |
-
class Series(gradio.Interface):
|
70 |
-
"""
|
71 |
-
Creates a new Interface from multiple Interfaces in series (the output of one is fed as the input to the next,
|
72 |
-
and so the input and output components must agree between the interfaces).
|
73 |
-
|
74 |
-
Demos: interface_series, interface_series_load
|
75 |
-
Guides: advanced-interface-features
|
76 |
-
"""
|
77 |
-
|
78 |
-
def __init__(self, *interfaces: gradio.Interface, **options):
|
79 |
-
"""
|
80 |
-
Parameters:
|
81 |
-
interfaces: any number of Interface objects that are to be connected in series
|
82 |
-
options: additional kwargs that are passed into the new Interface object to customize it
|
83 |
-
Returns:
|
84 |
-
an Interface object connecting the given models
|
85 |
-
"""
|
86 |
-
|
87 |
-
async def connected_fn(*data):
|
88 |
-
for idx, interface in enumerate(interfaces):
|
89 |
-
# skip preprocessing for first interface since the Series interface will include it
|
90 |
-
if idx > 0 and not (interface.api_mode):
|
91 |
-
data = [
|
92 |
-
input_component.preprocess(data[i])
|
93 |
-
for i, input_component in enumerate(interface.input_components)
|
94 |
-
]
|
95 |
-
|
96 |
-
# run all of predictions sequentially
|
97 |
-
data = (await interface.call_function(0, list(data)))["prediction"]
|
98 |
-
if len(interface.output_components) == 1:
|
99 |
-
data = [data]
|
100 |
-
|
101 |
-
# skip postprocessing for final interface since the Series interface will include it
|
102 |
-
if idx < len(interfaces) - 1 and not (interface.api_mode):
|
103 |
-
data = [
|
104 |
-
output_component.postprocess(data[i])
|
105 |
-
for i, output_component in enumerate(
|
106 |
-
interface.output_components
|
107 |
-
)
|
108 |
-
]
|
109 |
-
|
110 |
-
if len(interface.output_components) == 1: # type: ignore
|
111 |
-
return data[0]
|
112 |
-
return data
|
113 |
-
|
114 |
-
for interface in interfaces:
|
115 |
-
if not (isinstance(interface, gradio.Interface)):
|
116 |
-
warnings.warn(
|
117 |
-
"Series requires all inputs to be of type Interface. May "
|
118 |
-
"not work as expected."
|
119 |
-
)
|
120 |
-
connected_fn.__name__ = " => ".join([io.__name__ for io in interfaces])
|
121 |
-
|
122 |
-
kwargs = {
|
123 |
-
"fn": connected_fn,
|
124 |
-
"inputs": interfaces[0].input_components,
|
125 |
-
"outputs": interfaces[-1].output_components,
|
126 |
-
"_api_mode": interfaces[0].api_mode, # TODO: set api_mode per-interface
|
127 |
-
}
|
128 |
-
kwargs.update(options)
|
129 |
-
super().__init__(**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-1e03cd90.css
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
.preview.svelte-w0jac3.svelte-w0jac3{display:flex;position:absolute;inset:0;flex-direction:column;z-index:var(--layer-2);backdrop-filter:blur(8px);background:var(--background-fill-primary);height:var(--size-full)}.fixed-height.svelte-w0jac3.svelte-w0jac3{min-height:var(--size-80);max-height:55vh}@media (min-width: 1280px){.fixed-height.svelte-w0jac3.svelte-w0jac3{min-height:450px}}.preview.svelte-w0jac3 img.svelte-w0jac3{width:var(--size-full);height:calc(var(--size-full) - 60px);object-fit:contain}.preview.svelte-w0jac3 img.with-caption.svelte-w0jac3{height:calc(var(--size-full) - 80px)}.caption.svelte-w0jac3.svelte-w0jac3{padding:var(--size-2) var(--size-3);overflow:hidden;color:var(--block-label-text-color);font-weight:var(--weight-semibold);text-align:center;text-overflow:ellipsis;white-space:nowrap}.thumbnails.svelte-w0jac3.svelte-w0jac3{display:flex;position:absolute;bottom:0;justify-content:center;align-items:center;gap:var(--spacing-lg);width:var(--size-full);height:var(--size-14);overflow-x:scroll}.thumbnail-item.svelte-w0jac3.svelte-w0jac3{--ring-color:transparent;position:relative;box-shadow:0 0 0 2px var(--ring-color),var(--shadow-drop);border:1px solid var(--border-color-primary);border-radius:var(--button-small-radius);background:var(--background-fill-secondary);aspect-ratio:var(--ratio-square);width:var(--size-full);height:var(--size-full);overflow:clip}.thumbnail-item.svelte-w0jac3.svelte-w0jac3:hover{--ring-color:var(--color-accent);filter:brightness(1.1)}.thumbnail-item.selected.svelte-w0jac3.svelte-w0jac3{--ring-color:var(--color-accent)}.thumbnail-small.svelte-w0jac3.svelte-w0jac3{flex:none;transform:scale(.9);transition:75ms;width:var(--size-9);height:var(--size-9)}.thumbnail-small.selected.svelte-w0jac3.svelte-w0jac3{--ring-color:var(--color-accent);transform:scale(1);border-color:var(--color-accent)}.thumbnail-small.svelte-w0jac3>img.svelte-w0jac3{width:var(--size-full);height:var(--size-full);overflow:hidden;object-fit:var(--object-fit)}.grid-wrap.svelte-w0jac3.svelte-w0jac3{position:relative;padding:var(--size-2);height:var(--size-full);overflow-y:auto}.grid-container.svelte-w0jac3.svelte-w0jac3{display:grid;position:relative;grid-template-rows:var(--grid-rows);grid-template-columns:var(--grid-cols);gap:var(--spacing-lg)}@media (min-width: 640px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--sm-grid-cols)}}@media (min-width: 768px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--md-grid-cols)}}@media (min-width: 1024px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--lg-grid-cols)}}@media (min-width: 1280px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--xl-grid-cols)}}@media (min-width: 1536px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--2xl-grid-cols)}}.thumbnail-lg.svelte-w0jac3>img.svelte-w0jac3{width:var(--size-full);height:var(--size-full);overflow:hidden;object-fit:var(--object-fit)}.thumbnail-lg.svelte-w0jac3:hover .caption-label.svelte-w0jac3{opacity:.5}.caption-label.svelte-w0jac3.svelte-w0jac3{position:absolute;right:var(--block-label-margin);bottom:var(--block-label-margin);z-index:var(--layer-1);border-top:1px solid var(--border-color-primary);border-left:1px solid 
var(--border-color-primary);border-radius:var(--block-label-radius);background:var(--background-fill-secondary);padding:var(--block-label-padding);max-width:80%;overflow:hidden;font-size:var(--block-label-text-size);text-align:left;text-overflow:ellipsis;white-space:nowrap}.icon-button.svelte-w0jac3.svelte-w0jac3{position:absolute;top:0;right:0;z-index:var(--layer-1)}
|
|
|
|
spaces/DaleChen/AutoGPT/autogpt/json_utils/__init__.py
DELETED
File without changes
|
spaces/Datasculptor/DescriptionGPT/detic/modeling/roi_heads/detic_fast_rcnn.py
DELETED
@@ -1,595 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import logging
|
3 |
-
import math
|
4 |
-
import json
|
5 |
-
import numpy as np
|
6 |
-
from typing import Dict, Union
|
7 |
-
import torch
|
8 |
-
from fvcore.nn import giou_loss, smooth_l1_loss
|
9 |
-
from torch import nn
|
10 |
-
from torch.nn import functional as F
|
11 |
-
import fvcore.nn.weight_init as weight_init
|
12 |
-
import detectron2.utils.comm as comm
|
13 |
-
from detectron2.config import configurable
|
14 |
-
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
|
15 |
-
from detectron2.structures import Boxes, Instances
|
16 |
-
from detectron2.utils.events import get_event_storage
|
17 |
-
from detectron2.modeling.box_regression import Box2BoxTransform
|
18 |
-
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
|
19 |
-
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
|
20 |
-
from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
|
21 |
-
|
22 |
-
from torch.cuda.amp import autocast
|
23 |
-
from ..utils import load_class_freq, get_fed_loss_inds
|
24 |
-
from .zero_shot_classifier import ZeroShotClassifier
|
25 |
-
|
26 |
-
__all__ = ["DeticFastRCNNOutputLayers"]
|
27 |
-
|
28 |
-
|
29 |
-
class DeticFastRCNNOutputLayers(FastRCNNOutputLayers):
|
30 |
-
@configurable
|
31 |
-
def __init__(
|
32 |
-
self,
|
33 |
-
input_shape: ShapeSpec,
|
34 |
-
*,
|
35 |
-
mult_proposal_score=False,
|
36 |
-
cls_score=None,
|
37 |
-
sync_caption_batch = False,
|
38 |
-
use_sigmoid_ce = False,
|
39 |
-
use_fed_loss = False,
|
40 |
-
ignore_zero_cats = False,
|
41 |
-
fed_loss_num_cat = 50,
|
42 |
-
dynamic_classifier = False,
|
43 |
-
image_label_loss = '',
|
44 |
-
use_zeroshot_cls = False,
|
45 |
-
image_loss_weight = 0.1,
|
46 |
-
with_softmax_prop = False,
|
47 |
-
caption_weight = 1.0,
|
48 |
-
neg_cap_weight = 1.0,
|
49 |
-
add_image_box = False,
|
50 |
-
debug = False,
|
51 |
-
prior_prob = 0.01,
|
52 |
-
cat_freq_path = '',
|
53 |
-
fed_loss_freq_weight = 0.5,
|
54 |
-
softmax_weak_loss = False,
|
55 |
-
**kwargs,
|
56 |
-
):
|
57 |
-
super().__init__(
|
58 |
-
input_shape=input_shape,
|
59 |
-
**kwargs,
|
60 |
-
)
|
61 |
-
self.mult_proposal_score = mult_proposal_score
|
62 |
-
self.sync_caption_batch = sync_caption_batch
|
63 |
-
self.use_sigmoid_ce = use_sigmoid_ce
|
64 |
-
self.use_fed_loss = use_fed_loss
|
65 |
-
self.ignore_zero_cats = ignore_zero_cats
|
66 |
-
self.fed_loss_num_cat = fed_loss_num_cat
|
67 |
-
self.dynamic_classifier = dynamic_classifier
|
68 |
-
self.image_label_loss = image_label_loss
|
69 |
-
self.use_zeroshot_cls = use_zeroshot_cls
|
70 |
-
self.image_loss_weight = image_loss_weight
|
71 |
-
self.with_softmax_prop = with_softmax_prop
|
72 |
-
self.caption_weight = caption_weight
|
73 |
-
self.neg_cap_weight = neg_cap_weight
|
74 |
-
self.add_image_box = add_image_box
|
75 |
-
self.softmax_weak_loss = softmax_weak_loss
|
76 |
-
self.debug = debug
|
77 |
-
|
78 |
-
if softmax_weak_loss:
|
79 |
-
assert image_label_loss in ['max_size']
|
80 |
-
|
81 |
-
if self.use_sigmoid_ce:
|
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        nn.init.constant_(self.cls_score.bias, bias_value)

        if self.use_fed_loss or self.ignore_zero_cats:
            freq_weight = load_class_freq(cat_freq_path, fed_loss_freq_weight)
            self.register_buffer('freq_weight', freq_weight)
        else:
            self.freq_weight = None

        if self.use_fed_loss and len(self.freq_weight) < self.num_classes:
            # assert self.num_classes == 11493
            print('Extending federated loss weight')
            self.freq_weight = torch.cat(
                [self.freq_weight,
                 self.freq_weight.new_zeros(
                    self.num_classes - len(self.freq_weight))]
            )

        assert (not self.dynamic_classifier) or (not self.use_fed_loss)
        input_size = input_shape.channels * \
            (input_shape.width or 1) * (input_shape.height or 1)

        if self.use_zeroshot_cls:
            del self.cls_score
            del self.bbox_pred
            assert cls_score is not None
            self.cls_score = cls_score
            self.bbox_pred = nn.Sequential(
                nn.Linear(input_size, input_size),
                nn.ReLU(inplace=True),
                nn.Linear(input_size, 4)
            )
            weight_init.c2_xavier_fill(self.bbox_pred[0])
            nn.init.normal_(self.bbox_pred[-1].weight, std=0.001)
            nn.init.constant_(self.bbox_pred[-1].bias, 0)

        if self.with_softmax_prop:
            self.prop_score = nn.Sequential(
                nn.Linear(input_size, input_size),
                nn.ReLU(inplace=True),
                nn.Linear(input_size, self.num_classes + 1),
            )
            weight_init.c2_xavier_fill(self.prop_score[0])
            nn.init.normal_(self.prop_score[-1].weight, mean=0, std=0.001)
            nn.init.constant_(self.prop_score[-1].bias, 0)

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret.update({
            'mult_proposal_score': cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE,
            'sync_caption_batch': cfg.MODEL.SYNC_CAPTION_BATCH,
            'use_sigmoid_ce': cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,
            'use_fed_loss': cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,
            'ignore_zero_cats': cfg.MODEL.ROI_BOX_HEAD.IGNORE_ZERO_CATS,
            'fed_loss_num_cat': cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT,
            'dynamic_classifier': cfg.MODEL.DYNAMIC_CLASSIFIER,
            'image_label_loss': cfg.MODEL.ROI_BOX_HEAD.IMAGE_LABEL_LOSS,
            'use_zeroshot_cls': cfg.MODEL.ROI_BOX_HEAD.USE_ZEROSHOT_CLS,
            'image_loss_weight': cfg.MODEL.ROI_BOX_HEAD.IMAGE_LOSS_WEIGHT,
            'with_softmax_prop': cfg.MODEL.ROI_BOX_HEAD.WITH_SOFTMAX_PROP,
            'caption_weight': cfg.MODEL.ROI_BOX_HEAD.CAPTION_WEIGHT,
            'neg_cap_weight': cfg.MODEL.ROI_BOX_HEAD.NEG_CAP_WEIGHT,
            'add_image_box': cfg.MODEL.ROI_BOX_HEAD.ADD_IMAGE_BOX,
            'debug': cfg.DEBUG or cfg.SAVE_DEBUG or cfg.IS_DEBUG,
            'prior_prob': cfg.MODEL.ROI_BOX_HEAD.PRIOR_PROB,
            'cat_freq_path': cfg.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH,
            'fed_loss_freq_weight': cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT,
            'softmax_weak_loss': cfg.MODEL.ROI_BOX_HEAD.SOFTMAX_WEAK_LOSS,
        })
        if ret['use_zeroshot_cls']:
            ret['cls_score'] = ZeroShotClassifier(cfg, input_shape)
        return ret

    def losses(self, predictions, proposals, \
        use_advanced_loss=True,
        classifier_info=(None,None,None)):
        """
        enable advanced loss
        """
        scores, proposal_deltas = predictions
        gt_classes = (
            cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
        )
        num_classes = self.num_classes
        if self.dynamic_classifier:
            _, cls_id_map = classifier_info[1]
            gt_classes = cls_id_map[gt_classes]
            num_classes = scores.shape[1] - 1
            assert cls_id_map[self.num_classes] == num_classes
        _log_classification_stats(scores, gt_classes)

        if len(proposals):
            proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)  # Nx4
            assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
            gt_boxes = cat(
                [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
                dim=0,
            )
        else:
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)

        if self.use_sigmoid_ce:
            loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)
        else:
            loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
        return {
            "loss_cls": loss_cls,
            "loss_box_reg": self.box_reg_loss(
                proposal_boxes, gt_boxes, proposal_deltas, gt_classes,
                num_classes=num_classes)
        }

    def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):
        if pred_class_logits.numel() == 0:
            return pred_class_logits.new_zeros([1])[0]  # This is more robust than .sum() * 0.

        B = pred_class_logits.shape[0]
        C = pred_class_logits.shape[1] - 1

        target = pred_class_logits.new_zeros(B, C + 1)
        target[range(len(gt_classes)), gt_classes] = 1  # B x (C + 1)
        target = target[:, :C]  # B x C

        weight = 1

        if self.use_fed_loss and (self.freq_weight is not None):  # fedloss
            appeared = get_fed_loss_inds(
                gt_classes,
                num_sample_cats=self.fed_loss_num_cat,
                C=C,
                weight=self.freq_weight)
            appeared_mask = appeared.new_zeros(C + 1)
            appeared_mask[appeared] = 1  # C + 1
            appeared_mask = appeared_mask[:C]
            fed_w = appeared_mask.view(1, C).expand(B, C)
            weight = weight * fed_w.float()
        if self.ignore_zero_cats and (self.freq_weight is not None):
            w = (self.freq_weight.view(-1) > 1e-4).float()
            weight = weight * w.view(1, C).expand(B, C)
            # import pdb; pdb.set_trace()

        cls_loss = F.binary_cross_entropy_with_logits(
            pred_class_logits[:, :-1], target, reduction='none')  # B x C
        loss = torch.sum(cls_loss * weight) / B
        return loss

    def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
        """
        change _no_instance handling
        """
        if pred_class_logits.numel() == 0:
            return pred_class_logits.new_zeros([1])[0]

        if self.ignore_zero_cats and (self.freq_weight is not None):
            zero_weight = torch.cat([
                (self.freq_weight.view(-1) > 1e-4).float(),
                self.freq_weight.new_ones(1)])  # C + 1
            loss = F.cross_entropy(
                pred_class_logits, gt_classes,
                weight=zero_weight, reduction="mean")
        elif self.use_fed_loss and (self.freq_weight is not None):  # fedloss
            C = pred_class_logits.shape[1] - 1
            appeared = get_fed_loss_inds(
                gt_classes,
                num_sample_cats=self.fed_loss_num_cat,
                C=C,
                weight=self.freq_weight)
            appeared_mask = appeared.new_zeros(C + 1).float()
            appeared_mask[appeared] = 1.  # C + 1
            appeared_mask[C] = 1.
            loss = F.cross_entropy(
                pred_class_logits, gt_classes,
                weight=appeared_mask, reduction="mean")
        else:
            loss = F.cross_entropy(
                pred_class_logits, gt_classes, reduction="mean")
        return loss

    def box_reg_loss(
        self, proposal_boxes, gt_boxes, pred_deltas, gt_classes,
        num_classes=-1):
        """
        Allow custom background index
        """
        num_classes = num_classes if num_classes > 0 else self.num_classes
        box_dim = proposal_boxes.shape[1]  # 4 or 5
        fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < num_classes))[0]
        if pred_deltas.shape[1] == box_dim:  # cls-agnostic regression
            fg_pred_deltas = pred_deltas[fg_inds]
        else:
            fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
                fg_inds, gt_classes[fg_inds]
            ]

        if self.box_reg_loss_type == "smooth_l1":
            gt_pred_deltas = self.box2box_transform.get_deltas(
                proposal_boxes[fg_inds],
                gt_boxes[fg_inds],
            )
            loss_box_reg = smooth_l1_loss(
                fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
            )
        elif self.box_reg_loss_type == "giou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                fg_pred_deltas, proposal_boxes[fg_inds]
            )
            loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
        return loss_box_reg / max(gt_classes.numel(), 1.0)

    def inference(self, predictions, proposals):
        """
        enable use proposal boxes
        """
        predictions = (predictions[0], predictions[1])
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        if self.mult_proposal_score:
            proposal_scores = [p.get('objectness_logits') for p in proposals]
            scores = [(s * ps[:, None]) ** 0.5 \
                for s, ps in zip(scores, proposal_scores)]
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )

    def predict_probs(self, predictions, proposals):
        """
        support sigmoid
        """
        # scores, _ = predictions
        scores = predictions[0]
        num_inst_per_image = [len(p) for p in proposals]
        if self.use_sigmoid_ce:
            probs = scores.sigmoid()
        else:
            probs = F.softmax(scores, dim=-1)
        return probs.split(num_inst_per_image, dim=0)

    def image_label_losses(self, predictions, proposals, image_labels, \
        classifier_info=(None,None,None), ann_type='image'):
        '''
        Inputs:
            scores: N x (C + 1)
            image_labels B x 1
        '''
        num_inst_per_image = [len(p) for p in proposals]
        scores = predictions[0]
        scores = scores.split(num_inst_per_image, dim=0)  # B x n x (C + 1)
        if self.with_softmax_prop:
            prop_scores = predictions[2].split(num_inst_per_image, dim=0)
        else:
            prop_scores = [None for _ in num_inst_per_image]
        B = len(scores)
        img_box_count = 0
        select_size_count = 0
        select_x_count = 0
        select_y_count = 0
        max_score_count = 0
        storage = get_event_storage()
        loss = scores[0].new_zeros([1])[0]
        caption_loss = scores[0].new_zeros([1])[0]
        for idx, (score, labels, prop_score, p) in enumerate(zip(
            scores, image_labels, prop_scores, proposals)):
            if score.shape[0] == 0:
                loss += score.new_zeros([1])[0]
                continue
            if 'caption' in ann_type:
                score, caption_loss_img = self._caption_loss(
                    score, classifier_info, idx, B)
                caption_loss += self.caption_weight * caption_loss_img
                if ann_type == 'caption':
                    continue

            if self.debug:
                p.selected = score.new_zeros(
                    (len(p),), dtype=torch.long) - 1
            for i_l, label in enumerate(labels):
                if self.dynamic_classifier:
                    if idx == 0 and i_l == 0 and comm.is_main_process():
                        storage.put_scalar('stats_label', label)
                    label = classifier_info[1][1][label]
                    assert label < score.shape[1]
                if self.image_label_loss in ['wsod', 'wsddn']:
                    loss_i, ind = self._wsddn_loss(score, prop_score, label)
                elif self.image_label_loss == 'max_score':
                    loss_i, ind = self._max_score_loss(score, label)
                elif self.image_label_loss == 'max_size':
                    loss_i, ind = self._max_size_loss(score, label, p)
                elif self.image_label_loss == 'first':
                    loss_i, ind = self._first_loss(score, label)
                elif self.image_label_loss == 'image':
                    loss_i, ind = self._image_loss(score, label)
                elif self.image_label_loss == 'min_loss':
                    loss_i, ind = self._min_loss_loss(score, label)
                else:
                    assert 0
                loss += loss_i / len(labels)
                if type(ind) == type([]):
                    img_box_count = sum(ind) / len(ind)
                    if self.debug:
                        for ind_i in ind:
                            p.selected[ind_i] = label
                else:
                    img_box_count = ind
                    select_size_count = p[ind].proposal_boxes.area() / \
                        (p.image_size[0] * p.image_size[1])
                    max_score_count = score[ind, label].sigmoid()
                    select_x_count = (p.proposal_boxes.tensor[ind, 0] + \
                        p.proposal_boxes.tensor[ind, 2]) / 2 / p.image_size[1]
                    select_y_count = (p.proposal_boxes.tensor[ind, 1] + \
                        p.proposal_boxes.tensor[ind, 3]) / 2 / p.image_size[0]
                    if self.debug:
                        p.selected[ind] = label

        loss = loss / B
        storage.put_scalar('stats_l_image', loss.item())
        if 'caption' in ann_type:
            caption_loss = caption_loss / B
            loss = loss + caption_loss
            storage.put_scalar('stats_l_caption', caption_loss.item())
        if comm.is_main_process():
            storage.put_scalar('pool_stats', img_box_count)
            storage.put_scalar('stats_select_size', select_size_count)
            storage.put_scalar('stats_select_x', select_x_count)
            storage.put_scalar('stats_select_y', select_y_count)
            storage.put_scalar('stats_max_label_score', max_score_count)

        return {
            'image_loss': loss * self.image_loss_weight,
            'loss_cls': score.new_zeros([1])[0],
            'loss_box_reg': score.new_zeros([1])[0]}

    def forward(self, x, classifier_info=(None,None,None)):
        """
        enable classifier_info
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = []

        if classifier_info[0] is not None:
            cls_scores = self.cls_score(x, classifier=classifier_info[0])
            scores.append(cls_scores)
        else:
            cls_scores = self.cls_score(x)
            scores.append(cls_scores)

        if classifier_info[2] is not None:
            cap_cls = classifier_info[2]
            if self.sync_caption_batch:
                caption_scores = self.cls_score(x, classifier=cap_cls[:, :-1])
            else:
                caption_scores = self.cls_score(x, classifier=cap_cls)
            scores.append(caption_scores)
        scores = torch.cat(scores, dim=1)  # B x C' or B x N or B x (C'+N)

        proposal_deltas = self.bbox_pred(x)
        if self.with_softmax_prop:
            prop_score = self.prop_score(x)
            return scores, proposal_deltas, prop_score
        else:
            return scores, proposal_deltas

    def _caption_loss(self, score, classifier_info, idx, B):
        assert (classifier_info[2] is not None)
        assert self.add_image_box
        cls_and_cap_num = score.shape[1]
        cap_num = classifier_info[2].shape[0]
        score, caption_score = score.split(
            [cls_and_cap_num - cap_num, cap_num], dim=1)
        # n x (C + 1), n x B
        caption_score = caption_score[-1:]  # 1 x B # -1: image level box
        caption_target = caption_score.new_zeros(
            caption_score.shape)  # 1 x B or 1 x MB, M: num machines
        if self.sync_caption_batch:
            # caption_target: 1 x MB
            rank = comm.get_rank()
            global_idx = B * rank + idx
            assert (classifier_info[2][
                global_idx, -1] - rank) ** 2 < 1e-8, \
                '{} {} {} {} {}'.format(
                    rank, global_idx,
                    classifier_info[2][global_idx, -1],
                    classifier_info[2].shape,
                    classifier_info[2][:, -1])
            caption_target[:, global_idx] = 1.
        else:
            assert caption_score.shape[1] == B
            caption_target[:, idx] = 1.
        caption_loss_img = F.binary_cross_entropy_with_logits(
            caption_score, caption_target, reduction='none')
        if self.sync_caption_batch:
            fg_mask = (caption_target > 0.5).float()
            assert (fg_mask.sum().item() - 1.) ** 2 < 1e-8, '{} {}'.format(
                fg_mask.shape, fg_mask)
            pos_loss = (caption_loss_img * fg_mask).sum()
            neg_loss = (caption_loss_img * (1. - fg_mask)).sum()
            caption_loss_img = pos_loss + self.neg_cap_weight * neg_loss
        else:
            caption_loss_img = caption_loss_img.sum()
        return score, caption_loss_img

    def _wsddn_loss(self, score, prop_score, label):
        assert prop_score is not None
        loss = 0
        final_score = score.sigmoid() * \
            F.softmax(prop_score, dim=0)  # B x (C + 1)
        img_score = torch.clamp(
            torch.sum(final_score, dim=0),
            min=1e-10, max=1-1e-10)  # (C + 1)
        target = img_score.new_zeros(img_score.shape)  # (C + 1)
        target[label] = 1.
        loss += F.binary_cross_entropy(img_score, target)
        ind = final_score[:, label].argmax()
        return loss, ind

    def _max_score_loss(self, score, label):
        loss = 0
        target = score.new_zeros(score.shape[1])
        target[label] = 1.
        ind = score[:, label].argmax().item()
        loss += F.binary_cross_entropy_with_logits(
            score[ind], target, reduction='sum')
        return loss, ind

    def _min_loss_loss(self, score, label):
        loss = 0
        target = score.new_zeros(score.shape)
        target[:, label] = 1.
        with torch.no_grad():
            x = F.binary_cross_entropy_with_logits(
                score, target, reduction='none').sum(dim=1)  # n
        ind = x.argmin().item()
        loss += F.binary_cross_entropy_with_logits(
            score[ind], target[0], reduction='sum')
        return loss, ind

    def _first_loss(self, score, label):
        loss = 0
        target = score.new_zeros(score.shape[1])
        target[label] = 1.
        ind = 0
        loss += F.binary_cross_entropy_with_logits(
            score[ind], target, reduction='sum')
        return loss, ind

    def _image_loss(self, score, label):
        assert self.add_image_box
        target = score.new_zeros(score.shape[1])
        target[label] = 1.
        ind = score.shape[0] - 1
        loss = F.binary_cross_entropy_with_logits(
            score[ind], target, reduction='sum')
        return loss, ind

    def _max_size_loss(self, score, label, p):
        loss = 0
        target = score.new_zeros(score.shape[1])
        target[label] = 1.
        sizes = p.proposal_boxes.area()
        ind = sizes[:-1].argmax().item() if len(sizes) > 1 else 0
        if self.softmax_weak_loss:
            loss += F.cross_entropy(
                score[ind:ind+1],
                score.new_tensor(label, dtype=torch.long).view(1),
                reduction='sum')
        else:
            loss += F.binary_cross_entropy_with_logits(
                score[ind], target, reduction='sum')
        return loss, ind


def put_label_distribution(storage, hist_name, hist_counts, num_classes):
    """
    """
    ht_min, ht_max = 0, num_classes
    hist_edges = torch.linspace(
        start=ht_min, end=ht_max, steps=num_classes + 1, dtype=torch.float32)

    hist_params = dict(
        tag=hist_name,
        min=ht_min,
        max=ht_max,
        num=float(hist_counts.sum()),
        sum=float((hist_counts * torch.arange(len(hist_counts))).sum()),
        sum_squares=float(((hist_counts * torch.arange(len(hist_counts))) ** 2).sum()),
        bucket_limits=hist_edges[1:].tolist(),
        bucket_counts=hist_counts.tolist(),
        global_step=storage._iter,
    )
    storage._histograms.append(hist_params)
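For reference, the sigmoid cross-entropy branch above builds its multi-label target by one-hot encoding the ground-truth classes over C + 1 columns and then dropping the background column. A minimal standalone sketch of that construction, with toy shapes chosen purely for illustration (not tensors from this repository):

import torch
import torch.nn.functional as F

# Toy batch: 3 proposals, C = 4 foreground classes plus one background column.
logits = torch.randn(3, 5)            # B x (C + 1) classification logits
gt_classes = torch.tensor([2, 4, 0])  # index C (= 4) denotes background

B, C = logits.shape[0], logits.shape[1] - 1
target = logits.new_zeros(B, C + 1)
target[range(B), gt_classes] = 1      # one-hot over C + 1 columns
target = target[:, :C]                # drop background -> B x C multi-label target

# BCE over the foreground logits only, normalized by the number of proposals.
loss = F.binary_cross_entropy_with_logits(
    logits[:, :-1], target, reduction='none').sum() / B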
spaces/Datasculptor/StyleGAN-NADA/e4e/models/encoders/__init__.py
DELETED
File without changes
|
spaces/Datasculptor/StyleGAN-NADA/styleclip/styleclip_global.py
DELETED
@@ -1,181 +0,0 @@
'''
Code adapted from Stitch it in Time by Tzaban et al.
https://github.com/rotemtzaban/STIT
'''

import numpy as np
import torch
from tqdm import tqdm
from pathlib import Path
import os

import clip

imagenet_templates = [
    'a bad photo of a {}.', 'a photo of many {}.', 'a sculpture of a {}.', 'a photo of the hard to see {}.', 'a low resolution photo of the {}.',
    'a rendering of a {}.', 'graffiti of a {}.', 'a bad photo of the {}.', 'a cropped photo of the {}.', 'a tattoo of a {}.',
    'the embroidered {}.', 'a photo of a hard to see {}.', 'a bright photo of a {}.', 'a photo of a clean {}.', 'a photo of a dirty {}.',
    'a dark photo of the {}.', 'a drawing of a {}.', 'a photo of my {}.', 'the plastic {}.', 'a photo of the cool {}.',
    'a close-up photo of a {}.', 'a black and white photo of the {}.', 'a painting of the {}.', 'a painting of a {}.', 'a pixelated photo of the {}.',
    'a sculpture of the {}.', 'a bright photo of the {}.', 'a cropped photo of a {}.', 'a plastic {}.', 'a photo of the dirty {}.',
    'a jpeg corrupted photo of a {}.', 'a blurry photo of the {}.', 'a photo of the {}.', 'a good photo of the {}.', 'a rendering of the {}.',
    'a {} in a video game.', 'a photo of one {}.', 'a doodle of a {}.', 'a close-up photo of the {}.', 'a photo of a {}.',
    'the origami {}.', 'the {} in a video game.', 'a sketch of a {}.', 'a doodle of the {}.', 'a origami {}.',
    'a low resolution photo of a {}.', 'the toy {}.', 'a rendition of the {}.', 'a photo of the clean {}.', 'a photo of a large {}.',
    'a rendition of a {}.', 'a photo of a nice {}.', 'a photo of a weird {}.', 'a blurry photo of a {}.', 'a cartoon {}.',
    'art of a {}.', 'a sketch of the {}.', 'a embroidered {}.', 'a pixelated photo of a {}.', 'itap of the {}.',
    'a jpeg corrupted photo of the {}.', 'a good photo of a {}.', 'a plushie {}.', 'a photo of the nice {}.', 'a photo of the small {}.',
    'a photo of the weird {}.', 'the cartoon {}.', 'art of the {}.', 'a drawing of the {}.', 'a photo of the large {}.',
    'a black and white photo of a {}.', 'the plushie {}.', 'a dark photo of a {}.', 'itap of a {}.', 'graffiti of the {}.',
    'a toy {}.', 'itap of my {}.', 'a photo of a cool {}.', 'a photo of a small {}.', 'a tattoo of the {}.',
]

CONV_CODE_INDICES = [(0, 512), (1024, 1536), (1536, 2048), (2560, 3072), (3072, 3584), (4096, 4608), (4608, 5120), (5632, 6144), (6144, 6656), (7168, 7680), (7680, 7936), (8192, 8448), (8448, 8576), (8704, 8832), (8832, 8896), (8960, 9024), (9024, 9056)]
FFHQ_CODE_INDICES = [(0, 512), (512, 1024), (1024, 1536), (1536, 2048), (2560, 3072), (3072, 3584), (4096, 4608), (4608, 5120), (5632, 6144), (6144, 6656), (7168, 7680), (7680, 7936), (8192, 8448), (8448, 8576), (8704, 8832), (8832, 8896), (8960, 9024), (9024, 9056)] + \
    [(2048, 2560), (3584, 4096), (5120, 5632), (6656, 7168), (7936, 8192), (8576, 8704), (8896, 8960), (9056, 9088)]

def zeroshot_classifier(model, classnames, templates, device):

    with torch.no_grad():
        zeroshot_weights = []
        for classname in tqdm(classnames):
            texts = [template.format(classname) for template in templates]  # format with class
            texts = clip.tokenize(texts).to(device)  # tokenize
            class_embeddings = model.encode_text(texts)  # embed with text encoder
            class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
            class_embedding = class_embeddings.mean(dim=0)
            class_embedding /= class_embedding.norm()
            zeroshot_weights.append(class_embedding)
        zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
    return zeroshot_weights

def expand_to_full_dim(partial_tensor):
    full_dim_tensor = torch.zeros(size=(1, 9088))

    start_idx = 0
    for conv_start, conv_end in CONV_CODE_INDICES:
        length = conv_end - conv_start
        full_dim_tensor[:, conv_start:conv_end] = partial_tensor[start_idx:start_idx + length]
        start_idx += length

    return full_dim_tensor

def get_direction(neutral_class, target_class, beta, di, clip_model=None):

    device = "cuda" if torch.cuda.is_available() else "cpu"

    if clip_model is None:
        clip_model, _ = clip.load("ViT-B/32", device=device)

    class_names = [neutral_class, target_class]
    class_weights = zeroshot_classifier(clip_model, class_names, imagenet_templates, device)

    dt = class_weights[:, 1] - class_weights[:, 0]
    dt = dt / dt.norm()

    dt = dt.float()
    di = di.float()

    relevance = di @ dt
    mask = relevance.abs() > beta
    direction = relevance * mask
    direction_max = direction.abs().max()
    if direction_max > 0:
        direction = direction / direction_max
    else:
        raise ValueError(f'Beta value {beta} is too high for mapping from {neutral_class} to {target_class},'
                         f' try setting it to a lower value')
    return direction

def style_tensor_to_style_dict(style_tensor, refernce_generator):
    style_layers = refernce_generator.modulation_layers

    style_dict = {}
    for layer_idx, layer in enumerate(style_layers):
        style_dict[layer] = style_tensor[:, FFHQ_CODE_INDICES[layer_idx][0]:FFHQ_CODE_INDICES[layer_idx][1]]

    return style_dict

def style_dict_to_style_tensor(style_dict, reference_generator):
    style_layers = reference_generator.modulation_layers

    style_tensor = torch.zeros(size=(1, 9088))
    for layer in style_dict:
        layer_idx = style_layers.index(layer)
        style_tensor[:, FFHQ_CODE_INDICES[layer_idx][0]:FFHQ_CODE_INDICES[layer_idx][1]] = style_dict[layer]

    return style_tensor

def project_code_with_styleclip(source_latent, source_class, target_class, alpha, beta, reference_generator, di, clip_model=None):
    edit_direction = get_direction(source_class, target_class, beta, di, clip_model)

    edit_full_dim = expand_to_full_dim(edit_direction)

    source_s = style_dict_to_style_tensor(source_latent, reference_generator)

    return source_s + alpha * edit_full_dim
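To see how these pieces fit together: `project_code_with_styleclip` builds a CLIP text direction between the two prompts, masks it by channel relevance, expands it to the full 9088-channel StyleSpace vector, and adds it (scaled by `alpha`) to the source style code. A minimal usage sketch; `generator`, `delta_i`, `source_latent`, and the prompts are hypothetical inputs, not objects defined in this file:

# `generator` is assumed to be a StyleGAN2 wrapper exposing `modulation_layers`,
# `delta_i` a precomputed global-direction matrix (channels x 512 CLIP dims),
# and `source_latent` a {modulation_layer: style tensor} dict from inversion.
edited_s = project_code_with_styleclip(
    source_latent,
    'face', 'sketch',              # neutral and target CLIP prompts
    alpha=4.0, beta=0.12,          # edit strength and relevance threshold
    reference_generator=generator,
    di=delta_i)
# The result is a 1 x 9088 StyleSpace tensor; split it back per layer before synthesis:
style_dict = style_tensor_to_style_dict(edited_s, generator)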
spaces/Detomo/ai-comic-generation/src/components/icons/full-screen.tsx
DELETED
@@ -1,16 +0,0 @@
export function FullScreenIcon() {
  return (
    <svg version="1.1" viewBox="0 0 14 14" width="24px" height="24px" xmlns="http://www.w3.org/2000/svg">
      <title/>
      <desc/>
      <defs/>
      <g fill="none" fill-rule="evenodd" id="Page-1" stroke="none" stroke-width="1">
        <g fill="currentColor" id="Core" transform="translate(-215.000000, -257.000000)">
          <g id="fullscreen" transform="translate(215.000000, 257.000000)">
            <path d="M2,9 L0,9 L0,14 L5,14 L5,12 L2,12 L2,9 L2,9 Z M0,5 L2,5 L2,2 L5,2 L5,0 L0,0 L0,5 L0,5 Z M12,12 L9,12 L9,14 L14,14 L14,9 L12,9 L12,12 L12,12 Z M9,0 L9,2 L12,2 L12,5 L14,5 L14,0 L9,0 L9,0 Z" id="Shape"/>
          </g>
        </g>
      </g>
    </svg>
  )
}
spaces/Dhrushreddy/profile1/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Profile1
emoji: 📊
colorFrom: pink
colorTo: indigo
sdk: streamlit
sdk_version: 1.21.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DiamondYin/Voice-ChatGPT-Streamlit-12/app.py
DELETED
@@ -1,293 +0,0 @@
import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
import math
import requests
import time

from datetime import datetime
from openai import ChatCompletion
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from collections import deque
from audio_recorder_streamlit import audio_recorder

def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

def transcribe_audio(openai_key, file_path, model):
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    with open(file_path, 'rb') as f:
        data = {'file': f}
        response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
    if response.status_code == 200:
        st.write(response.json())

        response2 = chat_with_model(response.json().get('text'), '')  # *************************************
        st.write('Responses:')
        #st.write(response)
        st.write(response2)
        return response.json().get('text')
    else:
        st.write(response.json())
        st.error("Error in API call.")
        return None

def save_and_play_audio(audio_recorder):
    audio_bytes = audio_recorder()
    if audio_bytes:
        filename = generate_filename("Recording", "wav")
        with open(filename, 'wb') as f:
            f.write(audio_bytes)
        st.audio(audio_bytes, format="audio/wav")
        return filename
    return None

def create_file(filename, prompt, response):
    if filename.endswith(".txt"):
        with open(filename, 'w') as file:
            file.write(f"{prompt}\n{response}")
    elif filename.endswith(".htm"):
        with open(filename, 'w') as file:
            file.write(f"{prompt} {response}")
    elif filename.endswith(".md"):
        with open(filename, 'w') as file:
            file.write(f"{prompt}\n\n{response}")

def truncate_document(document, length):
    return document[:length]
def divide_document(document, max_length):
    return [document[i:i+max_length] for i in range(0, len(document), max_length)]

def get_table_download_link(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    if ext == '.txt':
        mime_type = 'text/plain'
    elif ext == '.py':
        mime_type = 'text/plain'
    elif ext == '.xlsx':
        mime_type = 'text/plain'
    elif ext == '.csv':
        mime_type = 'text/plain'
    elif ext == '.htm':
        mime_type = 'text/html'
    elif ext == '.md':
        mime_type = 'text/markdown'
    else:
        mime_type = 'application/octet-stream'  # general binary data type
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href

def CompressXML(xml_text):
    root = ET.fromstring(xml_text)
    for elem in list(root.iter()):
        if isinstance(elem.tag, str) and 'Comment' in elem.tag:
            elem.parent.remove(elem)
    return ET.tostring(root, encoding='unicode', method="xml")

def read_file_content(file,max_length):
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html" or file.type == "text/htm":
        content = BeautifulSoup(file, "html.parser")
        return content.text
    elif file.type == "application/xml" or file.type == "text/xml":
        tree = ET.parse(file)
        root = tree.getroot()
        xml = CompressXML(ET.tostring(root, encoding='unicode'))
        return xml
    elif file.type == "text/markdown" or file.type == "text/md":
        md = mistune.create_markdown()
        content = md(file.read().decode())
        return content
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""

def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
    model = model_choice
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section)>0:
        conversation.append({'role': 'assistant', 'content': document_section})

    # iterate through the stream of events
    start_time = time.time()

    report = []
    res_box = st.empty()

    collected_chunks = []
    collected_messages = []

    for chunk in openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=conversation,
        temperature=0.5,
        stream=True
    ):

        collected_chunks.append(chunk)  # save the event response
        chunk_message = chunk['choices'][0]['delta']  # extract the message
        collected_messages.append(chunk_message)  # save the message

        content=chunk["choices"][0].get("delta",{}).get("content")

        try:
            report.append(content)
            if len(content) > 0:
                result = "".join(report).strip()
                #result = result.replace("\n", "")
                res_box.markdown(f'*{result}*')
        except:
            st.write('.')

    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    #st.write(f"Full conversation received: {full_reply_content}")
    st.write("Elapsed time:")
    st.write(time.time() - start_time)
    return full_reply_content

def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(file_content)>0:
        conversation.append({'role': 'assistant', 'content': file_content})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response['choices'][0]['message']['content']


def main():
    # Sidebar and global
    openai.api_key = os.getenv('OPENAI_KEY')
    st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
    menu = ["htm", "txt", "xlsx", "csv", "md", "py"]  #619
    choice = st.sidebar.selectbox("Output File Type:", menu)
    model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))

    # Audio, transcribe, GPT:
    filename = save_and_play_audio(audio_recorder)
    if filename is not None:
        transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
        st.write(transcription)
        gptOutput = chat_with_model(transcription, '', model_choice)  # *************************************
        filename = generate_filename(transcription, choice)
        create_file(filename, transcription, gptOutput)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)

    collength, colupload = st.columns([2,3])  # adjust the ratio as needed
    with collength:
        #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
        max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
    with colupload:
        uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])

    document_sections = deque()
    document_responses = {}

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file, max_length)
        document_sections.extend(divide_document(file_content, max_length))

    if len(document_sections) > 0:

        if st.button("👁️ View Upload"):
            st.markdown("**Sections of the uploaded file:**")
            for i, section in enumerate(list(document_sections)):
                st.markdown(f"**Section {i+1}**\n{section}")

        st.markdown("**Chat with the model:**")
        for i, section in enumerate(list(document_sections)):
            if i in document_responses:
                st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
            else:
                if st.button(f"Chat about Section {i+1}"):
                    st.write('Reasoning with your inputs...')
                    response = chat_with_model(user_prompt, section, model_choice)  # *************************************
                    st.write('Response:')
                    st.write(response)
                    document_responses[i] = response
                    filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                    create_file(filename, user_prompt, response)
                    st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    if st.button('💬 Chat'):
        st.write('Reasoning with your inputs...')
        response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice)  # *************************************
        st.write('Response:')
        st.write(response)

        filename = generate_filename(user_prompt, choice)
        create_file(filename, user_prompt, response)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    all_files = glob.glob("*.*")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20]  # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order

    # sidebar of files
    file_contents=''
    next_action=''
    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])  # adjust the ratio as needed
        with col1:
            if st.button("🌐", key="md_"+file):  # md emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='md'
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="open_"+file):  # open emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='open'
        with col4:
            if st.button("🔍", key="read_"+file):  # search emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action='search'
        with col5:
            if st.button("🗑", key="delete_"+file):
                os.remove(file)
                st.experimental_rerun()

    if len(file_contents) > 0:
        if next_action=='open':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
        if next_action=='md':
            st.markdown(file_contents)
        if next_action=='search':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
            st.write('Reasoning with your inputs...')
            #response = chat_with_file_contents(user_prompt, file_contents)
            response = chat_with_model(user_prompt, file_contents, model_choice)
            st.write('Response:')
            st.write(response)
            filename = generate_filename(file_content_area, choice)
            create_file(filename, file_content_area, response)
            st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

if __name__ == "__main__":
    main()
spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_instance.py
DELETED
@@ -1,53 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import logging
import numpy as np
import os
from PIL import Image

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json, register_coco_instances
from detectron2.utils.file_io import PathManager

ADE_CATEGORIES = [{'id': 7, 'name': 'bed'}, {'id': 8, 'name': 'windowpane'}, {'id': 10, 'name': 'cabinet'}, {'id': 12, 'name': 'person'}, {'id': 14, 'name': 'door'}, {'id': 15, 'name': 'table'}, {'id': 18, 'name': 'curtain'}, {'id': 19, 'name': 'chair'}, {'id': 20, 'name': 'car'}, {'id': 22, 'name': 'painting'}, {'id': 23, 'name': 'sofa'}, {'id': 24, 'name': 'shelf'}, {'id': 27, 'name': 'mirror'}, {'id': 30, 'name': 'armchair'}, {'id': 31, 'name': 'seat'}, {'id': 32, 'name': 'fence'}, {'id': 33, 'name': 'desk'}, {'id': 35, 'name': 'wardrobe'}, {'id': 36, 'name': 'lamp'}, {'id': 37, 'name': 'bathtub'}, {'id': 38, 'name': 'railing'}, {'id': 39, 'name': 'cushion'}, {'id': 41, 'name': 'box'}, {'id': 42, 'name': 'column'}, {'id': 43, 'name': 'signboard'}, {'id': 44, 'name': 'chest of drawers'}, {'id': 45, 'name': 'counter'}, {'id': 47, 'name': 'sink'}, {'id': 49, 'name': 'fireplace'}, {'id': 50, 'name': 'refrigerator'}, {'id': 53, 'name': 'stairs'}, {'id': 55, 'name': 'case'}, {'id': 56, 'name': 'pool table'}, {'id': 57, 'name': 'pillow'}, {'id': 58, 'name': 'screen door'}, {'id': 62, 'name': 'bookcase'}, {'id': 64, 'name': 'coffee table'}, {'id': 65, 'name': 'toilet'}, {'id': 66, 'name': 'flower'}, {'id': 67, 'name': 'book'}, {'id': 69, 'name': 'bench'}, {'id': 70, 'name': 'countertop'}, {'id': 71, 'name': 'stove'}, {'id': 72, 'name': 'palm'}, {'id': 73, 'name': 'kitchen island'}, {'id': 74, 'name': 'computer'}, {'id': 75, 'name': 'swivel chair'}, {'id': 76, 'name': 'boat'}, {'id': 78, 'name': 'arcade machine'}, {'id': 80, 'name': 'bus'}, {'id': 81, 'name': 'towel'}, {'id': 82, 'name': 'light'}, {'id': 83, 'name': 'truck'}, {'id': 85, 'name': 'chandelier'}, {'id': 86, 'name': 'awning'}, {'id': 87, 'name': 'streetlight'}, {'id': 88, 'name': 'booth'}, {'id': 89, 'name': 'television receiver'}, {'id': 90, 'name': 'airplane'}, {'id': 92, 'name': 'apparel'}, {'id': 93, 'name': 'pole'}, {'id': 95, 'name': 'bannister'}, {'id': 97, 'name': 'ottoman'}, {'id': 98, 'name': 'bottle'}, {'id': 102, 'name': 'van'}, {'id': 103, 'name': 'ship'}, {'id': 104, 'name': 'fountain'}, {'id': 107, 'name': 'washer'}, {'id': 108, 'name': 'plaything'}, {'id': 110, 'name': 'stool'}, {'id': 111, 'name': 'barrel'}, {'id': 112, 'name': 'basket'}, {'id': 115, 'name': 'bag'}, {'id': 116, 'name': 'minibike'}, {'id': 118, 'name': 'oven'}, {'id': 119, 'name': 'ball'}, {'id': 120, 'name': 'food'}, {'id': 121, 'name': 'step'}, {'id': 123, 'name': 'trade name'}, {'id': 124, 'name': 'microwave'}, {'id': 125, 'name': 'pot'}, {'id': 126, 'name': 'animal'}, {'id': 127, 'name': 'bicycle'}, {'id': 129, 'name': 'dishwasher'}, {'id': 130, 'name': 'screen'}, {'id': 132, 'name': 'sculpture'}, {'id': 133, 'name': 'hood'}, {'id': 134, 'name': 'sconce'}, {'id': 135, 'name': 'vase'}, {'id': 136, 'name': 'traffic light'}, {'id': 137, 'name': 'tray'}, {'id': 138, 'name': 'ashcan'}, {'id': 139, 'name': 'fan'}, {'id': 142, 'name': 'plate'}, {'id': 143, 'name': 'monitor'}, {'id': 144, 'name': 'bulletin board'}, {'id': 146, 'name': 'radiator'}, {'id': 147, 'name': 'glass'}, {'id': 148, 'name': 'clock'}, {'id': 149, 'name': 'flag'}]


_PREDEFINED_SPLITS = {
    # point annotations without masks
    "ade20k_instance_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_instance_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}


def _get_ade_instances_meta():
    thing_ids = [k["id"] for k in ADE_CATEGORIES]
    assert len(thing_ids) == 100, len(thing_ids)
    # Mapping from the incontiguous ADE category id to an id in [0, 99]
    thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
    thing_classes = [k["name"] for k in ADE_CATEGORIES]
    ret = {
        "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
        "thing_classes": thing_classes,
    }
    return ret


def register_all_ade20k_instance(root):
    for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
        # Assume pre-defined datasets live in `./datasets`.
        register_coco_instances(
            key,
            _get_ade_instances_meta(),
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )


_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_instance(_root)
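Once a module like this has been imported, the two ADE20K instance splits resolve through Detectron2's standard catalogs. A small sanity-check sketch; the import path and the presence of the dataset JSONs under $DETECTRON2_DATASETS are assumptions, not guaranteed by this file alone:

from detectron2.data import DatasetCatalog, MetadataCatalog
import mask2former.data.datasets.register_ade20k_instance  # noqa: F401, runs the registration above

val_dicts = DatasetCatalog.get("ade20k_instance_val")  # per-image records in COCO instance format
val_meta = MetadataCatalog.get("ade20k_instance_val")
print(len(val_dicts), len(val_meta.thing_classes))      # expects 100 thing classes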