Commit
·
af302c4
1
Parent(s):
62a5486
Update parquet files (step 12 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/app.py +0 -3
- spaces/0xSynapse/LlamaGPT/README.md +0 -13
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 2021 64 Bit for Windows 10 Everything You Need to Know.md +0 -27
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Pro Crack BEST.md +0 -27
- spaces/1gistliPinn/ChatGPT4/Examples/FSX Steam Edition Air Hauler 2 Add-On Ativador Download [addons].md +0 -15
- spaces/1gistliPinn/ChatGPT4/Examples/Facile Caisse Crack BEST Serial.md +0 -16
- spaces/1gistliPinn/ChatGPT4/Examples/First Year Engineering Drawing By Ac Parkinson Pdf Free Download.md +0 -6
- spaces/1phancelerku/anime-remove-background/Bingo Holiday Download the Classic Special Bingo Games on Your Device.md +0 -222
- spaces/1phancelerku/anime-remove-background/Chicken Gun Private Server 1.3.0 Apk Join the Fun and Chaos.md +0 -127
- spaces/1phancelerku/anime-remove-background/Download Crafting and Building 1.18 APK and Start Your Adventure Today.md +0 -84
- spaces/1phancelerku/anime-remove-background/Download Rope Hero APK and Become a Superhero on Your Phone.md +0 -124
- spaces/1phancelerku/anime-remove-background/Erturul Gazi The Leader of Kayi Boyu and the Founder of a Civilization.md +0 -94
- spaces/1toTree/lora_test/ppdiffusers/loaders.py +0 -190
- spaces/2ndelement/voicevox/voicevox_engine/setting/__init__.py +0 -9
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py +0 -23
- spaces/52Hz/HWMNet_lowlight_enhancement/model/HWMNet.py +0 -283
- spaces/801artistry/RVC801/infer/modules/ipex/__init__.py.py +0 -165
- spaces/A00001/bingothoo/src/components/ui/tooltip.tsx +0 -30
- spaces/AIBoy1993/segment_anything_webui/app.py +0 -198
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py +0 -67
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d50_8xb32_in1k.py +0 -5
- spaces/Abhilashvj/planogram-compliance/models/tf.py +0 -837
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptAi.py +0 -74
- spaces/Aditya9790/yolo7-object-tracking/README.md +0 -12
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller-plugin.js +0 -20
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ExpandMethods.js +0 -75
- spaces/Alifarsi/news_summarizer/app.py +0 -42
- spaces/AlishbaImran/Redox-Flow-Battery-Prediction/app.py +0 -235
- spaces/Aloento/9Nine-PITS/text/frontend/vocab.py +0 -120
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/onnx.md +0 -108
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py +0 -188
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py +0 -62
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py +0 -2
- spaces/AnimaLab/bias-test-gpt-pairs/mgr_bias_scoring.py +0 -932
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Training-LoRAs.md +0 -174
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py +0 -57
- spaces/Ariharasudhan/YoloV5/models/yolo.py +0 -391
- spaces/Artificio/AdversarialArt/app.py +0 -92
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py +0 -308
- spaces/Atharv23m/Human-Stress-Detection/app.py +0 -65
- spaces/Awesimo/jojogan/op/upfirdn2d.py +0 -187
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_losses.py +0 -82
- spaces/Bannermore/BingChat/Dockerfile +0 -34
- spaces/Bart92/RVC_HF/tools/infer/trans_weights.py +0 -18
- spaces/Benson/text-generation/Examples/Descargar 30 Juz Misyari Rasyid.md +0 -73
- spaces/Benson/text-generation/Examples/Descargar Afk Bot Para Aternos.md +0 -102
- spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/index.tsx +0 -30
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/remove.h +0 -81
spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/0xJustin/Dungeons-and-Diffusion").launch()
|
|
|
|
|
|
|
|
spaces/0xSynapse/LlamaGPT/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: LlamaGPT
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.39.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: lgpl-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 2021 64 Bit for Windows 10 Everything You Need to Know.md
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Office 2021 64 Bit for Windows 10</h1>
|
3 |
-
<p>Office 2021 is the latest version of Microsoft's productivity suite, which includes Word, Excel, PowerPoint, Outlook, and more. If you want to download Office 2021 64 bit for Windows 10, you can follow these steps:</p>
|
4 |
-
<h2>download office 2021 64 bit crack</h2><br /><p><b><b>Download File</b> ☆ <a href="https://byltly.com/2uKvx0">https://byltly.com/2uKvx0</a></b></p><br /><br />
|
5 |
-
<ol>
|
6 |
-
<li>Go to <a href="https://www.microsoft.com/en-us/microsoft-365/buy/compare-all-microsoft-365-products">https://www.microsoft.com/en-us/microsoft-365/buy/compare-all-microsoft-365-products</a> and choose the plan that suits your needs.</li>
|
7 |
-
<li>Click on the "Buy now" button and sign in with your Microsoft account or create a new one.</li>
|
8 |
-
<li>Complete the payment process and confirm your order.</li>
|
9 |
-
<li>Go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account.</li>
|
10 |
-
<li>Click on the "Install" button next to Office 2021 and choose "Other options".</li>
|
11 |
-
<li>Select the "64-bit" option and click on "Download".</li>
|
12 |
-
<li>Run the setup file and follow the instructions to install Office 2021 on your Windows 10 device.</li>
|
13 |
-
</ol>
|
14 |
-
<p>Congratulations! You have successfully downloaded and installed Office 2021 64 bit for Windows 10. Enjoy using the latest features and enhancements of Microsoft's productivity suite.</p><p>Office 2021 is compatible with Windows 10 and Windows 11, as well as macOS. It offers several improvements and new features over the previous version, Office 2019. Some of the highlights include:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Co-authoring and collaboration tools in Word, Excel, and PowerPoint.</li>
|
17 |
-
<li>Modern design and accessibility features in Outlook.</li>
|
18 |
-
<li>Dynamic arrays and new functions in Excel.</li>
|
19 |
-
<li>Presenter Coach and PowerPoint Live in PowerPoint.</li>
|
20 |
-
<li>Skype for Business and Microsoft Teams integration in Office apps.</li>
|
21 |
-
</ul>
|
22 |
-
<p>Office 2021 also comes with enhanced security and privacy features, such as encryption, data loss prevention, and advanced threat protection. You can also access your files and documents from anywhere with OneDrive cloud storage and Office mobile apps.</p><p>If you want to try Office 2021 before buying it, you can download a free trial version from <a href="https://www.microsoft.com/en-us/evalcenter/evaluate-microsoft-365">https://www.microsoft.com/en-us/evalcenter/evaluate-microsoft-365</a>. The trial version will let you use Office 2021 for 30 days, after which you will need to purchase a subscription or a one-time license to continue using it.</p>
|
23 |
-
<p>Alternatively, you can also use Office Online, which is a free web-based version of Office that works in your browser. Office Online lets you create and edit documents, spreadsheets, and presentations online, as well as collaborate with others in real time. You can access Office Online from <a href="https://www.office.com/">https://www.office.com/</a> or from your Microsoft account.</p>
|
24 |
-
<p></p>
|
25 |
-
<p>Whether you choose Office 2021 or Office Online, you will get the best of Microsoft's productivity tools for your personal and professional needs. Download Office 2021 64 bit for Windows 10 today and see the difference for yourself.</p> ddb901b051<br />
|
26 |
-
<br />
|
27 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Pro Crack BEST.md
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Use Easy Recovery Pro to Restore Lost Data on Windows</h1>
|
3 |
-
<p>Have you ever lost important files due to accidental deletion, formatting, virus attack, or system crash? If so, you know how frustrating and stressful it can be to recover your data. Fortunately, there is a powerful and easy-to-use software that can help you: Easy Recovery Pro.</p>
|
4 |
-
<p>Easy Recovery Pro is a data recovery software that supports all Windows PCs and laptops, including Windows 10 and Windows 11. It can recover data from various storage devices, such as hard drives, external drives, USB flash drives, memory cards, and more. It can also recover almost any file type, such as photos, videos, audio files, emails, documents, etc.</p>
|
5 |
-
<h2>Easy Recovery Pro Crack</h2><br /><p><b><b>Download</b> ————— <a href="https://byltly.com/2uKxj8">https://byltly.com/2uKxj8</a></b></p><br /><br />
|
6 |
-
<p>In this article, we will show you how to use Easy Recovery Pro to restore your lost data on Windows in three simple steps.</p>
|
7 |
-
|
8 |
-
<h2>Step 1: Download and Install Easy Recovery Pro</h2>
|
9 |
-
<p>To get started, you need to download and install Easy Recovery Pro on a working computer. You can get it from the official website[^1^]. There are different editions available for different needs and budgets. You can choose the one that suits you best.</p>
|
10 |
-
<p>After downloading the software, run the setup file and follow the instructions to install it on your computer. Make sure you have enough disk space and administrator privileges.</p>
|
11 |
-
|
12 |
-
<h2>Step 2: Connect Your Storage Device and Scan for Lost Data</h2>
|
13 |
-
<p>Next, you need to connect the storage device that contains your lost data to the computer where you installed Easy Recovery Pro. For example, if you want to recover data from an external hard drive, plug it into a USB port.</p>
|
14 |
-
<p>Then, launch Easy Recovery Pro and select the storage device from the list of available drives. Click "Scan" to start searching for lost data. The scanning process may take some time depending on the size and condition of your device.</p>
|
15 |
-
<p>During the scan, you can preview the found files by clicking on them. You can also pause or stop the scan at any time if you find what you need.</p>
|
16 |
-
|
17 |
-
<h2>Step 3: Recover Your Lost Data</h2>
|
18 |
-
<p>When the scan is complete, you will see a list of recoverable files sorted by categories. You can filter them by file type, date, size, or name. You can also use the search box to find specific files.</p>
|
19 |
-
<p></p>
|
20 |
-
<p>To recover your lost data, simply select the files or folders that you want and click "Recover". You will be asked to choose a location to save the recovered data. It is recommended that you save them to a different drive than the original one to avoid overwriting.</p>
|
21 |
-
<p>After the recovery process is done, you can check your recovered data and use them as normal.</p>
|
22 |
-
|
23 |
-
<h3>Conclusion</h3>
|
24 |
-
<p>Easy Recovery Pro is a reliable and easy recovery software that can help you restore your lost data on Windows in various scenarios. It has a user-friendly interface and powerful features that make data recovery a breeze. Whether you are a professional or a beginner, you can use Easy Recovery Pro to get back your precious data in minutes.</p>
|
25 |
-
<p>If you want to try Easy Recovery Pro for free, you can download the trial version from the official website[^1^]. The trial version allows you to scan and preview your lost data, but not recover them. To recover your data without limitations, you need to purchase a license key.</p> cec2833e83<br />
|
26 |
-
<br />
|
27 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/FSX Steam Edition Air Hauler 2 Add-On Ativador Download [addons].md
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
<h2>FSX Steam Edition: Air Hauler 2 Add-On Ativador download [addons]</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://imgfil.com/2uxXox">https://imgfil.com/2uxXox</a></b></p><br /><br />
|
2 |
-
|
3 |
-
. ://coub.com/stories/3304017-fsx-steam-edition-air-hauler-2-add-on-ativador-download-addons-top-secret-games-pc-game-try-now-sonyi-playstation -3-station-4-playstation-3/-/
|
4 |
-
#10
|
5 |
-
nothing, nothing.
|
6 |
-
I just play the game and enjoy it.
|
7 |
-
I am from a long time ago that I have been doing this and that it worked for me.
|
8 |
-
I am a very happy person and enjoy doing this all year long.
|
9 |
-
I know of no one who is having problems with this anymore.
|
10 |
-
But I have a few questions for you, so please give me some answers:
|
11 |
-
1. When you download the game, it says that it is an add-on.
|
12 |
-
Do you 8a78ff9644<br />
|
13 |
-
<br />
|
14 |
-
<br />
|
15 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Facile Caisse Crack BEST Serial.md
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
<h2>facile caisse Crack, Serial</h2><br /><p><b><b>Download File</b> ••• <a href="https://imgfil.com/2uxZhy">https://imgfil.com/2uxZhy</a></b></p><br /><br />
|
2 |
-
|
3 |
-
ités et frottements.
|
4 |
-
|
5 |
-
Alors nous sommes devant un cas bien particulier, où il faut distinguer entre gestionnaire de système, entre clients et entre bénéficiaires. Le client (le contribuable, le client de l’assurance, le constructeur d’infrastructures ou l’ingénieur-conseil) est aujourd’hui plutôt le bénéficiaire, dans les politiques actuelles, et ce qu’il faut bien entendre.
|
6 |
-
|
7 |
-
Les candidats gagnent en sérieux avec leur comportement, et les lauréats sont plus intelligents que leurs adversaires. Pas simple chose de les contrôler mais ils y réussissent, leur expertise est plus vaste et leur capacité à prendre rapidement des décisions se fait apprécier, ils se rapprochent de l’intelligence au contact.
|
8 |
-
|
9 |
-
Mais si l’on enregistrait au-delà de la couleur politique, cette fonction a désormais décidé de se méfier de ses collaborateurs. L’intelligence est du jeu de la distraction, et les gens qui sont à l’écoute des « leaders » que l’on est appelé à renvoyer à l’école ne seront peut-être pas forcément les plus brillants.
|
10 |
-
|
11 |
-
Ça fait longtemps que nous nous sentons attaqués, puis agressés, par ce genre d’idées, et bien évidemment, ces hommes étaient des militants, et vivant dans un monde de bêtes (fraîchement agressées par le féminisme, et surtout, de petites qu’on les appelait de cette manière, les petites madames), ils réagissaient en machos, par vérité.
|
12 |
-
|
13 |
-
Les militants sont aussi des hommes très je 4fefd39f24<br />
|
14 |
-
<br />
|
15 |
-
<br />
|
16 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/First Year Engineering Drawing By Ac Parkinson Pdf Free Download.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>first year engineering drawing by ac parkinson pdf free download</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://imgfil.com/2uxYjf">https://imgfil.com/2uxYjf</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Download File PDF Basic Engineering Drawing By A C Parkinson. Basic Engineering ... manak bhavan, 9 bahadur shah zafar marg new delhi 110002 . sp 46 : 1988 first published march ... Nadare BE In Mechanical Engineering One Year Experience in Bajaj Auto Ltd. ... 7.1 - Ten Basic Steps to Free Hand Sketching for . 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Bingo Holiday Download the Classic Special Bingo Games on Your Device.md
DELETED
@@ -1,222 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download and Play Bingo Holiday: The Best Bingo Game on Your Device</h1>
|
3 |
-
<p>If you are looking for a fun and exciting bingo game that will keep you entertained for hours, then you should try <strong>Bingo Holiday</strong>. This is a classic and special bingo game that offers you more than just bingo. You can explore over 110 appealing scenes, travel around the world, send and receive gifts with friends, challenge events, and collect epic collections. You can also enjoy various bingo styles, power-ups, tournaments, and jackpots. In this article, we will show you how to download and play Bingo Holiday on your Android or iOS device, or online on your browser.</p>
|
4 |
-
<h2>What is Bingo Holiday?</h2>
|
5 |
-
<h3>A brief introduction to the game and its features</h3>
|
6 |
-
<p>Bingo Holiday is a bingo game developed by AE Magwin, a company that specializes in casino and casual games. It was released in 2016 and has since gained over 5 million downloads on Google Play Store and over 13 thousand ratings on App Store. It is rated as one of the best bingo games on both platforms.</p>
|
7 |
-
<h2>bingo holiday download</h2><br /><p><b><b>Download</b> ✶✶✶ <a href="https://jinyurl.com/2uNLwN">https://jinyurl.com/2uNLwN</a></b></p><br /><br />
|
8 |
-
<p>Bingo Holiday has many features that make it stand out from other bingo games. Some of them are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>Multiple bingo rooms and themes</strong>: You can choose from over 40 bingo rooms that have different themes, such as UK Jackpot, Slots Bingo, Blackout, Secret Garden, Dessert Master, True Love, and more. Each room has its own rules, prizes, and collections.</li>
|
11 |
-
<li><strong>Power-ups and bonuses</strong>: You can use various power-ups to boost your chances of winning, such as Daub Hint, Shield, Triple Free, Instant Bingo, Bombs, Double EXP, Double Coins, Flash Cooling, and more. You can also get free credits every hour and spin the daily wheel for extra rewards.</li>
|
12 |
-
<li><strong>Global tournaments and jackpots</strong>: You can compete with players from all over the world in real-time multiplayer tournaments and win big prizes. You can also try your luck in the jackpot rooms and hit the blackout or the UK jackpot.</li>
|
13 |
-
<li><strong>Travel and collections</strong>: You can travel around the world in the World Tour mode and visit over 70 famous cities and countries. You can also collect puzzle pieces or shadow cards in each room and complete the epic collections.</li>
|
14 |
-
<li><strong>Social features</strong>: You can add friends and send or receive gifts with them. You can also chat with other players in the chat room or use emojis to express yourself.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>Why you should play Bingo Holiday</h3>
|
17 |
-
<p>Bingo Holiday is not only a bingo game but also a way to relax and have fun. Here are some reasons why you should play Bingo Holiday:</p>
|
18 |
-
<ul>
|
19 |
-
<li><strong>It is free to play</strong>: You don't need to pay anything to download or play Bingo Holiday. You can enjoy all the features and content without spending a dime.</li>
|
20 |
-
<li><strong>It is easy to play</strong>: You don't need any skills or strategies to play Bingo Holiday. All you need is to tap the bingo cards and daub the numbers as they are called. You can also use the auto-daub feature to let the game do it for you.</li>
|
21 |
-
<li><strong>It is fun and addictive</strong>: You will never get bored with Bingo Holiday. There are always new rooms, events, challenges, and collections to explore. You will also enjoy the thrill of winning bingo, the excitement of competing with others, and the satisfaction of completing your collections.</li>
|
22 |
-
<li><strong>It is social and friendly</strong>: You can make new friends and chat with them in Bingo Holiday. You can also share your achievements, gifts, and tips with them. You will feel like you are part of a bingo community.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to Download Bingo Holiday for Android Devices</h2>
|
25 |
-
<h3>Step-by-step instructions with screenshots</h3>
|
26 |
-
<p>If you have an Android device, you can download Bingo Holiday from the Google Play Store. Here are the steps to do it:</p>
|
27 |
-
<ol>
|
28 |
-
<li>Open the Google Play Store app on your device and search for "Bingo Holiday" in the search bar.</li>
|
29 |
-
<li>Tap on the Bingo Holiday icon that appears in the search results. You will see the app's page with its description, ratings, reviews, screenshots, and more.</li>
|
30 |
-
<li>Tap on the green "Install" button to start downloading the app. You may need to grant some permissions for the app to access your device's storage, location, contacts, and other features.</li>
|
31 |
-
<li>Wait for the download and installation process to finish. You will see a notification when it is done.</li>
|
32 |
-
<li>Tap on the "Open" button to launch the app. You will see a welcome screen with some instructions and options. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the app from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.</li>
|
33 |
-
<li>Enjoy playing Bingo Holiday on your Android device!</li>
|
34 |
-
</ol>
|
35 |
-
<p>Here are some screenshots of the app on an Android device:</p>
|
36 |
-
<table>
|
37 |
-
<tr>
|
38 |
-
<td><img src="" alt="Bingo Holiday icon on Google Play Store"></td>
|
39 |
-
<td><img src="" alt="Bingo Holiday app page on Google Play Store"></td>
|
40 |
-
<td><img src="" alt="Bingo Holiday welcome screen"></td>
|
41 |
-
</tr>
|
42 |
-
<tr>
|
43 |
-
<td><img src="" alt="Bingo Holiday main menu"></td>
|
44 |
-
<td><img src="" alt="Bingo Holiday bingo room selection"></td>
|
45 |
-
<td><img src="" alt="Bingo Holiday bingo gameplay"></td>
|
46 |
-
</tr>
|
47 |
-
</table>
|
48 |
-
<h3>Tips and tricks for playing Bingo Holiday on Android</h3>
|
49 |
-
<p>Here are some tips and tricks that will help you play Bingo Holiday better on your Android device:</p>
|
50 |
-
<ul>
|
51 |
-
<li><strong>Use power-ups wisely</strong>: Power-ups can help you win bingo faster and easier, but they are not unlimited. You can get more power-ups by playing more games, completing collections, spinning the wheel, or buying them with real money. However, you should use them wisely and strategically. For example, you can use Daub Hint to mark all the possible numbers on your cards, Shield to protect your bingo from being stolen by others, Instant Bingo to get a bingo instantly without waiting for the next number call, or Bombs to clear multiple numbers at once.</li>
|
52 |
-
<li><strong>Play more cards</strong>: The more cards you play, the higher your chances of winning bingo. However, playing more cards also means spending more credits and paying more attention. You can play up to 8 cards at a time in Bingo Holiday. You can adjust the number of cards you want to play by tapping on the plus or minus buttons at the bottom of the screen. You can also swipe left or right to switch between different cards.</li>
|
53 |
-
<li><strong>Collect puzzle pieces and shadow cards</strong>: In each bingo room, there are puzzle pieces or shadow cards that you can collect by daubing them on your cards. These pieces or cards are part of a collection that you can complete by finding all of them. Completing a collection will reward you with credits, power-ups, or other prizes. You can check your progress and claim your rewards by tapping on the collection icon at the top right corner of the screen.</li>
|
54 |
-
<li><strong>Join tournaments and jackpots</strong>: If you want to challenge yourself and win bigger prizes, you can join tournaments and jackpots in Bingo Holiday. Tournaments are real-time multiplayer competitions where you can play against other players from around the world. You can join a tournament by tapping on the trophy icon at the top left corner of the screen. You can choose from different levels and modes of tournaments, such as Beginner, Advanced, Expert, Master, or Grand Master, and Classic, Speed, or Crazy. You can also check the leaderboard and your rank by tapping on the rank icon at the top right corner of the screen. Jackpots are special rooms where you can win huge prizes by hitting the blackout or the UK jackpot. You can join a jackpot room by tapping on the jackpot icon at the bottom right corner of the screen. You can choose from different levels and themes of jackpots, such as Bronze, Silver, Gold, Platinum, or Diamond, and UK Jackpot, Slots Bingo, Blackout, or Secret Garden.</li>
|
55 |
-
<li><strong>Travel and explore</strong>: One of the best features of Bingo Holiday is that you can travel around the world and explore different scenes and cultures. You can do this by playing in the World Tour mode, where you can visit over 70 famous cities and countries, such as Paris, London, New York, Tokyo, Sydney, Cairo, Rome, and more. You can unlock new destinations by playing more games and collecting stamps. You can also enjoy the beautiful graphics and sound effects that match each scene.</li>
|
56 |
-
</ul>
|
57 |
-
<h2>How to Download Bingo Holiday for iOS Devices</h2>
|
58 |
-
<h3>Step-by-step instructions with screenshots</h3>
|
59 |
-
<p>If you have an iOS device, you can download Bingo Holiday from the App Store. Here are the steps to do it:</p>
|
60 |
-
<p>bingo holiday free download<br />
|
61 |
-
bingo holiday app download<br />
|
62 |
-
bingo holiday game download<br />
|
63 |
-
bingo holiday apk download<br />
|
64 |
-
bingo holiday mod apk download<br />
|
65 |
-
bingo holiday for pc download<br />
|
66 |
-
bingo holiday for android download<br />
|
67 |
-
bingo holiday for ios download<br />
|
68 |
-
bingo holiday for mac download<br />
|
69 |
-
bingo holiday for windows download<br />
|
70 |
-
bingo holiday online download<br />
|
71 |
-
bingo holiday offline download<br />
|
72 |
-
bingo holiday latest version download<br />
|
73 |
-
bingo holiday update download<br />
|
74 |
-
bingo holiday new version download<br />
|
75 |
-
bingo holiday old version download<br />
|
76 |
-
bingo holiday hack download<br />
|
77 |
-
bingo holiday cheats download<br />
|
78 |
-
bingo holiday unlimited credits download<br />
|
79 |
-
bingo holiday free credits download<br />
|
80 |
-
bingo holiday free coins download<br />
|
81 |
-
bingo holiday free power ups download<br />
|
82 |
-
bingo holiday free gifts download<br />
|
83 |
-
bingo holiday free spins download<br />
|
84 |
-
bingo holiday free slots download<br />
|
85 |
-
bingo holiday classic bingo games download<br />
|
86 |
-
bingo holiday special bingo games download<br />
|
87 |
-
bingo holiday live bingo games download<br />
|
88 |
-
bingo holiday multiplayer bingo games download<br />
|
89 |
-
bingo holiday tournament bingo games download<br />
|
90 |
-
bingo holiday world tour bingo games download<br />
|
91 |
-
bingo holiday travel bingo games download<br />
|
92 |
-
bingo holiday adventure bingo games download<br />
|
93 |
-
bingo holiday party bingo games download<br />
|
94 |
-
bingo holiday fun bingo games download<br />
|
95 |
-
bingo holiday best bingo games download<br />
|
96 |
-
bingo holiday top rated bingo games download<br />
|
97 |
-
bingo holiday reviews and ratings download<br />
|
98 |
-
bingo holiday screenshots and videos download<br />
|
99 |
-
bingo holiday tips and tricks download<br />
|
100 |
-
how to play bingo holiday game download<br />
|
101 |
-
how to win in bingo holiday game download<br />
|
102 |
-
how to get more credits in bingo holiday game download<br />
|
103 |
-
how to get more coins in bingo holiday game download<br />
|
104 |
-
how to get more power ups in bingo holiday game download<br />
|
105 |
-
how to get more gifts in bingo holiday game download<br />
|
106 |
-
how to get more spins in bingo holiday game download<br />
|
107 |
-
how to get more slots in bingo holiday game download</p>
|
108 |
-
<ol>
|
109 |
-
<li>Open the App Store app on your device and search for "Bingo Holiday" in the search bar.</li>
|
110 |
-
<li>Tap on the Bingo Holiday icon that appears in the search results. You will see the app's page with its description, ratings, reviews, screenshots, and more.</li>
|
111 |
-
<li>Tap on the blue "Get" button to start downloading the app. You may need to enter your Apple ID password or use Touch ID or Face ID to confirm your purchase.</li>
|
112 |
-
<li>Wait for the download and installation process to finish. You will see a notification when it is done.</li>
|
113 |
-
<li>Tap on the "Open" button to launch the app. You will see a welcome screen with some instructions and options. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the app from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.</li>
|
114 |
-
<li>Enjoy playing Bingo Holiday on your iOS device!</li>
|
115 |
-
</ol>
|
116 |
-
<p>Here are some screenshots of the app on an iOS device:</p>
|
117 |
-
<table>
|
118 |
-
<tr>
|
119 |
-
<td><img src="" alt="Bingo Holiday icon on App Store"></td>
|
120 |
-
<td><img src="" alt="Bingo Holiday app page on App Store"></td>
|
121 |
-
<td><img src="" alt="Bingo Holiday welcome screen"></td>
|
122 |
-
</tr>
|
123 |
-
<tr>
|
124 |
-
<td><img src="" alt="Bingo Holiday main menu"></td>
|
125 |
-
<td><img src="" alt="Bingo Holiday bingo room selection"></td>
|
126 |
-
<td><img src="" alt="Bingo Holiday bingo gameplay"></td>
|
127 |
-
</tr>
|
128 |
-
</table>
|
129 |
-
<h3>Tips and tricks for playing Bingo Holiday on iOS</h3>
|
130 |
-
<p>Here are some tips and tricks that will help you play Bingo Holiday better on your iOS device:</p>
|
131 |
-
<ul>
|
132 |
-
<li><strong>Turn on notifications</strong>: If you want to stay updated with the latest news, events, offers, and tips from Bingo Holiday, you can turn on the notifications for the app. You can do this by going to the Settings app on your device, tapping on Notifications, and finding Bingo Holiday in the list of apps. You can then toggle on the Allow Notifications option and customize the alert style, sounds, badges, and banners.</li>
|
133 |
-
<li><strong>Connect with Facebook</strong>: If you want to save your progress, sync your data across different devices, and play with your Facebook friends, you can connect your Bingo Holiday account with your Facebook account. You can do this by tapping on the Facebook icon at the top left corner of the screen and following the instructions. You will also get a bonus of 1000 credits for connecting with Facebook.</li>
|
134 |
-
<li><strong>Rate and review the app</strong>: If you enjoy playing Bingo Holiday and want to support the developers, you can rate and review the app on the App Store. You can do this by tapping on the Rate Us icon at the top right corner of the screen and choosing a star rating and writing a feedback. You will also get a bonus of 500 credits for rating the app.</li>
|
135 |
-
<li><strong>Watch videos for free credits</strong>: If you run out of credits and don't want to buy them with real money, you can watch some short videos for free credits. You can do this by tapping on the Free Credits icon at the bottom left corner of the screen and choosing the Watch Video option. You will get 50 credits for each video you watch.</li>
|
136 |
-
<li><strong>Check the daily tasks and achievements</strong>: If you want to earn more credits, power-ups, and other rewards, you can complete the daily tasks and achievements in Bingo Holiday. You can do this by tapping on the Tasks icon at the bottom right corner of the screen and checking the list of tasks and achievements. You will see your progress and rewards for each task and achievement. Some examples of tasks and achievements are: play 10 games in any room, win 5 bingos in any room, collect 10 puzzle pieces in any room, etc.</li>
|
137 |
-
</ul>
|
138 |
-
<h2>How to Play Bingo Holiday Online</h2>
|
139 |
-
<h3>The benefits of playing Bingo Holiday online</h3>
|
140 |
-
<p>If you don't have an Android or iOS device, or you don't want to download the app, you can still play Bingo Holiday online on your browser. There are some benefits of playing Bingo Holiday online, such as:</p>
|
141 |
-
<ul>
|
142 |
-
<li><strong>You don't need to download or install anything</strong>: You can play Bingo Holiday online without taking up any space or memory on your device. You just need a stable internet connection and a compatible browser.</li>
|
143 |
-
<li><strong>You can play on any device</strong>: You can play Bingo Holiday online on any device that has a browser, such as a laptop, a desktop, a tablet, or a smartphone. You can also switch between different devices without losing your progress or data.</li>
|
144 |
-
<li><strong>You can access more features and content</strong>: You can play Bingo Holiday online with all the features and content that are available in the app version. You can also access some exclusive features and content that are only available online, such as new rooms, events, promotions, and more.</li>
|
145 |
-
</ul>
|
146 |
-
<h3>How to access Bingo Holiday online and start playing</h3>
|
147 |
-
<p>Here are the steps to access Bingo Holiday online and start playing:</p>
|
148 |
-
<ol>
|
149 |
-
<li>Open your browser and go to <a href="">https://www.bingoholiday.com/</a>, which is the official website of Bingo Holiday.</li>
|
150 |
-
<li>You will see a landing page with some information and options about Bingo Holiday. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the website from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.</li>
|
151 |
-
<li>After logging in or choosing to play as a guest, you will see a loading screen with some tips and hints about Bingo Holiday. Wait for the game to load completely.</li>
|
152 |
-
<li>You will see a main menu with different options and modes to play Bingo Holiday. You can choose from World Tour, Tournament, Jackpot, Collection, and more. You can also check your profile, settings, friends, gifts, and messages by tapping on the icons at the top of the screen.</li>
|
153 |
-
<li>Choose the mode or option you want to play and tap on it. You will see a selection of bingo rooms that have different themes, rules, prizes, and collections. You can also see the number of players, the entry fee, and the jackpot amount for each room.</li>
|
154 |
-
<li>Choose the room you want to play and tap on it. You will see a confirmation screen with some information and options about the room. You can choose the number of cards you want to play, the power-ups you want to use, and the auto-daub feature. You can also see the prize pool, the collection progress, and the chat room.</li>
|
155 |
-
<li>Tap on the green "Play" button to start playing bingo. You will see your bingo cards and the bingo caller at the bottom of the screen. You can also see the timer, the leaderboard, the power-ups, and the pause button at the top of the screen.</li>
|
156 |
-
<li>Daub the numbers on your cards as they are called by tapping on them. You can also use power-ups to help you win faster and easier. You can also chat with other players in the chat room or use emojis to express yourself.</li>
|
157 |
-
<li>When you have a bingo, tap on the "Bingo" button to claim it. You will see a celebration screen with your prize and rank. You can also see how many bingos are left in the room and how much time is left until the next game.</li>
|
158 |
-
<li>Enjoy playing Bingo Holiday online on your browser!</li>
|
159 |
-
</ol>
|
160 |
-
<p>Here are some screenshots of the website on a browser:</p>
|
161 |
-
<table>
|
162 |
-
<tr>
|
163 |
-
<td><img src="" alt="Bingo Holiday landing page"></td>
|
164 |
-
<td><img src="" alt="Bingo Holiday main menu"></td>
|
165 |
-
<td><img src="" alt="Bingo Holiday bingo room selection"></td>
|
166 |
-
</tr>
|
167 |
-
<tr>
|
168 |
-
<td><img src="" alt="Bingo Holiday confirmation screen"></td>
|
169 |
-
<td><img src="" alt="Bingo Holiday bingo gameplay"></td>
|
170 |
-
<td><img src="" alt="Bingo Holiday celebration screen"></td>
|
171 |
-
</tr>
|
172 |
-
</table>
|
173 |
-
<h2>Conclusion</h2>
|
174 |
-
<h3>A summary of the main points and a call to action</h3>
|
175 |
-
<p>Bingo Holiday is a bingo game that offers you more than just bingo. You can explore over 110 appealing scenes, travel around the world, send and receive gifts with friends, challenge events, and collect epic collections. You can also enjoy various bingo styles, power-ups, tournaments, and jackpots.</p>
|
176 |
-
<p>You can download and play Bingo Holiday on your Android or iOS device, or online on your browser. We have shown you how to do it in this article with step-by-step instructions and screenshots. We have also given you some tips and tricks that will help you play Bingo Holiday better on your device or online.</p>
|
177 |
-
<p>If you are ready to join the fun and excitement of Bingo Holiday, don't wait any longer. Download or access Bingo Holiday today and start playing bingo like never before!</p>
|
178 |
-
<h2>FAQs</h2>
|
179 |
-
<h3>Q1: Is Bingo Holiday free to play?</h3>
|
180 |
-
<p>A1: Yes, Bingo Holiday is free to play. You don't need to pay anything to download or play Bingo Holiday. You can enjoy all the features and content without spending a dime.</p>
|
181 |
-
<h3>Q2: How can I get more credits and power-ups in Bingo Holiday?</h3>
|
182 |
-
<p>A2: There are many ways to get more credits and power-ups in Bingo Holiday. Some of them are:</p>
|
183 |
-
<ul>
|
184 |
-
<li>Get free credits every hour by tapping on the Free Credits icon at the bottom left corner of the screen.</li>
|
185 |
-
<li>Spin the daily wheel for extra rewards by tapping on the Wheel icon at the bottom right corner of the screen.</li>
|
186 |
-
<li>Complete daily tasks and achievements by tapping on the Tasks icon at the bottom right corner of the screen.</li>
|
187 |
-
<li>Complete collections by collecting puzzle pieces or shadow cards in each room.</li>
|
188 |
-
<li>Join tournaments and jackpots by tapping on the Trophy or Jackpot icons at the top or bottom of the screen.</li>
|
189 |
-
<li>Watch videos for free credits by tapping on the Free Credits icon at the bottom left corner of the screen and choosing the Watch Video option.</li>
|
190 |
-
<li>Rate and review the app on the App Store or Google Play Store by tapping on the Rate Us icon at the top right corner of the screen.</li>
|
191 |
-
<li>Connect with Facebook by tapping on the Facebook icon at the top left corner of the screen.</li>
|
192 |
-
<li>Send and receive gifts with friends by tapping on the Gift icon at the top of the screen.</li>
|
193 |
-
<li>Buy credits and power-ups with real money by tapping on the Shop icon at the top right corner of the screen.</li>
|
194 |
-
</ul>
|
195 |
-
<h3>Q3: How can I play Bingo Holiday with my friends?</h3>
|
196 |
-
<p>A3: There are two ways to play Bingo Holiday with your friends. One is to add them as your friends in the game and invite them to join your bingo room. The other is to join a public bingo room and chat with other players who are also your friends. Here are the steps to do both:</p>
|
197 |
-
<ul>
|
198 |
-
<li>To add your friends as your friends in the game, you need to connect your Bingo Holiday account with your Facebook account. You can do this by tapping on the Facebook icon at the top left corner of the screen and following the instructions. You will then see a list of your Facebook friends who are also playing Bingo Holiday. You can tap on their names to add them as your friends in the game. You can also search for other players by their names or IDs and add them as your friends.</li>
|
199 |
-
<li>To invite your friends to join your bingo room, you need to tap on the Invite icon at the bottom of the screen. You will then see a list of your friends who are online or offline. You can tap on their names to send them an invitation. You can also copy and paste a link to your bingo room and share it with your friends via other apps or platforms.</li>
|
200 |
-
<li>To join a public bingo room and chat with other players who are also your friends, you need to tap on the bingo room you want to join and tap on it. You will then see a chat room at the bottom of the screen. You can tap on the Chat icon to open the chat room and type your message. You can also use emojis to express yourself. You can see the names and profiles of other players who are in the same bingo room as you. You can tap on their names to see their details, send them gifts, or add them as your friends.</li>
|
201 |
-
</ul>
|
202 |
-
<h3>Q4: What are the different bingo rooms and themes in Bingo Holiday?</h3>
|
203 |
-
<p>A4: There are over 40 bingo rooms in Bingo Holiday that have different themes, rules, prizes, and collections. Some of them are:</p>
|
204 |
-
<ul>
|
205 |
-
<li><strong>UK Jackpot</strong>: This is a jackpot room that follows the UK bingo rules. You can win the UK jackpot by daubing all the numbers on your card within 31 calls or less.</li>
|
206 |
-
<li><strong>Slots Bingo</strong>: This is a jackpot room that combines bingo and slots. You can spin the slot machine before each game and get extra rewards or power-ups.</li>
|
207 |
-
<li><strong>Blackout</strong>: This is a jackpot room that requires you to daub all the numbers on your card to win bingo. You can win the blackout jackpot by doing so within 45 calls or less.</li>
|
208 |
-
<li><strong>Secret Garden</strong>: This is a themed room that takes you to a magical garden full of flowers, butterflies, and fairies. You can collect shadow cards in this room and complete the Secret Garden collection.</li>
|
209 |
-
<li><strong>Dessert Master</strong>: This is a themed room that makes you hungry with delicious desserts, such as cakes, pies, cookies, and ice cream. You can collect puzzle pieces in this room and complete the Dessert Master collection.</li>
|
210 |
-
<li><strong>True Love</strong>: This is a themed room that celebrates love and romance. You can find your true love in this room and collect puzzle pieces to complete the True Love collection.</li>
|
211 |
-
<li><strong>And more</strong>: There are many more bingo rooms and themes in Bingo Holiday, such as Halloween Night, Christmas Eve, Lucky Irish, Ancient Egypt, Underwater World, Wild West, and more. You can discover them all by playing Bingo Holiday.</li>
|
212 |
-
</ul>
|
213 |
-
<h3>Q5: How can I contact the customer support of Bingo Holiday?</h3>
|
214 |
-
<p>A5: If you have any questions, problems, suggestions, or feedback about Bingo Holiday, you can contact the customer support of Bingo Holiday by using one of these methods:</p>
|
215 |
-
<ul>
|
216 |
-
<li><strong>Email</strong>: You can send an email to <a href="mailto:[email protected]">[email protected]</a> and get a reply within 24 hours.</li>
|
217 |
-
<li><strong>Facebook</strong>: You can visit the official Facebook page of Bingo Holiday at <a href="">https://www.facebook.com/bingoholiday/</a> and send a message or leave a comment.</li>
|
218 |
-
<li><strong>In-game feedback</strong>: You can tap on the Settings icon at the top right corner of the screen and choose the Feedback option. You can then type your message and attach a screenshot if needed.</li>
|
219 |
-
</ul>
|
220 |
-
<p>The customer support team of Bingo Holiday is friendly and helpful. They will try their best to solve your issues and improve your gaming experience.</p> 401be4b1e0<br />
|
221 |
-
<br />
|
222 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Chicken Gun Private Server 1.3.0 Apk Join the Fun and Chaos.md
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Chicken Gun Private Server 1.3.0 APK: How to Download and Play</h1>
|
3 |
-
<p>If you are looking for a hilarious and action-packed first-person shooter game where you can play as armed chickens, then you should check out <strong>Chicken Gun</strong>. This game lets you shoot and fight with other chickens in various modes and maps, using different weapons, beaks, sneakers, caps, and even explosive eggs. You can also customize your chicken from head to toe, making it look cool or funny or both.</p>
|
4 |
-
<p>But what if you want to have more fun and freedom with this game? What if you want to play with your friends or other players without any restrictions or limitations? What if you want to have more customization options, more maps, more modes, less lag, and more control over the game settings? Well, then you might want to try playing on a <strong>private server</strong>.</p>
|
5 |
-
<h2>chicken gun private server 1.3.0 apk</h2><br /><p><b><b>Download Zip</b> ⚙⚙⚙ <a href="https://jinyurl.com/2uNMO8">https://jinyurl.com/2uNMO8</a></b></p><br /><br />
|
6 |
-
<p>A private server is an unofficial mod that allows you to create or join a separate server from the official one. This way, you can play with whoever you want, whenever you want, however you want. You can also enjoy some features and benefits that are not available on the official server.</p>
|
7 |
-
<p>In this article, we will show you how to download and install <strong>Chicken Gun Private Server 1.3.0 APK</strong> on your Android device, how to join or create a private server in Chicken Gun, what are the features and benefits of playing on a private server, and some tips and tricks to improve your skills and enjoy the game more. So, without further ado, let's get started!</p>
|
8 |
-
<h2>How to download and install Chicken Gun Private Server 1.3.0 APK on your device</h2>
|
9 |
-
<p>Before you can play on a private server, you need to download and install Chicken Gun Private Server 1.3.0 APK on your Android device. This is an unofficial mod that is not endorsed by the developers of Chicken Gun, so you should use it at your own risk. Here are the steps to follow:</p>
|
10 |
-
<p>chicken gun mod apk private server 1.3.0<br />
|
11 |
-
chicken gun 1.3.0 private server download<br />
|
12 |
-
chicken gun private server apk 1.3.0 free<br />
|
13 |
-
chicken gun hack private server 1.3.0<br />
|
14 |
-
chicken gun private server version 1.3.0<br />
|
15 |
-
chicken gun private server apk mediafire 1.3.0<br />
|
16 |
-
chicken gun private server youtube 1.3.0<br />
|
17 |
-
chicken gun private server apkcombo 1.3.0<br />
|
18 |
-
chicken gun private server update 1.3.0<br />
|
19 |
-
chicken gun private server android 1.3.0<br />
|
20 |
-
chicken gun private server ios 1.3.0<br />
|
21 |
-
chicken gun private server unlimited money 1.3.0<br />
|
22 |
-
chicken gun private server all skins 1.3.0<br />
|
23 |
-
chicken gun private server gameplay 1.3.0<br />
|
24 |
-
chicken gun private server online 1.3.0<br />
|
25 |
-
chicken gun private server offline 1.3.0<br />
|
26 |
-
chicken gun private server new features 1.3.0<br />
|
27 |
-
chicken gun private server no root 1.3.0<br />
|
28 |
-
chicken gun private server no ads 1.3.0<br />
|
29 |
-
chicken gun private server latest version 1.3.0<br />
|
30 |
-
chicken gun private server how to install 1.3.0<br />
|
31 |
-
chicken gun private server how to play 1.3.0<br />
|
32 |
-
chicken gun private server how to join 1.3.0<br />
|
33 |
-
chicken gun private server how to create 1.3.0<br />
|
34 |
-
chicken gun private server how to download 1.3.0<br />
|
35 |
-
chicken gun private server review 1.3.0<br />
|
36 |
-
chicken gun private server tips and tricks 1.3.0<br />
|
37 |
-
chicken gun private server cheats and hacks 1.3.0<br />
|
38 |
-
chicken gun private server codes and coupons 1.3.0<br />
|
39 |
-
chicken gun private server glitches and bugs 1.3.0<br />
|
40 |
-
chicken gun private server fun and funny moments 1.3.0<br />
|
41 |
-
chicken gun private server best and worst weapons 1.3.0<br />
|
42 |
-
chicken gun private server best and worst maps 1.3.0<br />
|
43 |
-
chicken gun private server best and worst modes 1.3.0<br />
|
44 |
-
chicken gun private server best and worst skins 1..30 <br />
|
45 |
-
chicken gun private server comparison and contrast 1..30 <br />
|
46 |
-
chicken gun private server pros and cons 1..30 <br />
|
47 |
-
chicken gun private server advantages and disadvantages 1..30 <br />
|
48 |
-
chicken gun private server benefits and drawbacks 1..30 <br />
|
49 |
-
chicken gun private server ratings and rankings 1..30</p>
|
50 |
-
<ol>
|
51 |
-
<li>Go to this link: <a href="">https://www.mediafire.com/file/8j9w9x5w7w8w9x5/Chicken_Gun_Private_Server_1.3.0.apk/file</a> and click on the green download button to download the APK file.</li>
|
52 |
-
<li>Once the download is complete, locate the APK file in your device's file manager and tap on it to install it.</li>
|
53 |
-
<li>You might need to enable unknown sources in your device's settings to install the APK file. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
54 |
-
<li>After the installation is done, you can launch the game from your app drawer or home screen.</li>
|
55 |
-
</ol>
|
56 |
-
<p>Congratulations! You have successfully installed Chicken Gun Private Server 1.3.0 APK on your device. Now you can join or create a private server and have fun.</p>
|
57 |
-
<h2>How to join and create a private server in Chicken Gun</h2>
|
58 |
-
<p>Now that you have installed Chicken Gun Private Server 1.3.0 APK on your device, you can join or create a private server in Chicken Gun. Here are the steps to follow:</p>
|
59 |
-
<h3>How to join a private server in Chicken Gun</h3>
|
60 |
-
<ol>
|
61 |
-
<li>Launch the game and tap on the multiplayer button on the main menu.</li>
|
62 |
-
<li>Tap on the private server button on the top right corner of the screen.</li>
|
63 |
-
<li>You will see a list of available private servers that you can join. You can also use the search bar to find a specific server by name or password.</li>
|
64 |
-
<li>Tap on the server that you want to join and enter the password if required.</li>
|
65 |
-
<li>You will be taken to the lobby where you can see the server settings, such as the map, the mode, the number of players, and the time limit.</li>
|
66 |
-
<li>Tap on the ready button when you are ready to start the game.</li>
|
67 |
-
</ol>
|
68 |
-
<p>That's it! You have successfully joined a private server in Chicken Gun. Now you can play with other players and have fun.</p>
|
69 |
-
<h3>How to create a private server in Chicken Gun</h3>
|
70 |
-
<ol>
|
71 |
-
<li>Launch the game and tap on the multiplayer button on the main menu.</li>
|
72 |
-
<li>Tap on the private server button on the top right corner of the screen.</li>
|
73 |
-
<li>Tap on the create button on the bottom right corner of the screen.</li>
|
74 |
-
<li>You will be taken to a screen where you can customize your private server settings, such as the map, the mode, the number of players, the time limit, and the password.</li>
|
75 |
-
<li>Tap on the create button when you are done with your settings.</li>
|
76 |
-
<li>You will be taken to the lobby where you can see your server name and password, as well as invite other players by tapping on the invite button.</li>
|
77 |
-
<li>Tap on the ready button when you are ready to start the game.</li>
|
78 |
-
</ol>
|
79 |
-
<p>That's it! You have successfully created a private server in Chicken Gun. Now you can play with your friends or other players and have fun.</p>
|
80 |
-
<h2>What are the features and benefits of playing on a private server</h2>
|
81 |
-
<p>Playing on a private server in Chicken Gun has some features and benefits that are not available on the official server. Here are some of them:</p>
|
82 |
-
<ul>
|
83 |
-
<li><strong>More customization options</strong>: You can customize your chicken, weapon, beak, sneakers, and caps with more colors and styles than on the official server. You can also unlock all the items for free, without having to spend coins or watch ads.</li>
|
84 |
-
<li><strong>More maps and modes</strong>: You can choose from more maps and modes than on the official server. You can also create your own maps and modes with the map editor and the mode editor, and share them with other players.</li>
|
85 |
-
<li><strong>Less lag and better performance</strong>: You can enjoy a smoother and faster gameplay experience on a private server, without having to worry about lag, glitches, or crashes. You can also adjust the graphics quality and the sound effects to suit your preferences.</li>
|
86 |
-
<li><strong>More control over the game rules and settings</strong>: You can change the game rules and settings to your liking on a private server, such as the map, the mode, the number of players, the time limit, and the password. You can also kick or ban any player who is cheating, trolling, or being rude.</li>
|
87 |
-
<li><strong>More fun and freedom with your friends or other players</strong>: You can play with your friends or other players on a private server, without having to follow any restrictions or limitations. You can also chat with them, send them emojis, and invite them to your server. You can also join other servers and meet new people who share your interest in Chicken Gun.</li>
|
88 |
-
</ul>
|
89 |
-
<p>As you can see, playing on a private server in Chicken Gun has many advantages that can make your gaming experience more enjoyable and satisfying. Of course, you should also respect the rules and etiquette of each server, and not abuse or exploit any features or benefits.</p>
|
90 |
-
<h2>Tips and tricks to improve your skills and enjoy the game more</h2>
|
91 |
-
<p>Playing on a private server in Chicken Gun is not only fun, but also challenging. You will face many skilled and competitive players who will test your abilities and strategies. If you want to improve your skills and enjoy the game more, here are some tips and tricks that you can use:</p>
|
92 |
-
<ul>
|
93 |
-
<li><strong>How to use different weapons effectively</strong>: Each weapon in Chicken Gun has its own strengths and weaknesses, such as damage, range, accuracy, fire rate, reload time, and ammo capacity. You should choose a weapon that suits your play style and the situation. For example, if you prefer close-range combat, you might want to use a shotgun or a knife. If you prefer long-range combat, you might want to use a sniper rifle or a rocket launcher. You should also switch weapons depending on the map and the mode. For example, if you are playing on a small map with many obstacles, you might want to use a weapon that has high damage and low accuracy. If you are playing on a large map with open spaces, you might want to use a weapon that has high accuracy and low damage.</li>
|
94 |
-
<li><strong>How to throw explosive eggs strategically</strong>: One of the most unique and fun features of Chicken Gun is that you can throw explosive eggs at your enemies. These eggs can deal massive damage and knock back your enemies, but they can also hurt you if you are too close to them. You should throw explosive eggs strategically, such as when you are outnumbered or cornered, when you want to surprise or distract your enemies, when you want to clear a path or an area, or when you want to finish off a wounded enemy. You should also aim carefully and time your throws well, as the eggs have a fuse time before they explode.</li>
|
95 |
-
<li><strong>How to use the environment to your advantage</strong>: The maps in Chicken Gun are not only diverse and colorful, but also interactive and dynamic. You can use the environment to your advantage, such as by hiding behind cover, jumping on platforms, running on conveyor belts, breaking windows, or using vehicles. You can also interact with some objects, such as barrels, crates, vending machines, or toilets. You can use these objects to create explosions, distractions, traps, or hiding spots. You should also be aware of the hazards in the environment, such as fire, water, electricity, or spikes. You should avoid these hazards or use them against your enemies.</li>
|
96 |
-
<li><strong>How to communicate and cooperate with your teammates</strong>: Chicken Gun is a team-based game, where you can play with up to 10 players on each team. You can communicate and cooperate with your teammates to gain an edge over your enemies. You can use the chat feature to send messages or emojis to your teammates, or use the voice chat feature to talk to them. You can also use the ping feature to mark locations or enemies on the map. You should communicate and cooperate with your teammates to plan your strategies, coordinate your attacks, share your resources, support each other, and have fun.</li>
|
97 |
-
<li><strong>How to avoid common mistakes and pitfalls</strong>: Chicken Gun is a game that requires skill, strategy, and luck. You can improve your chances of winning by avoiding some common mistakes and pitfalls that many players make. Here are some of them:</li>
|
98 |
-
<ul>
|
99 |
-
<li>Don't rush into the enemy territory without checking your surroundings or your ammo. You might run into a trap or an ambush, or run out of ammo at the worst possible moment.</li>
|
100 |
-
<li>Don't camp in one spot for too long. You might become an easy target for snipers or explosive eggs, or miss out on the action and the fun.</li>
|
101 |
-
<li>Don't ignore the objectives of the mode. You might lose the game even if you have more kills than your enemies, if you don't capture the flag, defend the base, or collect the coins.</li>
|
102 |
-
<li>Don't be a lone wolf. You might get overwhelmed by the enemy team, or miss out on the benefits of teamwork and communication.</li>
|
103 |
-
<li>Don't be a jerk. You might ruin the game for yourself and others, or get kicked or banned from the server.</li>
|
104 |
-
</ul>
|
105 |
-
</ul>
|
106 |
-
<h2>Conclusion</h2>
|
107 |
-
<p>Chicken Gun is a hilarious and action-packed first-person shooter game where you can play as armed chickens in various modes and maps. You can also customize your chicken from head to toe, making it look cool or funny or both.</p>
|
108 |
-
<p>If you want to have more fun and freedom with this game, you can try playing on a private server. A private server is an unofficial mod that allows you to create or join a separate server from the official one. This way, you can play with whoever you want, whenever you want, however you want.</p>
|
109 |
-
<p>You can also enjoy some features and benefits that are not available on the official server, such as more customization options, more maps and modes, less lag and better performance, more control over the game rules and settings, and more fun and freedom with your friends or other players.</p>
|
110 |
-
<p>In this article, we have shown you how to download and install Chicken Gun Private Server 1.3.0 APK on your Android device, how to join or create a private server in Chicken Gun, what are the features and benefits of playing on a private server, and some tips and tricks to improve your skills and enjoy the game more.</p>
|
111 |
-
<p>We hope that you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
|
112 |
-
<p>Now, what are you waiting for? Go ahead and try out Chicken Gun Private Server 1.3.0 APK and have fun!</p>
|
113 |
-
<p>Thank you for reading this article.</p>
|
114 |
-
<h2>FAQs</h2>
|
115 |
-
<p>Here are some frequently asked questions about Chicken Gun Private Server 1.3.0 APK:</p>
|
116 |
-
<h3>Q: Is Chicken Gun Private Server 1.3.0 APK safe to use?</h3>
|
117 |
-
<p>A: Chicken Gun Private Server 1.3.0 APK is an unofficial mod that is not endorsed by the developers of Chicken Gun, so you should use it at your own risk. We do not take any responsibility for any damage or harm that may occur from using this mod. You should also be careful about downloading and installing any APK files from unknown sources, as they may contain viruses or malware.</p>
|
118 |
-
<h3>Q: Can I play on a private server with players who are on the official server?</h3>
|
119 |
-
<p>A: No, you cannot play on a private server with players who are on the official server. You can only play with players who are also using the same mod as you. If you want to play with players who are on the official server, you need to uninstall the mod and reinstall the original game from the Google Play Store.</p>
|
120 |
-
<h3>Q: Can I play on a private server offline?</h3>
|
121 |
-
<p>A: No, you cannot play on a private server offline. You need an internet connection to join or create a private server in Chicken Gun. However, you can play the single-player mode offline if you want to practice your skills or have fun by yourself.</p>
|
122 |
-
<h3>Q: How can I update Chicken Gun Private Server 1.3.0 APK?</h3>
|
123 |
-
<p>A: To update Chicken Gun Private Server 1.3.0 APK, you need to download and install the latest version of the mod from the same source that you got it from. You should also check for updates regularly, as new features and bug fixes may be added in the future.</p>
|
124 |
-
<h3>Q: How can I contact the developers of Chicken Gun Private Server 1.3.0 APK?</h3>
|
125 |
-
<p>A: To contact the developers of Chicken Gun Private Server 1.3.0 APK, you can visit their website at <a href="">https://chickengunmod.com/</a> or their Facebook page at <a href="">https://www.facebook.com/chickengunmod/</a>. You can also send them an email at <a href="">[email protected]</a>. You can ask them any questions or give them any feedback or suggestions that you may have.</p> 401be4b1e0<br />
|
126 |
-
<br />
|
127 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Crafting and Building 1.18 APK and Start Your Adventure Today.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Crafting and Building 1.18 APK Download: A Guide for Beginners</h1>
|
3 |
-
<p>Do you like building games? Do you want to unleash your creativity in a sandbox world? Do you want to play with your friends online or offline? If you answered yes to any of these questions, then you might want to try <strong>Crafting and Building</strong>, a free building game for Android devices.</p>
|
4 |
-
<p>In this article, we will tell you everything you need to know about <strong>Crafting and Building</strong>, including what it is, how to download it, what are its main features, and what are some tips and tricks for playing it.</p>
|
5 |
-
<h2>crafting and building 1.18 apk download</h2><br /><p><b><b>DOWNLOAD</b> ——— <a href="https://jinyurl.com/2uNSAQ">https://jinyurl.com/2uNSAQ</a></b></p><br /><br />
|
6 |
-
<h2>What is Crafting and Building?</h2>
|
7 |
-
<p><strong>Crafting and Building</strong> is a new free building game for Android devices that lets you create your own world with blocks. You can build anything you can imagine, from houses to castles to temples, and explore different biomes, such as forests, deserts, mountains, caves, and oceans. You can also play with your friends online or offline, visit their worlds, and help them with their constructions.</p>
|
8 |
-
<p><strong>Crafting and Building</strong> is inspired by popular games like <strong>Minecraft</strong> and <strong>Terraria</strong>, but it has its own unique features and style. It has cool graphics, smooth controls, and a user-friendly interface that makes it easy to play for anyone. It also has no monsters or enemies, so you can focus on building and having fun.</p>
|
9 |
-
<h2>How to download and install Crafting and Building 1.18 APK?</h2>
|
10 |
-
<p><strong>Crafting and Building 1.18 APK</strong> is the latest version of the game, released on April 19, 2023. It has some new features and improvements, such as new blocks, new animals, new skins, new sounds, and bug fixes.</p>
|
11 |
-
<p><strong>Crafting and Building 1.18 APK</strong> can be downloaded from various websites that offer APK files, such as <a href="">APKCombo</a>, <a href="">Aptoide</a>, or <a href="">MCPE Planet</a>. However, you should always be careful when downloading APK files from unknown sources, as they may contain viruses or malware that can harm your device. You should always scan any APK file before installing it on your device.</p>
|
12 |
-
<p><strong>Crafting and Building 1.18 APK</strong> requires Android 5.1 or higher and about 387 MB of storage space on your device. To install it, you need to enable unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. Then, follow the instructions on the screen to complete the installation process.</p>
|
13 |
-
<h2>What are the main features of Crafting and Building 1.18 APK?</h2>
|
14 |
-
<p><strong>Crafting and Building 1.18 APK</strong> offers a fun and creative gameplay experience for the whole family. Here are some of the main features of the game:</p>
|
15 |
-
<p>crafting and building 1.18 apk free download<br />
|
16 |
-
crafting and building 1.18 mod apk download<br />
|
17 |
-
crafting and building 1.18 apk download for android<br />
|
18 |
-
crafting and building 1.18 apk download latest version<br />
|
19 |
-
crafting and building 1.18 apk download uptodown<br />
|
20 |
-
crafting and building 1.18 apk download apkpure<br />
|
21 |
-
crafting and building 1.18 apk download for pc<br />
|
22 |
-
crafting and building 1.18 apk download no ads<br />
|
23 |
-
crafting and building 1.18 apk download unlimited money<br />
|
24 |
-
crafting and building 1.18 apk download offline<br />
|
25 |
-
crafting and building 1.18 apk download hack<br />
|
26 |
-
crafting and building 1.18 apk download mediafıre<br />
|
27 |
-
crafting and building 1.18 apk download android 11<br />
|
28 |
-
crafting and building 1.18 apk download ios<br />
|
29 |
-
crafting and building 1.18 apk download windows 10<br />
|
30 |
-
crafting and building 1.18 apk download full version<br />
|
31 |
-
crafting and building 1.18 apk download online<br />
|
32 |
-
crafting and building 1.18 apk download without verification<br />
|
33 |
-
crafting and building 1.18 apk download mega<br />
|
34 |
-
crafting and building 1.18 apk download google drive<br />
|
35 |
-
crafting and building 1.18 apk download update<br />
|
36 |
-
crafting and building 1.18 apk download old version<br />
|
37 |
-
crafting and building 1.18 apk download cracked<br />
|
38 |
-
crafting and building 1.18 apk download premium<br />
|
39 |
-
crafting and building 1.18 apk download mod menu<br />
|
40 |
-
crafting and building 1.18 apk download original<br />
|
41 |
-
crafting and building 1.18 apk download android oyun club<br />
|
42 |
-
crafting and building 1.18 apk download rexdl<br />
|
43 |
-
crafting and building 1.18 apk download revdl<br />
|
44 |
-
crafting and building 1.18 apk download happy mod<br />
|
45 |
-
crafting and building 1.18 apk download an1.com<br />
|
46 |
-
crafting and building 1.18 apk download mob.org<br />
|
47 |
-
crafting and building 1.18 apk download malavida<br />
|
48 |
-
crafting and building 1.18 apk download softonic<br />
|
49 |
-
crafting and building 1.18 apk download appmirror.net<br />
|
50 |
-
crafting and building 1.18 apk download appvn.com<br />
|
51 |
-
crafting and building 1.18 apk download blackmod.net<br />
|
52 |
-
crafting and building 1.18 apk download platinmods.com<br />
|
53 |
-
crafting and building 1.18 apk download androidpolska.pl<br />
|
54 |
-
crafting and building 1.18 apk download apkmody.io</p>
|
55 |
-
<ul>
|
56 |
-
<li><strong>Build anything you can imagine</strong>: You can use different types of blocks to create anything you want, from houses to castles to temples. You can also use furniture, decorations, paintings, and more to customize your creations.</li>
|
57 |
-
<li><strong>Play with your friends online or offline</strong>: You can play with your friends in multiplayer mode, either online or offline. You can join their worlds or invite them to yours, chat with them, and help them with their constructions.</li>
|
58 |
-
<li><strong>Explore different biomes</strong>: You can explore different biomes in the game, such as forests, deserts, mountains, caves, and oceans. Each biome has its own landscape, animals, villagers, and resources.</li>
|
59 |
-
<li><strong>Cool graphics, smooth controls, and user-friendly interface</strong>: The game has cool graphics that make it look realistic and colorful. The controls are smooth and easy to use. The interface is user-friendly and intuitive.</li>
|
60 |
-
</ul>
|
61 |
-
<h2>What are some tips and tricks for playing Crafting and Building?</h2>
|
62 |
-
<p>If you want to get the most out of <strong>Crafting and Building</strong>, here are some tips and tricks that might help you:</p>
|
63 |
-
<ul>
|
64 |
-
<li><strong>Crafting system</strong>: The game has a simple crafting system that lets you create tools, weapons, furniture, and more from different materials. To craft something, you need to open your inventory and tap on the crafting icon. Then, you can select the item you want to craft from the list or use the search bar to find it. You can also see the required materials for each item.</li>
|
65 |
-
<li><strong>Animals and villagers</strong>: The game has many animals and villagers that you can interact with. Some animals are friendly and some are hostile. You can feed them, pet them, or ride them. Some animals can also be tamed as pets. Villagers are friendly NPCs that live in villages. You can trade with them using emeralds or other items.</li>
|
66 |
-
<li><strong>Pets</strong>: You can tame some animals as pets in the game. To do this, you need to feed them their favorite food until they show hearts above their heads. Then, you can name them and put a collar on them. Pets will follow you around and protect you from hostile animals.</li>
|
67 |
-
<li><strong>No monsters or enemies</strong >: The game has no monsters or enemies, so you can focus on building and having fun. However, you should still be careful of some hostile animals, such as bears, wolves, or sharks. You can also turn on the peaceful mode in the settings if you want to avoid any danger.</li>
|
68 |
-
</ul>
|
69 |
-
<h2>Conclusion</h2>
|
70 |
-
<p><strong>Crafting and Building</strong> is a great game for anyone who loves building games. It is easy to download and install on your Android device, and it offers endless possibilities for creativity, exploration, and multiplayer fun. You can build anything you can imagine, play with your friends online or offline, explore different biomes, and interact with animals and villagers. The game has cool graphics, smooth controls, and a user-friendly interface that makes it suitable for all ages.</p>
|
71 |
-
<p>If you are looking for a new building game to try, you should definitely give <strong>Crafting and Building</strong> a chance. You will not regret it!</p>
|
72 |
-
<h2>FAQs</h2>
|
73 |
-
<h4>Is Crafting and Building free to play?</h4>
|
74 |
-
<p>Yes, <strong>Crafting and Building</strong> is completely free to play. However, it contains ads that can be removed by purchasing the Pro DLC.</p>
|
75 |
-
<h4>Is Crafting and Building safe to download?</h4>
|
76 |
-
<p>Yes, <strong>Crafting and Building</strong> is safe to download from reputable sources. However, you should always scan any APK file before installing it on your device.</p>
|
77 |
-
<h4>Is Crafting and Building compatible with Minecraft?</h4>
|
78 |
-
<p>No, <strong>Crafting and Building</strong> is not compatible with Minecraft. They are different games with different features.</p>
|
79 |
-
<h4>How can I customize my character in Crafting and Building?</h4>
|
80 |
-
<p>You can customize your character in <strong>Crafting and Building</strong> by choosing your gender, skin color, hair style, clothes, accessories, and more.</p>
|
81 |
-
<h4>How can I contact the developers of Crafting and Building?</h4>
|
82 |
-
<p>You can contact the developers of <strong>Crafting and Building</strong> by emailing them at [email protected] or visiting their website at https://protonmobile.com/.</p> 401be4b1e0<br />
|
83 |
-
<br />
|
84 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Rope Hero APK and Become a Superhero on Your Phone.md
DELETED
@@ -1,124 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Rope Hero APK Download Game: A Guide for Android Users</h1>
|
3 |
-
<p>If you are looking for a fun and action-packed game that lets you swing around the city like Spider-Man, fight against crime and injustice, and customize your hero with different outfits and weapons, then you might want to try Rope Hero. Rope Hero is a free game for Android devices that has been downloaded over 100 million times on Google Play Store. In this article, we will tell you what Rope Hero is, how to download and install it on your Android device, why you should play it, and some tips and tricks to help you enjoy it more.</p>
|
4 |
-
<h2>rope hero apk download game</h2><br /><p><b><b>DOWNLOAD</b> ►►► <a href="https://jinyurl.com/2uNUdf">https://jinyurl.com/2uNUdf</a></b></p><br /><br />
|
5 |
-
<h2>What is Rope Hero?</h2>
|
6 |
-
<p>Rope Hero is a 3D open-world action game developed by Naxeex Action & RPG Games. In this game, you play as a superhero who has a super rope and can perform mega jumps, climb buildings, and power landings. You can explore the city, complete missions, fight against gangs and criminals, drive various vehicles, and use different weapons. You can also level up your hero, upgrade your skills, and change your appearance. Rope Hero is a game that combines elements of adventure, simulation, shooting, and RPG.</p>
|
7 |
-
<h3>Features of Rope Hero</h3>
|
8 |
-
<p>Some of the features that make Rope Hero an exciting and addictive game are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Realistic physics and graphics: The game has realistic physics that allow you to swing around the city with your rope, jump from building to building, and crash into objects and enemies. The game also has high-quality graphics that create a vivid and immersive environment.</li>
|
11 |
-
<li>Open-world gameplay: The game gives you the freedom to explore the city at your own pace, without any limitations or restrictions. You can go anywhere you want, do anything you want, and interact with anyone you want. You can also find hidden secrets, collectibles, and easter eggs.</li>
|
12 |
-
<li>Diverse missions and challenges: The game has a variety of missions and challenges that test your skills and abilities. You can help the police catch criminals, rescue hostages, stop robberies, destroy enemy bases, and more. You can also participate in races, stunts, parkour, and other activities.</li>
|
13 |
-
<li>Customization options: The game allows you to customize your hero with different outfits, masks, hats, glasses, shoes, and accessories. You can also choose from different weapons, such as guns, grenades, rockets, swords, hammers, axes, and more. You can also upgrade your weapons with different attachments and modifications.</li>
|
14 |
-
<li>Multiple vehicles: The game lets you drive various vehicles, such as cars, bikes, trucks, buses, helicopters, tanks, and more. You can also steal vehicles from the street or buy them from the garage. You can also upgrade your vehicles with different parts and features.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to download and install Rope Hero APK on Android</h3>
|
17 |
-
<p>If you want to download and install Rope Hero APK on your Android device, you can follow these simple steps:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Go to [Rope Hero APK (Android Game) - Free Download - APKCombo](^1^) or [Rope Hero APK (Android Game) - Free Download - APKCombo](^2^) on your browser.</li>
|
20 |
-
<li>Click on the "Download APK" button to download the latest version of Rope Hero APK file on your device.</li>
|
21 |
-
<li>Once the download is complete, locate the APK file on your device and tap on it to install it.</li>
|
22 |
-
<li>If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources" or "Allow from this source" option.</li>
|
23 |
-
<li>Wait for the installation process to finish and then launch the game from your app drawer or home screen.</li>
|
24 |
-
</ol>
|
25 |
-
<h2>Why play Rope Hero?</h2>
|
26 |
-
<p>Rope Hero is a game that offers a lot of fun and entertainment for Android users who love action games. Here are some reasons why you should play Rope Hero:</p>
|
27 |
-
<h3>Pros and cons of Rope Hero</h3>
|
28 |
-
<p>Like any other game, Rope Hero has its pros and cons. Here are some of them:</p>
|
29 |
-
<table>
|
30 |
-
<tr>
|
31 |
-
<th>Pros</th>
|
32 |
-
<th>Cons</th>
|
33 |
-
</tr>
|
34 |
-
<tr>
|
35 |
-
<td>- Fun and addictive gameplay</td>
|
36 |
-
<td>- Some bugs and glitches</td>
|
37 |
-
</tr>
|
38 |
-
<tr>
|
39 |
-
<td>- Lots of content and variety</td>
|
40 |
-
<td>- Repetitive missions and enemies</td>
|
41 |
-
</tr>
|
42 |
-
<tr>
|
43 |
-
<td>- Cool and realistic graphics</td>
|
44 |
-
<td>- High battery and data consumption</td>
|
45 |
-
</tr>
|
46 |
-
<tr>
|
47 |
-
<td>- Easy and intuitive controls</td>
|
48 |
-
<td>- Ads and in-app purchases</td>
|
49 |
-
</tr>
|
50 |
-
<tr>
|
51 |
-
<td>- Free and offline mode available</td>
|
52 |
-
<td>- No multiplayer or online mode</td>
|
53 |
-
</tr>
|
54 |
-
</table>
|
55 |
-
<h3>Tips and tricks for playing Rope Hero</h3>
|
56 |
-
<p>If you want to play Rope Hero like a pro, here are some tips and tricks that you can use:</p>
|
57 |
-
<p>rope hero vice town apk download game<br />
|
58 |
-
rope hero android game free download<br />
|
59 |
-
rope hero apk mod unlimited money download game<br />
|
60 |
-
rope hero 3 apk download game<br />
|
61 |
-
rope hero apk download latest version game<br />
|
62 |
-
rope hero apk combo download game<br />
|
63 |
-
rope hero apk offline download game<br />
|
64 |
-
rope hero apk update download game<br />
|
65 |
-
rope hero apk hack download game<br />
|
66 |
-
rope hero apk pure download game<br />
|
67 |
-
rope hero apk obb download game<br />
|
68 |
-
rope hero apk revdl download game<br />
|
69 |
-
rope hero apk rexdl download game<br />
|
70 |
-
rope hero apk uptodown download game<br />
|
71 |
-
rope hero apk mob.org download game<br />
|
72 |
-
rope hero apk mirror download game<br />
|
73 |
-
rope hero apk old version download game<br />
|
74 |
-
rope hero apk for pc download game<br />
|
75 |
-
rope hero apk for ios download game<br />
|
76 |
-
rope hero apk for windows 10 download game<br />
|
77 |
-
rope hero naxeex apk download game<br />
|
78 |
-
rope hero naxeex action rpg games apk download game<br />
|
79 |
-
rope hero naxeex studio apk download game<br />
|
80 |
-
rope hero naxeex llc apk download game<br />
|
81 |
-
rope hero naxeex mod apk download game<br />
|
82 |
-
rope hero naxeex unlimited money apk download game<br />
|
83 |
-
rope hero naxeex hack apk download game<br />
|
84 |
-
rope hero naxeex latest version apk download game<br />
|
85 |
-
rope hero naxeex offline apk download game<br />
|
86 |
-
rope hero naxeex online apk download game<br />
|
87 |
-
rope hero 3d open world city simulator apk download game<br />
|
88 |
-
rope hero 3d superhero simulator apk download game<br />
|
89 |
-
rope hero 3d action adventure apk download game<br />
|
90 |
-
rope hero 3d crime city battle apk download game<br />
|
91 |
-
rope hero 3d spider gangster crime city simulator apk download game<br />
|
92 |
-
rope hero 3d flying superhero simulator mod apk download game<br />
|
93 |
-
rope hero 3d flying superhero simulator hack apk download game<br />
|
94 |
-
rope hero 3d flying superhero simulator unlimited money apk download game<br />
|
95 |
-
rope hero 3d flying superhero simulator latest version apk download game<br />
|
96 |
-
rope hero 3d flying superhero simulator offline apk download game<br />
|
97 |
-
rope hero 3d flying superhero simulator online apk download game<br />
|
98 |
-
amazing spider stickman - super flying spiderman - spiderman games - spiderman games free - spiderman games for kids - spiderman games online - spiderman games offline - spiderman games 2023 - spiderman games 3d - spiderman games hd - spiderman games new - spiderman games best - spiderman games fun - spiderman games cool - spiderman games awesome - spiderman games amazing - spiderman games fantastic - spiderman games incredible - spiderman games ultimate - spiderman games epic - spiderman games legendary - spiderman games popular - spiderman games top - spiderman games classic - spiderman games original - spiderman games pro - spiderman games premium - spiderman games deluxe - spiderman games master - spiderman games expert - spiderman games genius - spiderman games super - spiderman games hyper - spiderman games mega - spiderman games ultra</p>
|
99 |
-
<ul>
|
100 |
-
<li>Use your rope wisely: Your rope is your best friend in this game, as it allows you to move around the city quickly and easily. You can use your rope to swing, climb, jump, and attack. However, you also need to be careful not to hit obstacles or enemies with your rope, as it can damage or break it. You can also use your rope to grab objects or enemies and throw them around.</li>
|
101 |
-
<li>Upgrade your hero regularly: As you play the game, you will earn money and experience that you can use to upgrade your hero. You can upgrade your skills, such as health, stamina, speed, strength, and accuracy. You can also upgrade your weapons, such as damage, range, reload time, and ammo capacity. You can also upgrade your vehicles, such as speed, durability, handling, and fuel efficiency.</li>
|
102 |
-
<li>Change your appearance often: One of the fun aspects of this game is that you can change your appearance with different outfits and accessories. You can choose from different styles, such as superhero, gangster, soldier, clown, ninja, pirate, and more. You can also mix and match different items to create your own unique look. Changing your appearance can also help you blend in with the crowd or stand out from the enemies.</li>
|
103 |
-
<li>Explore the city thoroughly: The city in this game is huge and full of secrets and surprises. You can find hidden items, such as money, weapons, health packs, and more. You can also find easter eggs, such as references to other games, movies, or celebrities. You can also discover new places, such as underground tunnels, rooftops, parks, and more.</li>
|
104 |
-
<li>Have fun and be creative: The best tip for playing this game is to have fun and be creative. You can do whatever you want in this game, without any rules or limits. You can create your own adventures, challenges, and stories. You can also experiment with different combinations of weapons, vehicles, outfits, and skills. You can also try different strategies and tactics to complete the missions or defeat the enemies.</li>
|
105 |
-
</ul>
|
106 |
-
<h2>Conclusion</h2>
|
107 |
-
<p>Rope Hero is a game that offers a lot of fun and entertainment for Android users who love action games. It is a game that lets you become a superhero who can swing around the city with a super rope, fight against crime and injustice, and customize your hero with different outfits and weapons. It is a game that has realistic physics and graphics, open-world gameplay, diverse missions and challenges, customization options, and multiple vehicles. It is a game that you can download and install for free on your Android device by following the simple steps we have provided in this article. It is a game that you should play if you want to experience the thrill and excitement of being a rope hero.</p>
|
108 |
-
<h3>FAQs</h3>
|
109 |
-
<p>Here are some frequently asked questions about Rope Hero:</p>
|
110 |
-
<ol>
|
111 |
-
<li>What is the latest version of Rope Hero APK?</li>
|
112 |
-
<p>The latest version of Rope Hero APK is 4.1.1 which was released on June 14th 2023.</p>
|
113 |
-
<li>Is Rope Hero safe to download?</li>
|
114 |
-
<p>Rope Hero is safe to download as long as you download it from a trusted source like [Rope Hero APK (Android Game) - Free Download - APKCombo] or [Rope Hero APK (Android Game) - Free Download - APKCombo]. However, you should always scan the APK file with an antivirus software before installing it on your device.</p>
|
115 |
-
<li>How much space does Rope Hero require on my device?</li>
|
116 |
-
<p>Rope Hero requires about 100 MB of free space on your device to install and run smoothly.</p>
|
117 |
-
<li>Can I play Rope Hero offline?</li>
|
118 |
-
<p>Yes, you can play Rope Hero offline without an internet connection. However, you will need an internet connection to access some features, such as ads, in-app purchases, and updates.</p>
|
119 |
-
<li>How can I contact the developer of Rope Hero?</li>
|
120 |
-
<p>You can contact the developer of Rope Hero by sending an email to [email protected] or by visiting their website at [Naxeex Action & RPG Games].</p>
|
121 |
-
</ol>
|
122 |
-
<p>I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p> 401be4b1e0<br />
|
123 |
-
<br />
|
124 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Erturul Gazi The Leader of Kayi Boyu and the Founder of a Civilization.md
DELETED
@@ -1,94 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Ertuğrul Gazi Oyunu: A Historical Adventure Game Based on Turkish Hero</h1>
|
3 |
-
<p>If you are a fan of historical drama, action, and adventure, you might have heard of Ertuğrul Gazi Oyunu, a popular Turkish game based on the life of Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire. The game is a role-playing game that consists of 60 episodes, each with its own story, characters, and challenges. The game features realistic 3D graphics, professional music, high-resolution visuals, detailed scenes, multiplayer real characters, history-telling dialogues, and team directions. The game is available for Android and PC platforms, and you can download it for free from Google Play Store or Steam.</p>
|
4 |
-
<h2>Who Was Ertuğrul Gazi?</h2>
|
5 |
-
<p>Ertuğrul Gazi was a 13th-century bey (chief) of the Kayı tribe of Oghuz Turks, who migrated from Central Asia to Anatolia to escape the Mongol invasions. He was a brave warrior who fought against various enemies, such as the Byzantines, the Crusaders, and the Mongols. He was also a loyal ally of the Seljuks of Rum, who granted him lands in Söğüt, near Bilecik. He was the father of Osman I, who established the Ottoman Empire in 1299. Ertuğrul Gazi is considered a hero and a ghazi (a fighter for Islam) by many Turks and Muslims. He is also a popular subject of Turkish literature, art, and media.</p>
|
6 |
-
<h2>ertuğrul gazi oyunu</h2><br /><p><b><b>Download</b> 🆓 <a href="https://jinyurl.com/2uNUwa">https://jinyurl.com/2uNUwa</a></b></p><br /><br />
|
7 |
-
<h2>How Did The Game Developers Get Inspired By His Story And Turkish Culture?</h2>
|
8 |
-
<p>The game developers, UMURO, are a Turkish company that specializes in creating games with historical and cultural themes. They were inspired by the success of Diriliş: Ertuğrul, a Turkish TV series that dramatized the life of Ertuğrul Gazi and his tribe. They wanted to create a game that would allow players to experience the same adventure and excitement as the TV series. They also wanted to showcase the rich history and culture of Turkey, especially during the medieval period. They did extensive research on Ertuğrul Gazi's biography, Turkish history, geography, architecture, clothing, weapons, music, language, and customs. They also consulted with historians, experts, and consultants to ensure accuracy and authenticity.</p>
|
9 |
-
<h2>What Are The Main Objectives And Challenges In The Game?</h2>
|
10 |
-
<p>The game follows Ertuğrul Gazi's journey from his youth to his death. Each episode has its own plot, characters, missions, enemies, allies, locations, and rewards. The player can choose to play as Ertuğrul Gazi or one of his alps (warriors). The player can also customize their character's appearance, skills, weapons, armor, pets, etc. The main objectives of the game are to complete various tasks assigned by Ertuğrul Gazi or other characters; to fight against enemies using combat skills such as sword fighting, horse riding, archery, defense with sword and shield, direction finding with map, swimming, running fast, rolling, cl imbing, stealth, etc.; to explore different locations such as Söğüt, Aleppo, Karacahisar, etc.; to collect various items such as gold, silver, food, weapons, armor, etc.; to interact with other characters such as Halime Sultan, Bamsı Beyrek, Turgut Alp, etc.; and to make decisions that affect the outcome of the game.</p>
|
11 |
-
<h2>How To Use Different Skills And Weapons In Combat, Horse Riding, Archery, Etc.?</h2>
|
12 |
-
<p>The game has a simple and intuitive control system that allows the player to use different skills and weapons in combat, horse riding, archery, etc. The player can use the joystick on the left side of the screen to move their character; the buttons on the right side of the screen to attack, defend, jump, roll, etc.; and the icons on the top of the screen to access the map, inventory, settings, etc. The player can also switch between different weapons such as swords, axes, daggers, bows, etc. by tapping on their icons on the bottom of the screen. The player can also use their horse to travel faster and to fight enemies by using the horse icon on the bottom of the screen. The player can also use their pet (such as a wolf or an eagle) to assist them in combat by using the pet icon on the bottom of the screen.</p>
|
13 |
-
<h2>What Are Some Of The Tips And Tricks To Succeed In The Game?</h2>
|
14 |
-
<p>Some of the tips and tricks to succeed in the game are: - Pay attention to the dialogues and instructions given by Ertuğrul Gazi or other characters. They will provide you with valuable information and hints about your missions and objectives. - Explore your surroundings and collect items that can help you in your quests. You can find gold, silver, food, weapons, armor, etc. in chests, barrels, crates, tents, etc. You can also loot enemies after defeating them. - Upgrade your skills and weapons regularly. You can use gold and silver to buy new skills and weapons from merchants or blacksmiths. You can also use food to heal yourself or your horse. - Use your skills and weapons wisely. Different skills and weapons have different advantages and disadvantages depending on the situation. For example, swords are good for close-range combat but not for long-range combat; bows are good for long-range combat but not for close-range combat; axes are good for breaking shields but not for fast attacks; daggers are good for fast attacks but not for strong attacks; etc. - Use your horse and pet effectively. Your horse can help you travel faster and fight enemies from a distance. Your pet can help you distract or attack enemies or find hidden items or paths. - Make smart decisions that affect the outcome of the game. The game has multiple endings depending on your choices and actions. For example, you can choose to be loyal or betray Ertuğrul Gazi; you can choose to spare or kill your enemies; you can choose to help or ignore your allies; etc.</p>
|
15 |
-
<h2>What Are Some Of The Positive And Negative Aspects Of The Game According To Players And Critics?</h2>
|
16 |
-
<p>Some of the positive aspects of the game according to players and critics are: - The game has a captivating story that is based on real historical events and characters. - The game has realistic 3D graphics that create a immersive atmosphere and environment. - The game has professional music that enhances the mood and emotion of the game. - The game has high-resolution visuals that make the game look stunning and detailed. - The game has detailed scenes that show the culture and lifestyle of the medieval Turks. - The game has multiplayer real characters that allow players to interact with each other online. - The game has history-telling dialogues that educate players about Turkish history and culture. - The game has team directions that allow players to cooperate with each other in missions. Some of the negative aspects of the game according to players and critics are: - The game has some bugs and glitches that affect the gameplay and performance of the game. - The game has some translation errors and grammatical mistakes that affect the quality and clarity of the game. - The game has some repetitive missions and objectives that affect the variety and creativity of the game. - The game has some unrealistic physics and animations that affect the realism and accuracy of the game. - The game has some violent and graphic scenes that may not be suitable for younger or sensitive players.</p>
|
17 |
-
<h2>How Does The Game Compare To Other Similar Games In The Market?</h2>
|
18 |
-
<p>The game is similar to other historical adventure games in the market such as Assassin's Creed, Prince of Persia, Shadow of Mordor, etc. However, the game is unique in its focus on Turkish history and culture, especially during the medieval period and the rise of the Ottoman Empire . The game is also unique in its gameplay and features, such as the realistic 3D graphics, the professional music, the high-resolution visuals, the detailed scenes, the multiplayer real characters, the history-telling dialogues, and the team directions. The game is also unique in its genre, as it is a role-playing game that consists of 60 episodes, each with its own story, characters, and challenges. The game is also unique in its control system, as it allows the player to use different skills and weapons in combat, horse riding, archery, etc. The game is also unique in its outcome, as it has multiple endings depending on the player's choices and actions.</p>
|
19 |
-
<h2>What Are Some Of The Suggestions And Requests For Improvement From The Players?</h2>
|
20 |
-
<p>Some of the suggestions and requests for improvement from the players are: - To fix the bugs and glitches that affect the gameplay and performance of the game. - To improve the translation and grammar of the game to make it more clear and accurate. - To add more variety and creativity to the missions and objectives of the game to make it more fun and challenging. - To improve the physics and animations of the game to make it more realistic and accurate. - To add more options and features to customize the character's appearance, skills, weapons, armor, pets, etc. to make it more personal and diverse. - To add more historical and cultural content to the game to make it more educational and informative. - To add more modes and levels to the game to make it more replayable and enjoyable.</p>
|
21 |
-
<p>ertuğrul gazi oyunu indir<br />
|
22 |
-
ertuğrul gazi oyunu hile<br />
|
23 |
-
ertuğrul gazi oyunu pc<br />
|
24 |
-
ertuğrul gazi oyunu steam<br />
|
25 |
-
ertuğrul gazi oyunu nasıl oynanır<br />
|
26 |
-
ertuğrul gazi oyunu altın hilesi<br />
|
27 |
-
ertuğrul gazi oyunu apk<br />
|
28 |
-
ertuğrul gazi oyunu mod<br />
|
29 |
-
ertuğrul gazi oyunu son bölüm<br />
|
30 |
-
ertuğrul gazi oyunu online<br />
|
31 |
-
ertuğrul gazi oyunu kayı boyunun destanı<br />
|
32 |
-
ertuğrul gazi oyunu umuro<br />
|
33 |
-
ertuğrul gazi oyunu android<br />
|
34 |
-
ertuğrul gazi oyunu osmanlı kuruluşu<br />
|
35 |
-
ertuğrul gazi oyunu kurtuluş savaşı<br />
|
36 |
-
ertuğrul gazi oyunu yeni sezon<br />
|
37 |
-
ertuğrul gazi oyunu izle<br />
|
38 |
-
ertuğrul gazi oyunu yorumlar<br />
|
39 |
-
ertuğrul gazi oyunu puan hilesi<br />
|
40 |
-
ertuğrul gazi oyunu canlı yayın<br />
|
41 |
-
ertuğrul gazi oyunu türkçe dublaj<br />
|
42 |
-
ertuğrul gazi oyunu at sürme<br />
|
43 |
-
ertuğrul gazi oyunu okçuluk<br />
|
44 |
-
ertuğrul gazi oyunu kılıç kalkan<br />
|
45 |
-
ertuğrul gazi oyunu harita bulma<br />
|
46 |
-
ertuğrul gazi oyunu yükleme sorunu<br />
|
47 |
-
ertuğrul gazi oyunu güncelleme<br />
|
48 |
-
ertuğrul gazi oyunu sistem gereksinimleri<br />
|
49 |
-
ertuğrul gazi oyunu klan kurma<br />
|
50 |
-
ertuğrul gazi oyunu hediye kodları<br />
|
51 |
-
ertugrulgazioyn.com resmi web sitesi<br />
|
52 |
-
ertugrulgazioyn.net fan sayfası<br />
|
53 |
-
ertugrulgazioyn.org haber portalı<br />
|
54 |
-
ertugrulgazioyn.info ipucuları ve rehberleri<br />
|
55 |
-
ertugrulgazioyn.biz inceleme ve değerlendirme<br />
|
56 |
-
ertugrulgazioyn.xyz eğlence ve mizah<br />
|
57 |
-
ertugrulgazioyn.club sosyal medya ve forum<br />
|
58 |
-
ertugrulgazioyn.shop altın ve eşya satışı<br />
|
59 |
-
ertugrulgazioyn.live canlı destek ve yardım<br />
|
60 |
-
ertugrulgazioyn.fun yarışma ve etkinlikler</p>
|
61 |
-
<h2>Conclusion</h2>
|
62 |
-
<p>Ertuğrul Gazi Oyunu is a historical adventure game based on Turkish hero Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire. The game is a role-playing game that consists of 60 episodes, each with its own story, characters, and challenges. The game features realistic 3D graphics, professional music, high-resolution visuals, detailed scenes, multiplayer real characters, history-telling dialogues, and team directions. The game is available for Android and PC platforms, and you can download it for free from Google Play Store or Steam. If you are interested in Turkish history and culture, or if you are looking for a thrilling and exciting game to play, you should definitely give Ertuğrul Gazi Oyunu a try. You will not regret it!</p>
|
63 |
-
<p>Do you have any questions or comments about Ertuğrul Gazi Oyunu? Do you want to share your experience or opinion about the game? Do you have any suggestions or requests for improvement for the game developers? If so, please feel free to leave a comment below or contact us through our website or social media. We would love to hear from you!</p>
|
64 |
-
<p>Thank you for reading this article. We hope you enjoyed it and learned something new. Please share this article with your friends and family who might be interested in Ertuğrul Gazi Oyunu or Turkish history and culture. And don't forget to check out our other articles on our website for more interesting and informative topics. See you next time!</p>
|
65 |
-
<h3>FAQs</h3>
|
66 |
-
<p>Here are some of the frequently asked questions about Ertuğrul Gazi Oyunu:</p>
|
67 |
-
<table>
|
68 |
-
<tr>
|
69 |
-
<th>Question</th>
|
70 |
-
<th>Answer</th>
|
71 |
-
</tr>
|
72 |
-
<tr>
|
73 |
-
<td>What is Ertuğrul Gazi Oyunu?</td>
|
74 |
-
<td>Ertuğrul Gazi Oyunu is a historical adventure game based on Turkish hero Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire.</td>
|
75 |
-
</tr>
|
76 |
-
<tr>
|
77 |
-
<td>How can I download and play Ertuğrul Gazi Oyunu?</td>
|
78 |
-
<td>You can download Ertuğrul Gazi Oyunu for free from Google Play Store or Steam. You can play it on your Android device or PC.</td>
|
79 |
-
</tr>
|
80 |
-
<tr>
|
81 |
-
<td>How many episodes are there in Ertuğrul Gazi Oyunu?</td>
|
82 |
-
<td>There are 60 episodes in Ertuğrul Gazi Oyunu, each with its own story, characters, and challenges.</td>
|
83 |
-
</tr>
|
84 |
-
<tr>
|
85 |
-
<td>What are some of the skills and weapons that I can use in Ertuğrul Gazi Oyunu?</td>
|
86 |
-
<td>You can use various skills such as sword fighting, horse riding, archery, defense with sword and shield, direction finding with map, swimming, running fast, rolling, climbing, stealth, etc. You can also use different weapons such as swords, axes, daggers, bows, etc.</td>
|
87 |
-
</tr>
|
88 |
-
<tr>
|
89 |
-
<td>Does Ertuğrul Gazi Oyunu have a multiplayer mode?</td>
|
90 |
-
<td>Yes, Ertuğrul Gazi Oyunu has a multiplayer mode that allows you to play with other players online. You can join or create a team and cooperate with each other in missions.</td>
|
91 |
-
</tr>
|
92 |
-
</table></p> 401be4b1e0<br />
|
93 |
-
<br />
|
94 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/loaders.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import os
|
16 |
-
from collections import defaultdict
|
17 |
-
from typing import Callable, Dict, Union
|
18 |
-
|
19 |
-
import paddle
|
20 |
-
import paddle.nn as nn
|
21 |
-
|
22 |
-
from .modeling_utils import _get_model_file, load_dict
|
23 |
-
from .models.cross_attention import LoRACrossAttnProcessor
|
24 |
-
from .utils import HF_CACHE, PPDIFFUSERS_CACHE, logging
|
25 |
-
|
26 |
-
logger = logging.get_logger(__name__)
|
27 |
-
|
28 |
-
|
29 |
-
LORA_WEIGHT_NAME = "paddle_lora_weights.pdparams"
|
30 |
-
|
31 |
-
|
32 |
-
class AttnProcsLayers(nn.Layer):
|
33 |
-
def __init__(self, state_dict: Dict[str, paddle.Tensor]):
|
34 |
-
super().__init__()
|
35 |
-
self.layers = nn.LayerList(state_dict.values())
|
36 |
-
self.mapping = {k: v for k, v in enumerate(state_dict.keys())}
|
37 |
-
self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
|
38 |
-
|
39 |
-
# we add a hook to state_dict() and load_state_dict() so that the
|
40 |
-
# naming fits with `unet.attn_processors`
|
41 |
-
def map_to(state_dict, *args, **kwargs):
|
42 |
-
new_state_dict = {}
|
43 |
-
for key, value in state_dict.items():
|
44 |
-
num = int(key.split(".")[1]) # 0 is always "layers"
|
45 |
-
new_key = key.replace(f"layers.{num}", self.mapping[num])
|
46 |
-
new_state_dict[new_key] = value
|
47 |
-
|
48 |
-
return new_state_dict
|
49 |
-
|
50 |
-
def map_from(module, state_dict, *args, **kwargs):
|
51 |
-
all_keys = list(state_dict.keys())
|
52 |
-
for key in all_keys:
|
53 |
-
replace_key = key.split(".processor")[0] + ".processor"
|
54 |
-
new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
|
55 |
-
state_dict[new_key] = state_dict[key]
|
56 |
-
del state_dict[key]
|
57 |
-
|
58 |
-
self.register_state_dict_hook(map_to)
|
59 |
-
self.register_load_state_dict_pre_hook(map_from, with_module=True)
|
60 |
-
|
61 |
-
|
62 |
-
class UNet2DConditionLoadersMixin:
|
63 |
-
def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]], **kwargs):
|
64 |
-
r"""
|
65 |
-
Load pretrained attention processor layers into `UNet2DConditionModel`. Attention processor layers have to be
|
66 |
-
defined in
|
67 |
-
[cross_attention.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py)
|
68 |
-
and be a `paddle.nn.Layer` class.
|
69 |
-
<Tip warning={true}>
|
70 |
-
This function is experimental and might change in the future
|
71 |
-
</Tip>
|
72 |
-
Parameters:
|
73 |
-
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
|
74 |
-
Can be either:
|
75 |
-
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
|
76 |
-
Valid model ids should have an organization name, like `google/ddpm-celebahq-256`.
|
77 |
-
- A path to a *directory* containing model weights saved using [`~ModelMixin.save_config`], e.g.,
|
78 |
-
`./my_model_directory/`.
|
79 |
-
- A [paddle state
|
80 |
-
dict].
|
81 |
-
from_hf_hub (bool, optional): whether to load from Huggingface Hub.
|
82 |
-
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
83 |
-
Path to a directory in which a downloaded pretrained model configuration should be cached if the
|
84 |
-
standard cache should not be used.
|
85 |
-
subfolder (`str`, *optional*, defaults to `None`):
|
86 |
-
In case the relevant files are located inside a subfolder of the model repo (either remote in
|
87 |
-
huggingface.co or downloaded locally), you can specify the folder name here.
|
88 |
-
"""
|
89 |
-
|
90 |
-
from_hf_hub = kwargs.pop("from_hf_hub", False)
|
91 |
-
if from_hf_hub:
|
92 |
-
cache_dir = kwargs.pop("cache_dir", HF_CACHE)
|
93 |
-
else:
|
94 |
-
cache_dir = kwargs.pop("cache_dir", PPDIFFUSERS_CACHE)
|
95 |
-
subfolder = kwargs.pop("subfolder", None)
|
96 |
-
weight_name = kwargs.pop("weight_name", LORA_WEIGHT_NAME)
|
97 |
-
|
98 |
-
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
|
99 |
-
model_file = _get_model_file(
|
100 |
-
pretrained_model_name_or_path_or_dict,
|
101 |
-
weights_name=weight_name,
|
102 |
-
cache_dir=cache_dir,
|
103 |
-
subfolder=subfolder,
|
104 |
-
from_hf_hub=from_hf_hub,
|
105 |
-
)
|
106 |
-
state_dict = load_dict(model_file, map_location="cpu")
|
107 |
-
else:
|
108 |
-
state_dict = pretrained_model_name_or_path_or_dict
|
109 |
-
|
110 |
-
# fill attn processors
|
111 |
-
attn_processors = {}
|
112 |
-
|
113 |
-
is_lora = all("lora" in k for k in state_dict.keys())
|
114 |
-
|
115 |
-
if is_lora:
|
116 |
-
lora_grouped_dict = defaultdict(dict)
|
117 |
-
for key, value in state_dict.items():
|
118 |
-
attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
|
119 |
-
lora_grouped_dict[attn_processor_key][sub_key] = value
|
120 |
-
|
121 |
-
for key, value_dict in lora_grouped_dict.items():
|
122 |
-
rank = value_dict["to_k_lora.down.weight"].shape[1] # 0 -> 1, torch vs paddle nn.Linear
|
123 |
-
cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[0] # 1 -> 0, torch vs paddle nn.Linear
|
124 |
-
hidden_size = value_dict["to_k_lora.up.weight"].shape[1] # 0 -> 1, torch vs paddle nn.Linear
|
125 |
-
|
126 |
-
attn_processors[key] = LoRACrossAttnProcessor(
|
127 |
-
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=rank
|
128 |
-
)
|
129 |
-
attn_processors[key].load_dict(value_dict)
|
130 |
-
|
131 |
-
else:
|
132 |
-
raise ValueError(f"{model_file} does not seem to be in the correct format expected by LoRA training.")
|
133 |
-
|
134 |
-
# set correct dtype & device
|
135 |
-
attn_processors = {k: v.to(dtype=self.dtype) for k, v in attn_processors.items()}
|
136 |
-
|
137 |
-
# set layers
|
138 |
-
self.set_attn_processor(attn_processors)
|
139 |
-
|
140 |
-
def save_attn_procs(
|
141 |
-
self,
|
142 |
-
save_directory: Union[str, os.PathLike],
|
143 |
-
is_main_process: bool = True,
|
144 |
-
weights_name: str = LORA_WEIGHT_NAME,
|
145 |
-
save_function: Callable = None,
|
146 |
-
):
|
147 |
-
r"""
|
148 |
-
Save an attention procesor to a directory, so that it can be re-loaded using the
|
149 |
-
`[`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`]` method.
|
150 |
-
Arguments:
|
151 |
-
save_directory (`str` or `os.PathLike`):
|
152 |
-
Directory to which to save. Will be created if it doesn't exist.
|
153 |
-
is_main_process (`bool`, *optional*, defaults to `True`):
|
154 |
-
Whether the process calling this is the main process or not. Useful when in distributed training like
|
155 |
-
TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
|
156 |
-
the main process to avoid race conditions.
|
157 |
-
weights_name (`str`, *optional*, defaults to `LORA_WEIGHT_NAME`):
|
158 |
-
The name of weights.
|
159 |
-
save_function (`Callable`):
|
160 |
-
The function to use to save the state dictionary. Useful on distributed training like TPUs when one
|
161 |
-
need to replace `torch.save` by another method. Can be configured with the environment variable
|
162 |
-
`DIFFUSERS_SAVE_MODE`.
|
163 |
-
"""
|
164 |
-
if os.path.isfile(save_directory):
|
165 |
-
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
166 |
-
return
|
167 |
-
|
168 |
-
if save_function is None:
|
169 |
-
save_function = paddle.save
|
170 |
-
|
171 |
-
os.makedirs(save_directory, exist_ok=True)
|
172 |
-
|
173 |
-
model_to_save = AttnProcsLayers(self.attn_processors)
|
174 |
-
|
175 |
-
# Save the model
|
176 |
-
state_dict = model_to_save.state_dict()
|
177 |
-
|
178 |
-
# Clean the folder from a previous save
|
179 |
-
for filename in os.listdir(save_directory):
|
180 |
-
full_filename = os.path.join(save_directory, filename)
|
181 |
-
# If we have a shard file that is not going to be replaced, we delete it, but only from the main process
|
182 |
-
# in distributed settings to avoid race conditions.
|
183 |
-
weights_no_suffix = weights_name.replace(".pdparams", "")
|
184 |
-
if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and is_main_process:
|
185 |
-
os.remove(full_filename)
|
186 |
-
|
187 |
-
# Save the model
|
188 |
-
save_function(state_dict, os.path.join(save_directory, weights_name))
|
189 |
-
|
190 |
-
logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/setting/__init__.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
from .Setting import CorsPolicyMode, Setting
|
2 |
-
from .SettingLoader import USER_SETTING_PATH, SettingLoader
|
3 |
-
|
4 |
-
__all__ = [
|
5 |
-
"USER_SETTING_PATH",
|
6 |
-
"CorsPolicyMode",
|
7 |
-
"Setting",
|
8 |
-
"SettingLoader",
|
9 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
from easydict import EasyDict as edict
|
2 |
-
|
3 |
-
# configs for test speed
|
4 |
-
|
5 |
-
config = edict()
|
6 |
-
config.loss = "arcface"
|
7 |
-
config.network = "r50"
|
8 |
-
config.resume = False
|
9 |
-
config.output = None
|
10 |
-
config.embedding_size = 512
|
11 |
-
config.sample_rate = 1.0
|
12 |
-
config.fp16 = True
|
13 |
-
config.momentum = 0.9
|
14 |
-
config.weight_decay = 5e-4
|
15 |
-
config.batch_size = 128
|
16 |
-
config.lr = 0.1 # batch size is 512
|
17 |
-
|
18 |
-
config.rec = "synthetic"
|
19 |
-
config.num_classes = 300 * 10000
|
20 |
-
config.num_epoch = 30
|
21 |
-
config.warmup_epoch = -1
|
22 |
-
config.decay_epoch = [10, 16, 22]
|
23 |
-
config.val_targets = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/52Hz/HWMNet_lowlight_enhancement/model/HWMNet.py
DELETED
@@ -1,283 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from WT.transform import DWT, IWT
|
4 |
-
|
5 |
-
##---------- Basic Layers ----------
|
6 |
-
def conv3x3(in_chn, out_chn, bias=True):
|
7 |
-
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
|
8 |
-
return layer
|
9 |
-
|
10 |
-
def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
|
11 |
-
return nn.Conv2d(
|
12 |
-
in_channels, out_channels, kernel_size,
|
13 |
-
padding=(kernel_size // 2), bias=bias, stride=stride)
|
14 |
-
|
15 |
-
def bili_resize(factor):
|
16 |
-
return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
|
17 |
-
|
18 |
-
##---------- Basic Blocks ----------
|
19 |
-
class UNetConvBlock(nn.Module):
|
20 |
-
def __init__(self, in_size, out_size, downsample):
|
21 |
-
super(UNetConvBlock, self).__init__()
|
22 |
-
self.downsample = downsample
|
23 |
-
self.body = [HWB(n_feat=in_size, o_feat=in_size, kernel_size=3, reduction=16, bias=False, act=nn.PReLU())]# for _ in range(wab)]
|
24 |
-
self.body = nn.Sequential(*self.body)
|
25 |
-
|
26 |
-
if downsample:
|
27 |
-
self.downsample = PS_down(out_size, out_size, downscale=2)
|
28 |
-
|
29 |
-
self.tail = nn.Conv2d(in_size, out_size, kernel_size=1)
|
30 |
-
|
31 |
-
def forward(self, x):
|
32 |
-
out = self.body(x)
|
33 |
-
out = self.tail(out)
|
34 |
-
if self.downsample:
|
35 |
-
out_down = self.downsample(out)
|
36 |
-
return out_down, out
|
37 |
-
else:
|
38 |
-
return out
|
39 |
-
|
40 |
-
class UNetUpBlock(nn.Module):
|
41 |
-
def __init__(self, in_size, out_size):
|
42 |
-
super(UNetUpBlock, self).__init__()
|
43 |
-
self.up = PS_up(in_size, out_size, upscale=2)
|
44 |
-
self.conv_block = UNetConvBlock(in_size, out_size, downsample=False)
|
45 |
-
|
46 |
-
def forward(self, x, bridge):
|
47 |
-
up = self.up(x)
|
48 |
-
out = torch.cat([up, bridge], dim=1)
|
49 |
-
out = self.conv_block(out)
|
50 |
-
return out
|
51 |
-
|
52 |
-
##---------- Resizing Modules (Pixel(Un)Shuffle) ----------
|
53 |
-
class PS_down(nn.Module):
|
54 |
-
def __init__(self, in_size, out_size, downscale):
|
55 |
-
super(PS_down, self).__init__()
|
56 |
-
self.UnPS = nn.PixelUnshuffle(downscale)
|
57 |
-
self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0)
|
58 |
-
|
59 |
-
def forward(self, x):
|
60 |
-
x = self.UnPS(x) # h/2, w/2, 4*c
|
61 |
-
x = self.conv1(x)
|
62 |
-
return x
|
63 |
-
|
64 |
-
class PS_up(nn.Module):
|
65 |
-
def __init__(self, in_size, out_size, upscale):
|
66 |
-
super(PS_up, self).__init__()
|
67 |
-
|
68 |
-
self.PS = nn.PixelShuffle(upscale)
|
69 |
-
self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0)
|
70 |
-
|
71 |
-
def forward(self, x):
|
72 |
-
x = self.PS(x) # h/2, w/2, 4*c
|
73 |
-
x = self.conv1(x)
|
74 |
-
return x
|
75 |
-
|
76 |
-
##---------- Selective Kernel Feature Fusion (SKFF) ----------
|
77 |
-
class SKFF(nn.Module):
|
78 |
-
def __init__(self, in_channels, height=3, reduction=8, bias=False):
|
79 |
-
super(SKFF, self).__init__()
|
80 |
-
|
81 |
-
self.height = height
|
82 |
-
d = max(int(in_channels / reduction), 4)
|
83 |
-
|
84 |
-
self.avg_pool = nn.AdaptiveAvgPool2d(1)
|
85 |
-
self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
|
86 |
-
|
87 |
-
self.fcs = nn.ModuleList([])
|
88 |
-
for i in range(self.height):
|
89 |
-
self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias))
|
90 |
-
|
91 |
-
self.softmax = nn.Softmax(dim=1)
|
92 |
-
|
93 |
-
def forward(self, inp_feats):
|
94 |
-
batch_size, n_feats, H, W = inp_feats[1].shape
|
95 |
-
|
96 |
-
inp_feats = torch.cat(inp_feats, dim=1)
|
97 |
-
inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3])
|
98 |
-
|
99 |
-
feats_U = torch.sum(inp_feats, dim=1)
|
100 |
-
feats_S = self.avg_pool(feats_U)
|
101 |
-
feats_Z = self.conv_du(feats_S)
|
102 |
-
|
103 |
-
attention_vectors = [fc(feats_Z) for fc in self.fcs]
|
104 |
-
attention_vectors = torch.cat(attention_vectors, dim=1)
|
105 |
-
attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1)
|
106 |
-
|
107 |
-
attention_vectors = self.softmax(attention_vectors)
|
108 |
-
feats_V = torch.sum(inp_feats * attention_vectors, dim=1)
|
109 |
-
|
110 |
-
return feats_V
|
111 |
-
|
112 |
-
|
113 |
-
##########################################################################
|
114 |
-
# Spatial Attention Layer
|
115 |
-
class SALayer(nn.Module):
|
116 |
-
def __init__(self, kernel_size=5, bias=False):
|
117 |
-
super(SALayer, self).__init__()
|
118 |
-
self.conv_du = nn.Sequential(
|
119 |
-
nn.Conv2d(2, 1, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, bias=bias),
|
120 |
-
nn.Sigmoid()
|
121 |
-
)
|
122 |
-
|
123 |
-
def forward(self, x):
|
124 |
-
# torch.max will output 2 things, and we want the 1st one
|
125 |
-
max_pool, _ = torch.max(x, dim=1, keepdim=True)
|
126 |
-
avg_pool = torch.mean(x, 1, keepdim=True)
|
127 |
-
channel_pool = torch.cat([max_pool, avg_pool], dim=1) # [N,2,H,W] could add 1x1 conv -> [N,3,H,W]
|
128 |
-
y = self.conv_du(channel_pool)
|
129 |
-
|
130 |
-
return x * y
|
131 |
-
|
132 |
-
##########################################################################
|
133 |
-
# Channel Attention Layer
|
134 |
-
class CALayer(nn.Module):
|
135 |
-
def __init__(self, channel, reduction=16, bias=False):
|
136 |
-
super(CALayer, self).__init__()
|
137 |
-
# global average pooling: feature --> point
|
138 |
-
self.avg_pool = nn.AdaptiveAvgPool2d(1)
|
139 |
-
# feature channel downscale and upscale --> channel weight
|
140 |
-
self.conv_du = nn.Sequential(
|
141 |
-
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
|
142 |
-
nn.ReLU(inplace=True),
|
143 |
-
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
|
144 |
-
nn.Sigmoid()
|
145 |
-
)
|
146 |
-
|
147 |
-
def forward(self, x):
|
148 |
-
y = self.avg_pool(x)
|
149 |
-
y = self.conv_du(y)
|
150 |
-
return x * y
|
151 |
-
|
152 |
-
##########################################################################
|
153 |
-
# Half Wavelet Dual Attention Block (HWB)
|
154 |
-
class HWB(nn.Module):
|
155 |
-
def __init__(self, n_feat, o_feat, kernel_size, reduction, bias, act):
|
156 |
-
super(HWB, self).__init__()
|
157 |
-
self.dwt = DWT()
|
158 |
-
self.iwt = IWT()
|
159 |
-
|
160 |
-
modules_body = \
|
161 |
-
[
|
162 |
-
conv(n_feat*2, n_feat, kernel_size, bias=bias),
|
163 |
-
act,
|
164 |
-
conv(n_feat, n_feat*2, kernel_size, bias=bias)
|
165 |
-
]
|
166 |
-
self.body = nn.Sequential(*modules_body)
|
167 |
-
|
168 |
-
self.WSA = SALayer()
|
169 |
-
self.WCA = CALayer(n_feat*2, reduction, bias=bias)
|
170 |
-
|
171 |
-
self.conv1x1 = nn.Conv2d(n_feat*4, n_feat*2, kernel_size=1, bias=bias)
|
172 |
-
self.conv3x3 = nn.Conv2d(n_feat, o_feat, kernel_size=3, padding=1, bias=bias)
|
173 |
-
self.activate = act
|
174 |
-
self.conv1x1_final = nn.Conv2d(n_feat, o_feat, kernel_size=1, bias=bias)
|
175 |
-
|
176 |
-
def forward(self, x):
|
177 |
-
residual = x
|
178 |
-
|
179 |
-
# Split 2 part
|
180 |
-
wavelet_path_in, identity_path = torch.chunk(x, 2, dim=1)
|
181 |
-
|
182 |
-
# Wavelet domain (Dual attention)
|
183 |
-
x_dwt = self.dwt(wavelet_path_in)
|
184 |
-
res = self.body(x_dwt)
|
185 |
-
branch_sa = self.WSA(res)
|
186 |
-
branch_ca = self.WCA(res)
|
187 |
-
res = torch.cat([branch_sa, branch_ca], dim=1)
|
188 |
-
res = self.conv1x1(res) + x_dwt
|
189 |
-
wavelet_path = self.iwt(res)
|
190 |
-
|
191 |
-
out = torch.cat([wavelet_path, identity_path], dim=1)
|
192 |
-
out = self.activate(self.conv3x3(out))
|
193 |
-
out += self.conv1x1_final(residual)
|
194 |
-
|
195 |
-
return out
|
196 |
-
|
197 |
-
|
198 |
-
##########################################################################
|
199 |
-
##---------- HWMNet-LOL ----------
|
200 |
-
class HWMNet(nn.Module):
|
201 |
-
def __init__(self, in_chn=3, wf=64, depth=4):
|
202 |
-
super(HWMNet, self).__init__()
|
203 |
-
self.depth = depth
|
204 |
-
self.down_path = nn.ModuleList()
|
205 |
-
self.bili_down = bili_resize(0.5)
|
206 |
-
self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1)
|
207 |
-
|
208 |
-
# encoder of UNet-64
|
209 |
-
prev_channels = 0
|
210 |
-
for i in range(depth): # 0,1,2,3
|
211 |
-
downsample = True if (i + 1) < depth else False
|
212 |
-
self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample))
|
213 |
-
prev_channels = (2 ** i) * wf
|
214 |
-
|
215 |
-
# decoder of UNet-64
|
216 |
-
self.up_path = nn.ModuleList()
|
217 |
-
self.skip_conv = nn.ModuleList()
|
218 |
-
self.conv_up = nn.ModuleList()
|
219 |
-
self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1)
|
220 |
-
self.bottom_up = bili_resize(2 ** (depth-1))
|
221 |
-
|
222 |
-
for i in reversed(range(depth - 1)):
|
223 |
-
self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf))
|
224 |
-
self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1))
|
225 |
-
self.conv_up.append(nn.Sequential(*[bili_resize(2 ** i), nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1)]))
|
226 |
-
prev_channels = (2 ** i) * wf
|
227 |
-
|
228 |
-
self.final_ff = SKFF(in_channels=wf, height=depth)
|
229 |
-
self.last = conv3x3(prev_channels, in_chn, bias=True)
|
230 |
-
|
231 |
-
def forward(self, x):
|
232 |
-
img = x
|
233 |
-
scale_img = img
|
234 |
-
|
235 |
-
##### shallow conv #####
|
236 |
-
x1 = self.conv_01(img)
|
237 |
-
encs = []
|
238 |
-
######## UNet-64 ########
|
239 |
-
# Down-path (Encoder)
|
240 |
-
for i, down in enumerate(self.down_path):
|
241 |
-
if i == 0:
|
242 |
-
x1, x1_up = down(x1)
|
243 |
-
encs.append(x1_up)
|
244 |
-
elif (i + 1) < self.depth:
|
245 |
-
scale_img = self.bili_down(scale_img)
|
246 |
-
left_bar = self.conv_01(scale_img)
|
247 |
-
x1 = torch.cat([x1, left_bar], dim=1)
|
248 |
-
x1, x1_up = down(x1)
|
249 |
-
encs.append(x1_up)
|
250 |
-
else:
|
251 |
-
scale_img = self.bili_down(scale_img)
|
252 |
-
left_bar = self.conv_01(scale_img)
|
253 |
-
x1 = torch.cat([x1, left_bar], dim=1)
|
254 |
-
x1 = down(x1)
|
255 |
-
|
256 |
-
# Up-path (Decoder)
|
257 |
-
ms_result = [self.bottom_up(self.bottom_conv(x1))]
|
258 |
-
for i, up in enumerate(self.up_path):
|
259 |
-
x1 = up(x1, self.skip_conv[i](encs[-i - 1]))
|
260 |
-
ms_result.append(self.conv_up[i](x1))
|
261 |
-
# Multi-scale selective feature fusion
|
262 |
-
msff_result = self.final_ff(ms_result)
|
263 |
-
|
264 |
-
##### Reconstruct #####
|
265 |
-
out_1 = self.last(msff_result) + img
|
266 |
-
|
267 |
-
return out_1
|
268 |
-
|
269 |
-
if __name__ == "__main__":
|
270 |
-
input = torch.ones(1, 3, 400, 592, dtype=torch.float, requires_grad=False).cuda()
|
271 |
-
|
272 |
-
model = HWMNet(in_chn=3, wf=96, depth=4).cuda()
|
273 |
-
out = model(input)
|
274 |
-
flops, params = profile(model, inputs=(input,))
|
275 |
-
|
276 |
-
# RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3)
|
277 |
-
# print(RDBlayer)
|
278 |
-
# out = RDBlayer(input)
|
279 |
-
# flops, params = profile(RDBlayer, inputs=(input,))
|
280 |
-
print('input shape:', input.shape)
|
281 |
-
print('parameters:', params/1e6)
|
282 |
-
print('flops', flops/1e9)
|
283 |
-
print('output shape', out.shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/infer/modules/ipex/__init__.py.py
DELETED
@@ -1,165 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import sys
|
3 |
-
import contextlib
|
4 |
-
import torch
|
5 |
-
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
|
6 |
-
from .hijacks import ipex_hijacks
|
7 |
-
from .attention import attention_init
|
8 |
-
|
9 |
-
# pylint: disable=protected-access, missing-function-docstring, line-too-long
|
10 |
-
|
11 |
-
def ipex_init(): # pylint: disable=too-many-statements
|
12 |
-
try:
|
13 |
-
#Replace cuda with xpu:
|
14 |
-
torch.cuda.current_device = torch.xpu.current_device
|
15 |
-
torch.cuda.current_stream = torch.xpu.current_stream
|
16 |
-
torch.cuda.device = torch.xpu.device
|
17 |
-
torch.cuda.device_count = torch.xpu.device_count
|
18 |
-
torch.cuda.device_of = torch.xpu.device_of
|
19 |
-
torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
|
20 |
-
torch.cuda.get_device_name = torch.xpu.get_device_name
|
21 |
-
torch.cuda.get_device_properties = torch.xpu.get_device_properties
|
22 |
-
torch.cuda.init = torch.xpu.init
|
23 |
-
torch.cuda.is_available = torch.xpu.is_available
|
24 |
-
torch.cuda.is_initialized = torch.xpu.is_initialized
|
25 |
-
torch.cuda.is_current_stream_capturing = lambda: False
|
26 |
-
torch.cuda.set_device = torch.xpu.set_device
|
27 |
-
torch.cuda.stream = torch.xpu.stream
|
28 |
-
torch.cuda.synchronize = torch.xpu.synchronize
|
29 |
-
torch.cuda.Event = torch.xpu.Event
|
30 |
-
torch.cuda.Stream = torch.xpu.Stream
|
31 |
-
torch.cuda.FloatTensor = torch.xpu.FloatTensor
|
32 |
-
torch.Tensor.cuda = torch.Tensor.xpu
|
33 |
-
torch.Tensor.is_cuda = torch.Tensor.is_xpu
|
34 |
-
torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
|
35 |
-
torch.cuda._initialized = torch.xpu.lazy_init._initialized
|
36 |
-
torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker
|
37 |
-
torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls
|
38 |
-
torch.cuda._tls = torch.xpu.lazy_init._tls
|
39 |
-
torch.cuda.threading = torch.xpu.lazy_init.threading
|
40 |
-
torch.cuda.traceback = torch.xpu.lazy_init.traceback
|
41 |
-
torch.cuda.Optional = torch.xpu.Optional
|
42 |
-
torch.cuda.__cached__ = torch.xpu.__cached__
|
43 |
-
torch.cuda.__loader__ = torch.xpu.__loader__
|
44 |
-
torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage
|
45 |
-
torch.cuda.Tuple = torch.xpu.Tuple
|
46 |
-
torch.cuda.streams = torch.xpu.streams
|
47 |
-
torch.cuda._lazy_new = torch.xpu._lazy_new
|
48 |
-
torch.cuda.FloatStorage = torch.xpu.FloatStorage
|
49 |
-
torch.cuda.Any = torch.xpu.Any
|
50 |
-
torch.cuda.__doc__ = torch.xpu.__doc__
|
51 |
-
torch.cuda.default_generators = torch.xpu.default_generators
|
52 |
-
torch.cuda.HalfTensor = torch.xpu.HalfTensor
|
53 |
-
torch.cuda._get_device_index = torch.xpu._get_device_index
|
54 |
-
torch.cuda.__path__ = torch.xpu.__path__
|
55 |
-
torch.cuda.Device = torch.xpu.Device
|
56 |
-
torch.cuda.IntTensor = torch.xpu.IntTensor
|
57 |
-
torch.cuda.ByteStorage = torch.xpu.ByteStorage
|
58 |
-
torch.cuda.set_stream = torch.xpu.set_stream
|
59 |
-
torch.cuda.BoolStorage = torch.xpu.BoolStorage
|
60 |
-
torch.cuda.os = torch.xpu.os
|
61 |
-
torch.cuda.torch = torch.xpu.torch
|
62 |
-
torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage
|
63 |
-
torch.cuda.Union = torch.xpu.Union
|
64 |
-
torch.cuda.DoubleTensor = torch.xpu.DoubleTensor
|
65 |
-
torch.cuda.ShortTensor = torch.xpu.ShortTensor
|
66 |
-
torch.cuda.LongTensor = torch.xpu.LongTensor
|
67 |
-
torch.cuda.IntStorage = torch.xpu.IntStorage
|
68 |
-
torch.cuda.LongStorage = torch.xpu.LongStorage
|
69 |
-
torch.cuda.__annotations__ = torch.xpu.__annotations__
|
70 |
-
torch.cuda.__package__ = torch.xpu.__package__
|
71 |
-
torch.cuda.__builtins__ = torch.xpu.__builtins__
|
72 |
-
torch.cuda.CharTensor = torch.xpu.CharTensor
|
73 |
-
torch.cuda.List = torch.xpu.List
|
74 |
-
torch.cuda._lazy_init = torch.xpu._lazy_init
|
75 |
-
torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor
|
76 |
-
torch.cuda.DoubleStorage = torch.xpu.DoubleStorage
|
77 |
-
torch.cuda.ByteTensor = torch.xpu.ByteTensor
|
78 |
-
torch.cuda.StreamContext = torch.xpu.StreamContext
|
79 |
-
torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage
|
80 |
-
torch.cuda.ShortStorage = torch.xpu.ShortStorage
|
81 |
-
torch.cuda._lazy_call = torch.xpu._lazy_call
|
82 |
-
torch.cuda.HalfStorage = torch.xpu.HalfStorage
|
83 |
-
torch.cuda.random = torch.xpu.random
|
84 |
-
torch.cuda._device = torch.xpu._device
|
85 |
-
torch.cuda.classproperty = torch.xpu.classproperty
|
86 |
-
torch.cuda.__name__ = torch.xpu.__name__
|
87 |
-
torch.cuda._device_t = torch.xpu._device_t
|
88 |
-
torch.cuda.warnings = torch.xpu.warnings
|
89 |
-
torch.cuda.__spec__ = torch.xpu.__spec__
|
90 |
-
torch.cuda.BoolTensor = torch.xpu.BoolTensor
|
91 |
-
torch.cuda.CharStorage = torch.xpu.CharStorage
|
92 |
-
torch.cuda.__file__ = torch.xpu.__file__
|
93 |
-
torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
|
94 |
-
#torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
|
95 |
-
|
96 |
-
#Memory:
|
97 |
-
torch.cuda.memory = torch.xpu.memory
|
98 |
-
if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
|
99 |
-
torch.xpu.empty_cache = lambda: None
|
100 |
-
torch.cuda.empty_cache = torch.xpu.empty_cache
|
101 |
-
torch.cuda.memory_stats = torch.xpu.memory_stats
|
102 |
-
torch.cuda.memory_summary = torch.xpu.memory_summary
|
103 |
-
torch.cuda.memory_snapshot = torch.xpu.memory_snapshot
|
104 |
-
torch.cuda.memory_allocated = torch.xpu.memory_allocated
|
105 |
-
torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated
|
106 |
-
torch.cuda.memory_reserved = torch.xpu.memory_reserved
|
107 |
-
torch.cuda.memory_cached = torch.xpu.memory_reserved
|
108 |
-
torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved
|
109 |
-
torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved
|
110 |
-
torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats
|
111 |
-
torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats
|
112 |
-
torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats
|
113 |
-
torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict
|
114 |
-
torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats
|
115 |
-
|
116 |
-
#RNG:
|
117 |
-
torch.cuda.get_rng_state = torch.xpu.get_rng_state
|
118 |
-
torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all
|
119 |
-
torch.cuda.set_rng_state = torch.xpu.set_rng_state
|
120 |
-
torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all
|
121 |
-
torch.cuda.manual_seed = torch.xpu.manual_seed
|
122 |
-
torch.cuda.manual_seed_all = torch.xpu.manual_seed_all
|
123 |
-
torch.cuda.seed = torch.xpu.seed
|
124 |
-
torch.cuda.seed_all = torch.xpu.seed_all
|
125 |
-
torch.cuda.initial_seed = torch.xpu.initial_seed
|
126 |
-
|
127 |
-
#AMP:
|
128 |
-
torch.cuda.amp = torch.xpu.amp
|
129 |
-
if not hasattr(torch.cuda.amp, "common"):
|
130 |
-
torch.cuda.amp.common = contextlib.nullcontext()
|
131 |
-
torch.cuda.amp.common.amp_definitely_not_available = lambda: False
|
132 |
-
try:
|
133 |
-
torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
|
134 |
-
except Exception: # pylint: disable=broad-exception-caught
|
135 |
-
try:
|
136 |
-
from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error
|
137 |
-
gradscaler_init()
|
138 |
-
torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
|
139 |
-
except Exception: # pylint: disable=broad-exception-caught
|
140 |
-
torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
|
141 |
-
|
142 |
-
#C
|
143 |
-
torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
|
144 |
-
ipex._C._DeviceProperties.major = 2023
|
145 |
-
ipex._C._DeviceProperties.minor = 2
|
146 |
-
|
147 |
-
#Fix functions with ipex:
|
148 |
-
torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory]
|
149 |
-
torch._utils._get_available_device_type = lambda: "xpu"
|
150 |
-
torch.has_cuda = True
|
151 |
-
torch.cuda.has_half = True
|
152 |
-
torch.cuda.is_bf16_supported = lambda *args, **kwargs: True
|
153 |
-
torch.cuda.is_fp16_supported = lambda *args, **kwargs: True
|
154 |
-
torch.version.cuda = "11.7"
|
155 |
-
torch.cuda.get_device_capability = lambda *args, **kwargs: [11,7]
|
156 |
-
torch.cuda.get_device_properties.major = 11
|
157 |
-
torch.cuda.get_device_properties.minor = 7
|
158 |
-
torch.cuda.ipc_collect = lambda *args, **kwargs: None
|
159 |
-
torch.cuda.utilization = lambda *args, **kwargs: 0
|
160 |
-
|
161 |
-
ipex_hijacks()
|
162 |
-
attention_init()
|
163 |
-
except Exception as e:
|
164 |
-
return False, e
|
165 |
-
return True, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/A00001/bingothoo/src/components/ui/tooltip.tsx
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
import * as TooltipPrimitive from '@radix-ui/react-tooltip'
|
5 |
-
|
6 |
-
import { cn } from '@/lib/utils'
|
7 |
-
|
8 |
-
const TooltipProvider = TooltipPrimitive.Provider
|
9 |
-
|
10 |
-
const Tooltip = TooltipPrimitive.Root
|
11 |
-
|
12 |
-
const TooltipTrigger = TooltipPrimitive.Trigger
|
13 |
-
|
14 |
-
const TooltipContent = React.forwardRef<
|
15 |
-
React.ElementRef<typeof TooltipPrimitive.Content>,
|
16 |
-
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
|
17 |
-
>(({ className, sideOffset = 4, ...props }, ref) => (
|
18 |
-
<TooltipPrimitive.Content
|
19 |
-
ref={ref}
|
20 |
-
sideOffset={sideOffset}
|
21 |
-
className={cn(
|
22 |
-
'z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-xs font-medium text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
|
23 |
-
className
|
24 |
-
)}
|
25 |
-
{...props}
|
26 |
-
/>
|
27 |
-
))
|
28 |
-
TooltipContent.displayName = TooltipPrimitive.Content.displayName
|
29 |
-
|
30 |
-
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIBoy1993/segment_anything_webui/app.py
DELETED
@@ -1,198 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
import gradio as gr
|
5 |
-
from inference import run_inference
|
6 |
-
|
7 |
-
|
8 |
-
# points color and marker
|
9 |
-
colors = [(255, 0, 0), (0, 255, 0)]
|
10 |
-
markers = [1, 5]
|
11 |
-
|
12 |
-
# image examples
|
13 |
-
# in each list, the first element is image path,
|
14 |
-
# the second is id (used for original_image State),
|
15 |
-
# the third is an empty list (used for selected_points State)
|
16 |
-
image_examples = [
|
17 |
-
[os.path.join(os.path.dirname(__file__), "./images/53960-scaled.jpg"), 0, []],
|
18 |
-
[os.path.join(os.path.dirname(__file__), "./images/2388455-scaled.jpg"), 1, []],
|
19 |
-
[os.path.join(os.path.dirname(__file__), "./images/1.jpg"),2,[]],
|
20 |
-
[os.path.join(os.path.dirname(__file__), "./images/2.jpg"),3,[]],
|
21 |
-
[os.path.join(os.path.dirname(__file__), "./images/3.jpg"),4,[]],
|
22 |
-
[os.path.join(os.path.dirname(__file__), "./images/4.jpg"),5,[]],
|
23 |
-
[os.path.join(os.path.dirname(__file__), "./images/5.jpg"),6,[]],
|
24 |
-
[os.path.join(os.path.dirname(__file__), "./images/6.jpg"),7,[]],
|
25 |
-
[os.path.join(os.path.dirname(__file__), "./images/7.jpg"),8,[]],
|
26 |
-
[os.path.join(os.path.dirname(__file__), "./images/8.jpg"),9,[]]
|
27 |
-
]
|
28 |
-
# video examples
|
29 |
-
video_examples = [
|
30 |
-
os.path.join(os.path.dirname(__file__), "./images/video1.mp4"),
|
31 |
-
os.path.join(os.path.dirname(__file__), "./images/video2.mp4")
|
32 |
-
]
|
33 |
-
|
34 |
-
|
35 |
-
with gr.Blocks() as demo:
|
36 |
-
with gr.Row():
|
37 |
-
gr.Markdown(
|
38 |
-
'''# Segment Anything!🚀
|
39 |
-
The Segment Anything Model (SAM) produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. More information can be found in [**Official Project**](https://segment-anything.com/).
|
40 |
-
[](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true)
|
41 |
-
'''
|
42 |
-
)
|
43 |
-
with gr.Row():
|
44 |
-
# select model
|
45 |
-
model_type = gr.Dropdown(["vit_b", "vit_l", "vit_h"], value='vit_b', label="Select Model")
|
46 |
-
# select device
|
47 |
-
device = gr.Dropdown(["cpu", "cuda"], value='cpu', label="Select Device")
|
48 |
-
|
49 |
-
# SAM parameters
|
50 |
-
with gr.Accordion(label='Parameters', open=False):
|
51 |
-
with gr.Row():
|
52 |
-
points_per_side = gr.Number(value=32, label="points_per_side", precision=0,
|
53 |
-
info='''The number of points to be sampled along one side of the image. The total
|
54 |
-
number of points is points_per_side**2.''')
|
55 |
-
pred_iou_thresh = gr.Slider(value=0.88, minimum=0, maximum=1.0, step=0.01, label="pred_iou_thresh",
|
56 |
-
info='''A filtering threshold in [0,1], using the model's predicted mask quality.''')
|
57 |
-
stability_score_thresh = gr.Slider(value=0.95, minimum=0, maximum=1.0, step=0.01, label="stability_score_thresh",
|
58 |
-
info='''A filtering threshold in [0,1], using the stability of the mask under
|
59 |
-
changes to the cutoff used to binarize the model's mask predictions.''')
|
60 |
-
min_mask_region_area = gr.Number(value=0, label="min_mask_region_area", precision=0,
|
61 |
-
info='''If >0, postprocessing will be applied to remove disconnected regions
|
62 |
-
and holes in masks with area smaller than min_mask_region_area.''')
|
63 |
-
with gr.Row():
|
64 |
-
stability_score_offset = gr.Number(value=1, label="stability_score_offset",
|
65 |
-
info='''The amount to shift the cutoff when calculated the stability score.''')
|
66 |
-
box_nms_thresh = gr.Slider(value=0.7, minimum=0, maximum=1.0, step=0.01, label="box_nms_thresh",
|
67 |
-
info='''The box IoU cutoff used by non-maximal ression to filter duplicate masks.''')
|
68 |
-
crop_n_layers = gr.Number(value=0, label="crop_n_layers", precision=0,
|
69 |
-
info='''If >0, mask prediction will be run again on crops of the image.
|
70 |
-
Sets the number of layers to run, where each layer has 2**i_layer number of image crops.''')
|
71 |
-
crop_nms_thresh = gr.Slider(value=0.7, minimum=0, maximum=1.0, step=0.01, label="crop_nms_thresh",
|
72 |
-
info='''The box IoU cutoff used by non-maximal suppression to filter duplicate
|
73 |
-
masks between different crops.''')
|
74 |
-
|
75 |
-
# Segment image
|
76 |
-
with gr.Tab(label='Image'):
|
77 |
-
with gr.Row().style(equal_height=True):
|
78 |
-
with gr.Column():
|
79 |
-
# input image
|
80 |
-
original_image = gr.State(value=None) # store original image without points, default None
|
81 |
-
input_image = gr.Image(type="numpy")
|
82 |
-
# point prompt
|
83 |
-
with gr.Column():
|
84 |
-
selected_points = gr.State([]) # store points
|
85 |
-
with gr.Row():
|
86 |
-
gr.Markdown('You can click on the image to select points prompt. Default: foreground_point.')
|
87 |
-
undo_button = gr.Button('Undo point')
|
88 |
-
radio = gr.Radio(['foreground_point', 'background_point'], label='point labels')
|
89 |
-
# text prompt to generate box prompt
|
90 |
-
text = gr.Textbox(label='Text prompt(optional)', info=
|
91 |
-
'If you type words, the OWL-ViT model will be used to detect the objects in the image, '
|
92 |
-
'and the boxes will be feed into SAM model to predict mask. Please use English.',
|
93 |
-
placeholder='Multiple words are separated by commas')
|
94 |
-
owl_vit_threshold = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="OWL ViT Object Detection threshold",
|
95 |
-
info='''A small threshold will generate more objects, but may causing OOM.
|
96 |
-
A big threshold may not detect objects, resulting in an error ''')
|
97 |
-
# run button
|
98 |
-
button = gr.Button("Auto!")
|
99 |
-
# show the image with mask
|
100 |
-
with gr.Tab(label='Image+Mask'):
|
101 |
-
output_image = gr.Image(type='numpy')
|
102 |
-
# show only mask
|
103 |
-
with gr.Tab(label='Mask'):
|
104 |
-
output_mask = gr.Image(type='numpy')
|
105 |
-
def process_example(img, ori_img, sel_p):
|
106 |
-
return ori_img, []
|
107 |
-
|
108 |
-
example = gr.Examples(
|
109 |
-
examples=image_examples,
|
110 |
-
inputs=[input_image, original_image, selected_points],
|
111 |
-
outputs=[original_image, selected_points],
|
112 |
-
fn=process_example,
|
113 |
-
run_on_click=True
|
114 |
-
)
|
115 |
-
|
116 |
-
# Segment video
|
117 |
-
with gr.Tab(label='Video'):
|
118 |
-
with gr.Row().style(equal_height=True):
|
119 |
-
with gr.Column():
|
120 |
-
input_video = gr.Video()
|
121 |
-
with gr.Row():
|
122 |
-
button_video = gr.Button("Auto!")
|
123 |
-
output_video = gr.Video(format='mp4')
|
124 |
-
gr.Markdown('''
|
125 |
-
**Note:** processing video will take a long time, please upload a short video.
|
126 |
-
''')
|
127 |
-
gr.Examples(
|
128 |
-
examples=video_examples,
|
129 |
-
inputs=input_video,
|
130 |
-
outputs=output_video
|
131 |
-
)
|
132 |
-
|
133 |
-
# once user upload an image, the original image is stored in `original_image`
|
134 |
-
def store_img(img):
|
135 |
-
return img, [] # when new image is uploaded, `selected_points` should be empty
|
136 |
-
input_image.upload(
|
137 |
-
store_img,
|
138 |
-
[input_image],
|
139 |
-
[original_image, selected_points]
|
140 |
-
)
|
141 |
-
|
142 |
-
# user click the image to get points, and show the points on the image
|
143 |
-
def get_point(img, sel_pix, point_type, evt: gr.SelectData):
|
144 |
-
if point_type == 'foreground_point':
|
145 |
-
sel_pix.append((evt.index, 1)) # append the foreground_point
|
146 |
-
elif point_type == 'background_point':
|
147 |
-
sel_pix.append((evt.index, 0)) # append the background_point
|
148 |
-
else:
|
149 |
-
sel_pix.append((evt.index, 1)) # default foreground_point
|
150 |
-
# draw points
|
151 |
-
for point, label in sel_pix:
|
152 |
-
cv2.drawMarker(img, point, colors[label], markerType=markers[label], markerSize=20, thickness=5)
|
153 |
-
if img[..., 0][0, 0] == img[..., 2][0, 0]: # BGR to RGB
|
154 |
-
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
155 |
-
return img if isinstance(img, np.ndarray) else np.array(img)
|
156 |
-
input_image.select(
|
157 |
-
get_point,
|
158 |
-
[input_image, selected_points, radio],
|
159 |
-
[input_image],
|
160 |
-
)
|
161 |
-
|
162 |
-
# undo the selected point
|
163 |
-
def undo_points(orig_img, sel_pix):
|
164 |
-
if isinstance(orig_img, int): # if orig_img is int, the image if select from examples
|
165 |
-
temp = cv2.imread(image_examples[orig_img][0])
|
166 |
-
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
|
167 |
-
else:
|
168 |
-
temp = orig_img.copy()
|
169 |
-
# draw points
|
170 |
-
if len(sel_pix) != 0:
|
171 |
-
sel_pix.pop()
|
172 |
-
for point, label in sel_pix:
|
173 |
-
cv2.drawMarker(temp, point, colors[label], markerType=markers[label], markerSize=20, thickness=5)
|
174 |
-
if temp[..., 0][0, 0] == temp[..., 2][0, 0]: # BGR to RGB
|
175 |
-
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
|
176 |
-
return temp if isinstance(temp, np.ndarray) else np.array(temp)
|
177 |
-
undo_button.click(
|
178 |
-
undo_points,
|
179 |
-
[original_image, selected_points],
|
180 |
-
[input_image]
|
181 |
-
)
|
182 |
-
|
183 |
-
# button image
|
184 |
-
button.click(run_inference, inputs=[device, model_type, points_per_side, pred_iou_thresh, stability_score_thresh,
|
185 |
-
min_mask_region_area, stability_score_offset, box_nms_thresh, crop_n_layers,
|
186 |
-
crop_nms_thresh, owl_vit_threshold, original_image, text, selected_points],
|
187 |
-
outputs=[output_image, output_mask])
|
188 |
-
# button video
|
189 |
-
button_video.click(run_inference, inputs=[device, model_type, points_per_side, pred_iou_thresh, stability_score_thresh,
|
190 |
-
min_mask_region_area, stability_score_offset, box_nms_thresh, crop_n_layers,
|
191 |
-
crop_nms_thresh, owl_vit_threshold, input_video, text],
|
192 |
-
outputs=[output_video])
|
193 |
-
|
194 |
-
|
195 |
-
demo.queue().launch(debug=True, enable_queue=True)
|
196 |
-
|
197 |
-
|
198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
# from inference.tts.fs import FastSpeechInfer
|
3 |
-
# from modules.tts.fs2_orig import FastSpeech2Orig
|
4 |
-
from inference.svs.base_svs_infer import BaseSVSInfer
|
5 |
-
from utils import load_ckpt
|
6 |
-
from utils.hparams import hparams
|
7 |
-
from modules.diff.shallow_diffusion_tts import GaussianDiffusion
|
8 |
-
from tasks.svs.diffsinger_task import DIFF_DECODERS
|
9 |
-
from modules.fastspeech.pe import PitchExtractor
|
10 |
-
import utils
|
11 |
-
|
12 |
-
|
13 |
-
class DiffSingerE2EInfer(BaseSVSInfer):
|
14 |
-
def build_model(self):
|
15 |
-
model = GaussianDiffusion(
|
16 |
-
phone_encoder=self.ph_encoder,
|
17 |
-
out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
|
18 |
-
timesteps=hparams['timesteps'],
|
19 |
-
K_step=hparams['K_step'],
|
20 |
-
loss_type=hparams['diff_loss_type'],
|
21 |
-
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
|
22 |
-
)
|
23 |
-
model.eval()
|
24 |
-
load_ckpt(model, hparams['work_dir'], 'model')
|
25 |
-
|
26 |
-
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
|
27 |
-
self.pe = PitchExtractor().to(self.device)
|
28 |
-
utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
|
29 |
-
self.pe.eval()
|
30 |
-
return model
|
31 |
-
|
32 |
-
def forward_model(self, inp):
|
33 |
-
sample = self.input_to_batch(inp)
|
34 |
-
txt_tokens = sample['txt_tokens'] # [B, T_t]
|
35 |
-
spk_id = sample.get('spk_ids')
|
36 |
-
with torch.no_grad():
|
37 |
-
output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True,
|
38 |
-
pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'],
|
39 |
-
is_slur=sample['is_slur'])
|
40 |
-
mel_out = output['mel_out'] # [B, T,80]
|
41 |
-
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
|
42 |
-
f0_pred = self.pe(mel_out)['f0_denorm_pred'] # pe predict from Pred mel
|
43 |
-
else:
|
44 |
-
f0_pred = output['f0_denorm']
|
45 |
-
wav_out = self.run_vocoder(mel_out, f0=f0_pred)
|
46 |
-
wav_out = wav_out.cpu().numpy()
|
47 |
-
return wav_out[0]
|
48 |
-
|
49 |
-
if __name__ == '__main__':
|
50 |
-
inp = {
|
51 |
-
'text': '小酒窝长睫毛AP是你最美的记号',
|
52 |
-
'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
|
53 |
-
'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340',
|
54 |
-
'input_type': 'word'
|
55 |
-
} # user input: Chinese characters
|
56 |
-
inp = {
|
57 |
-
'text': '小酒窝长睫毛AP是你最美的记号',
|
58 |
-
'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
|
59 |
-
'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
|
60 |
-
'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
|
61 |
-
'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
|
62 |
-
'input_type': 'phoneme'
|
63 |
-
} # input like Opencpop dataset.
|
64 |
-
DiffSingerE2EInfer.example_run(inp)
|
65 |
-
|
66 |
-
|
67 |
-
# CUDA_VISIBLE_DEVICES=3 python inference/svs/ds_e2e.py --config egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml --exp_name 0228_opencpop_ds100_rel
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d50_8xb32_in1k.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/resnetv1d50.py',
|
3 |
-
'../_base_/datasets/imagenet_bs32_pil_resize.py',
|
4 |
-
'../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/models/tf.py
DELETED
@@ -1,837 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
TensorFlow, Keras and TFLite versions of YOLOv5
|
4 |
-
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
|
5 |
-
|
6 |
-
Usage:
|
7 |
-
$ python models/tf.py --weights yolov5s.pt
|
8 |
-
|
9 |
-
Export:
|
10 |
-
$ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
|
11 |
-
"""
|
12 |
-
|
13 |
-
import argparse
|
14 |
-
import sys
|
15 |
-
from copy import deepcopy
|
16 |
-
from pathlib import Path
|
17 |
-
|
18 |
-
FILE = Path(__file__).resolve()
|
19 |
-
ROOT = FILE.parents[1] # YOLOv5 root directory
|
20 |
-
if str(ROOT) not in sys.path:
|
21 |
-
sys.path.append(str(ROOT)) # add ROOT to PATH
|
22 |
-
# ROOT = ROOT.relative_to(Path.cwd()) # relative
|
23 |
-
|
24 |
-
import numpy as np
|
25 |
-
import tensorflow as tf
|
26 |
-
import torch
|
27 |
-
import torch.nn as nn
|
28 |
-
from tensorflow import keras
|
29 |
-
|
30 |
-
from models.common import (
|
31 |
-
C3,
|
32 |
-
SPP,
|
33 |
-
SPPF,
|
34 |
-
Bottleneck,
|
35 |
-
BottleneckCSP,
|
36 |
-
C3x,
|
37 |
-
Concat,
|
38 |
-
Conv,
|
39 |
-
CrossConv,
|
40 |
-
DWConv,
|
41 |
-
DWConvTranspose2d,
|
42 |
-
Focus,
|
43 |
-
autopad,
|
44 |
-
)
|
45 |
-
from models.experimental import MixConv2d, attempt_load
|
46 |
-
from models.yolo import Detect, Segment
|
47 |
-
from utils.activations import SiLU
|
48 |
-
from utils.general import LOGGER, make_divisible, print_args
|
49 |
-
|
50 |
-
|
51 |
-
class TFBN(keras.layers.Layer):
|
52 |
-
# TensorFlow BatchNormalization wrapper
|
53 |
-
def __init__(self, w=None):
|
54 |
-
super().__init__()
|
55 |
-
self.bn = keras.layers.BatchNormalization(
|
56 |
-
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
|
57 |
-
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
|
58 |
-
moving_mean_initializer=keras.initializers.Constant(
|
59 |
-
w.running_mean.numpy()
|
60 |
-
),
|
61 |
-
moving_variance_initializer=keras.initializers.Constant(
|
62 |
-
w.running_var.numpy()
|
63 |
-
),
|
64 |
-
epsilon=w.eps,
|
65 |
-
)
|
66 |
-
|
67 |
-
def call(self, inputs):
|
68 |
-
return self.bn(inputs)
|
69 |
-
|
70 |
-
|
71 |
-
class TFPad(keras.layers.Layer):
|
72 |
-
# Pad inputs in spatial dimensions 1 and 2
|
73 |
-
def __init__(self, pad):
|
74 |
-
super().__init__()
|
75 |
-
if isinstance(pad, int):
|
76 |
-
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
|
77 |
-
else: # tuple/list
|
78 |
-
self.pad = tf.constant(
|
79 |
-
[[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]
|
80 |
-
)
|
81 |
-
|
82 |
-
def call(self, inputs):
|
83 |
-
return tf.pad(inputs, self.pad, mode="constant", constant_values=0)
|
84 |
-
|
85 |
-
|
86 |
-
class TFConv(keras.layers.Layer):
|
87 |
-
# Standard convolution
|
88 |
-
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
89 |
-
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
90 |
-
super().__init__()
|
91 |
-
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
92 |
-
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
|
93 |
-
# see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
|
94 |
-
conv = keras.layers.Conv2D(
|
95 |
-
filters=c2,
|
96 |
-
kernel_size=k,
|
97 |
-
strides=s,
|
98 |
-
padding="SAME" if s == 1 else "VALID",
|
99 |
-
use_bias=not hasattr(w, "bn"),
|
100 |
-
kernel_initializer=keras.initializers.Constant(
|
101 |
-
w.conv.weight.permute(2, 3, 1, 0).numpy()
|
102 |
-
),
|
103 |
-
bias_initializer="zeros"
|
104 |
-
if hasattr(w, "bn")
|
105 |
-
else keras.initializers.Constant(w.conv.bias.numpy()),
|
106 |
-
)
|
107 |
-
self.conv = (
|
108 |
-
conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
109 |
-
)
|
110 |
-
self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity
|
111 |
-
self.act = activations(w.act) if act else tf.identity
|
112 |
-
|
113 |
-
def call(self, inputs):
|
114 |
-
return self.act(self.bn(self.conv(inputs)))
|
115 |
-
|
116 |
-
|
117 |
-
class TFDWConv(keras.layers.Layer):
|
118 |
-
# Depthwise convolution
|
119 |
-
def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
|
120 |
-
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
121 |
-
super().__init__()
|
122 |
-
assert (
|
123 |
-
c2 % c1 == 0
|
124 |
-
), f"TFDWConv() output={c2} must be a multiple of input={c1} channels"
|
125 |
-
conv = keras.layers.DepthwiseConv2D(
|
126 |
-
kernel_size=k,
|
127 |
-
depth_multiplier=c2 // c1,
|
128 |
-
strides=s,
|
129 |
-
padding="SAME" if s == 1 else "VALID",
|
130 |
-
use_bias=not hasattr(w, "bn"),
|
131 |
-
depthwise_initializer=keras.initializers.Constant(
|
132 |
-
w.conv.weight.permute(2, 3, 1, 0).numpy()
|
133 |
-
),
|
134 |
-
bias_initializer="zeros"
|
135 |
-
if hasattr(w, "bn")
|
136 |
-
else keras.initializers.Constant(w.conv.bias.numpy()),
|
137 |
-
)
|
138 |
-
self.conv = (
|
139 |
-
conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
140 |
-
)
|
141 |
-
self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity
|
142 |
-
self.act = activations(w.act) if act else tf.identity
|
143 |
-
|
144 |
-
def call(self, inputs):
|
145 |
-
return self.act(self.bn(self.conv(inputs)))
|
146 |
-
|
147 |
-
|
148 |
-
class TFDWConvTranspose2d(keras.layers.Layer):
|
149 |
-
# Depthwise ConvTranspose2d
|
150 |
-
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
|
151 |
-
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
152 |
-
super().__init__()
|
153 |
-
assert (
|
154 |
-
c1 == c2
|
155 |
-
), f"TFDWConv() output={c2} must be equal to input={c1} channels"
|
156 |
-
assert k == 4 and p1 == 1, "TFDWConv() only valid for k=4 and p1=1"
|
157 |
-
weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
|
158 |
-
self.c1 = c1
|
159 |
-
self.conv = [
|
160 |
-
keras.layers.Conv2DTranspose(
|
161 |
-
filters=1,
|
162 |
-
kernel_size=k,
|
163 |
-
strides=s,
|
164 |
-
padding="VALID",
|
165 |
-
output_padding=p2,
|
166 |
-
use_bias=True,
|
167 |
-
kernel_initializer=keras.initializers.Constant(
|
168 |
-
weight[..., i : i + 1]
|
169 |
-
),
|
170 |
-
bias_initializer=keras.initializers.Constant(bias[i]),
|
171 |
-
)
|
172 |
-
for i in range(c1)
|
173 |
-
]
|
174 |
-
|
175 |
-
def call(self, inputs):
|
176 |
-
return tf.concat(
|
177 |
-
[m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3
|
178 |
-
)[:, 1:-1, 1:-1]
|
179 |
-
|
180 |
-
|
181 |
-
class TFFocus(keras.layers.Layer):
|
182 |
-
# Focus wh information into c-space
|
183 |
-
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
184 |
-
# ch_in, ch_out, kernel, stride, padding, groups
|
185 |
-
super().__init__()
|
186 |
-
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
|
187 |
-
|
188 |
-
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
|
189 |
-
# inputs = inputs / 255 # normalize 0-255 to 0-1
|
190 |
-
inputs = [
|
191 |
-
inputs[:, ::2, ::2, :],
|
192 |
-
inputs[:, 1::2, ::2, :],
|
193 |
-
inputs[:, ::2, 1::2, :],
|
194 |
-
inputs[:, 1::2, 1::2, :],
|
195 |
-
]
|
196 |
-
return self.conv(tf.concat(inputs, 3))
|
197 |
-
|
198 |
-
|
199 |
-
class TFBottleneck(keras.layers.Layer):
|
200 |
-
# Standard bottleneck
|
201 |
-
def __init__(
|
202 |
-
self, c1, c2, shortcut=True, g=1, e=0.5, w=None
|
203 |
-
): # ch_in, ch_out, shortcut, groups, expansion
|
204 |
-
super().__init__()
|
205 |
-
c_ = int(c2 * e) # hidden channels
|
206 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
207 |
-
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
|
208 |
-
self.add = shortcut and c1 == c2
|
209 |
-
|
210 |
-
def call(self, inputs):
|
211 |
-
return (
|
212 |
-
inputs + self.cv2(self.cv1(inputs))
|
213 |
-
if self.add
|
214 |
-
else self.cv2(self.cv1(inputs))
|
215 |
-
)
|
216 |
-
|
217 |
-
|
218 |
-
class TFCrossConv(keras.layers.Layer):
|
219 |
-
# Cross Convolution
|
220 |
-
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
|
221 |
-
super().__init__()
|
222 |
-
c_ = int(c2 * e) # hidden channels
|
223 |
-
self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
|
224 |
-
self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
|
225 |
-
self.add = shortcut and c1 == c2
|
226 |
-
|
227 |
-
def call(self, inputs):
|
228 |
-
return (
|
229 |
-
inputs + self.cv2(self.cv1(inputs))
|
230 |
-
if self.add
|
231 |
-
else self.cv2(self.cv1(inputs))
|
232 |
-
)
|
233 |
-
|
234 |
-
|
235 |
-
class TFConv2d(keras.layers.Layer):
|
236 |
-
# Substitution for PyTorch nn.Conv2D
|
237 |
-
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
|
238 |
-
super().__init__()
|
239 |
-
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
240 |
-
self.conv = keras.layers.Conv2D(
|
241 |
-
filters=c2,
|
242 |
-
kernel_size=k,
|
243 |
-
strides=s,
|
244 |
-
padding="VALID",
|
245 |
-
use_bias=bias,
|
246 |
-
kernel_initializer=keras.initializers.Constant(
|
247 |
-
w.weight.permute(2, 3, 1, 0).numpy()
|
248 |
-
),
|
249 |
-
bias_initializer=keras.initializers.Constant(w.bias.numpy())
|
250 |
-
if bias
|
251 |
-
else None,
|
252 |
-
)
|
253 |
-
|
254 |
-
def call(self, inputs):
|
255 |
-
return self.conv(inputs)
|
256 |
-
|
257 |
-
|
258 |
-
class TFBottleneckCSP(keras.layers.Layer):
|
259 |
-
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
260 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
261 |
-
# ch_in, ch_out, number, shortcut, groups, expansion
|
262 |
-
super().__init__()
|
263 |
-
c_ = int(c2 * e) # hidden channels
|
264 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
265 |
-
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
|
266 |
-
self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
|
267 |
-
self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
|
268 |
-
self.bn = TFBN(w.bn)
|
269 |
-
self.act = lambda x: keras.activations.swish(x)
|
270 |
-
self.m = keras.Sequential(
|
271 |
-
[
|
272 |
-
TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j])
|
273 |
-
for j in range(n)
|
274 |
-
]
|
275 |
-
)
|
276 |
-
|
277 |
-
def call(self, inputs):
|
278 |
-
y1 = self.cv3(self.m(self.cv1(inputs)))
|
279 |
-
y2 = self.cv2(inputs)
|
280 |
-
return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
|
281 |
-
|
282 |
-
|
283 |
-
class TFC3(keras.layers.Layer):
|
284 |
-
# CSP Bottleneck with 3 convolutions
|
285 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
286 |
-
# ch_in, ch_out, number, shortcut, groups, expansion
|
287 |
-
super().__init__()
|
288 |
-
c_ = int(c2 * e) # hidden channels
|
289 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
290 |
-
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
291 |
-
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
292 |
-
self.m = keras.Sequential(
|
293 |
-
[
|
294 |
-
TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j])
|
295 |
-
for j in range(n)
|
296 |
-
]
|
297 |
-
)
|
298 |
-
|
299 |
-
def call(self, inputs):
|
300 |
-
return self.cv3(
|
301 |
-
tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)
|
302 |
-
)
|
303 |
-
|
304 |
-
|
305 |
-
class TFC3x(keras.layers.Layer):
|
306 |
-
# 3 module with cross-convolutions
|
307 |
-
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
308 |
-
# ch_in, ch_out, number, shortcut, groups, expansion
|
309 |
-
super().__init__()
|
310 |
-
c_ = int(c2 * e) # hidden channels
|
311 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
312 |
-
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
313 |
-
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
314 |
-
self.m = keras.Sequential(
|
315 |
-
[
|
316 |
-
TFCrossConv(
|
317 |
-
c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]
|
318 |
-
)
|
319 |
-
for j in range(n)
|
320 |
-
]
|
321 |
-
)
|
322 |
-
|
323 |
-
def call(self, inputs):
|
324 |
-
return self.cv3(
|
325 |
-
tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)
|
326 |
-
)
|
327 |
-
|
328 |
-
|
329 |
-
class TFSPP(keras.layers.Layer):
|
330 |
-
# Spatial pyramid pooling layer used in YOLOv3-SPP
|
331 |
-
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
|
332 |
-
super().__init__()
|
333 |
-
c_ = c1 // 2 # hidden channels
|
334 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
335 |
-
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
|
336 |
-
self.m = [
|
337 |
-
keras.layers.MaxPool2D(pool_size=x, strides=1, padding="SAME")
|
338 |
-
for x in k
|
339 |
-
]
|
340 |
-
|
341 |
-
def call(self, inputs):
|
342 |
-
x = self.cv1(inputs)
|
343 |
-
return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
|
344 |
-
|
345 |
-
|
346 |
-
class TFSPPF(keras.layers.Layer):
|
347 |
-
# Spatial pyramid pooling-Fast layer
|
348 |
-
def __init__(self, c1, c2, k=5, w=None):
|
349 |
-
super().__init__()
|
350 |
-
c_ = c1 // 2 # hidden channels
|
351 |
-
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
352 |
-
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
|
353 |
-
self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding="SAME")
|
354 |
-
|
355 |
-
def call(self, inputs):
|
356 |
-
x = self.cv1(inputs)
|
357 |
-
y1 = self.m(x)
|
358 |
-
y2 = self.m(y1)
|
359 |
-
return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
|
360 |
-
|
361 |
-
|
362 |
-
class TFDetect(keras.layers.Layer):
|
363 |
-
# TF YOLOv5 Detect layer
|
364 |
-
def __init__(
|
365 |
-
self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None
|
366 |
-
): # detection layer
|
367 |
-
super().__init__()
|
368 |
-
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
|
369 |
-
self.nc = nc # number of classes
|
370 |
-
self.no = nc + 5 # number of outputs per anchor
|
371 |
-
self.nl = len(anchors) # number of detection layers
|
372 |
-
self.na = len(anchors[0]) // 2 # number of anchors
|
373 |
-
self.grid = [tf.zeros(1)] * self.nl # init grid
|
374 |
-
self.anchors = tf.convert_to_tensor(
|
375 |
-
w.anchors.numpy(), dtype=tf.float32
|
376 |
-
)
|
377 |
-
self.anchor_grid = tf.reshape(
|
378 |
-
self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]),
|
379 |
-
[self.nl, 1, -1, 1, 2],
|
380 |
-
)
|
381 |
-
self.m = [
|
382 |
-
TFConv2d(x, self.no * self.na, 1, w=w.m[i])
|
383 |
-
for i, x in enumerate(ch)
|
384 |
-
]
|
385 |
-
self.training = False # set to False after building model
|
386 |
-
self.imgsz = imgsz
|
387 |
-
for i in range(self.nl):
|
388 |
-
ny, nx = (
|
389 |
-
self.imgsz[0] // self.stride[i],
|
390 |
-
self.imgsz[1] // self.stride[i],
|
391 |
-
)
|
392 |
-
self.grid[i] = self._make_grid(nx, ny)
|
393 |
-
|
394 |
-
def call(self, inputs):
|
395 |
-
z = [] # inference output
|
396 |
-
x = []
|
397 |
-
for i in range(self.nl):
|
398 |
-
x.append(self.m[i](inputs[i]))
|
399 |
-
# x(bs,20,20,255) to x(bs,3,20,20,85)
|
400 |
-
ny, nx = (
|
401 |
-
self.imgsz[0] // self.stride[i],
|
402 |
-
self.imgsz[1] // self.stride[i],
|
403 |
-
)
|
404 |
-
x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
|
405 |
-
|
406 |
-
if not self.training: # inference
|
407 |
-
y = x[i]
|
408 |
-
grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
|
409 |
-
anchor_grid = (
|
410 |
-
tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
|
411 |
-
)
|
412 |
-
xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[
|
413 |
-
i
|
414 |
-
] # xy
|
415 |
-
wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
|
416 |
-
# Normalize xywh to 0-1 to reduce calibration error
|
417 |
-
xy /= tf.constant(
|
418 |
-
[[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32
|
419 |
-
)
|
420 |
-
wh /= tf.constant(
|
421 |
-
[[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32
|
422 |
-
)
|
423 |
-
y = tf.concat(
|
424 |
-
[
|
425 |
-
xy,
|
426 |
-
wh,
|
427 |
-
tf.sigmoid(y[..., 4 : 5 + self.nc]),
|
428 |
-
y[..., 5 + self.nc :],
|
429 |
-
],
|
430 |
-
-1,
|
431 |
-
)
|
432 |
-
z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
|
433 |
-
|
434 |
-
return (
|
435 |
-
tf.transpose(x, [0, 2, 1, 3])
|
436 |
-
if self.training
|
437 |
-
else (tf.concat(z, 1),)
|
438 |
-
)
|
439 |
-
|
440 |
-
@staticmethod
|
441 |
-
def _make_grid(nx=20, ny=20):
|
442 |
-
# yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
|
443 |
-
# return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
|
444 |
-
xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
|
445 |
-
return tf.cast(
|
446 |
-
tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]),
|
447 |
-
dtype=tf.float32,
|
448 |
-
)
|
449 |
-
|
450 |
-
|
451 |
-
class TFSegment(TFDetect):
|
452 |
-
# YOLOv5 Segment head for segmentation models
|
453 |
-
def __init__(
|
454 |
-
self,
|
455 |
-
nc=80,
|
456 |
-
anchors=(),
|
457 |
-
nm=32,
|
458 |
-
npr=256,
|
459 |
-
ch=(),
|
460 |
-
imgsz=(640, 640),
|
461 |
-
w=None,
|
462 |
-
):
|
463 |
-
super().__init__(nc, anchors, ch, imgsz, w)
|
464 |
-
self.nm = nm # number of masks
|
465 |
-
self.npr = npr # number of protos
|
466 |
-
self.no = 5 + nc + self.nm # number of outputs per anchor
|
467 |
-
self.m = [
|
468 |
-
TFConv2d(x, self.no * self.na, 1, w=w.m[i])
|
469 |
-
for i, x in enumerate(ch)
|
470 |
-
] # output conv
|
471 |
-
self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
|
472 |
-
self.detect = TFDetect.call
|
473 |
-
|
474 |
-
def call(self, x):
|
475 |
-
p = self.proto(x[0])
|
476 |
-
# p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
|
477 |
-
p = tf.transpose(
|
478 |
-
p, [0, 3, 1, 2]
|
479 |
-
) # from shape(1,160,160,32) to shape(1,32,160,160)
|
480 |
-
x = self.detect(self, x)
|
481 |
-
return (x, p) if self.training else (x[0], p)
|
482 |
-
|
483 |
-
|
484 |
-
class TFProto(keras.layers.Layer):
|
485 |
-
def __init__(self, c1, c_=256, c2=32, w=None):
|
486 |
-
super().__init__()
|
487 |
-
self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
|
488 |
-
self.upsample = TFUpsample(None, scale_factor=2, mode="nearest")
|
489 |
-
self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
|
490 |
-
self.cv3 = TFConv(c_, c2, w=w.cv3)
|
491 |
-
|
492 |
-
def call(self, inputs):
|
493 |
-
return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
|
494 |
-
|
495 |
-
|
496 |
-
class TFUpsample(keras.layers.Layer):
|
497 |
-
# TF version of torch.nn.Upsample()
|
498 |
-
def __init__(
|
499 |
-
self, size, scale_factor, mode, w=None
|
500 |
-
): # warning: all arguments needed including 'w'
|
501 |
-
super().__init__()
|
502 |
-
assert scale_factor % 2 == 0, "scale_factor must be multiple of 2"
|
503 |
-
self.upsample = lambda x: tf.image.resize(
|
504 |
-
x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode
|
505 |
-
)
|
506 |
-
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
|
507 |
-
# with default arguments: align_corners=False, half_pixel_centers=False
|
508 |
-
# self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
|
509 |
-
# size=(x.shape[1] * 2, x.shape[2] * 2))
|
510 |
-
|
511 |
-
def call(self, inputs):
|
512 |
-
return self.upsample(inputs)
|
513 |
-
|
514 |
-
|
515 |
-
class TFConcat(keras.layers.Layer):
|
516 |
-
# TF version of torch.concat()
|
517 |
-
def __init__(self, dimension=1, w=None):
|
518 |
-
super().__init__()
|
519 |
-
assert dimension == 1, "convert only NCHW to NHWC concat"
|
520 |
-
self.d = 3
|
521 |
-
|
522 |
-
def call(self, inputs):
|
523 |
-
return tf.concat(inputs, self.d)
|
524 |
-
|
525 |
-
|
526 |
-
def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
|
527 |
-
LOGGER.info(
|
528 |
-
f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}"
|
529 |
-
)
|
530 |
-
anchors, nc, gd, gw = (
|
531 |
-
d["anchors"],
|
532 |
-
d["nc"],
|
533 |
-
d["depth_multiple"],
|
534 |
-
d["width_multiple"],
|
535 |
-
)
|
536 |
-
na = (
|
537 |
-
(len(anchors[0]) // 2) if isinstance(anchors, list) else anchors
|
538 |
-
) # number of anchors
|
539 |
-
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
540 |
-
|
541 |
-
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
542 |
-
for i, (f, n, m, args) in enumerate(
|
543 |
-
d["backbone"] + d["head"]
|
544 |
-
): # from, number, module, args
|
545 |
-
m_str = m
|
546 |
-
m = eval(m) if isinstance(m, str) else m # eval strings
|
547 |
-
for j, a in enumerate(args):
|
548 |
-
try:
|
549 |
-
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
550 |
-
except NameError:
|
551 |
-
pass
|
552 |
-
|
553 |
-
n = max(round(n * gd), 1) if n > 1 else n # depth gain
|
554 |
-
if m in [
|
555 |
-
nn.Conv2d,
|
556 |
-
Conv,
|
557 |
-
DWConv,
|
558 |
-
DWConvTranspose2d,
|
559 |
-
Bottleneck,
|
560 |
-
SPP,
|
561 |
-
SPPF,
|
562 |
-
MixConv2d,
|
563 |
-
Focus,
|
564 |
-
CrossConv,
|
565 |
-
BottleneckCSP,
|
566 |
-
C3,
|
567 |
-
C3x,
|
568 |
-
]:
|
569 |
-
c1, c2 = ch[f], args[0]
|
570 |
-
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
|
571 |
-
|
572 |
-
args = [c1, c2, *args[1:]]
|
573 |
-
if m in [BottleneckCSP, C3, C3x]:
|
574 |
-
args.insert(2, n)
|
575 |
-
n = 1
|
576 |
-
elif m is nn.BatchNorm2d:
|
577 |
-
args = [ch[f]]
|
578 |
-
elif m is Concat:
|
579 |
-
c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
|
580 |
-
elif m in [Detect, Segment]:
|
581 |
-
args.append([ch[x + 1] for x in f])
|
582 |
-
if isinstance(args[1], int): # number of anchors
|
583 |
-
args[1] = [list(range(args[1] * 2))] * len(f)
|
584 |
-
if m is Segment:
|
585 |
-
args[3] = make_divisible(args[3] * gw, 8)
|
586 |
-
args.append(imgsz)
|
587 |
-
else:
|
588 |
-
c2 = ch[f]
|
589 |
-
|
590 |
-
tf_m = eval("TF" + m_str.replace("nn.", ""))
|
591 |
-
m_ = (
|
592 |
-
keras.Sequential(
|
593 |
-
[tf_m(*args, w=model.model[i][j]) for j in range(n)]
|
594 |
-
)
|
595 |
-
if n > 1
|
596 |
-
else tf_m(*args, w=model.model[i])
|
597 |
-
) # module
|
598 |
-
|
599 |
-
torch_m_ = (
|
600 |
-
nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)
|
601 |
-
) # module
|
602 |
-
t = str(m)[8:-2].replace("__main__.", "") # module type
|
603 |
-
np = sum(x.numel() for x in torch_m_.parameters()) # number params
|
604 |
-
m_.i, m_.f, m_.type, m_.np = (
|
605 |
-
i,
|
606 |
-
f,
|
607 |
-
t,
|
608 |
-
np,
|
609 |
-
) # attach index, 'from' index, type, number params
|
610 |
-
LOGGER.info(
|
611 |
-
f"{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}"
|
612 |
-
) # print
|
613 |
-
save.extend(
|
614 |
-
x % i for x in ([f] if isinstance(f, int) else f) if x != -1
|
615 |
-
) # append to savelist
|
616 |
-
layers.append(m_)
|
617 |
-
ch.append(c2)
|
618 |
-
return keras.Sequential(layers), sorted(save)
|
619 |
-
|
620 |
-
|
621 |
-
class TFModel:
|
622 |
-
# TF YOLOv5 model
|
623 |
-
def __init__(
|
624 |
-
self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)
|
625 |
-
): # model, channels, classes
|
626 |
-
super().__init__()
|
627 |
-
if isinstance(cfg, dict):
|
628 |
-
self.yaml = cfg # model dict
|
629 |
-
else: # is *.yaml
|
630 |
-
import yaml # for torch hub
|
631 |
-
|
632 |
-
self.yaml_file = Path(cfg).name
|
633 |
-
with open(cfg) as f:
|
634 |
-
self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
|
635 |
-
|
636 |
-
# Define model
|
637 |
-
if nc and nc != self.yaml["nc"]:
|
638 |
-
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
|
639 |
-
self.yaml["nc"] = nc # override yaml value
|
640 |
-
self.model, self.savelist = parse_model(
|
641 |
-
deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz
|
642 |
-
)
|
643 |
-
|
644 |
-
def predict(
|
645 |
-
self,
|
646 |
-
inputs,
|
647 |
-
tf_nms=False,
|
648 |
-
agnostic_nms=False,
|
649 |
-
topk_per_class=100,
|
650 |
-
topk_all=100,
|
651 |
-
iou_thres=0.45,
|
652 |
-
conf_thres=0.25,
|
653 |
-
):
|
654 |
-
y = [] # outputs
|
655 |
-
x = inputs
|
656 |
-
for m in self.model.layers:
|
657 |
-
if m.f != -1: # if not from previous layer
|
658 |
-
x = (
|
659 |
-
y[m.f]
|
660 |
-
if isinstance(m.f, int)
|
661 |
-
else [x if j == -1 else y[j] for j in m.f]
|
662 |
-
) # from earlier layers
|
663 |
-
|
664 |
-
x = m(x) # run
|
665 |
-
y.append(x if m.i in self.savelist else None) # save output
|
666 |
-
|
667 |
-
# Add TensorFlow NMS
|
668 |
-
if tf_nms:
|
669 |
-
boxes = self._xywh2xyxy(x[0][..., :4])
|
670 |
-
probs = x[0][:, :, 4:5]
|
671 |
-
classes = x[0][:, :, 5:]
|
672 |
-
scores = probs * classes
|
673 |
-
if agnostic_nms:
|
674 |
-
nms = AgnosticNMS()(
|
675 |
-
(boxes, classes, scores), topk_all, iou_thres, conf_thres
|
676 |
-
)
|
677 |
-
else:
|
678 |
-
boxes = tf.expand_dims(boxes, 2)
|
679 |
-
nms = tf.image.combined_non_max_suppression(
|
680 |
-
boxes,
|
681 |
-
scores,
|
682 |
-
topk_per_class,
|
683 |
-
topk_all,
|
684 |
-
iou_thres,
|
685 |
-
conf_thres,
|
686 |
-
clip_boxes=False,
|
687 |
-
)
|
688 |
-
return (nms,)
|
689 |
-
return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
|
690 |
-
# x = x[0] # [x(1,6300,85), ...] to x(6300,85)
|
691 |
-
# xywh = x[..., :4] # x(6300,4) boxes
|
692 |
-
# conf = x[..., 4:5] # x(6300,1) confidences
|
693 |
-
# cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
|
694 |
-
# return tf.concat([conf, cls, xywh], 1)
|
695 |
-
|
696 |
-
@staticmethod
|
697 |
-
def _xywh2xyxy(xywh):
|
698 |
-
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
|
699 |
-
x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
|
700 |
-
return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
|
701 |
-
|
702 |
-
|
703 |
-
class AgnosticNMS(keras.layers.Layer):
|
704 |
-
# TF Agnostic NMS
|
705 |
-
def call(self, input, topk_all, iou_thres, conf_thres):
|
706 |
-
# wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
|
707 |
-
return tf.map_fn(
|
708 |
-
lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
|
709 |
-
input,
|
710 |
-
fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
|
711 |
-
name="agnostic_nms",
|
712 |
-
)
|
713 |
-
|
714 |
-
@staticmethod
|
715 |
-
def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS
|
716 |
-
boxes, classes, scores = x
|
717 |
-
class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
|
718 |
-
scores_inp = tf.reduce_max(scores, -1)
|
719 |
-
selected_inds = tf.image.non_max_suppression(
|
720 |
-
boxes,
|
721 |
-
scores_inp,
|
722 |
-
max_output_size=topk_all,
|
723 |
-
iou_threshold=iou_thres,
|
724 |
-
score_threshold=conf_thres,
|
725 |
-
)
|
726 |
-
selected_boxes = tf.gather(boxes, selected_inds)
|
727 |
-
padded_boxes = tf.pad(
|
728 |
-
selected_boxes,
|
729 |
-
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
|
730 |
-
mode="CONSTANT",
|
731 |
-
constant_values=0.0,
|
732 |
-
)
|
733 |
-
selected_scores = tf.gather(scores_inp, selected_inds)
|
734 |
-
padded_scores = tf.pad(
|
735 |
-
selected_scores,
|
736 |
-
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
|
737 |
-
mode="CONSTANT",
|
738 |
-
constant_values=-1.0,
|
739 |
-
)
|
740 |
-
selected_classes = tf.gather(class_inds, selected_inds)
|
741 |
-
padded_classes = tf.pad(
|
742 |
-
selected_classes,
|
743 |
-
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
|
744 |
-
mode="CONSTANT",
|
745 |
-
constant_values=-1.0,
|
746 |
-
)
|
747 |
-
valid_detections = tf.shape(selected_inds)[0]
|
748 |
-
return padded_boxes, padded_scores, padded_classes, valid_detections
|
749 |
-
|
750 |
-
|
751 |
-
def activations(act=nn.SiLU):
|
752 |
-
# Returns TF activation from input PyTorch activation
|
753 |
-
if isinstance(act, nn.LeakyReLU):
|
754 |
-
return lambda x: keras.activations.relu(x, alpha=0.1)
|
755 |
-
elif isinstance(act, nn.Hardswish):
|
756 |
-
return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
|
757 |
-
elif isinstance(act, (nn.SiLU, SiLU)):
|
758 |
-
return lambda x: keras.activations.swish(x)
|
759 |
-
else:
|
760 |
-
raise Exception(
|
761 |
-
f"no matching TensorFlow activation found for PyTorch activation {act}"
|
762 |
-
)
|
763 |
-
|
764 |
-
|
765 |
-
def representative_dataset_gen(dataset, ncalib=100):
|
766 |
-
# Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
|
767 |
-
for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
|
768 |
-
im = np.transpose(img, [1, 2, 0])
|
769 |
-
im = np.expand_dims(im, axis=0).astype(np.float32)
|
770 |
-
im /= 255
|
771 |
-
yield [im]
|
772 |
-
if n >= ncalib:
|
773 |
-
break
|
774 |
-
|
775 |
-
|
776 |
-
def run(
|
777 |
-
weights=ROOT / "yolov5s.pt", # weights path
|
778 |
-
imgsz=(640, 640), # inference size h,w
|
779 |
-
batch_size=1, # batch size
|
780 |
-
dynamic=False, # dynamic batch size
|
781 |
-
):
|
782 |
-
# PyTorch model
|
783 |
-
im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
|
784 |
-
model = attempt_load(
|
785 |
-
weights, device=torch.device("cpu"), inplace=True, fuse=False
|
786 |
-
)
|
787 |
-
_ = model(im) # inference
|
788 |
-
model.info()
|
789 |
-
|
790 |
-
# TensorFlow model
|
791 |
-
im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image
|
792 |
-
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
|
793 |
-
_ = tf_model.predict(im) # inference
|
794 |
-
|
795 |
-
# Keras model
|
796 |
-
im = keras.Input(
|
797 |
-
shape=(*imgsz, 3), batch_size=None if dynamic else batch_size
|
798 |
-
)
|
799 |
-
keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
|
800 |
-
keras_model.summary()
|
801 |
-
|
802 |
-
LOGGER.info(
|
803 |
-
"PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export."
|
804 |
-
)
|
805 |
-
|
806 |
-
|
807 |
-
def parse_opt():
|
808 |
-
parser = argparse.ArgumentParser()
|
809 |
-
parser.add_argument(
|
810 |
-
"--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path"
|
811 |
-
)
|
812 |
-
parser.add_argument(
|
813 |
-
"--imgsz",
|
814 |
-
"--img",
|
815 |
-
"--img-size",
|
816 |
-
nargs="+",
|
817 |
-
type=int,
|
818 |
-
default=[640],
|
819 |
-
help="inference size h,w",
|
820 |
-
)
|
821 |
-
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
|
822 |
-
parser.add_argument(
|
823 |
-
"--dynamic", action="store_true", help="dynamic batch size"
|
824 |
-
)
|
825 |
-
opt = parser.parse_args()
|
826 |
-
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
|
827 |
-
print_args(vars(opt))
|
828 |
-
return opt
|
829 |
-
|
830 |
-
|
831 |
-
def main(opt):
|
832 |
-
run(**vars(opt))
|
833 |
-
|
834 |
-
|
835 |
-
if __name__ == "__main__":
|
836 |
-
opt = parse_opt()
|
837 |
-
main(opt)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptAi.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import re
|
4 |
-
from aiohttp import ClientSession
|
5 |
-
|
6 |
-
from .base_provider import AsyncProvider, format_prompt
|
7 |
-
|
8 |
-
|
9 |
-
class ChatgptAi(AsyncProvider):
|
10 |
-
url: str = "https://chatgpt.ai/"
|
11 |
-
working = True
|
12 |
-
supports_gpt_35_turbo = True
|
13 |
-
_nonce = None
|
14 |
-
_post_id = None
|
15 |
-
_bot_id = None
|
16 |
-
|
17 |
-
@classmethod
|
18 |
-
async def create_async(
|
19 |
-
cls,
|
20 |
-
model: str,
|
21 |
-
messages: list[dict[str, str]],
|
22 |
-
proxy: str = None,
|
23 |
-
**kwargs
|
24 |
-
) -> str:
|
25 |
-
headers = {
|
26 |
-
"authority" : "chatgpt.ai",
|
27 |
-
"accept" : "*/*",
|
28 |
-
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
|
29 |
-
"cache-control" : "no-cache",
|
30 |
-
"origin" : "https://chatgpt.ai",
|
31 |
-
"pragma" : "no-cache",
|
32 |
-
"referer" : cls.url,
|
33 |
-
"sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
|
34 |
-
"sec-ch-ua-mobile" : "?0",
|
35 |
-
"sec-ch-ua-platform" : '"Windows"',
|
36 |
-
"sec-fetch-dest" : "empty",
|
37 |
-
"sec-fetch-mode" : "cors",
|
38 |
-
"sec-fetch-site" : "same-origin",
|
39 |
-
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
|
40 |
-
}
|
41 |
-
async with ClientSession(
|
42 |
-
headers=headers
|
43 |
-
) as session:
|
44 |
-
if not cls._nonce:
|
45 |
-
async with session.get(cls.url, proxy=proxy) as response:
|
46 |
-
response.raise_for_status()
|
47 |
-
text = await response.text()
|
48 |
-
result = re.search(r'data-nonce="(.*?)"', text)
|
49 |
-
if result:
|
50 |
-
cls._nonce = result.group(1)
|
51 |
-
result = re.search(r'data-post-id="(.*?)"', text)
|
52 |
-
if result:
|
53 |
-
cls._post_id = result.group(1)
|
54 |
-
result = re.search(r'data-bot-id="(.*?)"', text)
|
55 |
-
if result:
|
56 |
-
cls._bot_id = result.group(1)
|
57 |
-
if not cls._nonce or not cls._post_id or not cls._bot_id:
|
58 |
-
raise RuntimeError("Nonce, post-id or bot-id not found")
|
59 |
-
|
60 |
-
data = {
|
61 |
-
"_wpnonce": cls._nonce,
|
62 |
-
"post_id": cls._post_id,
|
63 |
-
"url": "https://chatgpt.ai",
|
64 |
-
"action": "wpaicg_chat_shortcode_message",
|
65 |
-
"message": format_prompt(messages),
|
66 |
-
"bot_id": cls._bot_id
|
67 |
-
}
|
68 |
-
async with session.post(
|
69 |
-
"https://chatgpt.ai/wp-admin/admin-ajax.php",
|
70 |
-
proxy=proxy,
|
71 |
-
data=data
|
72 |
-
) as response:
|
73 |
-
response.raise_for_status()
|
74 |
-
return (await response.json())["data"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aditya9790/yolo7-object-tracking/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Yolo7 Object Tracking
|
3 |
-
emoji: 💩
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.14.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller-plugin.js
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
import Scroller from './scroller.js';
|
2 |
-
|
3 |
-
class ScrollerPlugin extends Phaser.Plugins.BasePlugin {
|
4 |
-
|
5 |
-
constructor(pluginManager) {
|
6 |
-
super(pluginManager);
|
7 |
-
}
|
8 |
-
|
9 |
-
start() {
|
10 |
-
var eventEmitter = this.game.events;
|
11 |
-
eventEmitter.on('destroy', this.destroy, this);
|
12 |
-
}
|
13 |
-
|
14 |
-
add(gameObject, config) {
|
15 |
-
return new Scroller(gameObject, config);
|
16 |
-
}
|
17 |
-
|
18 |
-
}
|
19 |
-
|
20 |
-
export default ScrollerPlugin;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ExpandMethods.js
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
export default {
|
2 |
-
expand(duration) {
|
3 |
-
if (this.expanded === true) {
|
4 |
-
return this;
|
5 |
-
}
|
6 |
-
|
7 |
-
if (duration === undefined) {
|
8 |
-
duration = this.transitionDuration;
|
9 |
-
}
|
10 |
-
|
11 |
-
this.expanded = true;
|
12 |
-
|
13 |
-
var title = this.childrenMap.title;
|
14 |
-
var child = this.childrenMap.child;
|
15 |
-
|
16 |
-
this.show(child);
|
17 |
-
|
18 |
-
var layoutTarget = (this.reLayoutTarget) ? this.reLayoutTarget : this.getTopmostSizer();
|
19 |
-
layoutTarget.layout();
|
20 |
-
|
21 |
-
title.emit('folder.expand', duration, this);
|
22 |
-
child.emit('folder.expand', duration, this);
|
23 |
-
this.emit('expand.start', this);
|
24 |
-
|
25 |
-
this.childTransition
|
26 |
-
.once('open', function () {
|
27 |
-
this.emit('expand.complete', this);
|
28 |
-
}, this)
|
29 |
-
.requestOpen(null, duration);
|
30 |
-
|
31 |
-
return this;
|
32 |
-
},
|
33 |
-
|
34 |
-
collapse(duration) {
|
35 |
-
if (this.expanded === false) {
|
36 |
-
return this;
|
37 |
-
}
|
38 |
-
|
39 |
-
if (duration === undefined) {
|
40 |
-
duration = this.transitionDuration;
|
41 |
-
}
|
42 |
-
|
43 |
-
this.expanded = false;
|
44 |
-
|
45 |
-
var title = this.childrenMap.title;
|
46 |
-
var child = this.childrenMap.child;
|
47 |
-
|
48 |
-
title.emit('folder.collapse', duration, this);
|
49 |
-
child.emit('folder.collapse', duration, this);
|
50 |
-
this.emit('collapse.start', this);
|
51 |
-
|
52 |
-
this.childTransition
|
53 |
-
.once('close', function () {
|
54 |
-
this.setChildScale(child, 1, 1).hide(child);
|
55 |
-
|
56 |
-
var layoutTarget = (this.reLayoutTarget) ? this.reLayoutTarget : this.getTopmostSizer();
|
57 |
-
layoutTarget.layout();
|
58 |
-
|
59 |
-
this.emit('collapse.complete', this);
|
60 |
-
}, this)
|
61 |
-
.requestClose(null, duration);
|
62 |
-
|
63 |
-
return this;
|
64 |
-
},
|
65 |
-
|
66 |
-
toggle(duration) {
|
67 |
-
if (this.expanded) {
|
68 |
-
this.collapse(duration);
|
69 |
-
} else {
|
70 |
-
this.expand(duration);
|
71 |
-
}
|
72 |
-
|
73 |
-
return this;
|
74 |
-
}
|
75 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alifarsi/news_summarizer/app.py
DELETED
@@ -1,42 +0,0 @@
|
|
1 |
-
from newspaper import Article
|
2 |
-
from newspaper import Config
|
3 |
-
import gradio as gr
|
4 |
-
from gradio.mix import Parallel, Series
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
def extrac_text(url):
|
9 |
-
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0'
|
10 |
-
config = Config()
|
11 |
-
config.browser_user_agent = USER_AGENT
|
12 |
-
config.request_timeout = 10
|
13 |
-
|
14 |
-
article = Article(url, config=config)
|
15 |
-
article.download()
|
16 |
-
article.parse()
|
17 |
-
text = article.text
|
18 |
-
return text
|
19 |
-
|
20 |
-
extractor = gr.Interface(extrac_text, 'text', 'text')
|
21 |
-
summarizer = gr.Interface.load("huggingface/facebook/bart-large-cnn")
|
22 |
-
|
23 |
-
sample_url = [['https://www.cp24.com/news/ontario-reports-481-new-covid-19-cases-1-death-1.5667950'],
|
24 |
-
]
|
25 |
-
|
26 |
-
desc = '''
|
27 |
-
The news summarizer app uses bart-large-cnn model by Facebook to summarize the text of a news article.
|
28 |
-
'''
|
29 |
-
|
30 |
-
iface = Series(extractor, summarizer,
|
31 |
-
inputs = gr.inputs.Textbox(
|
32 |
-
lines = 2,
|
33 |
-
label = 'Enter URL below'
|
34 |
-
),
|
35 |
-
outputs = 'text',
|
36 |
-
title = 'News Summarizer',
|
37 |
-
theme = 'grass',
|
38 |
-
layout = 'horizontal',
|
39 |
-
description = desc,
|
40 |
-
examples=sample_url)
|
41 |
-
|
42 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlishbaImran/Redox-Flow-Battery-Prediction/app.py
DELETED
@@ -1,235 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
3 |
-
|
4 |
-
import warnings
|
5 |
-
warnings.filterwarnings("ignore")
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
from PIL import Image
|
10 |
-
import base64
|
11 |
-
import pandas as pd
|
12 |
-
import streamlit as st
|
13 |
-
import pickle
|
14 |
-
from rdkit import Chem
|
15 |
-
from rdkit.Chem import AllChem
|
16 |
-
from sklearn.ensemble import RandomForestRegressor
|
17 |
-
|
18 |
-
|
19 |
-
import random
|
20 |
-
import numpy as np
|
21 |
-
from keras.wrappers.scikit_learn import KerasRegressor
|
22 |
-
from sklearn.metrics import mean_squared_error
|
23 |
-
import time
|
24 |
-
|
25 |
-
import numpy
|
26 |
-
from sklearn.model_selection import GridSearchCV
|
27 |
-
|
28 |
-
import tensorflow
|
29 |
-
from tensorflow.keras.models import Sequential
|
30 |
-
from tensorflow.keras.layers import Dense
|
31 |
-
from tensorflow.keras.layers import Dropout
|
32 |
-
|
33 |
-
def create_model(optimizer='RMSprop', learn_rate=0.1, momentum=0.4, activation='sigmoid', dropout_rate=0.0):
|
34 |
-
|
35 |
-
keras_model = Sequential()
|
36 |
-
keras_model.add(Dense(128, input_dim=train_encoded.shape[1], activation=activation))
|
37 |
-
keras_model.add(Dropout(dropout_rate))
|
38 |
-
keras_model.add(Dense(32, activation=activation))
|
39 |
-
keras_model.add(Dropout(dropout_rate))
|
40 |
-
keras_model.add(Dense(8,activation=activation))
|
41 |
-
keras_model.add(Dropout(dropout_rate))
|
42 |
-
keras_model.add(Dense(1,activation='linear'))
|
43 |
-
keras_model.summary()
|
44 |
-
|
45 |
-
keras_model.compile(loss='mean_squared_error', optimizer=optimizer)
|
46 |
-
|
47 |
-
return keras_model
|
48 |
-
|
49 |
-
def get_ecfc(smiles_list, radius=2, nBits=2048, useCounts=True):
|
50 |
-
ecfp_fingerprints=[]
|
51 |
-
erroneous_smiles=[]
|
52 |
-
for smiles in smiles_list:
|
53 |
-
mol=Chem.MolFromSmiles(smiles)
|
54 |
-
if mol is None:
|
55 |
-
ecfp_fingerprints.append([None]*nBits)
|
56 |
-
erroneous_smiles.append(smiles)
|
57 |
-
else:
|
58 |
-
mol=Chem.AddHs(mol)
|
59 |
-
if useCounts:
|
60 |
-
ecfp_fingerprints.append(list(AllChem.GetHashedMorganFingerprint(mol, radius, nBits)))
|
61 |
-
else:
|
62 |
-
ecfp_fingerprints.append(list(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()))
|
63 |
-
|
64 |
-
|
65 |
-
df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = smiles_list)
|
66 |
-
|
67 |
-
if len(erroneous_smiles)>0:
|
68 |
-
print("The following erroneous SMILES have been found in the data:\n{}.\nThe erroneous SMILES will be removed from the data.".format('\n'.join(map(str, erroneous_smiles))))
|
69 |
-
df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')
|
70 |
-
|
71 |
-
return df_ecfp_fingerprints
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
import deepchem as dc
|
76 |
-
from deepchem.models import GraphConvModel
|
77 |
-
|
78 |
-
def generate(SMILES, verbose=False):
|
79 |
-
|
80 |
-
featurizer = dc.feat.ConvMolFeaturizer()
|
81 |
-
gcn = featurizer.featurize(SMILES)
|
82 |
-
properties = [random.randint(-1,1)/100 for i in range(0,len(SMILES))]
|
83 |
-
dataset = dc.data.NumpyDataset(X=gcn, y=np.array(properties))
|
84 |
-
|
85 |
-
return dataset
|
86 |
-
|
87 |
-
|
88 |
-
st.write("""# Accelerated reaction energy prediction for redox batteries 🧪 """)
|
89 |
-
st.write('By: [Alishba Imran](https://www.linkedin.com/in/alishba-imran-/)')
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
about_part = st.expander("Learn More About Project", expanded=False)
|
97 |
-
with about_part:
|
98 |
-
st.write('''
|
99 |
-
#### About
|
100 |
-
Redox flow batteries (RFB) are widely being explored as a class of electrochemical energy storage devices for large-scale energy storage applications. Redox flow batteries convert electrical energy to chemical energy via electrochemical reactions (through reversible oxidation and reduction) of compounds.
|
101 |
-
|
102 |
-
To develop next-gen redox flow batteries with high cycle life and energy density, we need to speed up the discovery of electroactive materials with desired properties. This process can currently be very slow and expensive given how large and diverse the chemical space of the candidate compounds is.
|
103 |
-
|
104 |
-
Using an attention-based graph convolutional neural network technique, I've developed a model that can take in reactants as SMILEs and predict the reaction energy in the redox reaction.
|
105 |
-
|
106 |
-
A lot of this work was inspired and built on top of this [paper](https://chemrxiv.org/engage/chemrxiv/article-details/60c7575f469df44a40f45465). Feel free to give it a try and reach out for any feedback. Email: [email protected].
|
107 |
-
|
108 |
-
|
109 |
-
''')
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
st.write('**Insert your SMILES**')
|
115 |
-
|
116 |
-
st.write('Type any SMILES used as a reactant in the redox reaction. This model will output the reaction energy.')
|
117 |
-
|
118 |
-
|
119 |
-
SMILES_input = "Oc1cccc(c12)c(O)c(nn2)O\nc1cccc(c12)cc(nn2)O\nOc1c(O)ccc(c12)cc(nn2)O"
|
120 |
-
|
121 |
-
SMILES = st.text_area('press ctrl+enter to run model!', SMILES_input, height=20)
|
122 |
-
SMILES = SMILES.split('\n')
|
123 |
-
SMILES = list(filter(None, SMILES))
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
if len(SMILES)>1000:
|
129 |
-
SMILES=SMILES[0:1000]
|
130 |
-
|
131 |
-
ecfc_encoder = get_ecfc(SMILES)
|
132 |
-
|
133 |
-
generated_dataset = generate(SMILES)
|
134 |
-
|
135 |
-
|
136 |
-
filename = 'final_models/transformers.pkl'
|
137 |
-
infile = open(filename,'rb')
|
138 |
-
transformers = pickle.load(infile)
|
139 |
-
infile.close()
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
model_dir = 'final_models/tf_chp_initial'
|
144 |
-
gcne_model = dc.models.GraphConvModel(n_tasks=1, batch_size=100, mode='regression', dropout=0.25,model_dir= model_dir,random_seed=0)
|
145 |
-
gcne_model.restore('final_models/tf_chp_initial/ckpt-94/ckpt-197')
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
pred_gcne = gcne_model.predict(generated_dataset, transformers)
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
from keras.models import model_from_json
|
155 |
-
|
156 |
-
keras_final_model = model_from_json(open('./final_models/keras_final_model_architecture.json').read())
|
157 |
-
keras_final_model.load_weights('./final_models/keras_final_model_weights.h5')
|
158 |
-
|
159 |
-
|
160 |
-
rf_final_model = pickle.load(open(r'./final_models/rf_final_model.txt', "rb"))
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
pred_keras = keras_final_model.predict(ecfc_encoder)
|
167 |
-
pred_rf = rf_final_model.predict(ecfc_encoder)
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
pred_rf_r = pred_rf.reshape((len(pred_rf),1))
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
pred_consensus = (pred_keras + pred_gcne + pred_rf)/3
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
test1_mae = []
|
188 |
-
|
189 |
-
test1_mae.append(0.00705)
|
190 |
-
test1_mae.append(0.00416)
|
191 |
-
test1_mae.append(0.0035)
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
test2_mae = []
|
198 |
-
|
199 |
-
test2_mae.append(0.00589)
|
200 |
-
test2_mae.append(0.00483)
|
201 |
-
test2_mae.append(0.00799)
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
weighted_pred_0_1_3=( np.power(2/(test1_mae[0]+test2_mae[0]),3) * pred_gcne +
|
206 |
-
np.power(2/(test1_mae[1]+test2_mae[1]),3) * pred_keras +
|
207 |
-
np.power(2/(test1_mae[2]+test2_mae[2]),3) * pred_rf_r ) / (
|
208 |
-
np.power(2/(test1_mae[0]+test2_mae[0]),3) + np.power(2/(test1_mae[1]+test2_mae[1]),3) + np.power(2/(test1_mae[2]+test2_mae[2]),3))
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
pred_weighted = (pred_gcne + pred_keras + pred_rf_r)/3
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
df_results = pd.DataFrame(SMILES, columns=['SMILES Reactant'])
|
225 |
-
df_results["Predicted Reaction Energy"]= weighted_pred_0_1_3
|
226 |
-
|
227 |
-
df_results=df_results.round(6)
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
st.header('Prediction of Reaction Energy for RFB')
|
232 |
-
df_results
|
233 |
-
|
234 |
-
|
235 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aloento/9Nine-PITS/text/frontend/vocab.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
from collections import OrderedDict
|
15 |
-
from typing import Iterable
|
16 |
-
|
17 |
-
__all__ = ["Vocab"]
|
18 |
-
|
19 |
-
|
20 |
-
class Vocab(object):
|
21 |
-
""" Vocabulary.
|
22 |
-
|
23 |
-
Args:
|
24 |
-
symbols (Iterable[str]): Common symbols.
|
25 |
-
padding_symbol (str, optional): Symbol for pad. Defaults to "<pad>".
|
26 |
-
unk_symbol (str, optional): Symbol for unknow. Defaults to "<unk>"
|
27 |
-
start_symbol (str, optional): Symbol for start. Defaults to "<s>"
|
28 |
-
end_symbol (str, optional): Symbol for end. Defaults to "</s>"
|
29 |
-
"""
|
30 |
-
|
31 |
-
def __init__(self,
|
32 |
-
symbols: Iterable[str],
|
33 |
-
padding_symbol="<pad>",
|
34 |
-
unk_symbol="<unk>",
|
35 |
-
start_symbol="<s>",
|
36 |
-
end_symbol="</s>"):
|
37 |
-
self.special_symbols = OrderedDict()
|
38 |
-
for i, item in enumerate(
|
39 |
-
[padding_symbol, unk_symbol, start_symbol, end_symbol]):
|
40 |
-
if item:
|
41 |
-
self.special_symbols[item] = len(self.special_symbols)
|
42 |
-
|
43 |
-
self.padding_symbol = padding_symbol
|
44 |
-
self.unk_symbol = unk_symbol
|
45 |
-
self.start_symbol = start_symbol
|
46 |
-
self.end_symbol = end_symbol
|
47 |
-
|
48 |
-
self.stoi = OrderedDict()
|
49 |
-
self.stoi.update(self.special_symbols)
|
50 |
-
|
51 |
-
for i, s in enumerate(symbols):
|
52 |
-
if s not in self.stoi:
|
53 |
-
self.stoi[s] = len(self.stoi)
|
54 |
-
self.itos = {v: k for k, v in self.stoi.items()}
|
55 |
-
|
56 |
-
def __len__(self):
|
57 |
-
return len(self.stoi)
|
58 |
-
|
59 |
-
@property
|
60 |
-
def num_specials(self):
|
61 |
-
""" The number of special symbols.
|
62 |
-
"""
|
63 |
-
return len(self.special_symbols)
|
64 |
-
|
65 |
-
# special tokens
|
66 |
-
@property
|
67 |
-
def padding_index(self):
|
68 |
-
""" The index of padding symbol
|
69 |
-
"""
|
70 |
-
return self.stoi.get(self.padding_symbol, -1)
|
71 |
-
|
72 |
-
@property
|
73 |
-
def unk_index(self):
|
74 |
-
"""The index of unknow symbol.
|
75 |
-
"""
|
76 |
-
return self.stoi.get(self.unk_symbol, -1)
|
77 |
-
|
78 |
-
@property
|
79 |
-
def start_index(self):
|
80 |
-
"""The index of start symbol.
|
81 |
-
"""
|
82 |
-
return self.stoi.get(self.start_symbol, -1)
|
83 |
-
|
84 |
-
@property
|
85 |
-
def end_index(self):
|
86 |
-
""" The index of end symbol.
|
87 |
-
"""
|
88 |
-
return self.stoi.get(self.end_symbol, -1)
|
89 |
-
|
90 |
-
def __repr__(self):
|
91 |
-
fmt = "Vocab(size: {},\nstoi:\n{})"
|
92 |
-
return fmt.format(len(self), self.stoi)
|
93 |
-
|
94 |
-
def __str__(self):
|
95 |
-
return self.__repr__()
|
96 |
-
|
97 |
-
def lookup(self, symbol):
|
98 |
-
""" The index that symbol correspond.
|
99 |
-
"""
|
100 |
-
return self.stoi[symbol]
|
101 |
-
|
102 |
-
def reverse(self, index):
|
103 |
-
""" The symbol thar index cottespond.
|
104 |
-
"""
|
105 |
-
return self.itos[index]
|
106 |
-
|
107 |
-
def add_symbol(self, symbol):
|
108 |
-
""" Add a new symbol in vocab.
|
109 |
-
"""
|
110 |
-
if symbol in self.stoi:
|
111 |
-
return
|
112 |
-
N = len(self.stoi)
|
113 |
-
self.stoi[symbol] = N
|
114 |
-
self.itos[N] = symbol
|
115 |
-
|
116 |
-
def add_symbols(self, symbols):
|
117 |
-
""" Add multiple symbols in vocab.
|
118 |
-
"""
|
119 |
-
for symbol in symbols:
|
120 |
-
self.add_symbol(symbol)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/onnx.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
|
14 |
-
# How to use the ONNX Runtime for inference
|
15 |
-
|
16 |
-
🤗 [Optimum](https://github.com/huggingface/optimum) provides a Stable Diffusion pipeline compatible with ONNX Runtime.
|
17 |
-
|
18 |
-
## Installation
|
19 |
-
|
20 |
-
Install 🤗 Optimum with the following command for ONNX Runtime support:
|
21 |
-
|
22 |
-
```
|
23 |
-
pip install optimum["onnxruntime"]
|
24 |
-
```
|
25 |
-
|
26 |
-
## Stable Diffusion
|
27 |
-
|
28 |
-
### Inference
|
29 |
-
|
30 |
-
To load an ONNX model and run inference with the ONNX Runtime, you need to replace [`StableDiffusionPipeline`] with `ORTStableDiffusionPipeline`. In case you want to load a PyTorch model and convert it to the ONNX format on-the-fly, you can set `export=True`.
|
31 |
-
|
32 |
-
```python
|
33 |
-
from optimum.onnxruntime import ORTStableDiffusionPipeline
|
34 |
-
|
35 |
-
model_id = "runwayml/stable-diffusion-v1-5"
|
36 |
-
pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True)
|
37 |
-
prompt = "sailing ship in storm by Leonardo da Vinci"
|
38 |
-
image = pipeline(prompt).images[0]
|
39 |
-
pipeline.save_pretrained("./onnx-stable-diffusion-v1-5")
|
40 |
-
```
|
41 |
-
|
42 |
-
If you want to export the pipeline in the ONNX format offline and later use it for inference,
|
43 |
-
you can use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command:
|
44 |
-
|
45 |
-
```bash
|
46 |
-
optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/
|
47 |
-
```
|
48 |
-
|
49 |
-
Then perform inference:
|
50 |
-
|
51 |
-
```python
|
52 |
-
from optimum.onnxruntime import ORTStableDiffusionPipeline
|
53 |
-
|
54 |
-
model_id = "sd_v15_onnx"
|
55 |
-
pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id)
|
56 |
-
prompt = "sailing ship in storm by Leonardo da Vinci"
|
57 |
-
image = pipeline(prompt).images[0]
|
58 |
-
```
|
59 |
-
|
60 |
-
Notice that we didn't have to specify `export=True` above.
|
61 |
-
|
62 |
-
<div class="flex justify-center">
|
63 |
-
<img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/stable_diffusion_v1_5_ort_sail_boat.png">
|
64 |
-
</div>
|
65 |
-
|
66 |
-
You can find more examples in [optimum documentation](https://huggingface.co/docs/optimum/).
|
67 |
-
|
68 |
-
|
69 |
-
### Supported tasks
|
70 |
-
|
71 |
-
| Task | Loading Class |
|
72 |
-
|--------------------------------------|--------------------------------------|
|
73 |
-
| `text-to-image` | `ORTStableDiffusionPipeline` |
|
74 |
-
| `image-to-image` | `ORTStableDiffusionImg2ImgPipeline` |
|
75 |
-
| `inpaint` | `ORTStableDiffusionInpaintPipeline` |
|
76 |
-
|
77 |
-
## Stable Diffusion XL
|
78 |
-
|
79 |
-
### Export
|
80 |
-
|
81 |
-
To export your model to ONNX, you can use the [Optimum CLI](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) as follows :
|
82 |
-
|
83 |
-
```bash
|
84 |
-
optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/
|
85 |
-
```
|
86 |
-
|
87 |
-
### Inference
|
88 |
-
|
89 |
-
To load an ONNX model and run inference with ONNX Runtime, you need to replace `StableDiffusionPipelineXL` with `ORTStableDiffusionPipelineXL` :
|
90 |
-
|
91 |
-
```python
|
92 |
-
from optimum.onnxruntime import ORTStableDiffusionXLPipeline
|
93 |
-
|
94 |
-
pipeline = ORTStableDiffusionXLPipeline.from_pretrained("sd_xl_onnx")
|
95 |
-
prompt = "sailing ship in storm by Leonardo da Vinci"
|
96 |
-
image = pipeline(prompt).images[0]
|
97 |
-
```
|
98 |
-
|
99 |
-
### Supported tasks
|
100 |
-
|
101 |
-
| Task | Loading Class |
|
102 |
-
|--------------------------------------|--------------------------------------|
|
103 |
-
| `text-to-image` | `ORTStableDiffusionXLPipeline` |
|
104 |
-
| `image-to-image` | `ORTStableDiffusionXLImg2ImgPipeline`|
|
105 |
-
|
106 |
-
## Known Issues
|
107 |
-
|
108 |
-
- Generating multiple prompts in a batch seems to take too much memory. While we look into it, you may need to iterate instead of batching.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import gc
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import torch
|
21 |
-
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
|
22 |
-
|
23 |
-
from diffusers import (
|
24 |
-
AutoencoderKL,
|
25 |
-
DDIMScheduler,
|
26 |
-
StableDiffusionSAGPipeline,
|
27 |
-
UNet2DConditionModel,
|
28 |
-
)
|
29 |
-
from diffusers.utils import slow, torch_device
|
30 |
-
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
|
31 |
-
|
32 |
-
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
|
33 |
-
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
|
34 |
-
|
35 |
-
|
36 |
-
enable_full_determinism()
|
37 |
-
|
38 |
-
|
39 |
-
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
|
40 |
-
pipeline_class = StableDiffusionSAGPipeline
|
41 |
-
params = TEXT_TO_IMAGE_PARAMS
|
42 |
-
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
|
43 |
-
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
|
44 |
-
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
|
45 |
-
|
46 |
-
def get_dummy_components(self):
|
47 |
-
torch.manual_seed(0)
|
48 |
-
unet = UNet2DConditionModel(
|
49 |
-
block_out_channels=(32, 64),
|
50 |
-
layers_per_block=2,
|
51 |
-
sample_size=32,
|
52 |
-
in_channels=4,
|
53 |
-
out_channels=4,
|
54 |
-
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
|
55 |
-
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
|
56 |
-
cross_attention_dim=32,
|
57 |
-
)
|
58 |
-
scheduler = DDIMScheduler(
|
59 |
-
beta_start=0.00085,
|
60 |
-
beta_end=0.012,
|
61 |
-
beta_schedule="scaled_linear",
|
62 |
-
clip_sample=False,
|
63 |
-
set_alpha_to_one=False,
|
64 |
-
)
|
65 |
-
torch.manual_seed(0)
|
66 |
-
vae = AutoencoderKL(
|
67 |
-
block_out_channels=[32, 64],
|
68 |
-
in_channels=3,
|
69 |
-
out_channels=3,
|
70 |
-
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
71 |
-
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
72 |
-
latent_channels=4,
|
73 |
-
)
|
74 |
-
torch.manual_seed(0)
|
75 |
-
text_encoder_config = CLIPTextConfig(
|
76 |
-
bos_token_id=0,
|
77 |
-
eos_token_id=2,
|
78 |
-
hidden_size=32,
|
79 |
-
intermediate_size=37,
|
80 |
-
layer_norm_eps=1e-05,
|
81 |
-
num_attention_heads=4,
|
82 |
-
num_hidden_layers=5,
|
83 |
-
pad_token_id=1,
|
84 |
-
vocab_size=1000,
|
85 |
-
)
|
86 |
-
text_encoder = CLIPTextModel(text_encoder_config)
|
87 |
-
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
|
88 |
-
|
89 |
-
components = {
|
90 |
-
"unet": unet,
|
91 |
-
"scheduler": scheduler,
|
92 |
-
"vae": vae,
|
93 |
-
"text_encoder": text_encoder,
|
94 |
-
"tokenizer": tokenizer,
|
95 |
-
"safety_checker": None,
|
96 |
-
"feature_extractor": None,
|
97 |
-
}
|
98 |
-
return components
|
99 |
-
|
100 |
-
def get_dummy_inputs(self, device, seed=0):
|
101 |
-
if str(device).startswith("mps"):
|
102 |
-
generator = torch.manual_seed(seed)
|
103 |
-
else:
|
104 |
-
generator = torch.Generator(device=device).manual_seed(seed)
|
105 |
-
inputs = {
|
106 |
-
"prompt": ".",
|
107 |
-
"generator": generator,
|
108 |
-
"num_inference_steps": 2,
|
109 |
-
"guidance_scale": 1.0,
|
110 |
-
"sag_scale": 1.0,
|
111 |
-
"output_type": "numpy",
|
112 |
-
}
|
113 |
-
return inputs
|
114 |
-
|
115 |
-
def test_inference_batch_single_identical(self):
|
116 |
-
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
|
117 |
-
|
118 |
-
|
119 |
-
@slow
|
120 |
-
@require_torch_gpu
|
121 |
-
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
|
122 |
-
def tearDown(self):
|
123 |
-
# clean up the VRAM after each test
|
124 |
-
super().tearDown()
|
125 |
-
gc.collect()
|
126 |
-
torch.cuda.empty_cache()
|
127 |
-
|
128 |
-
def test_stable_diffusion_1(self):
|
129 |
-
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
|
130 |
-
sag_pipe = sag_pipe.to(torch_device)
|
131 |
-
sag_pipe.set_progress_bar_config(disable=None)
|
132 |
-
|
133 |
-
prompt = "."
|
134 |
-
generator = torch.manual_seed(0)
|
135 |
-
output = sag_pipe(
|
136 |
-
[prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
|
137 |
-
)
|
138 |
-
|
139 |
-
image = output.images
|
140 |
-
|
141 |
-
image_slice = image[0, -3:, -3:, -1]
|
142 |
-
|
143 |
-
assert image.shape == (1, 512, 512, 3)
|
144 |
-
expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
|
145 |
-
|
146 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
|
147 |
-
|
148 |
-
def test_stable_diffusion_2(self):
|
149 |
-
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
|
150 |
-
sag_pipe = sag_pipe.to(torch_device)
|
151 |
-
sag_pipe.set_progress_bar_config(disable=None)
|
152 |
-
|
153 |
-
prompt = "."
|
154 |
-
generator = torch.manual_seed(0)
|
155 |
-
output = sag_pipe(
|
156 |
-
[prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
|
157 |
-
)
|
158 |
-
|
159 |
-
image = output.images
|
160 |
-
|
161 |
-
image_slice = image[0, -3:, -3:, -1]
|
162 |
-
|
163 |
-
assert image.shape == (1, 512, 512, 3)
|
164 |
-
expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
|
165 |
-
|
166 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
|
167 |
-
|
168 |
-
def test_stable_diffusion_2_non_square(self):
|
169 |
-
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
|
170 |
-
sag_pipe = sag_pipe.to(torch_device)
|
171 |
-
sag_pipe.set_progress_bar_config(disable=None)
|
172 |
-
|
173 |
-
prompt = "."
|
174 |
-
generator = torch.manual_seed(0)
|
175 |
-
output = sag_pipe(
|
176 |
-
[prompt],
|
177 |
-
width=768,
|
178 |
-
height=512,
|
179 |
-
generator=generator,
|
180 |
-
guidance_scale=7.5,
|
181 |
-
sag_scale=1.0,
|
182 |
-
num_inference_steps=20,
|
183 |
-
output_type="np",
|
184 |
-
)
|
185 |
-
|
186 |
-
image = output.images
|
187 |
-
|
188 |
-
assert image.shape == (1, 512, 768, 3)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/datasets/coco_detection.py',
|
3 |
-
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
|
4 |
-
]
|
5 |
-
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa
|
6 |
-
model = dict(
|
7 |
-
type='KnowledgeDistillationSingleStageDetector',
|
8 |
-
pretrained='torchvision://resnet18',
|
9 |
-
teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py',
|
10 |
-
teacher_ckpt=teacher_ckpt,
|
11 |
-
backbone=dict(
|
12 |
-
type='ResNet',
|
13 |
-
depth=18,
|
14 |
-
num_stages=4,
|
15 |
-
out_indices=(0, 1, 2, 3),
|
16 |
-
frozen_stages=1,
|
17 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
18 |
-
norm_eval=True,
|
19 |
-
style='pytorch'),
|
20 |
-
neck=dict(
|
21 |
-
type='FPN',
|
22 |
-
in_channels=[64, 128, 256, 512],
|
23 |
-
out_channels=256,
|
24 |
-
start_level=1,
|
25 |
-
add_extra_convs='on_output',
|
26 |
-
num_outs=5),
|
27 |
-
bbox_head=dict(
|
28 |
-
type='LDHead',
|
29 |
-
num_classes=80,
|
30 |
-
in_channels=256,
|
31 |
-
stacked_convs=4,
|
32 |
-
feat_channels=256,
|
33 |
-
anchor_generator=dict(
|
34 |
-
type='AnchorGenerator',
|
35 |
-
ratios=[1.0],
|
36 |
-
octave_base_scale=8,
|
37 |
-
scales_per_octave=1,
|
38 |
-
strides=[8, 16, 32, 64, 128]),
|
39 |
-
loss_cls=dict(
|
40 |
-
type='QualityFocalLoss',
|
41 |
-
use_sigmoid=True,
|
42 |
-
beta=2.0,
|
43 |
-
loss_weight=1.0),
|
44 |
-
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
|
45 |
-
loss_ld=dict(
|
46 |
-
type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
|
47 |
-
reg_max=16,
|
48 |
-
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
|
49 |
-
# training and testing settings
|
50 |
-
train_cfg=dict(
|
51 |
-
assigner=dict(type='ATSSAssigner', topk=9),
|
52 |
-
allowed_border=-1,
|
53 |
-
pos_weight=-1,
|
54 |
-
debug=False),
|
55 |
-
test_cfg=dict(
|
56 |
-
nms_pre=1000,
|
57 |
-
min_bbox_size=0,
|
58 |
-
score_thr=0.05,
|
59 |
-
nms=dict(type='nms', iou_threshold=0.6),
|
60 |
-
max_per_img=100))
|
61 |
-
|
62 |
-
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
|
2 |
-
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './fcn_r50-d8_480x480_80k_pascal_context.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/AnimaLab/bias-test-gpt-pairs/mgr_bias_scoring.py
DELETED
@@ -1,932 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
import string
|
5 |
-
import re
|
6 |
-
import random
|
7 |
-
import gradio as gr
|
8 |
-
from tqdm import tqdm
|
9 |
-
tqdm().pandas()
|
10 |
-
|
11 |
-
import nltk
|
12 |
-
from nltk.tokenize.treebank import TreebankWordDetokenizer
|
13 |
-
nltk.download('punkt')
|
14 |
-
|
15 |
-
# BERT imports
|
16 |
-
from transformers import BertForMaskedLM, BertTokenizer
|
17 |
-
# GPT2 imports
|
18 |
-
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
19 |
-
# BioBPT
|
20 |
-
from transformers import BioGptForCausalLM, BioGptTokenizer
|
21 |
-
# LLAMA
|
22 |
-
from transformers import LlamaTokenizer, LlamaForCausalLM
|
23 |
-
# FALCON
|
24 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
25 |
-
|
26 |
-
import mgr_sentences as smgr
|
27 |
-
import mgr_biases as bmgr
|
28 |
-
import mgr_requests as rq_mgr
|
29 |
-
|
30 |
-
from error_messages import *
|
31 |
-
|
32 |
-
import contextlib
|
33 |
-
autocast = contextlib.nullcontext
|
34 |
-
import gc
|
35 |
-
|
36 |
-
# Great article about handing big models - https://huggingface.co/blog/accelerate-large-models
|
37 |
-
def _getModelSafe(model_name, device):
|
38 |
-
model = None
|
39 |
-
tokenizer = None
|
40 |
-
try:
|
41 |
-
model, tokenizer = _getModel(model_name, device)
|
42 |
-
except Exception as err:
|
43 |
-
print(f"Loading Model Error: {err}")
|
44 |
-
print("Cleaning the model...")
|
45 |
-
model = None
|
46 |
-
tokenizer = None
|
47 |
-
torch.cuda.empty_cache()
|
48 |
-
gc.collect()
|
49 |
-
|
50 |
-
if model == None or tokenizer == None:
|
51 |
-
print("Cleaned, trying reloading....")
|
52 |
-
model, tokenizer = _getModel(model_name, device)
|
53 |
-
|
54 |
-
return model, tokenizer
|
55 |
-
|
56 |
-
def _getModel(model_name, device):
|
57 |
-
if "bert" in model_name.lower():
|
58 |
-
tokenizer = BertTokenizer.from_pretrained(model_name)
|
59 |
-
model = BertForMaskedLM.from_pretrained(model_name)
|
60 |
-
elif "biogpt" in model_name.lower():
|
61 |
-
tokenizer = BioGptTokenizer.from_pretrained(model_name)
|
62 |
-
model = BioGptForCausalLM.from_pretrained(model_name)
|
63 |
-
elif 'gpt2' in model_name.lower():
|
64 |
-
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
|
65 |
-
model = GPT2LMHeadModel.from_pretrained(model_name)
|
66 |
-
elif 'llama' in model_name.lower():
|
67 |
-
print(f"Getting LLAMA model: {model_name}")
|
68 |
-
tokenizer = LlamaTokenizer.from_pretrained(model_name)
|
69 |
-
model = LlamaForCausalLM.from_pretrained(model_name,
|
70 |
-
torch_dtype=torch.bfloat16,
|
71 |
-
low_cpu_mem_usage=True, ##
|
72 |
-
#use_safetensors=True, ##
|
73 |
-
#offload_folder="offload",
|
74 |
-
#offload_state_dict = True,
|
75 |
-
#device_map='auto'
|
76 |
-
)
|
77 |
-
elif "falcon" in model_name.lower():
|
78 |
-
print(f"Getting FALCON model: {model_name}")
|
79 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
80 |
-
model = AutoModelForCausalLM.from_pretrained(model_name,
|
81 |
-
torch_dtype=torch.bfloat16,
|
82 |
-
trust_remote_code=True,
|
83 |
-
low_cpu_mem_usage=True, ##
|
84 |
-
#use_safetensors=True, ##
|
85 |
-
#offload_folder="offload",
|
86 |
-
#offload_state_dict = True,
|
87 |
-
#device_map='auto'
|
88 |
-
)
|
89 |
-
#model.tie_weights()
|
90 |
-
if model == None:
|
91 |
-
print("Model is empty!!!")
|
92 |
-
else:
|
93 |
-
model = model.to(device)
|
94 |
-
model.eval()
|
95 |
-
torch.set_grad_enabled(False)
|
96 |
-
|
97 |
-
return model, tokenizer
|
98 |
-
|
99 |
-
def makeOrdGrpKey(row):
|
100 |
-
grp_lst = [row['grp_term1'], row['grp_term2']]
|
101 |
-
grp_lst.sort()
|
102 |
-
|
103 |
-
return f"{grp_lst[0]}/{grp_lst[1]}"
|
104 |
-
|
105 |
-
def genMissingPairsSpec(bias_spec, test_sentences_df):
|
106 |
-
print("--- GET MISSING BIAS PAIRS ---")
|
107 |
-
g1, g2, a1, a2 = get_words(bias_spec)
|
108 |
-
|
109 |
-
print("---Sentences---")
|
110 |
-
print(list(test_sentences_df.columns))
|
111 |
-
|
112 |
-
test_sentences_df['gr_cmp_key'] = test_sentences_df.progress_apply(makeOrdGrpKey, axis=1)
|
113 |
-
|
114 |
-
print("---Sentences GRP KEY---")
|
115 |
-
print(list(test_sentences_df.columns))
|
116 |
-
|
117 |
-
grp_terms = g1 + g2
|
118 |
-
att_terms = a1 + a2
|
119 |
-
|
120 |
-
grp_cmp_dict = {}
|
121 |
-
for gr1, gr2 in zip(g1, g2):
|
122 |
-
gr_lst = [gr1, gr2]
|
123 |
-
gr_lst.sort()
|
124 |
-
|
125 |
-
if gr1 not in grp_cmp_dict:
|
126 |
-
grp_cmp_dict[gr1] = [gr2, f"{gr_lst[0]}/{gr_lst[1]}"]
|
127 |
-
if gr2 not in grp_cmp_dict:
|
128 |
-
grp_cmp_dict[gr2] = [gr1, f"{gr_lst[0]}/{gr_lst[1]}"]
|
129 |
-
|
130 |
-
print("---GRP PAIR KEY---")
|
131 |
-
print(grp_cmp_dict)
|
132 |
-
|
133 |
-
print("---PERMITTED PAIRS---")
|
134 |
-
permitted_pairs = []
|
135 |
-
for gr1, gr2 in zip(g1, g2):
|
136 |
-
gr_lst = [gr1, gr2]
|
137 |
-
gr_lst.sort()
|
138 |
-
|
139 |
-
permitted_pairs.append(f"{gr_lst[0]}/{gr_lst[1]}")
|
140 |
-
|
141 |
-
if gr1 not in grp_cmp_dict:
|
142 |
-
grp_cmp_dict[gr1] = [gr2, f"{gr_lst[0]}/{gr_lst[1]}"]
|
143 |
-
if gr2 not in grp_cmp_dict:
|
144 |
-
grp_cmp_dict[gr2] = [gr1, f"{gr_lst[0]}/{gr_lst[1]}"]
|
145 |
-
|
146 |
-
print(f"Permitted pairs: {permitted_pairs}")
|
147 |
-
|
148 |
-
att_grp_mat = []
|
149 |
-
for grp in grp_terms[0:]: #list(bias_spec['social_groups'].items())[0][1]:
|
150 |
-
for att in att_terms:
|
151 |
-
sub_df = test_sentences_df.query("att_term==@att and grp_term1==@grp") # or grp_term2==@grp1
|
152 |
-
grp_att_pair = sub_df.groupby(['gr_cmp_key','att_term'])['att_term'].agg(["count"]).reset_index().values.tolist()
|
153 |
-
|
154 |
-
isAdded = False
|
155 |
-
if len(grp_att_pair)>0:
|
156 |
-
if len(grp_att_pair) == 1:
|
157 |
-
att_grp_mat.append(grp_att_pair[0])
|
158 |
-
isAdded = True
|
159 |
-
elif len(grp_att_pair) > 1:
|
160 |
-
print(f"Multiple groups per attribute: {grp_att_pair}")
|
161 |
-
for pair in grp_att_pair:
|
162 |
-
if pair[0] in permitted_pairs:
|
163 |
-
att_grp_mat.append(pair)
|
164 |
-
isAdded = True
|
165 |
-
|
166 |
-
# Not added pair
|
167 |
-
if isAdded == False:
|
168 |
-
att_grp_mat.append([grp_cmp_dict[grp][1], att, 0])
|
169 |
-
|
170 |
-
print("---ATT GRP MATRIX---")
|
171 |
-
print(att_grp_mat)
|
172 |
-
|
173 |
-
att_grp_df = pd.DataFrame(att_grp_mat, columns=['grp_pair','att_term','count'])
|
174 |
-
print(att_grp_df.head(2))
|
175 |
-
|
176 |
-
agg_att_grp_df = att_grp_df.groupby(["grp_pair","att_term"])["count"].agg(["sum"]).reset_index()
|
177 |
-
print(agg_att_grp_df.columns)
|
178 |
-
|
179 |
-
def missingCounts(row, max):
|
180 |
-
n_gap = np.max([0, max - row['sum']])
|
181 |
-
return n_gap
|
182 |
-
|
183 |
-
b_name = rq_mgr.getBiasName(g1, g2, a1, a2)
|
184 |
-
|
185 |
-
max_count = agg_att_grp_df.max()['sum']
|
186 |
-
agg_att_grp_df['n_gap'] = agg_att_grp_df.progress_apply(missingCounts, axis=1, max=2)
|
187 |
-
#print(agg_att_grp_df.head(2))
|
188 |
-
|
189 |
-
miss_att_grp_lst = agg_att_grp_df[agg_att_grp_df['n_gap'] > 0][['grp_pair','att_term','n_gap']].values.tolist()
|
190 |
-
print("---MISSING MATRIX SENTENCES---")
|
191 |
-
print(f"Bias Name: {b_name}, Max count: {max_count}")
|
192 |
-
print(f"Miss pairs: {len(miss_att_grp_lst)}")
|
193 |
-
print(f"Required to gen: {agg_att_grp_df['n_gap'].sum()}")
|
194 |
-
print(miss_att_grp_lst[0:10])
|
195 |
-
|
196 |
-
def genMissingAttribBiasSpec(bias_spec, test_sentences_df):
|
197 |
-
g1, g2, a1, a2 = get_words(bias_spec)
|
198 |
-
|
199 |
-
attributes_g1 = a1 #list(set(a1 + [a.replace(' ','-') for a in a1])) #bias_spec['attributes']['attribute 1']
|
200 |
-
attributes_g2 = a2 #list(set(a2 + [a.replace(' ','-') for a in a2])) #bias_spec['attributes']['attribute 2']
|
201 |
-
|
202 |
-
grp1_att_dict = {}
|
203 |
-
grp2_att_dict = {}
|
204 |
-
|
205 |
-
max_att_count = 0
|
206 |
-
for att in attributes_g1+attributes_g2: #test_sentences_df['Attribute term'].unique():
|
207 |
-
#print(f"Att: {att}")
|
208 |
-
att_cnt = test_sentences_df[test_sentences_df['att_term'] == att].shape[0]
|
209 |
-
if att_cnt > max_att_count:
|
210 |
-
max_att_count = att_cnt
|
211 |
-
if att in attributes_g1:
|
212 |
-
grp1_att_dict[att] = att_cnt
|
213 |
-
elif att in attributes_g2:
|
214 |
-
grp2_att_dict[att] = att_cnt
|
215 |
-
|
216 |
-
# get the difference from max
|
217 |
-
for att, count in grp1_att_dict.items():
|
218 |
-
grp1_att_dict[att] = max_att_count - count
|
219 |
-
|
220 |
-
# get the difference from max
|
221 |
-
for att, count in grp2_att_dict.items():
|
222 |
-
grp2_att_dict[att] = max_att_count - count
|
223 |
-
|
224 |
-
return (grp1_att_dict, grp2_att_dict)
|
225 |
-
|
226 |
-
# Adding period to end sentence
|
227 |
-
def add_period(template):
|
228 |
-
if template[-1] not in string.punctuation:
|
229 |
-
template += "."
|
230 |
-
return template
|
231 |
-
|
232 |
-
# Convert generated sentence to template - not caring about referential terms
|
233 |
-
def sentence_to_template(sentence, grp_term, mask_token):
|
234 |
-
template = add_period(sentence.strip("\""))
|
235 |
-
|
236 |
-
fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower()))
|
237 |
-
while len(fnd_grp) > 0:
|
238 |
-
idx1 = fnd_grp[0].span(0)[0]
|
239 |
-
if template[idx1] == " ":
|
240 |
-
idx1+=1
|
241 |
-
idx2 = fnd_grp[0].span(0)[1]-1
|
242 |
-
template = template[0:idx1]+mask_token+template[idx2:]
|
243 |
-
|
244 |
-
fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower()))
|
245 |
-
|
246 |
-
return template
|
247 |
-
|
248 |
-
# Convert generated sentence to template - not caring about referential terms
|
249 |
-
def sentence_to_template_df(row):
|
250 |
-
sentence = row['Sentence']
|
251 |
-
grp_term_1 = row['Group term 1']
|
252 |
-
grp_term_2 = row['Group term 2']
|
253 |
-
grp_term = grp_term_1 if grp_term_1.lower() in sentence.lower() else grp_term_2
|
254 |
-
#template = add_period(sentence.strip("\""))
|
255 |
-
|
256 |
-
#fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower()))
|
257 |
-
#while len(fnd_grp) > 0:
|
258 |
-
# idx1 = fnd_grp[0].span(0)[0]
|
259 |
-
# if template[idx1] == " ":
|
260 |
-
# idx1+=1
|
261 |
-
# idx2 = fnd_grp[0].span(0)[1]-1
|
262 |
-
# template = template[0:idx1]+f"[T]"+template[idx2:]
|
263 |
-
|
264 |
-
# fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower()))
|
265 |
-
|
266 |
-
template = sentence_to_template(sentence, grp_term, mask_token="[T]")
|
267 |
-
|
268 |
-
return template
|
269 |
-
|
270 |
-
# Detect differences between alternative sentences and construct a template
|
271 |
-
def maskSentenceDifferences(sentence, rewrite, target_words, att_term):
|
272 |
-
if '-' in att_term:
|
273 |
-
sentence = sentence.replace(att_term.replace("-",""), att_term.replace("-"," "))
|
274 |
-
#print(sentence)
|
275 |
-
|
276 |
-
if ' ' in att_term:
|
277 |
-
no_space_att = att_term.replace(" ", "")
|
278 |
-
if no_space_att in rewrite:
|
279 |
-
rewrite = rewrite.replace(no_space_att, att_term)
|
280 |
-
|
281 |
-
# identify group term in both sentences
|
282 |
-
sentence = sentence_to_template(sentence, target_words[0], "*")
|
283 |
-
rewrite = sentence_to_template(rewrite, target_words[1], "*")
|
284 |
-
#print(f'S1: {sentence}')
|
285 |
-
#print(f'R1: {rewrite}')
|
286 |
-
|
287 |
-
# add variation without '-'
|
288 |
-
target_words.extend([t.replace('-','') for t in target_words])
|
289 |
-
target_words = [t.lower() for t in target_words]
|
290 |
-
|
291 |
-
s_words = nltk.word_tokenize(sentence)
|
292 |
-
r_words = nltk.word_tokenize(rewrite)
|
293 |
-
|
294 |
-
template = ""
|
295 |
-
template_tokens = []
|
296 |
-
add_refs = []
|
297 |
-
|
298 |
-
for s, r in zip(s_words, r_words):
|
299 |
-
if s != r:
|
300 |
-
if s.lower() in target_words:
|
301 |
-
template += "[T]"
|
302 |
-
template_tokens.append("[T]")
|
303 |
-
else:
|
304 |
-
template += "[R]"
|
305 |
-
template_tokens.append("[R]")
|
306 |
-
|
307 |
-
l_mask = s.lower()
|
308 |
-
r_mask = r.lower()
|
309 |
-
if l_mask == "*" and r_mask != "*":
|
310 |
-
l_mask = target_words[0]
|
311 |
-
elif l_mask != "*" and r_mask == "*":
|
312 |
-
r_mask = target_words[1]
|
313 |
-
|
314 |
-
add_refs.append((l_mask, r_mask))
|
315 |
-
|
316 |
-
#add_refs.append((s.lower(),r.lower()))
|
317 |
-
elif s in string.punctuation:
|
318 |
-
template += s.strip(" ")
|
319 |
-
template_tokens.append(s)
|
320 |
-
else:
|
321 |
-
template += s
|
322 |
-
template_tokens.append(s)
|
323 |
-
|
324 |
-
template += " "
|
325 |
-
|
326 |
-
return TreebankWordDetokenizer().detokenize(template_tokens).replace("*","[T]"), add_refs
|
327 |
-
|
328 |
-
# turn generated sentence into a test templates - reference term aware version
|
329 |
-
def ref_terms_sentence_to_template(row):
|
330 |
-
sentence = row['Sentence']
|
331 |
-
alt_sentence = row['Alternative Sentence']
|
332 |
-
grp_term_1 = row['Group term 1']
|
333 |
-
grp_term_2 = row['Group term 2']
|
334 |
-
att_term = row['Attribute term']
|
335 |
-
|
336 |
-
# find out which social group the generator term belongs to
|
337 |
-
grp_term_pair = []
|
338 |
-
|
339 |
-
if grp_term_1.lower() in sentence.lower():
|
340 |
-
grp_term_pair = [grp_term_1, grp_term_2]
|
341 |
-
elif grp_term_2.lower() in sentence.lower():
|
342 |
-
grp_term_pair = [grp_term_2, grp_term_1]
|
343 |
-
else:
|
344 |
-
print(f"ERROR: missing either group term: [{grp_term_1},{grp_term_2}] in sentence: {sentence}")
|
345 |
-
|
346 |
-
template, grp_refs = maskSentenceDifferences(sentence, alt_sentence, grp_term_pair, att_term)
|
347 |
-
return pd.Series([template, grp_refs])
|
348 |
-
|
349 |
-
|
350 |
-
# make sure to use equal number of keywords for opposing attribute and social group specifications
|
351 |
-
def make_lengths_equal(t1, t2, a1, a2):
|
352 |
-
if len(t1) > len(t2):
|
353 |
-
t1 = random.sample(t1, len(t2))
|
354 |
-
elif len(t1) < len(t2):
|
355 |
-
t2 = random.sample(t2, len(t1))
|
356 |
-
|
357 |
-
if len(a1) > len(a2):
|
358 |
-
a1 = random.sample(a1, len(a2))
|
359 |
-
elif len(a1) < len(a2):
|
360 |
-
a2 = random.sample(a2, len(a1))
|
361 |
-
|
362 |
-
return (t1, t2, a1, a2)
|
363 |
-
|
364 |
-
def get_words(bias):
|
365 |
-
t1 = list(bias['social_groups'].items())[0][1]
|
366 |
-
t2 = list(bias['social_groups'].items())[1][1]
|
367 |
-
a1 = list(bias['attributes'].items())[0][1]
|
368 |
-
a2 = list(bias['attributes'].items())[1][1]
|
369 |
-
|
370 |
-
(t1, t2, a1, a2) = make_lengths_equal(t1, t2, a1, a2)
|
371 |
-
|
372 |
-
return (t1, t2, a1, a2)
|
373 |
-
|
374 |
-
def get_group_term_map(bias):
|
375 |
-
grp2term = {}
|
376 |
-
for group, terms in bias['social_groups'].items():
|
377 |
-
grp2term[group] = terms
|
378 |
-
|
379 |
-
return grp2term
|
380 |
-
|
381 |
-
def get_att_term_map(bias):
|
382 |
-
att2term = {}
|
383 |
-
for att, terms in bias['attributes'].items():
|
384 |
-
att2term[att] = terms
|
385 |
-
|
386 |
-
return att2term
|
387 |
-
|
388 |
-
# check if term within term list
|
389 |
-
def checkinList(term, term_list, verbose=False):
|
390 |
-
for cterm in term_list:
|
391 |
-
#print(f"Comparing <{cterm}><{term}>")
|
392 |
-
if cterm == term or cterm.replace(" ","-") == term.replace(' ','-'):
|
393 |
-
return True
|
394 |
-
return False
|
395 |
-
|
396 |
-
# Convert Test sentences to stereotype/anti-stereotype pairs
|
397 |
-
def convert2pairsFromDF(bias_spec, test_sentences_df, verbose=False):
|
398 |
-
pairs = []
|
399 |
-
headers = ['sentence','alt_sentence','att_term','template','grp_term_1','grp_term_2','label_1','label_2','grp_refs']
|
400 |
-
|
401 |
-
# get group to words mapping
|
402 |
-
XY_2_xy = get_group_term_map(bias_spec)
|
403 |
-
if verbose == True:
|
404 |
-
print(f"grp2term: {XY_2_xy}")
|
405 |
-
AB_2_ab = get_att_term_map(bias_spec)
|
406 |
-
if verbose == True:
|
407 |
-
print(f"att2term: {AB_2_ab}")
|
408 |
-
|
409 |
-
ri = 0
|
410 |
-
for idx, row in test_sentences_df.iterrows():
|
411 |
-
sentence = row['Sentence']
|
412 |
-
alt_sentence = row['Alternative Sentence']
|
413 |
-
grp_term_1 = row['Group term 1']
|
414 |
-
grp_term_2 = row['Group term 2']
|
415 |
-
grp_refs = row['grp_refs']
|
416 |
-
att_term = row['Attribute term']
|
417 |
-
template = row['Template']
|
418 |
-
|
419 |
-
direction = []
|
420 |
-
if checkinList(att_term, list(AB_2_ab.items())[0][1]):
|
421 |
-
direction = ["stereotype", "anti-stereotype"]
|
422 |
-
elif checkinList(att_term, list(AB_2_ab.items())[1][1]):
|
423 |
-
direction = ["anti-stereotype", "stereotype"]
|
424 |
-
if len(direction) == 0:
|
425 |
-
print("ERROR: Direction empty!")
|
426 |
-
checkinList(att_term, list(AB_2_ab.items())[0][1], verbose=True)
|
427 |
-
checkinList(att_term, list(AB_2_ab.items())[1][1], verbose=True)
|
428 |
-
|
429 |
-
grp_term_idx = -1
|
430 |
-
grp_term_pair = [grp_term_1, grp_term_2]
|
431 |
-
sentence_pair = [sentence, alt_sentence]
|
432 |
-
if grp_term_1 in list(XY_2_xy.items())[0][1]:
|
433 |
-
if grp_term_2 not in list(XY_2_xy.items())[1][1]:
|
434 |
-
print(f"ERROR: No group term: {grp_term_2} in 2nd group list {list(XY_2_xy.items())[1][1]}")
|
435 |
-
|
436 |
-
elif grp_term_1 in list(XY_2_xy.items())[1][1]:
|
437 |
-
if grp_term_2 not in list(XY_2_xy.items())[0][1]:
|
438 |
-
print(f"ERROR: No group term: {grp_term_2} in 2nd group list {list(XY_2_xy.items())[0][1]}")
|
439 |
-
direction.reverse()
|
440 |
-
#sentence_pair.reverse()
|
441 |
-
|
442 |
-
if verbose==True:
|
443 |
-
print(f"Direction: {direction}")
|
444 |
-
print(f"Grp pair: {grp_term_pair}")
|
445 |
-
print(f"Sentences: {sentence_pair}")
|
446 |
-
|
447 |
-
#print(f"GRP term pair: {grp_term_pair}")
|
448 |
-
#print(f"Direction: {direction}")
|
449 |
-
if len(grp_term_pair) == 0:
|
450 |
-
print(f"ERROR: Missing for sentence: {template} -> {grp_term_1}, {sentence}")
|
451 |
-
|
452 |
-
pairs.append([sentence, alt_sentence, att_term, template, grp_term_pair[0], grp_term_pair[1], direction[0], direction[1], grp_refs])
|
453 |
-
|
454 |
-
bPairs_df = pd.DataFrame(pairs, columns=headers)
|
455 |
-
#bPairs_df = bPairs_df.drop_duplicates(subset = ["group_term", "template"])
|
456 |
-
if verbose == True:
|
457 |
-
print(bPairs_df.head(1))
|
458 |
-
|
459 |
-
return bPairs_df
|
460 |
-
|
461 |
-
# Convert Test sentences to stereotype/anti-stereotyped pairs
|
462 |
-
def convert2pairs(bias_spec, test_sentences_df):
|
463 |
-
pairs = []
|
464 |
-
headers = ['sentence','alt_sentence','att_term','template','grp_term_1','grp_term_2','label_1','label_2','grp_refs']
|
465 |
-
|
466 |
-
# get group to words mapping
|
467 |
-
XY_2_xy = get_group_term_map(bias_spec)
|
468 |
-
print(f"grp2term: {XY_2_xy}")
|
469 |
-
AB_2_ab = get_att_term_map(bias_spec)
|
470 |
-
print(f"att2term: {AB_2_ab}")
|
471 |
-
|
472 |
-
ri = 0
|
473 |
-
for idx, row in test_sentences_df.iterrows():
|
474 |
-
sentence = row['Sentence']
|
475 |
-
alt_sentence = row['Alternative Sentence']
|
476 |
-
grp_term_1 = row['Group term 1']
|
477 |
-
grp_term_2 = row['Group term 2']
|
478 |
-
grp_refs = row['grp_refs']
|
479 |
-
grp_term = grp_term_1# if grp_term_1 in sentence else grp_term_2
|
480 |
-
|
481 |
-
direction = []
|
482 |
-
if checkinList(row['Attribute term'], list(AB_2_ab.items())[0][1]):
|
483 |
-
direction = ["stereotype", "anti-stereotype"]
|
484 |
-
elif checkinList(row['Attribute term'], list(AB_2_ab.items())[1][1]):
|
485 |
-
direction = ["anti-stereotype", "stereotype"]
|
486 |
-
if len(direction) == 0:
|
487 |
-
print("Direction empty!")
|
488 |
-
checkinList(row['Attribute term'], list(AB_2_ab.items())[0][1], verbose=True)
|
489 |
-
checkinList(row['Attribute term'], list(AB_2_ab.items())[1][1], verbose=True)
|
490 |
-
raise gr.Error(BIAS_SENTENCES_MISMATCH_ERROR)
|
491 |
-
|
492 |
-
grp_term_idx = -1
|
493 |
-
grp_term_pair = []
|
494 |
-
sentence_pair = [sentence, alt_sentence]
|
495 |
-
if grp_term in list(XY_2_xy.items())[0][1]:
|
496 |
-
grp_term_idx = list(XY_2_xy.items())[0][1].index(grp_term)
|
497 |
-
try:
|
498 |
-
grp_term_pair = [grp_term, list(XY_2_xy.items())[1][1][grp_term_idx]]
|
499 |
-
except IndexError:
|
500 |
-
print(f"Index {grp_term_idx} not found in list {list(XY_2_xy.items())[1][1]}, choosing random...")
|
501 |
-
grp_term_idx = random.randint(0, len(list(XY_2_xy.items())[1][1])-1)
|
502 |
-
print(f"New group term idx: {grp_term_idx} for list {list(XY_2_xy.items())[1][1]}")
|
503 |
-
grp_term_pair = [grp_term, list(XY_2_xy.items())[1][1][grp_term_idx]]
|
504 |
-
|
505 |
-
elif grp_term in list(XY_2_xy.items())[1][1]:
|
506 |
-
grp_term_idx = list(XY_2_xy.items())[1][1].index(grp_term)
|
507 |
-
try:
|
508 |
-
grp_term_pair = [grp_term, list(XY_2_xy.items())[0][1][grp_term_idx]]
|
509 |
-
except IndexError:
|
510 |
-
print(f"Index {grp_term_idx} not found in list {list(XY_2_xy.items())[0][1]}, choosing random...")
|
511 |
-
grp_term_idx = random.randint(0, len(list(XY_2_xy.items())[0][1])-1)
|
512 |
-
print(f"New group term idx: {grp_term_idx} for list {list(XY_2_xy.items())[0][1]}")
|
513 |
-
grp_term_pair = [grp_term, list(XY_2_xy.items())[0][1][grp_term_idx]]
|
514 |
-
|
515 |
-
direction.reverse()
|
516 |
-
#sentence_pair.reverse()
|
517 |
-
|
518 |
-
#print(f"GRP term pair: {grp_term_pair}")
|
519 |
-
#print(f"Direction: {direction}")
|
520 |
-
if len(grp_term_pair) == 0:
|
521 |
-
print(f"Missing for sentence: {row['Template']} -> {grp_term}, {sentence}")
|
522 |
-
|
523 |
-
pairs.append([sentence_pair[0], sentence_pair[1], row['Attribute term'], row['Template'], grp_term_pair[0], grp_term_pair[1], direction[0], direction[1], grp_refs])
|
524 |
-
|
525 |
-
bPairs_df = pd.DataFrame(pairs, columns=headers)
|
526 |
-
#bPairs_df = bPairs_df.drop_duplicates(subset = ["group_term", "template"])
|
527 |
-
print(bPairs_df.head(1))
|
528 |
-
|
529 |
-
return bPairs_df
|
530 |
-
|
531 |
-
# get multiple indices if target term broken up into multiple tokens
|
532 |
-
def get_mask_idx(ids, mask_token_id):
|
533 |
-
"""num_tokens: number of tokens the target word is broken into"""
|
534 |
-
ids = torch.Tensor.tolist(ids)[0]
|
535 |
-
return ids.index(mask_token_id)
|
536 |
-
|
537 |
-
# Get probability for 2 variants of a template using target terms
|
538 |
-
def getBERTProb(model, tokenizer, template, targets, device, verbose=False):
|
539 |
-
prior_token_ids = tokenizer.encode(template, add_special_tokens=True, return_tensors="pt")
|
540 |
-
prior_token_ids = prior_token_ids.to(device)
|
541 |
-
prior_logits = model(prior_token_ids)
|
542 |
-
|
543 |
-
target_probs = []
|
544 |
-
sentences = []
|
545 |
-
for target in targets:
|
546 |
-
targ_id = tokenizer.encode(target, add_special_tokens=False)
|
547 |
-
if verbose:
|
548 |
-
print("Targ ids:", targ_id)
|
549 |
-
|
550 |
-
logits = prior_logits[0][0][get_mask_idx(prior_token_ids, tokenizer.mask_token_id)][targ_id]
|
551 |
-
if verbose:
|
552 |
-
print("Logits:", logits)
|
553 |
-
|
554 |
-
target_probs.append(np.mean(logits.cpu().numpy()))
|
555 |
-
sentences.append(template.replace("[T]", target))
|
556 |
-
|
557 |
-
if verbose:
|
558 |
-
print("Target probs:", target_probs)
|
559 |
-
|
560 |
-
return target_probs, sentences
|
561 |
-
|
562 |
-
# Get probability for 2 variants of a template using target terms
|
563 |
-
def getGPT2Prob(model, tokenizer, template, targets, device, verbose=False):
|
564 |
-
target_probs = []
|
565 |
-
sentences = []
|
566 |
-
for target in targets:
|
567 |
-
sentence = template.replace("[T]", target)
|
568 |
-
if verbose:
|
569 |
-
print(f"Sentence with target {target}: {sentence}")
|
570 |
-
|
571 |
-
tensor_input = tokenizer.encode(sentence, return_tensors="pt").to(device)
|
572 |
-
outputs = model(tensor_input, labels=tensor_input)
|
573 |
-
target_probs.append(outputs.loss.item())
|
574 |
-
sentences.append(sentence)
|
575 |
-
|
576 |
-
return [max(target_probs)-l for l in target_probs], sentences
|
577 |
-
|
578 |
-
# Get probability for 2 variants of a sentence
|
579 |
-
def getGPT2ProbPairs(model, tokenizer, sentences, targets, device, verbose=False):
|
580 |
-
target_probs = []
|
581 |
-
tested_sentences = []
|
582 |
-
|
583 |
-
for ti, (sentence, target) in enumerate(zip(sentences, targets)):
|
584 |
-
#trg_input = tokenizer.encode(target, return_tensors="pt").to(device)
|
585 |
-
#outputs = model(trg_input, labels=trg_input)
|
586 |
-
#trg_prob = outputs.loss.item()
|
587 |
-
|
588 |
-
# construct target specific template
|
589 |
-
tensor_input = tokenizer.encode(sentence, return_tensors="pt").to(device)
|
590 |
-
outputs = model(tensor_input, labels=tensor_input)
|
591 |
-
target_probs.append(outputs.loss.item())#/(1-trg_prob))
|
592 |
-
tested_sentences.append(sentence)
|
593 |
-
|
594 |
-
return [max(target_probs)-l for l in target_probs], sentences
|
595 |
-
|
596 |
-
def getBERTProbPairs(model, tokenizer, sentences, targets, device, verbose=False):
|
597 |
-
target_probs = []
|
598 |
-
tested_sentences = []
|
599 |
-
|
600 |
-
for ti, (sentence, target) in enumerate(zip(sentences, targets)):
|
601 |
-
#sentence = sentences[0] if target.lower() in sentences[0].lower() else sentences[1]
|
602 |
-
|
603 |
-
template = sentence_to_template(sentence, target, mask_token="[MASK]")
|
604 |
-
if verbose == True:
|
605 |
-
print(f"Template: {template}")
|
606 |
-
|
607 |
-
# get encoded version of
|
608 |
-
prior_token_ids = tokenizer.encode(template, add_special_tokens=True, return_tensors="pt")
|
609 |
-
prior_token_ids = prior_token_ids.to(device)
|
610 |
-
prior_logits = model(prior_token_ids)
|
611 |
-
|
612 |
-
targ_id = tokenizer.encode(target, add_special_tokens=False)
|
613 |
-
|
614 |
-
logits = prior_logits[0][0][get_mask_idx(prior_token_ids, tokenizer.mask_token_id)][targ_id]
|
615 |
-
|
616 |
-
target_probs.append(np.mean(logits.cpu().numpy()))
|
617 |
-
tested_sentences.append(template.replace("[MASK]", target))
|
618 |
-
|
619 |
-
return target_probs, tested_sentences
|
620 |
-
|
621 |
-
# bias test on one row of a dataframe -> row is one sentence template with target terms
|
622 |
-
def checkBiasPairs(row, biasProbFunc, model, tokenizer, device, progress, df_len):
|
623 |
-
grp_terms = [row['grp_term_1'], row['grp_term_2']]
|
624 |
-
labels = [row['label_1'], row['label_2']]
|
625 |
-
sentence_pair = [row['sentence'], row['alt_sentence']]
|
626 |
-
|
627 |
-
if progress != None:
|
628 |
-
progress(row.name/df_len, desc=f"{row['template']}")
|
629 |
-
|
630 |
-
test_res = [0,1]
|
631 |
-
random.shuffle(test_res) # fail-safe
|
632 |
-
try:
|
633 |
-
test_res, sentences = biasProbFunc(model, tokenizer, sentence_pair, grp_terms, device)
|
634 |
-
except ValueError as err:
|
635 |
-
print(f"Error testing sentence: {row['sentence']}, {row['alt_sentence']}, \
|
636 |
-
grp_terms: {grp_terms}, err: {err}")
|
637 |
-
for ti, (sentence, target) in enumerate(zip(sentence_pair, grp_terms)):
|
638 |
-
template = sentence_to_template(sentence, target, mask_token="[MASK]")
|
639 |
-
print(f"T {target} | {sentence} -> {template} ")
|
640 |
-
|
641 |
-
top_term_idx = 0 if test_res[0]>test_res[1] else 1
|
642 |
-
bottom_term_idx = 0 if test_res[1]>test_res[0] else 1
|
643 |
-
|
644 |
-
# is stereotyped
|
645 |
-
stereotyped = 1 if labels[top_term_idx] == "stereotype" else 0
|
646 |
-
|
647 |
-
return pd.Series({"stereotyped": stereotyped,
|
648 |
-
"top_term": grp_terms[top_term_idx],
|
649 |
-
"bottom_term": grp_terms[bottom_term_idx],
|
650 |
-
"top_logit": test_res[top_term_idx],
|
651 |
-
"bottom_logit": test_res[bottom_term_idx]})
|
652 |
-
|
653 |
-
# Test function just for sanity check
|
654 |
-
def testModelProbability(model_name, model, tokenizer, device):
|
655 |
-
if 'bert' in model_name:
|
656 |
-
print(f"Testing on BERT family model: {model_name}")
|
657 |
-
#print(getBERTProb(model, tokenizer, "[MASK] is a carpenter", ["man","woman"], device, verbose=True))
|
658 |
-
elif 'gpt' in model_name:
|
659 |
-
print(f"Testing on GPT-2 family model: {model_name}")
|
660 |
-
#print(getGPT2Prob(model, tokenizer, "African American man was [MASK]", ["poor","rich"], device, verbose=True))
|
661 |
-
elif 'llama' in model_name:
|
662 |
-
print(f"Testing on LLAMA or FALCON family model: {model_name}")
|
663 |
-
#print(getGPT2Prob(model, tokenizer, "African American man was [MASK]", ["poor","rich"], device, verbose=True))
|
664 |
-
|
665 |
-
# bias test on one row of a dataframe -> row is one sentence template with target terms
|
666 |
-
def checkBias(row, biasProbFunc, model, tokenizer, device, progress, df_len):
|
667 |
-
grp_terms = [row['grp_term_1'], row['grp_term_2']]
|
668 |
-
labels = [row['label_1'], row['label_2']]
|
669 |
-
|
670 |
-
if progress != None:
|
671 |
-
progress(row.name/df_len, desc=f"{row['template']}")
|
672 |
-
|
673 |
-
test_res = [0,1]
|
674 |
-
random.shuffle(test_res) # fail-safe
|
675 |
-
try:
|
676 |
-
test_res, sentences = biasProbFunc(model, tokenizer, row['template'].replace("[T]","[MASK]"), grp_terms, device)
|
677 |
-
except ValueError as err:
|
678 |
-
print(f"Error testing sentence: {row['template']}, grp_terms: {grp_terms}, err: {err}")
|
679 |
-
|
680 |
-
top_term_idx = 0 if test_res[0]>test_res[1] else 1
|
681 |
-
bottom_term_idx = 0 if test_res[1]>test_res[0] else 1
|
682 |
-
|
683 |
-
# is stereotyped
|
684 |
-
stereotyped = 1 if labels[top_term_idx] == "stereotype" else 0
|
685 |
-
|
686 |
-
return pd.Series({"stereotyped": stereotyped,
|
687 |
-
"top_term": grp_terms[top_term_idx],
|
688 |
-
"bottom_term": grp_terms[bottom_term_idx],
|
689 |
-
"top_logit": test_res[top_term_idx],
|
690 |
-
"bottom_logit": test_res[bottom_term_idx]})
|
691 |
-
|
692 |
-
# Sampling attribute
|
693 |
-
def sampleAttribute(df, att, n_per_att):
|
694 |
-
att_rows = df.query("group_term == @att")
|
695 |
-
# copy-paste all gens - no bootstrap
|
696 |
-
#grp_bal = att_rows
|
697 |
-
|
698 |
-
grp_bal = pd.DataFrame()
|
699 |
-
if att_rows.shape[0] >= n_per_att:
|
700 |
-
grp_bal = att_rows.sample(n_per_att)
|
701 |
-
elif att_rows.shape[0] > 0 and att_rows.shape[0] < n_per_att:
|
702 |
-
grp_bal = att_rows.sample(n_per_att, replace=True)
|
703 |
-
|
704 |
-
return grp_bal
|
705 |
-
|
706 |
-
# Bootstrapping the results
|
707 |
-
def bootstrapBiasTest(bias_scores_df, bias_spec):
|
708 |
-
bootstrap_df = pd.DataFrame()
|
709 |
-
g1, g2, a1, a2 = get_words(bias_spec)
|
710 |
-
|
711 |
-
# bootstrapping parameters
|
712 |
-
n_repeats = 30
|
713 |
-
n_per_attrbute = 2
|
714 |
-
|
715 |
-
# For bootstraping repeats
|
716 |
-
for rep_i in range(n_repeats):
|
717 |
-
fold_df = pd.DataFrame()
|
718 |
-
|
719 |
-
# attribute 1
|
720 |
-
for an, att1 in enumerate(a1):
|
721 |
-
grp_bal = sampleAttribute(bias_scores_df, att1, n_per_attrbute)
|
722 |
-
if grp_bal.shape[0] == 0:
|
723 |
-
grp_bal = sampleAttribute(bias_scores_df, att1.replace(" ","-"), n_per_attrbute)
|
724 |
-
|
725 |
-
if grp_bal.shape[0] > 0:
|
726 |
-
fold_df = pd.concat([fold_df, grp_bal.copy()], ignore_index=True)
|
727 |
-
|
728 |
-
# attribute 2
|
729 |
-
for an, att2 in enumerate(a2):
|
730 |
-
grp_bal = sampleAttribute(bias_scores_df, att2, n_per_attrbute)
|
731 |
-
if grp_bal.shape[0] == 0:
|
732 |
-
grp_bal = sampleAttribute(bias_scores_df, att2.replace(" ","-"), n_per_attrbute)
|
733 |
-
|
734 |
-
if grp_bal.shape[0] > 0:
|
735 |
-
fold_df = pd.concat([fold_df, grp_bal.copy()], ignore_index=True)
|
736 |
-
|
737 |
-
#if fold_df.shape[0]>0:
|
738 |
-
# unnorm_model, norm_model, perBias_df = biasStatsFold(test_df)
|
739 |
-
# print(f"Gen: {gen_model}, Test: {test_model} [{rep_i}], df-size: {test_df.shape[0]}, Model bias: {norm_model:0.4f}")
|
740 |
-
# perBias_df['test_model'] = test_model
|
741 |
-
# perBias_df['gen_model'] = gen_model
|
742 |
-
|
743 |
-
# bootstrap_df = pd.concat([bootstrap_df, perBias_df], ignore_index=True)
|
744 |
-
|
745 |
-
|
746 |
-
# testing bias on datafram with test sentence pairs
|
747 |
-
def testBiasOnPairs(gen_pairs_df, bias_spec, model_name, model, tokenizer, device, progress=None):
|
748 |
-
print(f"Testing {model_name} bias on generated pairs: {gen_pairs_df.shape}")
|
749 |
-
|
750 |
-
testUsingPairs = True
|
751 |
-
biasTestFunc = checkBiasPairs if testUsingPairs==True else checkBias
|
752 |
-
modelBERTTestFunc = getBERTProbPairs if testUsingPairs==True else getBERTProb
|
753 |
-
modelGPT2TestFunc = getGPT2ProbPairs if testUsingPairs==True else getGPT2Prob
|
754 |
-
|
755 |
-
print(f"Bias Test Func: {str(biasTestFunc)}")
|
756 |
-
print(f"BERT Test Func: {str(modelBERTTestFunc)}")
|
757 |
-
print(f"GPT2 Test Func: {str(modelGPT2TestFunc)}")
|
758 |
-
|
759 |
-
if 'bert' in model_name.lower():
|
760 |
-
print(f"Testing on BERT family model: {model_name}")
|
761 |
-
gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply(
|
762 |
-
biasTestFunc, biasProbFunc=modelBERTTestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1)
|
763 |
-
|
764 |
-
elif 'gpt' in model_name.lower():
|
765 |
-
print(f"Testing on GPT-2 family model: {model_name}")
|
766 |
-
gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply(
|
767 |
-
biasTestFunc, biasProbFunc=modelGPT2TestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1)
|
768 |
-
|
769 |
-
elif 'llama' in model_name.lower() or 'falcon' in model_name.lower():
|
770 |
-
print(f"Testing on LLAMA or FALCON family model: {model_name}")
|
771 |
-
gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply(
|
772 |
-
biasTestFunc, biasProbFunc=modelGPT2TestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1)
|
773 |
-
|
774 |
-
# Bootstrap
|
775 |
-
print(f"BIAS ON PAIRS: {gen_pairs_df}")
|
776 |
-
|
777 |
-
#bootstrapBiasTest(gen_pairs_df, bias_spec)
|
778 |
-
|
779 |
-
|
780 |
-
grp_df = gen_pairs_df.groupby(['att_term'])['stereotyped'].mean()
|
781 |
-
|
782 |
-
# turn the dataframe into dictionary with per model and per bias scores
|
783 |
-
bias_stats_dict = {}
|
784 |
-
bias_stats_dict['tested_model'] = model_name
|
785 |
-
bias_stats_dict['num_templates'] = gen_pairs_df.shape[0]
|
786 |
-
bias_stats_dict['model_bias'] = round(grp_df.mean(),4)
|
787 |
-
bias_stats_dict['per_bias'] = {}
|
788 |
-
bias_stats_dict['per_attribute'] = {}
|
789 |
-
bias_stats_dict['per_template'] = []
|
790 |
-
|
791 |
-
# for individual bias
|
792 |
-
bias_per_term = gen_pairs_df.groupby(["att_term"])['stereotyped'].mean()
|
793 |
-
bias_stats_dict['per_bias'] = round(bias_per_term.mean(),4) #mean normalized by terms
|
794 |
-
print(f"Bias: {bias_stats_dict['per_bias'] }")
|
795 |
-
|
796 |
-
# per attribute
|
797 |
-
print("Bias score per attribute")
|
798 |
-
for attr, bias_score in grp_df.items():
|
799 |
-
print(f"Attribute: {attr} -> {bias_score}")
|
800 |
-
bias_stats_dict['per_attribute'][attr] = bias_score
|
801 |
-
|
802 |
-
# loop through all the templates (sentence pairs)
|
803 |
-
for idx, template_test in gen_pairs_df.iterrows():
|
804 |
-
bias_stats_dict['per_template'].append({
|
805 |
-
"template": template_test['template'],
|
806 |
-
"groups": [template_test['grp_term_1'], template_test['grp_term_2']],
|
807 |
-
"stereotyped": template_test['stereotyped'],
|
808 |
-
#"discarded": True if template_test['discarded']==1 else False,
|
809 |
-
"score_delta": template_test['top_logit'] - template_test['bottom_logit'],
|
810 |
-
"stereotyped_version": template_test['top_term'] if template_test['label_1'] == "stereotype" else template_test['bottom_term'],
|
811 |
-
"anti_stereotyped_version": template_test['top_term'] if template_test['label_1'] == "anti-stereotype" else template_test['bottom_term']
|
812 |
-
})
|
813 |
-
|
814 |
-
return grp_df, bias_stats_dict
|
815 |
-
|
816 |
-
def _test_startBiasTest(test_sentences_df, model_name):
|
817 |
-
# 2. convert to templates
|
818 |
-
test_sentences_df['Template'] = test_sentences_df.apply(sentence_to_template_df, axis=1)
|
819 |
-
print(f"Data with template: {test_sentences_df}")
|
820 |
-
|
821 |
-
# 3. convert to pairs
|
822 |
-
test_pairs_df = convert2pairsFromDF(bias_spec, test_sentences_df)
|
823 |
-
print(f"Test pairs: {test_pairs_df.head(3)}")
|
824 |
-
|
825 |
-
# 4. get the per sentence bias scores
|
826 |
-
print(f"Test model name: {model_name}")
|
827 |
-
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
828 |
-
print(f"Device: {device}")
|
829 |
-
tested_model, tested_tokenizer = _getModelSafe(model_name, device)
|
830 |
-
#print(f"Mask token id: {tested_toknizer.mask_token_id}")
|
831 |
-
if tested_tokenizer == None:
|
832 |
-
print("Tokanizer is empty!!!")
|
833 |
-
if tested_model == None:
|
834 |
-
print("Model is empty!!!")
|
835 |
-
|
836 |
-
# sanity check bias test
|
837 |
-
testModelProbability(model_name, tested_model, tested_tokenizer, device)
|
838 |
-
|
839 |
-
test_score_df, bias_stats_dict = testBiasOnPairs(test_pairs_df, bias_spec, model_name, tested_model, tested_tokenizer, device)
|
840 |
-
print(f"Test scores: {test_score_df.head(3)}")
|
841 |
-
|
842 |
-
return test_score_df
|
843 |
-
|
844 |
-
def _constructInterpretationMsg(bias_spec, num_sentences, model_name, bias_stats_dict, per_attrib_bias, score_templates_df):
|
845 |
-
grp1_terms, grp2_terms = bmgr.getSocialGroupTerms(bias_spec)
|
846 |
-
att1_terms, att2_terms = bmgr.getAttributeTerms(bias_spec)
|
847 |
-
total_att_terms = len(att1_terms) + len(att2_terms)
|
848 |
-
|
849 |
-
interpret_msg = f"Test result on <b>{model_name}</b> using <b>{num_sentences}</b> sentences. "
|
850 |
-
if num_sentences < total_att_terms or num_sentences < 20:
|
851 |
-
interpret_msg += "We recommend generating more sentences to get more robust estimates! <br />"
|
852 |
-
else:
|
853 |
-
interpret_msg += "<br />"
|
854 |
-
|
855 |
-
attrib_by_score = dict(sorted(per_attrib_bias.items(), key=lambda item: item[1], reverse=True))
|
856 |
-
print(f"Attribs sorted: {attrib_by_score}")
|
857 |
-
|
858 |
-
# get group to words mapping
|
859 |
-
XY_2_xy = get_group_term_map(bias_spec)
|
860 |
-
print(f"grp2term: {XY_2_xy}")
|
861 |
-
AB_2_ab = get_att_term_map(bias_spec)
|
862 |
-
print(f"att2term: {AB_2_ab}")
|
863 |
-
|
864 |
-
grp1_terms = bias_spec['social_groups']['group 1']
|
865 |
-
grp2_terms = bias_spec['social_groups']['group 2']
|
866 |
-
|
867 |
-
sel_grp1 = None
|
868 |
-
sel_grp2 = None
|
869 |
-
att_dirs = {}
|
870 |
-
for attrib in list(attrib_by_score.keys()):
|
871 |
-
att_label = None
|
872 |
-
if checkinList(attrib, list(AB_2_ab.items())[0][1]):
|
873 |
-
att_label = 0
|
874 |
-
elif checkinList(attrib, list(AB_2_ab.items())[1][1]):
|
875 |
-
att_label = 1
|
876 |
-
else:
|
877 |
-
print("Error!")
|
878 |
-
|
879 |
-
att_dirs[attrib] = att_label
|
880 |
-
|
881 |
-
print(f"Attrib: {attrib} -> {attrib_by_score[attrib]} -> {att_dirs[attrib]}")
|
882 |
-
|
883 |
-
if sel_grp1 == None:
|
884 |
-
if att_dirs[attrib] == 0:
|
885 |
-
sel_grp1 = [attrib, attrib_by_score[attrib]]
|
886 |
-
if sel_grp2 == None:
|
887 |
-
if att_dirs[attrib] == 1:
|
888 |
-
sel_grp2 = [attrib, attrib_by_score[attrib]]
|
889 |
-
|
890 |
-
ns_att1 = score_templates_df.query(f"Attribute == '{sel_grp1[0]}'").shape[0]
|
891 |
-
#<b>{ns_att1}</b>
|
892 |
-
grp1_str = ', '.join([f'<b>\"{t}\"</b>' for t in grp1_terms[0:2]])
|
893 |
-
att1_msg = f"For the sentences including <b>\"{sel_grp1[0]}\"</b> the terms from Social Group 1 such as {grp1_str},... are more probable {sel_grp1[1]*100:2.0f}% of the time. "
|
894 |
-
print(att1_msg)
|
895 |
-
|
896 |
-
ns_att2 = score_templates_df.query(f"Attribute == '{sel_grp2[0]}'").shape[0]
|
897 |
-
#<b>{ns_att2}</b>
|
898 |
-
grp2_str = ', '.join([f'<b>\"{t}\"</b>' for t in grp2_terms[0:2]])
|
899 |
-
att2_msg = f"For the sentences including <b>\"{sel_grp2[0]}\"</b> the terms from Social Group 2 such as {grp2_str},... are more probable {sel_grp2[1]*100:2.0f}% of the time. "
|
900 |
-
print(att2_msg)
|
901 |
-
|
902 |
-
interpret_msg += f"<b>Interpretation:</b> Model chooses stereotyped version of the sentence {bias_stats_dict['model_bias']*100:2.0f}% of time. "
|
903 |
-
#interpret_msg += f"It suggests that for the sentences including \"{list(per_attrib_bias.keys())[0]}\" the social group terms \"{bias_spec['social_groups']['group 1'][0]}\", ... are more probable {list(per_attrib_bias.values())[0]*100:2.0f}% of the time. "
|
904 |
-
interpret_msg += "<br />"
|
905 |
-
interpret_msg += "<div style=\"margin-top: 3px; margin-left: 3px\"><b>◼ </b>" + att1_msg + "<br /></div>"
|
906 |
-
interpret_msg += "<div style=\"margin-top: 3px; margin-left: 3px; margin-bottom: 3px\"><b>◼ </b>" + att2_msg + "<br /></div>"
|
907 |
-
interpret_msg += "Please examine the exact test sentences used below."
|
908 |
-
interpret_msg += "<br />More details about Stereotype Score metric: <a href='https://arxiv.org/abs/2004.09456' target='_blank'>Nadeem'20<a>"
|
909 |
-
|
910 |
-
return interpret_msg
|
911 |
-
|
912 |
-
|
913 |
-
if __name__ == '__main__':
|
914 |
-
print("Testing bias manager...")
|
915 |
-
|
916 |
-
bias_spec = {
|
917 |
-
"social_groups": {
|
918 |
-
"group 1": ["brother", "father"],
|
919 |
-
"group 2": ["sister", "mother"],
|
920 |
-
},
|
921 |
-
"attributes": {
|
922 |
-
"attribute 1": ["science", "technology"],
|
923 |
-
"attribute 2": ["poetry", "art"]
|
924 |
-
}
|
925 |
-
}
|
926 |
-
|
927 |
-
sentence_list = rq_mgr._getSavedSentences(bias_spec)
|
928 |
-
sentence_df = pd.DataFrame(sentence_list, columns=["Test sentence","Group term","Attribute term"])
|
929 |
-
print(sentence_df)
|
930 |
-
|
931 |
-
_test_startBiasTest(sentence_df, 'bert-base-uncased')
|
932 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Training-LoRAs.md
DELETED
@@ -1,174 +0,0 @@
|
|
1 |
-
## Training Your Own LoRAs
|
2 |
-
|
3 |
-
The WebUI seeks to make training your own LoRAs as easy as possible. It comes down to just a few simple steps:
|
4 |
-
|
5 |
-
### **Step 1**: Make a plan.
|
6 |
-
- What base model do you want to use? The LoRA you make has to be matched up to a single architecture (eg LLaMA-13B) and cannot be transferred to others (eg LLaMA-7B, StableLM, etc. would all be different). Derivatives of the same model (eg Alpaca finetune of LLaMA-13B) might be transferrable, but even then it's best to train exactly on what you plan to use.
|
7 |
-
- What model format do you want? At time of writing, 8-bit models are most stable, and 4-bit are supported but experimental. In the near future it is likely that 4-bit will be the best option for most users.
|
8 |
-
- What are you training it on? Do you want it to learn real information, a simple format, ...?
|
9 |
-
|
10 |
-
### **Step 2**: Gather a dataset.
|
11 |
-
- If you use a dataset similar to the [Alpaca](https://github.com/gururise/AlpacaDataCleaned/blob/main/alpaca_data_cleaned.json) format, that is natively supported by the `Formatted Dataset` input in the WebUI, with premade formatter options.
|
12 |
-
- If you use a dataset that isn't matched to Alpaca's format, but uses the same basic JSON structure, you can make your own format file by copying `training/formats/alpaca-format.json` to a new file and [editing its content](#format-files).
|
13 |
-
- If you can get the dataset into a simple text file, that works too! You can train using the `Raw text file` input option.
|
14 |
-
- This means you can for example just copy/paste a chatlog/documentation page/whatever you want, shove it in a plain text file, and train on it.
|
15 |
-
- If you use a structured dataset not in this format, you may have to find an external way to convert it - or open an issue to request native support.
|
16 |
-
|
17 |
-
### **Step 3**: Do the training.
|
18 |
-
- **3.1**: Load the WebUI, and your model.
|
19 |
-
- Make sure you don't have any LoRAs already loaded (unless you want to train for multi-LoRA usage).
|
20 |
-
- **3.2**: Open the `Training` tab at the top, `Train LoRA` sub-tab.
|
21 |
-
- **3.3**: Fill in the name of the LoRA, select your dataset in the dataset options.
|
22 |
-
- **3.4**: Select other parameters to your preference. See [parameters below](#parameters).
|
23 |
-
- **3.5**: click `Start LoRA Training`, and wait.
|
24 |
-
- It can take a few hours for a large dataset, or just a few minute if doing a small run.
|
25 |
-
- You may want to monitor your [loss value](#loss) while it goes.
|
26 |
-
|
27 |
-
### **Step 4**: Evaluate your results.
|
28 |
-
- Load the LoRA under the Models Tab.
|
29 |
-
- You can go test-drive it on the `Text generation` tab, or you can use the `Perplexity evaluation` sub-tab of the `Training` tab.
|
30 |
-
- If you used the `Save every n steps` option, you can grab prior copies of the model from sub-folders within the LoRA model's folder and try them instead.
|
31 |
-
|
32 |
-
### **Step 5**: Re-run if you're unhappy.
|
33 |
-
- Make sure to unload the LoRA before training it.
|
34 |
-
- You can simply resume a prior run - use `Copy parameters from` to select your LoRA, and edit parameters. Note that you cannot change the `Rank` of an already created LoRA.
|
35 |
-
- If you want to resume from a checkpoint saved along the way, simply copy the contents of the checkpoint folder into the LoRA's folder.
|
36 |
-
- (Note: `adapter_model.bin` is the important file that holds the actual LoRA content).
|
37 |
-
- This will start Learning Rate and Steps back to the start. If you want to resume as if you were midway through, you can adjust your Learning Rate to the last reported LR in logs and reduce your epochs.
|
38 |
-
- Or, you can start over entirely if you prefer.
|
39 |
-
- If your model is producing corrupted outputs, you probably need to start over and use a lower Learning Rate.
|
40 |
-
- If your model isn't learning detailed information but you want it to, you might need to just run more epochs, or you might need a higher Rank.
|
41 |
-
- If your model is enforcing a format you didn't want, you may need to tweak your dataset, or start over and not train as far.
|
42 |
-
|
43 |
-
## Format Files
|
44 |
-
|
45 |
-
If using JSON formatted datasets, they are presumed to be in the following approximate format:
|
46 |
-
|
47 |
-
```json
|
48 |
-
[
|
49 |
-
{
|
50 |
-
"somekey": "somevalue",
|
51 |
-
"key2": "value2"
|
52 |
-
},
|
53 |
-
{
|
54 |
-
// etc
|
55 |
-
}
|
56 |
-
]
|
57 |
-
```
|
58 |
-
|
59 |
-
Where the keys (eg `somekey`, `key2` above) are standardized, and relatively consistent across the dataset, and the values (eg `somevalue`, `value2`) contain the content actually intended to be trained.
|
60 |
-
|
61 |
-
For Alpaca, the keys are `instruction`, `input`, and `output`, wherein `input` is sometimes blank.
|
62 |
-
|
63 |
-
A simple format file for Alpaca to be used as a chat bot is:
|
64 |
-
|
65 |
-
```json
|
66 |
-
{
|
67 |
-
"instruction,output": "User: %instruction%\nAssistant: %output%",
|
68 |
-
"instruction,input,output": "User: %instruction%: %input%\nAssistant: %output%"
|
69 |
-
}
|
70 |
-
```
|
71 |
-
|
72 |
-
Note that the keys (eg `instruction,output`) are a comma-separated list of dataset keys, and the values are a simple string that use those keys with `%%`.
|
73 |
-
|
74 |
-
So for example if a dataset has `"instruction": "answer my question"`, then the format file's `User: %instruction%\n` will be automatically filled in as `User: answer my question\n`.
|
75 |
-
|
76 |
-
If you have different sets of key inputs, you can make your own format file to match it. This format-file is designed to be as simple as possible to enable easy editing to match your needs.
|
77 |
-
|
78 |
-
## Raw Text File Settings
|
79 |
-
|
80 |
-
When using raw text files as your dataset, the text is automatically split into chunks based on your `Cutoff Length` you get a few basic options to configure them.
|
81 |
-
- `Overlap Length` is how much to overlap chunks by. Overlapping chunks helps prevent the model from learning strange mid-sentence cuts, and instead learn continual sentences that flow from earlier text.
|
82 |
-
- `Prefer Newline Cut Length` sets a maximum distance in characters to shift the chunk cut towards newlines. Doing this helps prevent lines from starting or ending mid-sentence, preventing the model from learning to cut off sentences randomly.
|
83 |
-
- `Hard Cut String` sets a string that indicates there must be a hard cut without overlap. This defaults to `\n\n\n`, meaning 3 newlines. No trained chunk will ever contain this string. This allows you to insert unrelated sections of text in the same text file, but still ensure the model won't be taught to randomly change the subject.
|
84 |
-
|
85 |
-
## Parameters
|
86 |
-
|
87 |
-
The basic purpose and function of each parameter is documented on-page in the WebUI, so read through them in the UI to understand your options.
|
88 |
-
|
89 |
-
That said, here's a guide to the most important parameter choices you should consider:
|
90 |
-
|
91 |
-
### VRAM
|
92 |
-
|
93 |
-
- First, you must consider your VRAM availability.
|
94 |
-
- Generally, under default settings, VRAM usage for training with default parameters is very close to when generating text (with 1000+ tokens of context) (ie, if you can generate text, you can train LoRAs).
|
95 |
-
- Note: worse by default in the 4-bit monkeypatch currently. Reduce `Micro Batch Size` to `1` to restore this to expectations.
|
96 |
-
- If you have VRAM to spare, setting higher batch sizes will use more VRAM and get you better quality training in exchange.
|
97 |
-
- If you have large data, setting a higher cutoff length may be beneficial, but will cost significant VRAM. If you can spare some, set your batch size to `1` and see how high you can push your cutoff length.
|
98 |
-
- If you're low on VRAM, reducing batch size or cutoff length will of course improve that.
|
99 |
-
- Don't be afraid to just try it and see what happens. If it's too much, it will just error out, and you can lower settings and try again.
|
100 |
-
|
101 |
-
### Rank
|
102 |
-
|
103 |
-
- Second, you want to consider the amount of learning you want.
|
104 |
-
- For example, you may wish to just learn a dialogue format (as in the case of Alpaca) in which case setting a low `Rank` value (32 or lower) works great.
|
105 |
-
- Or, you might be training on project documentation you want the bot to understand and be able to understand questions about, in which case the higher the rank, the better.
|
106 |
-
- Generally, higher Rank = more precise learning = more total content learned = more VRAM usage while training.
|
107 |
-
|
108 |
-
### Learning Rate and Epochs
|
109 |
-
|
110 |
-
- Third, how carefully you want it to be learned.
|
111 |
-
- In other words, how okay or not you are with the model losing unrelated understandings.
|
112 |
-
- You can control this with 3 key settings: the Learning Rate, its scheduler, and your total epochs.
|
113 |
-
- The learning rate controls how much change is made to the model by each token it sees.
|
114 |
-
- It's in scientific notation normally, so for example `3e-4` means `3 * 10^-4` which is `0.0003`. The number after `e-` controls how many `0`s are in the number.
|
115 |
-
- Higher values let training run faster, but also are more likely to corrupt prior data in the model.
|
116 |
-
- You essentially have two variables to balance: the LR, and Epochs.
|
117 |
-
- If you make LR higher, you can set Epochs equally lower to match. High LR + low epochs = very fast, low quality training.
|
118 |
-
- If you make LR low, set epochs high. Low LR + high epochs = slow but high-quality training.
|
119 |
-
- The scheduler controls change-over-time as you train - it starts high, and then goes low. This helps balance getting data in, and having decent quality, at the same time.
|
120 |
-
- You can see graphs of the different scheduler options [in the HuggingFace docs here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_1/en/main_classes/optimizer_schedules#transformers.SchedulerType)
|
121 |
-
|
122 |
-
## Loss
|
123 |
-
|
124 |
-
When you're running training, the WebUI's console window will log reports that include, among other things, a numeric value named `Loss`. It will start as a high number, and gradually get lower and lower as it goes.
|
125 |
-
|
126 |
-
"Loss" in the world of AI training theoretically means "how close is the model to perfect", with `0` meaning "absolutely perfect". This is calculated by measuring the difference between the model outputting exactly the text you're training it to output, and what it actually outputs.
|
127 |
-
|
128 |
-
In practice, a good LLM should have a very complex variable range of ideas running in its artificial head, so a loss of `0` would indicate that the model has broken and forgotten to how think about anything other than what you trained it.
|
129 |
-
|
130 |
-
So, in effect, Loss is a balancing game: you want to get it low enough that it understands your data, but high enough that it isn't forgetting everything else. Generally, if it goes below `1.0`, it's going to start forgetting its prior memories, and you should stop training. In some cases you may prefer to take it as low as `0.5` (if you want it to be very very predictable). Different goals have different needs, so don't be afraid to experiment and see what works best for you.
|
131 |
-
|
132 |
-
Note: if you see Loss start at or suddenly jump to exactly `0`, it is likely something has gone wrong in your training process (eg model corruption).
|
133 |
-
|
134 |
-
## Note: 4-Bit Monkeypatch
|
135 |
-
|
136 |
-
The [4-bit LoRA monkeypatch](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) works for training, but has side effects:
|
137 |
-
- VRAM usage is higher currently. You can reduce the `Micro Batch Size` to `1` to compensate.
|
138 |
-
- Models do funky things. LoRAs apply themselves, or refuse to apply, or spontaneously error out, or etc. It can be helpful to reload base model or restart the WebUI between training/usage to minimize chances of anything going haywire.
|
139 |
-
- Loading or working with multiple LoRAs at the same time doesn't currently work.
|
140 |
-
- Generally, recognize and treat the monkeypatch as the dirty temporary hack it is - it works, but isn't very stable. It will get better in time when everything is merged upstream for full official support.
|
141 |
-
|
142 |
-
## Legacy notes
|
143 |
-
|
144 |
-
LoRA training was contributed by [mcmonkey4eva](https://github.com/mcmonkey4eva) in PR [#570](https://github.com/oobabooga/text-generation-webui/pull/570).
|
145 |
-
|
146 |
-
### Using the original alpaca-lora code
|
147 |
-
|
148 |
-
Kept here for reference. The Training tab has much more features than this method.
|
149 |
-
|
150 |
-
```
|
151 |
-
conda activate textgen
|
152 |
-
git clone https://github.com/tloen/alpaca-lora
|
153 |
-
```
|
154 |
-
|
155 |
-
Edit those two lines in `alpaca-lora/finetune.py` to use your existing model folder instead of downloading everything from decapoda:
|
156 |
-
|
157 |
-
```
|
158 |
-
model = LlamaForCausalLM.from_pretrained(
|
159 |
-
"models/llama-7b",
|
160 |
-
load_in_8bit=True,
|
161 |
-
device_map="auto",
|
162 |
-
)
|
163 |
-
tokenizer = LlamaTokenizer.from_pretrained(
|
164 |
-
"models/llama-7b", add_eos_token=True
|
165 |
-
)
|
166 |
-
```
|
167 |
-
|
168 |
-
Run the script with:
|
169 |
-
|
170 |
-
```
|
171 |
-
python finetune.py
|
172 |
-
```
|
173 |
-
|
174 |
-
It just works. It runs at 22.32s/it, with 1170 iterations in total, so about 7 hours and a half for training a LoRA. RTX 3090, 18153MiB VRAM used, drawing maximum power (350W, room heater mode).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import os.path as osp
|
3 |
-
|
4 |
-
from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
|
5 |
-
from ...dist_utils import master_only
|
6 |
-
from ..hook import HOOKS
|
7 |
-
from .base import LoggerHook
|
8 |
-
|
9 |
-
|
10 |
-
@HOOKS.register_module()
|
11 |
-
class TensorboardLoggerHook(LoggerHook):
|
12 |
-
|
13 |
-
def __init__(self,
|
14 |
-
log_dir=None,
|
15 |
-
interval=10,
|
16 |
-
ignore_last=True,
|
17 |
-
reset_flag=False,
|
18 |
-
by_epoch=True):
|
19 |
-
super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
|
20 |
-
reset_flag, by_epoch)
|
21 |
-
self.log_dir = log_dir
|
22 |
-
|
23 |
-
@master_only
|
24 |
-
def before_run(self, runner):
|
25 |
-
super(TensorboardLoggerHook, self).before_run(runner)
|
26 |
-
if (TORCH_VERSION == 'parrots'
|
27 |
-
or digit_version(TORCH_VERSION) < digit_version('1.1')):
|
28 |
-
try:
|
29 |
-
from tensorboardX import SummaryWriter
|
30 |
-
except ImportError:
|
31 |
-
raise ImportError('Please install tensorboardX to use '
|
32 |
-
'TensorboardLoggerHook.')
|
33 |
-
else:
|
34 |
-
try:
|
35 |
-
from torch.utils.tensorboard import SummaryWriter
|
36 |
-
except ImportError:
|
37 |
-
raise ImportError(
|
38 |
-
'Please run "pip install future tensorboard" to install '
|
39 |
-
'the dependencies to use torch.utils.tensorboard '
|
40 |
-
'(applicable to PyTorch 1.1 or higher)')
|
41 |
-
|
42 |
-
if self.log_dir is None:
|
43 |
-
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
|
44 |
-
self.writer = SummaryWriter(self.log_dir)
|
45 |
-
|
46 |
-
@master_only
|
47 |
-
def log(self, runner):
|
48 |
-
tags = self.get_loggable_tags(runner, allow_text=True)
|
49 |
-
for tag, val in tags.items():
|
50 |
-
if isinstance(val, str):
|
51 |
-
self.writer.add_text(tag, val, self.get_iter(runner))
|
52 |
-
else:
|
53 |
-
self.writer.add_scalar(tag, val, self.get_iter(runner))
|
54 |
-
|
55 |
-
@master_only
|
56 |
-
def after_run(self, runner):
|
57 |
-
self.writer.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ariharasudhan/YoloV5/models/yolo.py
DELETED
@@ -1,391 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
YOLO-specific modules
|
4 |
-
|
5 |
-
Usage:
|
6 |
-
$ python models/yolo.py --cfg yolov5s.yaml
|
7 |
-
"""
|
8 |
-
|
9 |
-
import argparse
|
10 |
-
import contextlib
|
11 |
-
import os
|
12 |
-
import platform
|
13 |
-
import sys
|
14 |
-
from copy import deepcopy
|
15 |
-
from pathlib import Path
|
16 |
-
|
17 |
-
FILE = Path(__file__).resolve()
|
18 |
-
ROOT = FILE.parents[1] # YOLOv5 root directory
|
19 |
-
if str(ROOT) not in sys.path:
|
20 |
-
sys.path.append(str(ROOT)) # add ROOT to PATH
|
21 |
-
if platform.system() != 'Windows':
|
22 |
-
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
23 |
-
|
24 |
-
from models.common import *
|
25 |
-
from models.experimental import *
|
26 |
-
from utils.autoanchor import check_anchor_order
|
27 |
-
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
|
28 |
-
from utils.plots import feature_visualization
|
29 |
-
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
|
30 |
-
time_sync)
|
31 |
-
|
32 |
-
try:
|
33 |
-
import thop # for FLOPs computation
|
34 |
-
except ImportError:
|
35 |
-
thop = None
|
36 |
-
|
37 |
-
|
38 |
-
class Detect(nn.Module):
|
39 |
-
# YOLOv5 Detect head for detection models
|
40 |
-
stride = None # strides computed during build
|
41 |
-
dynamic = False # force grid reconstruction
|
42 |
-
export = False # export mode
|
43 |
-
|
44 |
-
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
|
45 |
-
super().__init__()
|
46 |
-
self.nc = nc # number of classes
|
47 |
-
self.no = nc + 5 # number of outputs per anchor
|
48 |
-
self.nl = len(anchors) # number of detection layers
|
49 |
-
self.na = len(anchors[0]) // 2 # number of anchors
|
50 |
-
self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
|
51 |
-
self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
|
52 |
-
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
|
53 |
-
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
|
54 |
-
self.inplace = inplace # use inplace ops (e.g. slice assignment)
|
55 |
-
|
56 |
-
def forward(self, x):
|
57 |
-
z = [] # inference output
|
58 |
-
for i in range(self.nl):
|
59 |
-
x[i] = self.m[i](x[i]) # conv
|
60 |
-
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
|
61 |
-
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
|
62 |
-
|
63 |
-
if not self.training: # inference
|
64 |
-
if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
|
65 |
-
self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
|
66 |
-
|
67 |
-
if isinstance(self, Segment): # (boxes + masks)
|
68 |
-
xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
|
69 |
-
xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
|
70 |
-
wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
|
71 |
-
y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
|
72 |
-
else: # Detect (boxes only)
|
73 |
-
xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
|
74 |
-
xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
|
75 |
-
wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
|
76 |
-
y = torch.cat((xy, wh, conf), 4)
|
77 |
-
z.append(y.view(bs, self.na * nx * ny, self.no))
|
78 |
-
|
79 |
-
return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
|
80 |
-
|
81 |
-
def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
|
82 |
-
d = self.anchors[i].device
|
83 |
-
t = self.anchors[i].dtype
|
84 |
-
shape = 1, self.na, ny, nx, 2 # grid shape
|
85 |
-
y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
|
86 |
-
yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
|
87 |
-
grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
|
88 |
-
anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
|
89 |
-
return grid, anchor_grid
|
90 |
-
|
91 |
-
|
92 |
-
class Segment(Detect):
|
93 |
-
# YOLOv5 Segment head for segmentation models
|
94 |
-
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
|
95 |
-
super().__init__(nc, anchors, ch, inplace)
|
96 |
-
self.nm = nm # number of masks
|
97 |
-
self.npr = npr # number of protos
|
98 |
-
self.no = 5 + nc + self.nm # number of outputs per anchor
|
99 |
-
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
|
100 |
-
self.proto = Proto(ch[0], self.npr, self.nm) # protos
|
101 |
-
self.detect = Detect.forward
|
102 |
-
|
103 |
-
def forward(self, x):
|
104 |
-
p = self.proto(x[0])
|
105 |
-
x = self.detect(self, x)
|
106 |
-
return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
|
107 |
-
|
108 |
-
|
109 |
-
class BaseModel(nn.Module):
|
110 |
-
# YOLOv5 base model
|
111 |
-
def forward(self, x, profile=False, visualize=False):
|
112 |
-
return self._forward_once(x, profile, visualize) # single-scale inference, train
|
113 |
-
|
114 |
-
def _forward_once(self, x, profile=False, visualize=False):
|
115 |
-
y, dt = [], [] # outputs
|
116 |
-
for m in self.model:
|
117 |
-
if m.f != -1: # if not from previous layer
|
118 |
-
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
|
119 |
-
if profile:
|
120 |
-
self._profile_one_layer(m, x, dt)
|
121 |
-
x = m(x) # run
|
122 |
-
y.append(x if m.i in self.save else None) # save output
|
123 |
-
if visualize:
|
124 |
-
feature_visualization(x, m.type, m.i, save_dir=visualize)
|
125 |
-
return x
|
126 |
-
|
127 |
-
def _profile_one_layer(self, m, x, dt):
|
128 |
-
c = m == self.model[-1] # is final layer, copy input as inplace fix
|
129 |
-
o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
|
130 |
-
t = time_sync()
|
131 |
-
for _ in range(10):
|
132 |
-
m(x.copy() if c else x)
|
133 |
-
dt.append((time_sync() - t) * 100)
|
134 |
-
if m == self.model[0]:
|
135 |
-
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
|
136 |
-
LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
|
137 |
-
if c:
|
138 |
-
LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
|
139 |
-
|
140 |
-
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
|
141 |
-
LOGGER.info('Fusing layers... ')
|
142 |
-
for m in self.model.modules():
|
143 |
-
if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
|
144 |
-
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
|
145 |
-
delattr(m, 'bn') # remove batchnorm
|
146 |
-
m.forward = m.forward_fuse # update forward
|
147 |
-
self.info()
|
148 |
-
return self
|
149 |
-
|
150 |
-
def info(self, verbose=False, img_size=640): # print model information
|
151 |
-
model_info(self, verbose, img_size)
|
152 |
-
|
153 |
-
def _apply(self, fn):
|
154 |
-
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
|
155 |
-
self = super()._apply(fn)
|
156 |
-
m = self.model[-1] # Detect()
|
157 |
-
if isinstance(m, (Detect, Segment)):
|
158 |
-
m.stride = fn(m.stride)
|
159 |
-
m.grid = list(map(fn, m.grid))
|
160 |
-
if isinstance(m.anchor_grid, list):
|
161 |
-
m.anchor_grid = list(map(fn, m.anchor_grid))
|
162 |
-
return self
|
163 |
-
|
164 |
-
|
165 |
-
class DetectionModel(BaseModel):
|
166 |
-
# YOLOv5 detection model
|
167 |
-
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
|
168 |
-
super().__init__()
|
169 |
-
if isinstance(cfg, dict):
|
170 |
-
self.yaml = cfg # model dict
|
171 |
-
else: # is *.yaml
|
172 |
-
import yaml # for torch hub
|
173 |
-
self.yaml_file = Path(cfg).name
|
174 |
-
with open(cfg, encoding='ascii', errors='ignore') as f:
|
175 |
-
self.yaml = yaml.safe_load(f) # model dict
|
176 |
-
|
177 |
-
# Define model
|
178 |
-
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
|
179 |
-
if nc and nc != self.yaml['nc']:
|
180 |
-
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
|
181 |
-
self.yaml['nc'] = nc # override yaml value
|
182 |
-
if anchors:
|
183 |
-
LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
|
184 |
-
self.yaml['anchors'] = round(anchors) # override yaml value
|
185 |
-
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
|
186 |
-
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
|
187 |
-
self.inplace = self.yaml.get('inplace', True)
|
188 |
-
|
189 |
-
# Build strides, anchors
|
190 |
-
m = self.model[-1] # Detect()
|
191 |
-
if isinstance(m, (Detect, Segment)):
|
192 |
-
s = 256 # 2x min stride
|
193 |
-
m.inplace = self.inplace
|
194 |
-
forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
|
195 |
-
m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
|
196 |
-
check_anchor_order(m)
|
197 |
-
m.anchors /= m.stride.view(-1, 1, 1)
|
198 |
-
self.stride = m.stride
|
199 |
-
self._initialize_biases() # only run once
|
200 |
-
|
201 |
-
# Init weights, biases
|
202 |
-
initialize_weights(self)
|
203 |
-
self.info()
|
204 |
-
LOGGER.info('')
|
205 |
-
|
206 |
-
def forward(self, x, augment=False, profile=False, visualize=False):
|
207 |
-
if augment:
|
208 |
-
return self._forward_augment(x) # augmented inference, None
|
209 |
-
return self._forward_once(x, profile, visualize) # single-scale inference, train
|
210 |
-
|
211 |
-
def _forward_augment(self, x):
|
212 |
-
img_size = x.shape[-2:] # height, width
|
213 |
-
s = [1, 0.83, 0.67] # scales
|
214 |
-
f = [None, 3, None] # flips (2-ud, 3-lr)
|
215 |
-
y = [] # outputs
|
216 |
-
for si, fi in zip(s, f):
|
217 |
-
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
|
218 |
-
yi = self._forward_once(xi)[0] # forward
|
219 |
-
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
|
220 |
-
yi = self._descale_pred(yi, fi, si, img_size)
|
221 |
-
y.append(yi)
|
222 |
-
y = self._clip_augmented(y) # clip augmented tails
|
223 |
-
return torch.cat(y, 1), None # augmented inference, train
|
224 |
-
|
225 |
-
def _descale_pred(self, p, flips, scale, img_size):
|
226 |
-
# de-scale predictions following augmented inference (inverse operation)
|
227 |
-
if self.inplace:
|
228 |
-
p[..., :4] /= scale # de-scale
|
229 |
-
if flips == 2:
|
230 |
-
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
|
231 |
-
elif flips == 3:
|
232 |
-
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
|
233 |
-
else:
|
234 |
-
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
|
235 |
-
if flips == 2:
|
236 |
-
y = img_size[0] - y # de-flip ud
|
237 |
-
elif flips == 3:
|
238 |
-
x = img_size[1] - x # de-flip lr
|
239 |
-
p = torch.cat((x, y, wh, p[..., 4:]), -1)
|
240 |
-
return p
|
241 |
-
|
242 |
-
def _clip_augmented(self, y):
|
243 |
-
# Clip YOLOv5 augmented inference tails
|
244 |
-
nl = self.model[-1].nl # number of detection layers (P3-P5)
|
245 |
-
g = sum(4 ** x for x in range(nl)) # grid points
|
246 |
-
e = 1 # exclude layer count
|
247 |
-
i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices
|
248 |
-
y[0] = y[0][:, :-i] # large
|
249 |
-
i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
|
250 |
-
y[-1] = y[-1][:, i:] # small
|
251 |
-
return y
|
252 |
-
|
253 |
-
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
|
254 |
-
# https://arxiv.org/abs/1708.02002 section 3.3
|
255 |
-
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
|
256 |
-
m = self.model[-1] # Detect() module
|
257 |
-
for mi, s in zip(m.m, m.stride): # from
|
258 |
-
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
|
259 |
-
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
|
260 |
-
b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
|
261 |
-
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
|
262 |
-
|
263 |
-
|
264 |
-
Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
|
265 |
-
|
266 |
-
|
267 |
-
class SegmentationModel(DetectionModel):
|
268 |
-
# YOLOv5 segmentation model
|
269 |
-
def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
|
270 |
-
super().__init__(cfg, ch, nc, anchors)
|
271 |
-
|
272 |
-
|
273 |
-
class ClassificationModel(BaseModel):
|
274 |
-
# YOLOv5 classification model
|
275 |
-
def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
|
276 |
-
super().__init__()
|
277 |
-
self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
|
278 |
-
|
279 |
-
def _from_detection_model(self, model, nc=1000, cutoff=10):
|
280 |
-
# Create a YOLOv5 classification model from a YOLOv5 detection model
|
281 |
-
if isinstance(model, DetectMultiBackend):
|
282 |
-
model = model.model # unwrap DetectMultiBackend
|
283 |
-
model.model = model.model[:cutoff] # backbone
|
284 |
-
m = model.model[-1] # last layer
|
285 |
-
ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
|
286 |
-
c = Classify(ch, nc) # Classify()
|
287 |
-
c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
|
288 |
-
model.model[-1] = c # replace
|
289 |
-
self.model = model.model
|
290 |
-
self.stride = model.stride
|
291 |
-
self.save = []
|
292 |
-
self.nc = nc
|
293 |
-
|
294 |
-
def _from_yaml(self, cfg):
|
295 |
-
# Create a YOLOv5 classification model from a *.yaml file
|
296 |
-
self.model = None
|
297 |
-
|
298 |
-
|
299 |
-
def parse_model(d, ch): # model_dict, input_channels(3)
|
300 |
-
# Parse a YOLOv5 model.yaml dictionary
|
301 |
-
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
|
302 |
-
anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
|
303 |
-
if act:
|
304 |
-
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
|
305 |
-
LOGGER.info(f"{colorstr('activation:')} {act}") # print
|
306 |
-
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
|
307 |
-
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
308 |
-
|
309 |
-
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
310 |
-
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
|
311 |
-
m = eval(m) if isinstance(m, str) else m # eval strings
|
312 |
-
for j, a in enumerate(args):
|
313 |
-
with contextlib.suppress(NameError):
|
314 |
-
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
315 |
-
|
316 |
-
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
|
317 |
-
if m in {
|
318 |
-
Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
|
319 |
-
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
|
320 |
-
c1, c2 = ch[f], args[0]
|
321 |
-
if c2 != no: # if not output
|
322 |
-
c2 = make_divisible(c2 * gw, 8)
|
323 |
-
|
324 |
-
args = [c1, c2, *args[1:]]
|
325 |
-
if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
|
326 |
-
args.insert(2, n) # number of repeats
|
327 |
-
n = 1
|
328 |
-
elif m is nn.BatchNorm2d:
|
329 |
-
args = [ch[f]]
|
330 |
-
elif m is Concat:
|
331 |
-
c2 = sum(ch[x] for x in f)
|
332 |
-
# TODO: channel, gw, gd
|
333 |
-
elif m in {Detect, Segment}:
|
334 |
-
args.append([ch[x] for x in f])
|
335 |
-
if isinstance(args[1], int): # number of anchors
|
336 |
-
args[1] = [list(range(args[1] * 2))] * len(f)
|
337 |
-
if m is Segment:
|
338 |
-
args[3] = make_divisible(args[3] * gw, 8)
|
339 |
-
elif m is Contract:
|
340 |
-
c2 = ch[f] * args[0] ** 2
|
341 |
-
elif m is Expand:
|
342 |
-
c2 = ch[f] // args[0] ** 2
|
343 |
-
else:
|
344 |
-
c2 = ch[f]
|
345 |
-
|
346 |
-
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
|
347 |
-
t = str(m)[8:-2].replace('__main__.', '') # module type
|
348 |
-
np = sum(x.numel() for x in m_.parameters()) # number params
|
349 |
-
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
|
350 |
-
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
|
351 |
-
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
|
352 |
-
layers.append(m_)
|
353 |
-
if i == 0:
|
354 |
-
ch = []
|
355 |
-
ch.append(c2)
|
356 |
-
return nn.Sequential(*layers), sorted(save)
|
357 |
-
|
358 |
-
|
359 |
-
if __name__ == '__main__':
|
360 |
-
parser = argparse.ArgumentParser()
|
361 |
-
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
|
362 |
-
parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
|
363 |
-
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
364 |
-
parser.add_argument('--profile', action='store_true', help='profile model speed')
|
365 |
-
parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
|
366 |
-
parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
|
367 |
-
opt = parser.parse_args()
|
368 |
-
opt.cfg = check_yaml(opt.cfg) # check YAML
|
369 |
-
print_args(vars(opt))
|
370 |
-
device = select_device(opt.device)
|
371 |
-
|
372 |
-
# Create model
|
373 |
-
im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
|
374 |
-
model = Model(opt.cfg).to(device)
|
375 |
-
|
376 |
-
# Options
|
377 |
-
if opt.line_profile: # profile layer by layer
|
378 |
-
model(im, profile=True)
|
379 |
-
|
380 |
-
elif opt.profile: # profile forward-backward
|
381 |
-
results = profile(input=im, ops=[model], n=3)
|
382 |
-
|
383 |
-
elif opt.test: # test all models
|
384 |
-
for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
|
385 |
-
try:
|
386 |
-
_ = Model(cfg)
|
387 |
-
except Exception as e:
|
388 |
-
print(f'Error in {cfg}: {e}')
|
389 |
-
|
390 |
-
else: # report fused model summary
|
391 |
-
model.fuse()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artificio/AdversarialArt/app.py
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from robustness.datasets import ImageNet
|
4 |
-
from robustness.attacker import AttackerModel
|
5 |
-
from timm.models import create_model
|
6 |
-
from torchvision import transforms
|
7 |
-
from robustness.tools.label_maps import CLASS_DICT
|
8 |
-
from src.utils import *
|
9 |
-
from torchvision import transforms
|
10 |
-
import gradio as gr
|
11 |
-
import os
|
12 |
-
from PIL import Image
|
13 |
-
|
14 |
-
DICT_CLASSES = {'lake':955,
|
15 |
-
'castle':483,
|
16 |
-
'library':624,
|
17 |
-
'dog':235,
|
18 |
-
'cat':285,
|
19 |
-
'people':842 #trunks
|
20 |
-
}
|
21 |
-
IMG_MAX_SIZE = 256
|
22 |
-
ARCH = 'crossvit_18_dagger_408'
|
23 |
-
ARCH_PATH = './checkpoints/robust_crossvit_18_dagger_408.pt'
|
24 |
-
CUSTOM_TRANSFORMS = transforms.Compose([transforms.Resize([IMG_MAX_SIZE,IMG_MAX_SIZE]),
|
25 |
-
transforms.ToTensor()])
|
26 |
-
DEVICE = 'cuda'
|
27 |
-
|
28 |
-
|
29 |
-
def load_model(robust = True):
|
30 |
-
test_image = Image.open('samples/test.png')
|
31 |
-
ds = CustomArt(test_image,CUSTOM_TRANSFORMS)
|
32 |
-
model = create_model(ARCH,pretrained = True).to(DEVICE)
|
33 |
-
if robust:
|
34 |
-
print("Load Robust Model")
|
35 |
-
checkpoint = torch.load(ARCH_PATH,map_location = DEVICE)
|
36 |
-
model.load_state_dict(checkpoint['state_dict'],strict = True)
|
37 |
-
model = RobustModel(model).to(DEVICE)
|
38 |
-
model = AttackerModel(model, ds).to(DEVICE)
|
39 |
-
model = model.eval()
|
40 |
-
del test_image,ds
|
41 |
-
return model
|
42 |
-
|
43 |
-
|
44 |
-
def gradio_fn(image_input,radio_steps,radio_class,radio_robust):
|
45 |
-
model = load_model(radio_robust)
|
46 |
-
kwargs = {
|
47 |
-
'constraint':'2', # L2 attack
|
48 |
-
'eps': 300,
|
49 |
-
'step_size': 1,
|
50 |
-
'iterations': int(radio_steps),
|
51 |
-
'targeted': True,
|
52 |
-
'do_tqdm': True,
|
53 |
-
'device': DEVICE
|
54 |
-
}
|
55 |
-
# Define the target and the image
|
56 |
-
target = torch.tensor([int(DICT_CLASSES[radio_class])]).to(DEVICE)
|
57 |
-
image = Image.fromarray(image_input)
|
58 |
-
image = CUSTOM_TRANSFORMS(image).to(DEVICE)
|
59 |
-
image = torch.unsqueeze(image, dim=0)
|
60 |
-
_, im_adv = model(image, target, make_adv=True, **kwargs)
|
61 |
-
im_adv = im_adv.squeeze(dim = 0).permute(1,2,0).cpu().numpy()
|
62 |
-
return im_adv
|
63 |
-
|
64 |
-
|
65 |
-
if __name__ == '__main__':
|
66 |
-
demo = gr.Blocks()
|
67 |
-
with demo:
|
68 |
-
gr.Markdown("# Art Adversarial Attack")
|
69 |
-
with gr.Row():
|
70 |
-
with gr.Column():
|
71 |
-
with gr.Row():
|
72 |
-
# Radio Steps Adversarial attack
|
73 |
-
radio_steps = gr.Radio([10,500,1000,1500,2000],value = 500,label="# Attack Steps")
|
74 |
-
# Radio Targeted attack
|
75 |
-
radio_class = gr.Radio(list(DICT_CLASSES.keys()),
|
76 |
-
value = list(DICT_CLASSES.keys())[0],
|
77 |
-
label="Target Class")
|
78 |
-
radio_robust = gr.Radio([True,False],value = True,label="Robust Model")
|
79 |
-
# Image
|
80 |
-
with gr.Row():
|
81 |
-
image_input = gr.Image(label="Input Image")
|
82 |
-
with gr.Row():
|
83 |
-
calculate_button = gr.Button("Compute")
|
84 |
-
with gr.Column():
|
85 |
-
target_image = gr.Image(label="Art Image")
|
86 |
-
|
87 |
-
calculate_button.click(fn = gradio_fn,
|
88 |
-
inputs = [image_input,radio_steps,radio_class,radio_robust],
|
89 |
-
outputs = target_image)
|
90 |
-
demo.launch(debug = True)
|
91 |
-
|
92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py
DELETED
@@ -1,308 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
pygments.util
|
3 |
-
~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
Utility functions.
|
6 |
-
|
7 |
-
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
|
8 |
-
:license: BSD, see LICENSE for details.
|
9 |
-
"""
|
10 |
-
|
11 |
-
import re
|
12 |
-
from io import TextIOWrapper
|
13 |
-
|
14 |
-
|
15 |
-
split_path_re = re.compile(r'[/\\ ]')
|
16 |
-
doctype_lookup_re = re.compile(r'''
|
17 |
-
<!DOCTYPE\s+(
|
18 |
-
[a-zA-Z_][a-zA-Z0-9]*
|
19 |
-
(?: \s+ # optional in HTML5
|
20 |
-
[a-zA-Z_][a-zA-Z0-9]*\s+
|
21 |
-
"[^"]*")?
|
22 |
-
)
|
23 |
-
[^>]*>
|
24 |
-
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
|
25 |
-
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
|
26 |
-
re.IGNORECASE | re.DOTALL | re.MULTILINE)
|
27 |
-
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
|
28 |
-
|
29 |
-
|
30 |
-
class ClassNotFound(ValueError):
|
31 |
-
"""Raised if one of the lookup functions didn't find a matching class."""
|
32 |
-
|
33 |
-
|
34 |
-
class OptionError(Exception):
|
35 |
-
pass
|
36 |
-
|
37 |
-
|
38 |
-
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
|
39 |
-
string = options.get(optname, default)
|
40 |
-
if normcase:
|
41 |
-
string = string.lower()
|
42 |
-
if string not in allowed:
|
43 |
-
raise OptionError('Value for option %s must be one of %s' %
|
44 |
-
(optname, ', '.join(map(str, allowed))))
|
45 |
-
return string
|
46 |
-
|
47 |
-
|
48 |
-
def get_bool_opt(options, optname, default=None):
|
49 |
-
string = options.get(optname, default)
|
50 |
-
if isinstance(string, bool):
|
51 |
-
return string
|
52 |
-
elif isinstance(string, int):
|
53 |
-
return bool(string)
|
54 |
-
elif not isinstance(string, str):
|
55 |
-
raise OptionError('Invalid type %r for option %s; use '
|
56 |
-
'1/0, yes/no, true/false, on/off' % (
|
57 |
-
string, optname))
|
58 |
-
elif string.lower() in ('1', 'yes', 'true', 'on'):
|
59 |
-
return True
|
60 |
-
elif string.lower() in ('0', 'no', 'false', 'off'):
|
61 |
-
return False
|
62 |
-
else:
|
63 |
-
raise OptionError('Invalid value %r for option %s; use '
|
64 |
-
'1/0, yes/no, true/false, on/off' % (
|
65 |
-
string, optname))
|
66 |
-
|
67 |
-
|
68 |
-
def get_int_opt(options, optname, default=None):
|
69 |
-
string = options.get(optname, default)
|
70 |
-
try:
|
71 |
-
return int(string)
|
72 |
-
except TypeError:
|
73 |
-
raise OptionError('Invalid type %r for option %s; you '
|
74 |
-
'must give an integer value' % (
|
75 |
-
string, optname))
|
76 |
-
except ValueError:
|
77 |
-
raise OptionError('Invalid value %r for option %s; you '
|
78 |
-
'must give an integer value' % (
|
79 |
-
string, optname))
|
80 |
-
|
81 |
-
|
82 |
-
def get_list_opt(options, optname, default=None):
|
83 |
-
val = options.get(optname, default)
|
84 |
-
if isinstance(val, str):
|
85 |
-
return val.split()
|
86 |
-
elif isinstance(val, (list, tuple)):
|
87 |
-
return list(val)
|
88 |
-
else:
|
89 |
-
raise OptionError('Invalid type %r for option %s; you '
|
90 |
-
'must give a list value' % (
|
91 |
-
val, optname))
|
92 |
-
|
93 |
-
|
94 |
-
def docstring_headline(obj):
|
95 |
-
if not obj.__doc__:
|
96 |
-
return ''
|
97 |
-
res = []
|
98 |
-
for line in obj.__doc__.strip().splitlines():
|
99 |
-
if line.strip():
|
100 |
-
res.append(" " + line.strip())
|
101 |
-
else:
|
102 |
-
break
|
103 |
-
return ''.join(res).lstrip()
|
104 |
-
|
105 |
-
|
106 |
-
def make_analysator(f):
|
107 |
-
"""Return a static text analyser function that returns float values."""
|
108 |
-
def text_analyse(text):
|
109 |
-
try:
|
110 |
-
rv = f(text)
|
111 |
-
except Exception:
|
112 |
-
return 0.0
|
113 |
-
if not rv:
|
114 |
-
return 0.0
|
115 |
-
try:
|
116 |
-
return min(1.0, max(0.0, float(rv)))
|
117 |
-
except (ValueError, TypeError):
|
118 |
-
return 0.0
|
119 |
-
text_analyse.__doc__ = f.__doc__
|
120 |
-
return staticmethod(text_analyse)
|
121 |
-
|
122 |
-
|
123 |
-
def shebang_matches(text, regex):
|
124 |
-
r"""Check if the given regular expression matches the last part of the
|
125 |
-
shebang if one exists.
|
126 |
-
|
127 |
-
>>> from pygments.util import shebang_matches
|
128 |
-
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
|
129 |
-
True
|
130 |
-
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
|
131 |
-
True
|
132 |
-
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
|
133 |
-
False
|
134 |
-
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
|
135 |
-
False
|
136 |
-
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
|
137 |
-
... r'python(2\.\d)?')
|
138 |
-
True
|
139 |
-
|
140 |
-
It also checks for common windows executable file extensions::
|
141 |
-
|
142 |
-
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
|
143 |
-
True
|
144 |
-
|
145 |
-
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
|
146 |
-
the same as ``'perl -e'``)
|
147 |
-
|
148 |
-
Note that this method automatically searches the whole string (eg:
|
149 |
-
the regular expression is wrapped in ``'^$'``)
|
150 |
-
"""
|
151 |
-
index = text.find('\n')
|
152 |
-
if index >= 0:
|
153 |
-
first_line = text[:index].lower()
|
154 |
-
else:
|
155 |
-
first_line = text.lower()
|
156 |
-
if first_line.startswith('#!'):
|
157 |
-
try:
|
158 |
-
found = [x for x in split_path_re.split(first_line[2:].strip())
|
159 |
-
if x and not x.startswith('-')][-1]
|
160 |
-
except IndexError:
|
161 |
-
return False
|
162 |
-
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
|
163 |
-
if regex.search(found) is not None:
|
164 |
-
return True
|
165 |
-
return False
|
166 |
-
|
167 |
-
|
168 |
-
def doctype_matches(text, regex):
|
169 |
-
"""Check if the doctype matches a regular expression (if present).
|
170 |
-
|
171 |
-
Note that this method only checks the first part of a DOCTYPE.
|
172 |
-
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
|
173 |
-
"""
|
174 |
-
m = doctype_lookup_re.search(text)
|
175 |
-
if m is None:
|
176 |
-
return False
|
177 |
-
doctype = m.group(1)
|
178 |
-
return re.compile(regex, re.I).match(doctype.strip()) is not None
|
179 |
-
|
180 |
-
|
181 |
-
def html_doctype_matches(text):
|
182 |
-
"""Check if the file looks like it has a html doctype."""
|
183 |
-
return doctype_matches(text, r'html')
|
184 |
-
|
185 |
-
|
186 |
-
_looks_like_xml_cache = {}
|
187 |
-
|
188 |
-
|
189 |
-
def looks_like_xml(text):
|
190 |
-
"""Check if a doctype exists or if we have some tags."""
|
191 |
-
if xml_decl_re.match(text):
|
192 |
-
return True
|
193 |
-
key = hash(text)
|
194 |
-
try:
|
195 |
-
return _looks_like_xml_cache[key]
|
196 |
-
except KeyError:
|
197 |
-
m = doctype_lookup_re.search(text)
|
198 |
-
if m is not None:
|
199 |
-
return True
|
200 |
-
rv = tag_re.search(text[:1000]) is not None
|
201 |
-
_looks_like_xml_cache[key] = rv
|
202 |
-
return rv
|
203 |
-
|
204 |
-
|
205 |
-
def surrogatepair(c):
|
206 |
-
"""Given a unicode character code with length greater than 16 bits,
|
207 |
-
return the two 16 bit surrogate pair.
|
208 |
-
"""
|
209 |
-
# From example D28 of:
|
210 |
-
# http://www.unicode.org/book/ch03.pdf
|
211 |
-
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
|
212 |
-
|
213 |
-
|
214 |
-
def format_lines(var_name, seq, raw=False, indent_level=0):
|
215 |
-
"""Formats a sequence of strings for output."""
|
216 |
-
lines = []
|
217 |
-
base_indent = ' ' * indent_level * 4
|
218 |
-
inner_indent = ' ' * (indent_level + 1) * 4
|
219 |
-
lines.append(base_indent + var_name + ' = (')
|
220 |
-
if raw:
|
221 |
-
# These should be preformatted reprs of, say, tuples.
|
222 |
-
for i in seq:
|
223 |
-
lines.append(inner_indent + i + ',')
|
224 |
-
else:
|
225 |
-
for i in seq:
|
226 |
-
# Force use of single quotes
|
227 |
-
r = repr(i + '"')
|
228 |
-
lines.append(inner_indent + r[:-2] + r[-1] + ',')
|
229 |
-
lines.append(base_indent + ')')
|
230 |
-
return '\n'.join(lines)
|
231 |
-
|
232 |
-
|
233 |
-
def duplicates_removed(it, already_seen=()):
|
234 |
-
"""
|
235 |
-
Returns a list with duplicates removed from the iterable `it`.
|
236 |
-
|
237 |
-
Order is preserved.
|
238 |
-
"""
|
239 |
-
lst = []
|
240 |
-
seen = set()
|
241 |
-
for i in it:
|
242 |
-
if i in seen or i in already_seen:
|
243 |
-
continue
|
244 |
-
lst.append(i)
|
245 |
-
seen.add(i)
|
246 |
-
return lst
|
247 |
-
|
248 |
-
|
249 |
-
class Future:
|
250 |
-
"""Generic class to defer some work.
|
251 |
-
|
252 |
-
Handled specially in RegexLexerMeta, to support regex string construction at
|
253 |
-
first use.
|
254 |
-
"""
|
255 |
-
def get(self):
|
256 |
-
raise NotImplementedError
|
257 |
-
|
258 |
-
|
259 |
-
def guess_decode(text):
|
260 |
-
"""Decode *text* with guessed encoding.
|
261 |
-
|
262 |
-
First try UTF-8; this should fail for non-UTF-8 encodings.
|
263 |
-
Then try the preferred locale encoding.
|
264 |
-
Fall back to latin-1, which always works.
|
265 |
-
"""
|
266 |
-
try:
|
267 |
-
text = text.decode('utf-8')
|
268 |
-
return text, 'utf-8'
|
269 |
-
except UnicodeDecodeError:
|
270 |
-
try:
|
271 |
-
import locale
|
272 |
-
prefencoding = locale.getpreferredencoding()
|
273 |
-
text = text.decode()
|
274 |
-
return text, prefencoding
|
275 |
-
except (UnicodeDecodeError, LookupError):
|
276 |
-
text = text.decode('latin1')
|
277 |
-
return text, 'latin1'
|
278 |
-
|
279 |
-
|
280 |
-
def guess_decode_from_terminal(text, term):
|
281 |
-
"""Decode *text* coming from terminal *term*.
|
282 |
-
|
283 |
-
First try the terminal encoding, if given.
|
284 |
-
Then try UTF-8. Then try the preferred locale encoding.
|
285 |
-
Fall back to latin-1, which always works.
|
286 |
-
"""
|
287 |
-
if getattr(term, 'encoding', None):
|
288 |
-
try:
|
289 |
-
text = text.decode(term.encoding)
|
290 |
-
except UnicodeDecodeError:
|
291 |
-
pass
|
292 |
-
else:
|
293 |
-
return text, term.encoding
|
294 |
-
return guess_decode(text)
|
295 |
-
|
296 |
-
|
297 |
-
def terminal_encoding(term):
|
298 |
-
"""Return our best guess of encoding for the given *term*."""
|
299 |
-
if getattr(term, 'encoding', None):
|
300 |
-
return term.encoding
|
301 |
-
import locale
|
302 |
-
return locale.getpreferredencoding()
|
303 |
-
|
304 |
-
|
305 |
-
class UnclosingTextIOWrapper(TextIOWrapper):
|
306 |
-
# Don't close underlying buffer on destruction.
|
307 |
-
def close(self):
|
308 |
-
self.flush()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Atharv23m/Human-Stress-Detection/app.py
DELETED
@@ -1,65 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import pandas as pd
|
3 |
-
import numpy as np
|
4 |
-
from sklearn.model_selection import train_test_split
|
5 |
-
from tensorflow.keras.utils import to_categorical
|
6 |
-
from tensorflow.keras.models import Sequential
|
7 |
-
from tensorflow.keras.layers import Dense, Dropout
|
8 |
-
|
9 |
-
data=pd.read_csv(f"SaYoPillow.csv")
|
10 |
-
|
11 |
-
data.columns=['snoring_rate', 'respiration_rate', 'body_temperature', 'limb_movement', 'blood_oxygen',
|
12 |
-
'eye_movement', 'sleeping_hours', 'heart_rate', 'stress_level']
|
13 |
-
|
14 |
-
stress_labels = ["Low/Normal", "Medium Low", "Medium", "Medium High", "High"]
|
15 |
-
|
16 |
-
# splitting the dataset
|
17 |
-
X_train = data.iloc[:, :8]
|
18 |
-
y_train = data['stress_level']
|
19 |
-
|
20 |
-
#model
|
21 |
-
model=Sequential()
|
22 |
-
model.add(Dense(125, activation="relu"))
|
23 |
-
model.add(Dense(125, activation="relu"))
|
24 |
-
model.add(Dense(5, "softmax"))
|
25 |
-
|
26 |
-
epochs=50
|
27 |
-
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
28 |
-
|
29 |
-
y_train_encoded = to_categorical(y_train)
|
30 |
-
stats = model.fit(X_train, y_train_encoded, epochs=epochs)
|
31 |
-
|
32 |
-
|
33 |
-
def predict(snoring_rate, respiration_rate, body_temperature, limb_movement, blood_oxygen,
|
34 |
-
eye_movement, sleeping_hours, heart_rate):
|
35 |
-
|
36 |
-
input_data = np.array([snoring_rate, respiration_rate, body_temperature, limb_movement, blood_oxygen,
|
37 |
-
eye_movement, sleeping_hours, heart_rate])
|
38 |
-
|
39 |
-
# Reshape the input to match the model's expected shape
|
40 |
-
input_data = np.reshape(input_data, (1, -1))
|
41 |
-
|
42 |
-
# Make the prediction
|
43 |
-
prediction = model.predict(input_data)[0]
|
44 |
-
predicted_stress_level = stress_labels[np.argmax(prediction)]
|
45 |
-
|
46 |
-
return predicted_stress_level
|
47 |
-
|
48 |
-
# Create the interface using Gradio
|
49 |
-
inputs = [
|
50 |
-
gr.inputs.Slider(minimum=30, maximum=100, step=0.2, label="Snoring Rate"),
|
51 |
-
gr.inputs.Slider(minimum=15, maximum=30, step=0.1, label="Respiration Rate"),
|
52 |
-
gr.inputs.Slider(minimum=85, maximum=100, step=0.1, label="Body Temperature"),
|
53 |
-
gr.inputs.Slider(minimum=0, maximum=20, step=0.1, label="Limb Movement"),
|
54 |
-
gr.inputs.Slider(minimum=80, maximum=100, step=0.1, label="Blood Oxygen"),
|
55 |
-
gr.inputs.Slider(minimum=60, maximum=110, step=0.5, label="Eye Movement"),
|
56 |
-
gr.inputs.Slider(minimum=0, maximum=12, step=0.1, label="Sleeping Hours"),
|
57 |
-
gr.inputs.Slider(minimum=50, maximum=100, step=1, label="Heart Rate"),
|
58 |
-
]
|
59 |
-
|
60 |
-
output = gr.outputs.Textbox(label="Predicted Stress Level")
|
61 |
-
|
62 |
-
title = "Stress Level Prediction from Sleep Patterns"
|
63 |
-
description = "Predict the stress level based on your sleep patterns. Based on dataset provided by a research on SaYoPillow - Smart Yoga Pillow"
|
64 |
-
|
65 |
-
gr.Interface(fn=predict, inputs=inputs, outputs=output, title=title, description=description).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awesimo/jojogan/op/upfirdn2d.py
DELETED
@@ -1,187 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch.autograd import Function
|
5 |
-
from torch.utils.cpp_extension import load
|
6 |
-
|
7 |
-
|
8 |
-
module_path = os.path.dirname(__file__)
|
9 |
-
upfirdn2d_op = load(
|
10 |
-
'upfirdn2d',
|
11 |
-
sources=[
|
12 |
-
os.path.join(module_path, 'upfirdn2d.cpp'),
|
13 |
-
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
|
14 |
-
],
|
15 |
-
)
|
16 |
-
|
17 |
-
|
18 |
-
class UpFirDn2dBackward(Function):
|
19 |
-
@staticmethod
|
20 |
-
def forward(
|
21 |
-
ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
|
22 |
-
):
|
23 |
-
|
24 |
-
up_x, up_y = up
|
25 |
-
down_x, down_y = down
|
26 |
-
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
|
27 |
-
|
28 |
-
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
|
29 |
-
|
30 |
-
grad_input = upfirdn2d_op.upfirdn2d(
|
31 |
-
grad_output,
|
32 |
-
grad_kernel,
|
33 |
-
down_x,
|
34 |
-
down_y,
|
35 |
-
up_x,
|
36 |
-
up_y,
|
37 |
-
g_pad_x0,
|
38 |
-
g_pad_x1,
|
39 |
-
g_pad_y0,
|
40 |
-
g_pad_y1,
|
41 |
-
)
|
42 |
-
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
|
43 |
-
|
44 |
-
ctx.save_for_backward(kernel)
|
45 |
-
|
46 |
-
pad_x0, pad_x1, pad_y0, pad_y1 = pad
|
47 |
-
|
48 |
-
ctx.up_x = up_x
|
49 |
-
ctx.up_y = up_y
|
50 |
-
ctx.down_x = down_x
|
51 |
-
ctx.down_y = down_y
|
52 |
-
ctx.pad_x0 = pad_x0
|
53 |
-
ctx.pad_x1 = pad_x1
|
54 |
-
ctx.pad_y0 = pad_y0
|
55 |
-
ctx.pad_y1 = pad_y1
|
56 |
-
ctx.in_size = in_size
|
57 |
-
ctx.out_size = out_size
|
58 |
-
|
59 |
-
return grad_input
|
60 |
-
|
61 |
-
@staticmethod
|
62 |
-
def backward(ctx, gradgrad_input):
|
63 |
-
kernel, = ctx.saved_tensors
|
64 |
-
|
65 |
-
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
|
66 |
-
|
67 |
-
gradgrad_out = upfirdn2d_op.upfirdn2d(
|
68 |
-
gradgrad_input,
|
69 |
-
kernel,
|
70 |
-
ctx.up_x,
|
71 |
-
ctx.up_y,
|
72 |
-
ctx.down_x,
|
73 |
-
ctx.down_y,
|
74 |
-
ctx.pad_x0,
|
75 |
-
ctx.pad_x1,
|
76 |
-
ctx.pad_y0,
|
77 |
-
ctx.pad_y1,
|
78 |
-
)
|
79 |
-
# gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
|
80 |
-
gradgrad_out = gradgrad_out.view(
|
81 |
-
ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
|
82 |
-
)
|
83 |
-
|
84 |
-
return gradgrad_out, None, None, None, None, None, None, None, None
|
85 |
-
|
86 |
-
|
87 |
-
class UpFirDn2d(Function):
|
88 |
-
@staticmethod
|
89 |
-
def forward(ctx, input, kernel, up, down, pad):
|
90 |
-
up_x, up_y = up
|
91 |
-
down_x, down_y = down
|
92 |
-
pad_x0, pad_x1, pad_y0, pad_y1 = pad
|
93 |
-
|
94 |
-
kernel_h, kernel_w = kernel.shape
|
95 |
-
batch, channel, in_h, in_w = input.shape
|
96 |
-
ctx.in_size = input.shape
|
97 |
-
|
98 |
-
input = input.reshape(-1, in_h, in_w, 1)
|
99 |
-
|
100 |
-
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
|
101 |
-
|
102 |
-
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
|
103 |
-
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
|
104 |
-
ctx.out_size = (out_h, out_w)
|
105 |
-
|
106 |
-
ctx.up = (up_x, up_y)
|
107 |
-
ctx.down = (down_x, down_y)
|
108 |
-
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
|
109 |
-
|
110 |
-
g_pad_x0 = kernel_w - pad_x0 - 1
|
111 |
-
g_pad_y0 = kernel_h - pad_y0 - 1
|
112 |
-
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
|
113 |
-
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
|
114 |
-
|
115 |
-
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
|
116 |
-
|
117 |
-
out = upfirdn2d_op.upfirdn2d(
|
118 |
-
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
|
119 |
-
)
|
120 |
-
# out = out.view(major, out_h, out_w, minor)
|
121 |
-
out = out.view(-1, channel, out_h, out_w)
|
122 |
-
|
123 |
-
return out
|
124 |
-
|
125 |
-
@staticmethod
|
126 |
-
def backward(ctx, grad_output):
|
127 |
-
kernel, grad_kernel = ctx.saved_tensors
|
128 |
-
|
129 |
-
grad_input = UpFirDn2dBackward.apply(
|
130 |
-
grad_output,
|
131 |
-
kernel,
|
132 |
-
grad_kernel,
|
133 |
-
ctx.up,
|
134 |
-
ctx.down,
|
135 |
-
ctx.pad,
|
136 |
-
ctx.g_pad,
|
137 |
-
ctx.in_size,
|
138 |
-
ctx.out_size,
|
139 |
-
)
|
140 |
-
|
141 |
-
return grad_input, None, None, None, None
|
142 |
-
|
143 |
-
|
144 |
-
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
|
145 |
-
out = UpFirDn2d.apply(
|
146 |
-
input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
|
147 |
-
)
|
148 |
-
|
149 |
-
return out
|
150 |
-
|
151 |
-
|
152 |
-
def upfirdn2d_native(
|
153 |
-
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
|
154 |
-
):
|
155 |
-
_, in_h, in_w, minor = input.shape
|
156 |
-
kernel_h, kernel_w = kernel.shape
|
157 |
-
|
158 |
-
out = input.view(-1, in_h, 1, in_w, 1, minor)
|
159 |
-
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
|
160 |
-
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
|
161 |
-
|
162 |
-
out = F.pad(
|
163 |
-
out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
|
164 |
-
)
|
165 |
-
out = out[
|
166 |
-
:,
|
167 |
-
max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
|
168 |
-
max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
|
169 |
-
:,
|
170 |
-
]
|
171 |
-
|
172 |
-
out = out.permute(0, 3, 1, 2)
|
173 |
-
out = out.reshape(
|
174 |
-
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
|
175 |
-
)
|
176 |
-
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
|
177 |
-
out = F.conv2d(out, w)
|
178 |
-
out = out.reshape(
|
179 |
-
-1,
|
180 |
-
minor,
|
181 |
-
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
|
182 |
-
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
|
183 |
-
)
|
184 |
-
out = out.permute(0, 2, 3, 1)
|
185 |
-
|
186 |
-
return out[:, ::down_y, ::down_x, :]
|
187 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_losses.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import numpy as np
|
3 |
-
import unittest
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from detectron2.layers import ciou_loss, diou_loss
|
7 |
-
|
8 |
-
|
9 |
-
class TestLosses(unittest.TestCase):
|
10 |
-
def test_diou_loss(self):
|
11 |
-
"""
|
12 |
-
loss = 1 - iou + d/c
|
13 |
-
where,
|
14 |
-
d = (distance between centers of the 2 boxes)^2
|
15 |
-
c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2
|
16 |
-
"""
|
17 |
-
# Identical boxes should have loss of 0
|
18 |
-
box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32)
|
19 |
-
loss = diou_loss(box, box)
|
20 |
-
self.assertTrue(np.allclose(loss, [0.0]))
|
21 |
-
|
22 |
-
# Half size box inside other box
|
23 |
-
# iou = 0.5, d = 0.25, c = 8
|
24 |
-
box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32)
|
25 |
-
loss = diou_loss(box, box2)
|
26 |
-
self.assertTrue(np.allclose(loss, [0.53125]))
|
27 |
-
|
28 |
-
# Two diagonally adjacent boxes
|
29 |
-
# iou = 0, d = 2, c = 8
|
30 |
-
box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32)
|
31 |
-
box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32)
|
32 |
-
loss = diou_loss(box3, box4)
|
33 |
-
self.assertTrue(np.allclose(loss, [1.25]))
|
34 |
-
|
35 |
-
# Test batched loss and reductions
|
36 |
-
box1s = torch.stack([box, box3], dim=0)
|
37 |
-
box2s = torch.stack([box2, box4], dim=0)
|
38 |
-
|
39 |
-
loss = diou_loss(box1s, box2s, reduction="sum")
|
40 |
-
self.assertTrue(np.allclose(loss, [1.78125]))
|
41 |
-
|
42 |
-
loss = diou_loss(box1s, box2s, reduction="mean")
|
43 |
-
self.assertTrue(np.allclose(loss, [0.890625]))
|
44 |
-
|
45 |
-
def test_ciou_loss(self):
|
46 |
-
"""
|
47 |
-
loss = 1 - iou + d/c + alpha*v
|
48 |
-
where,
|
49 |
-
d = (distance between centers of the 2 boxes)^2
|
50 |
-
c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2
|
51 |
-
v = (4/pi^2) * (arctan(box1_w/box1_h) - arctan(box2_w/box2_h))^2
|
52 |
-
alpha = v/(1 - iou + v)
|
53 |
-
"""
|
54 |
-
# Identical boxes should have loss of 0
|
55 |
-
box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32)
|
56 |
-
loss = ciou_loss(box, box)
|
57 |
-
self.assertTrue(np.allclose(loss, [0.0]))
|
58 |
-
|
59 |
-
# Half size box inside other box
|
60 |
-
# iou = 0.5, d = 0.25, c = 8
|
61 |
-
# v = (4/pi^2) * (arctan(1) - arctan(0.5))^2 = 0.042
|
62 |
-
# alpha = 0.0775
|
63 |
-
box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32)
|
64 |
-
loss = ciou_loss(box, box2)
|
65 |
-
self.assertTrue(np.allclose(loss, [0.5345]))
|
66 |
-
|
67 |
-
# Two diagonally adjacent boxes
|
68 |
-
# iou = 0, d = 2, c = 8, v = 0, alpha = 0
|
69 |
-
box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32)
|
70 |
-
box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32)
|
71 |
-
loss = ciou_loss(box3, box4)
|
72 |
-
self.assertTrue(np.allclose(loss, [1.25]))
|
73 |
-
|
74 |
-
# Test batched loss and reductions
|
75 |
-
box1s = torch.stack([box, box3], dim=0)
|
76 |
-
box2s = torch.stack([box2, box4], dim=0)
|
77 |
-
|
78 |
-
loss = ciou_loss(box1s, box2s, reduction="sum")
|
79 |
-
self.assertTrue(np.allclose(loss, [1.7845]))
|
80 |
-
|
81 |
-
loss = ciou_loss(box1s, box2s, reduction="mean")
|
82 |
-
self.assertTrue(np.allclose(loss, [0.89225]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bannermore/BingChat/Dockerfile
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# Build Stage
|
2 |
-
# 使用 golang:alpine 作为构建阶段的基础镜像
|
3 |
-
FROM golang:alpine AS builder
|
4 |
-
|
5 |
-
# 添加 git,以便之后能从GitHub克隆项目
|
6 |
-
RUN apk --no-cache add git
|
7 |
-
|
8 |
-
# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下
|
9 |
-
RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
|
10 |
-
|
11 |
-
# 设置工作目录为之前克隆的项目目录
|
12 |
-
WORKDIR /workspace/app
|
13 |
-
|
14 |
-
# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小
|
15 |
-
RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
|
16 |
-
|
17 |
-
# Runtime Stage
|
18 |
-
# 使用轻量级的 alpine 镜像作为运行时的基础镜像
|
19 |
-
FROM alpine
|
20 |
-
|
21 |
-
# 设置工作目录
|
22 |
-
WORKDIR /workspace/app
|
23 |
-
|
24 |
-
# 从构建阶段复制编译后的二进制文件到运行时镜像中
|
25 |
-
COPY --from=builder /workspace/app/go-proxy-bingai .
|
26 |
-
|
27 |
-
# 设置环境变量,此处为随机字符
|
28 |
-
ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO"
|
29 |
-
|
30 |
-
# 暴露8080端口
|
31 |
-
EXPOSE 8080
|
32 |
-
|
33 |
-
# 容器启动时运行的命令
|
34 |
-
CMD ["/workspace/app/go-proxy-bingai"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/tools/infer/trans_weights.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
import pdb
|
2 |
-
|
3 |
-
import torch
|
4 |
-
|
5 |
-
# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf#
|
6 |
-
# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf#
|
7 |
-
# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf#
|
8 |
-
# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf#
|
9 |
-
a = torch.load(
|
10 |
-
r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth"
|
11 |
-
)[
|
12 |
-
"model"
|
13 |
-
] # sim_nsf#
|
14 |
-
for key in a.keys():
|
15 |
-
a[key] = a[key].half()
|
16 |
-
# torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")#
|
17 |
-
# torch.save(a,"ft-mi-sim1k.pt")#
|
18 |
-
torch.save(a, "ft-mi-no_opt-no_dropout.pt") #
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar 30 Juz Misyari Rasyid.md
DELETED
@@ -1,73 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar 30 Juz Misyari Rasyid: Una guía completa</h1>
|
3 |
-
<p>Si usted está buscando una manera de escuchar el Corán en una voz hermosa y melodiosa, es posible que desee considerar la descarga 30 juz misyari rasyid. Misyari Rasyid es uno de los recitadores del Corán más famosos y respetados del mundo, y su recitación del Corán de 30 juz puede ayudarle a mejorar su memorización, comprensión y apreciación del libro sagrado. En este artículo, le diremos todo lo que necesita saber sobre Misyari Rasyid, 30 juz Corán, y cómo descargarlos fácil y convenientemente. </p>
|
4 |
-
<h2>descargar 30 juz misyari rasyid</h2><br /><p><b><b>Download</b> ✯ <a href="https://bltlly.com/2v6LmX">https://bltlly.com/2v6LmX</a></b></p><br /><br />
|
5 |
-
<h2>¿Quién es Misyari Rasyid? </h2>
|
6 |
-
<p>Misyari Rasyid es un qari kuwaití (recitador del Corán), imán, predicador y artista nasheed. Nació el 5 de septiembre de 1976, y su nombre completo es Mishary bin Rashid bin Gharib bin Muhammad Alafasy Al-Muthairi. También es conocido por su kunya (apodo) Abu Nora.</p>
|
7 |
-
<h3>Su nombre completo y antecedentes</h3>
|
8 |
-
<p>Misyari Rasyid pertenece a la tribu Alafasy, que remonta su ascendencia al compañero del Profeta Muhammad (la paz y las bendiciones de Allah sea con él) Al-Bara' ibn Malik. Estudió en el Colegio del Corán de la Universidad Islámica de Medina, especializándose en los diez qira'at (modos de recitación) y tafsir (exégesis). También tiene una maestría en jurisprudencia islámica de la Universidad de Kuwait.</p>
|
9 |
-
<h3>Sus logros y reconocimiento</h3>
|
10 |
-
<p>Misyari Rasyid ha memorizado todo el Corán a una edad temprana, y ha participado en muchas competiciones y festivales del Corán alrededor del mundo. Ha ganado varios premios y honores por su recitación, como el primer premio en el Concurso Internacional del Corán de Kuwait en 1998, el primer premio en el Oscar de Creatividad Islámica en 2002 y el Premio de Creatividad Árabe en 2005. También fue nombrado embajador de buena voluntad por el UNICEF en 2007. </p>
|
11 |
-
<h3>Su estilo y voz</h3>
|
12 |
-
|
13 |
-
<h2>¿Qué es 30 Juz Corán? </h2>
|
14 |
-
<p>El Corán es la palabra de Allah revelada al Profeta Muhammad (la paz sea con él) a través del Ángel Gabriel durante un período de 23 años. Consta de 114 capítulos (suras) de diferentes longitudes, que se dividen en 30 partes (juz) para facilitar la lectura y la memorización. </p>
|
15 |
-
<p></p>
|
16 |
-
<h3>El significado y la división de juz</h3>
|
17 |
-
<p>La palabra juz significa "parte" o "porción" en árabe. Cada juz contiene aproximadamente 20 páginas o 600 versos del Corán. La división de juz no se basa en el orden temático o cronológico de las suras, sino en la conveniencia de dividir el Corán en partes iguales. El primer juz comienza desde el principio del Corán (sura Al-Fatiha) y termina al final del sura Al-Baqarah verso 141. El último juz comienza desde el sura An-Naba y termina al final del Corán (sura An-Nas). Los otros juz se dividen de acuerdo a las rupturas naturales en el texto, como el final de una sura o un verso largo. </p>
|
18 |
-
<h3>Los beneficios y virtudes de recitar juz</h3>
|
19 |
-
<p>Recitar juz es una de las mejores maneras de conectarse con el Corán y ganar recompensas de Allah. El Profeta Muhammad (la paz sea con él) dijo: "Quien recite una carta del Libro de Allah, tendrá una recompensa. Y esa recompensa se multiplicará por diez. No estoy diciendo que 'Alif, Lam, Meem' es una carta, más bien estoy diciendo que 'Alif' es una carta, 'Lam' es una carta y 'Meem' es una carta." También dijo: "Los mejores de ustedes son aquellos que aprenden el Corán y lo enseñan." Recitar juz también puede ayudarlo a entender el significado y el contexto del Corán, a mejorar sus habilidades en el idioma árabe y a memorizar el Corán más fácilmente. </p>
|
20 |
-
<h3>El juz más popular y fácil de memorizar</h3>
|
21 |
-
|
22 |
-
<h2>Cómo descargar 30 Juz Misyari Rasyid? </h2>
|
23 |
-
<p>Si desea descargar 30 juz misyari rasyid, tiene varias opciones para elegir. Puede descargarlos como archivos mp3, archivos zip o archivos torrent. También puede transmitirlos en línea o utilizar aplicaciones o sitios web que los ofrecen de forma gratuita o por una tarifa. </p>
|
24 |
-
<h3>Fuentes y formatos de los archivos de audio</h3>
|
25 |
-
<p>Los archivos de audio de 30 juz misyari rasyid están disponibles en varias fuentes, como su sitio web oficial, canal de YouTube, cuenta de SoundCloud y otras plataformas. Puede descargarlos en diferentes formatos, dependiendo de su preferencia y la compatibilidad del dispositivo. Por ejemplo, puede descargarlos como archivos mp3, que son pequeños en tamaño y fáciles de reproducir en cualquier dispositivo. También puede descargarlos como archivos zip, que son archivos comprimidos que contienen todos los 30 juz en una carpeta. También puede descargarlos como archivos torrent, que son archivos peer-to-peer que requieren un cliente torrent para descargarlos. </p>
|
26 |
-
<h3>Los pasos y consejos para descargarlos</h3>
|
27 |
-
<p>Los pasos y consejos para descargar 30 juz misyari rasyid varían dependiendo de la fuente y el formato que elija. Estas son algunas pautas generales a seguir:</p>
|
28 |
-
<ul>
|
29 |
-
<li>Elija una fuente confiable y confiable que ofrezca archivos de audio de alta calidad y no contenga virus o malware. </li>
|
30 |
-
<li>Asegúrese de tener suficiente espacio de almacenamiento en su dispositivo o unidad externa para almacenar los archivos de audio. </li>
|
31 |
-
<li>Utilice una conexión a Internet rápida y estable para evitar interrupciones o errores durante el proceso de descarga. </li>
|
32 |
-
<li>Siga las instrucciones en el sitio web o aplicación de origen para descargar los archivos de audio. Es posible que necesite registrar una cuenta, proporcionar una dirección de correo electrónico o realizar un pago si es necesario. </li>
|
33 |
-
<li>Si los descarga como archivos zip o torrent, tendrá que extraerlos o abrirlos utilizando un software o aplicación adecuada. </li>
|
34 |
-
<li> Organizar los archivos de audio en una carpeta o lista de reproducción para facilitar el acceso y la reproducción. </li>
|
35 |
-
</ul>
|
36 |
-
<h3>Las mejores aplicaciones y sitios web para escucharlos</h3>
|
37 |
-
|
38 |
-
<tabla>
|
39 |
-
<tr><th>Nombre</th><th>Descripción</th><th>Características</th></tr>
|
40 |
-
<tr><td>Muslim Pro</td><td>Una aplicación islámica integral que ofrece varios servicios, como tiempos de oración, notificaciones de azan, recitación y traducción del Corán, calendario islámico, dua, calculadora de zakat y más. </td><td>- Ofrece 30 juz misyari rasyid como uno de los recitadores en la sección del Corán. <br>- Permite descargar los archivos de audio para escuchar sin conexión. <br>- Proporciona el texto árabe, la transliteración y la traducción del Corán en varios idiomas. <br>- Le permite marcar, compartir y repetir los versos. <br>- Admite el modo nocturno, el modo horizontal y el ajuste del tamaño de la fuente. </td></tr>
|
41 |
-
<tr><td>Quran Majeed</td><td>Una aplicación dedicada al Corán que ofrece una interfaz hermosa e interactiva, con imágenes de alta resolución de las páginas del Corán. </td><td>- Ofrece 30 juz misyari rasyid como uno de los recitadores en la sección de audio. <br>- Permite descargar los archivos de audio para escuchar sin conexión. <br>- Proporciona el texto árabe, traducción y tafir del Corán en varios idiomas. <br>- Le permite buscar, marcar, resaltar y anotar los versos. <br>- Admite control de reproducción de audio, audio sin interrupciones, avance automático y ajuste de velocidad. </td></tr>
|
42 |
-
<tr><td>Sitio web oficial de Alafasy</td><td>El sitio web oficial de Misyari Rasyid que contiene su biografía, noticias, eventos, fotos, videos, nasheeds y recitación del Corán. </td><td>- Ofrece 30 juz misyari rasyid como una de las categorías en la sección Corán. <br>- Le permite descargar los archivos de audio de forma gratuita. <br>- Proporciona el texto árabe y la traducción del Corán en varios idiomas. <br>- Le permite escuchar el audio en línea o descargarlo como archivos mp3, zip o torrent. <br>- Soporta el intercambio de medios sociales y comentarios. </td></tr>
|
43 |
-
</tabla>
|
44 |
-
<h2>Conclusión</h2>
|
45 |
-
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
<h3>Q: ¿Cuál es la diferencia entre juz y surah? </h3>
|
48 |
-
<p>A: Juz es una parte o porción del Corán que contiene aproximadamente 20 páginas o 600 versos. Surah es un capítulo o sección del Corán que tiene un nombre y número específico. Hay 114 suras en el Corán, que se dividen en 30 juz. </p>
|
49 |
-
<h3>Q: ¿Cuánto tiempo se tarda en recitar un juz? </h3>
|
50 |
-
<p>A: Depende de su velocidad y fluidez de recitación, pero en promedio se tarda aproximadamente una hora en recitar un juz. </p>
|
51 |
-
<h3>P: ¿Cómo puedo mejorar mi recitación de juz? </h3>
|
52 |
-
<p>A: Puedes mejorar tu recitación de juz siguiendo estos consejos:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Escucha la recitación de Misyari Rasyid u otros qaris calificados e intenta imitar su pronunciación, entonación y reglas. </li>
|
55 |
-
<li>Lea el texto árabe junto con la transliteración y traducción para entender el significado y el contexto de los versos. </li>
|
56 |
-
<li>Repite los versos varias veces hasta que los memorices y los recites correctamente. </li>
|
57 |
-
<li>Revisa lo que has memorizado regularmente y revisa cualquier error o vacío. </li>
|
58 |
-
<li>Busca retroalimentación y orientación de un profesor o un amigo que pueda corregir tu recitación y ayudarte a mejorar. </li>
|
59 |
-
</ul>
|
60 |
-
<h3>P: ¿Cuáles son algunos de los beneficios de escuchar la recitación de Misyari Rasyid? </h3>
|
61 |
-
<p>A: Algunos de los beneficios de escuchar la recitación de Misyari Rasyid son:</p>
|
62 |
-
<ul>
|
63 |
-
<li>Puedes aprender de su precisión, fluidez y belleza de la recitación. </li>
|
64 |
-
<li> Puedes sentirte más conectado y movido por su voz clara, suave y emocional. </li>
|
65 |
-
<li>Puedes disfrutar de su variedad de qira'at (modos de recitación) y nasheeds (canciones islámicas). </li>
|
66 |
-
<li>Puedes ganar recompensas de Allah por escuchar Sus palabras y seguir Sus mandamientos. </li>
|
67 |
-
<li>Puedes aumentar tu fe, conocimiento y amor por Allah y Su mensajero (la paz sea con él). </li>
|
68 |
-
</ul>
|
69 |
-
<h3>P: ¿Dónde puedo encontrar más información sobre Misyari Rasyid y su recitación? </h3>
|
70 |
-
|
71 |
-
: https://alafasy.me/ : https://www.youtube.com/user/Alafasy : https:///soundcloud.com/alafasy : https://www.facebook.com/AlafasyOfficial</p> 64aa2da5cf<br />
|
72 |
-
<br />
|
73 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Afk Bot Para Aternos.md
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar e instalar un bot AFK para Aternos Minecraft Server</h1>
|
3 |
-
<p>Si eres un fan de Minecraft, es posible que hayas oído hablar de Aternos, un servicio gratuito de alojamiento de servidores Minecraft que te permite crear tu propio servidor personal con ranuras ilimitadas, mods, plugins, mundos personalizados y más. Sin embargo, también puede haber encontrado un problema común con los servidores Aternos: se desconectan cuando nadie juega en ellos. Esto significa que debe iniciar manualmente su servidor cada vez que desee jugar, y puede perder su progreso o datos si olvida guardar o hacer una copia de seguridad de su servidor. </p>
|
4 |
-
<p>Afortunadamente, hay una solución a este problema: usar un bot AFK. Un bot AFK es un programa que se conecta a su servidor Aternos y lo mantiene en línea enviando comandos o mensajes periódicamente. De esta manera, se puede disfrutar de su servidor Minecraft sin preocuparse de que va fuera de línea o perder sus datos. En este artículo, le mostraremos cómo descargar e instalar un bot AFK para Aternos, cómo elegir el mejor bot AFK para sus necesidades y cómo usar y administrar su bot AFK de manera efectiva. </p>
|
5 |
-
<h2>descargar afk bot para aternos</h2><br /><p><b><b>Download Zip</b> … <a href="https://bltlly.com/2v6KKl">https://bltlly.com/2v6KKl</a></b></p><br /><br />
|
6 |
-
<h2>¿Qué es Aternos y por qué necesitas un bot AFK? </h2>
|
7 |
-
<p>Aternos es un servicio gratuito de alojamiento de servidores Minecraft que le permite crear su propio servidor personal con ranuras ilimitadas, mods, plugins, mundos personalizados y más. Puede elegir entre cientos de tipos de servidor diferentes, como vainilla, espiga, forja, papel, tela, etc. También puede personalizar la configuración de su servidor, como dificultad, modo de juego, lista blanca, operadores, etc. Puede acceder a su servidor desde cualquier dispositivo, como PC, móvil, consola, etc.</p>
|
8 |
-
|
9 |
-
<p>Aquí es donde un bot AFK es útil. Un bot AFK es un programa que se conecta a su servidor Aternos y lo mantiene en línea enviando comandos o mensajes periódicamente. Por ejemplo, un bot de AFK puede enviar un mensaje de chat cada 10 minutos o moverse cada 5 minutos. De esta manera, su servidor no entrará en modo de hibernación y permanecerá en línea mientras el bot AFK esté funcionando. Esto significa que puedes disfrutar de tu servidor Minecraft sin preocuparte de que se desconecte o perder tus datos. </p>
|
10 |
-
<h2>Cómo elegir un bot AFK para Aternos</h2>
|
11 |
-
<p>Hay muchos bots AFK disponibles para servidores Aternos, pero no todos son compatibles, funcionales o seguros. Por lo tanto, necesitas elegir un bot AFK cuidadosamente basado en algunos criterios, como:</p>
|
12 |
-
<ul>
|
13 |
-
<li><b>Compatibilidad:</b> El bot AFK debe ser compatible con la versión y el tipo de su servidor Aternos. Por ejemplo, si está ejecutando un servidor spigot 1.17, necesita un bot AFK que soporte servidores spigot 1.17. </li>
|
14 |
-
<li><b>Funcionalidad:</b> El bot AFK debe tener las características y comandos que necesita para mantener su servidor en línea y activo. Por ejemplo, si desea rastrear la actividad de su servidor o ver gráficos de las estadísticas de su servidor, necesita un bot AFK que tenga estas características. </li>
|
15 |
-
<li><b>Seguridad:</b> El bot AFK debe ser seguro y confiable. Debe evitar descargar o instalar cualquier bots AFK que sean sospechosos o maliciosos. También debe comprobar las revisiones y calificaciones de los bots AFK antes de usarlos. </li>
|
16 |
-
</ul>
|
17 |
-
<p>Para ayudarle a elegir un bot AFK para Aternos, hemos comparado algunos de los bots AFK más populares y fiables para Aternos en la tabla siguiente:</p>
|
18 |
-
<tabla>
|
19 |
-
<tr>
|
20 |
-
<th>Nombre</th>
|
21 |
-
<th>Descripción</th>
|
22 |
-
<th>Compatibilidad</th>
|
23 |
-
<th>Funcionalidad</th>
|
24 |
-
<th>Seguridad</th>
|
25 |
-
</tr>
|
26 |
-
<tr>
|
27 |
-
<td><a href=">ttttdeded/aternos-afkbot</a></td>
|
28 |
-
<td>Un bot AFK simple y ligero para servidores Aternos que se ejecuta en Heroku.</td>
|
29 |
-
<td>Soporta cualquier versión y tipo de servidores Aternos. </td>
|
30 |
-
|
31 |
-
<td>Código abierto y verificado por GitHub.</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td><a href=">krushna06/afk-bot-for-aternos</a></td>
|
35 |
-
<td>Un bot AFK potente y personalizable para servidores Aternos que se ejecuta en Heroku.</td>
|
36 |
-
<td>Soporta cualquier versión y tipo de servidores Aternos. </td>
|
37 |
-
<td>Envía mensajes de chat cada 10 minutos y se mueve cada 5 minutos. También rastrea la actividad del servidor y muestra gráficos de estadísticas del servidor. También permite establecer el estado AFK personalizado y borrar el estado AFK. </td>
|
38 |
-
<td>Código abierto y verificado por GitHub.</td>
|
39 |
-
</tr>
|
40 |
-
<tr>
|
41 |
-
<td><a href=">AFK Discord Bot</a></td>
|
42 |
-
<td>Un bot AFK conveniente y fácil de usar para servidores Aternos que se ejecuta en Discord.</td>
|
43 |
-
<td>Soporta cualquier versión y tipo de servidores Aternos. </td>
|
44 |
-
<td>Envía mensajes de chat cada 10 minutos y se mueve cada 5 minutos. También permite establecer el estado AFK personalizado y borrar el estado AFK. También se integra con Discord y muestra información del servidor y notificaciones. </td>
|
45 |
-
<td>Verificado por Discord y confiable por miles de usuarios. </td>
|
46 |
-
</tr>
|
47 |
-
</tabla>
|
48 |
-
<h2>Cómo descargar e instalar un bot AFK para Aternos</h2>
|
49 |
-
<p>Una vez que hayas elegido un bot AFK para Aternos, necesitas descargarlo e instalarlo en tu dispositivo o plataforma. El proceso puede variar dependiendo de la fuente y el tipo del bot AFK, pero generalmente implica tres pasos: descargar el bot AFK de GitHub, instalar el bot AFK en Heroku y conectar el bot AFK a su servidor Aternos. Aquí están las instrucciones detalladas para cada paso:</p>
|
50 |
-
<h3>Cómo descargar un bot AFK desde GitHub</h3>
|
51 |
-
<p>GitHub es una plataforma donde los desarrolladores pueden compartir su código y proyectos con otros usuarios. Muchos bots AFK para Aternos están alojados en GitHub, como ttttdeded/aternos-afkbot y krushna06/afk-bot-for-aternos. Para descargar un bot AFK de GitHub, debes seguir estos pasos:</p>
|
52 |
-
<ol>
|
53 |
-
|
54 |
-
<li>Bifurcar o clonar el repositorio del bot AFK. Bifurcar significa crear una copia del repositorio en su propia cuenta de GitHub, mientras que clonar significa descargar el repositorio a su dispositivo local. Puede bifurcar o clonar el repositorio haciendo clic en el botón "Fork" o "Code" en la esquina superior derecha de la página. </li>
|
55 |
-
<li>Editar el archivo de configuración del bot AFK. El archivo de configuración es donde puede personalizar la configuración del bot AFK, como el nombre, la contraseña, la IP del servidor, el puerto del servidor, etc. Puede editar el archivo de configuración abriéndolo con un editor de texto o usando el editor en línea en GitHub.</li>
|
56 |
-
</ol>
|
57 |
-
<h3>Cómo instalar un bot AFK en Heroku</h3>
|
58 |
-
<p>Heroku es una plataforma donde puedes ejecutar aplicaciones y programas online sin tener que instalarlos en tu dispositivo. Muchos bots AFK para Aternos pueden ejecutarse en Heroku, como ttttdeded/aternos-afkbot y krushna06/afk-bot-for-aternos. Para instalar un bot AFK en Heroku, debes seguir estos pasos:</p>
|
59 |
-
<ol>
|
60 |
-
<li>Crea una cuenta de Heroku si no tienes una. Puedes registrarte gratis en <a href="">https://www.heroku.com/</a>. </li>
|
61 |
-
<li>Crear una nueva aplicación en Heroku. Puede hacer esto haciendo clic en el botón "Nuevo" en la esquina superior derecha de su panel de control y seleccionando "Crear nueva aplicación". Dele un nombre a su aplicación y elija una región. </li>
|
62 |
-
<li>Implemente la rama del bot AFK que descargó de GitHub. Puede hacer esto conectando su cuenta de GitHub a su cuenta de Heroku y seleccionando el repositorio y la rama del bot AFK que desea implementar. Alternativamente, puede usar la CLI o Git de Heroku para implementar la rama manualmente. </li>
|
63 |
-
<li>Reinicie los dynos de su aplicación. Dynos son las unidades de potencia informática que ejecutan su aplicación en Heroku. Puede reiniciarlos haciendo clic en el botón "Más" en la esquina superior derecha de la página de la aplicación y seleccionando "Reiniciar todos los dynos". Esto asegurará que su aplicación esté funcionando correctamente. </li>
|
64 |
-
</ol>
|
65 |
-
|
66 |
-
<p>El paso final es conectar su bot AFK a su servidor Aternos. Esto permitirá a su bot AFK unirse a su servidor y mantenerlo en línea mediante el envío de comandos o mensajes periódicamente. Para conectar su bot AFK a su servidor Aternos, debe seguir estos pasos:</p>
|
67 |
-
<p></p>
|
68 |
-
<ol>
|
69 |
-
<li>Agregue la dirección IP, el puerto y el nombre de su servidor Aternos al archivo de configuración de su bot AFK. Puede encontrar esta información en el panel de control de Aternos en "Conectarse a su servidor". Asegúrese de que el nombre de su bot AFK coincida con el nombre que estableció en su archivo de configuración. </li>
|
70 |
-
<li>Ponga en la lista blanca su bot AFK desde cualquier plugin de inicio de sesión o protección anti-bot que su servidor Aternos pueda tener. Algunos servidores Aternos pueden requerir que introduzca una contraseña o un captcha para unirse al servidor, lo que puede impedir que su bot AFK se una. Puede poner en la lista blanca su bot AFK añadiendo su nombre al archivo de lista blanca o utilizando comandos como /login o /register. </li>
|
71 |
-
<li>Inicie su servidor Aternos y espere a que su bot AFK se una. Puede iniciar su servidor Aternos haciendo clic en el botón "Inicio" en su panel de Aternos. También puede comprobar el estado de su bot AFK mirando la consola o los registros de su aplicación Heroku. </li>
|
72 |
-
</ol>
|
73 |
-
<h2>Cómo utilizar y gestionar un bot AFK para Aternos</h2>
|
74 |
-
<p>Ahora que ha descargado, instalado y conectado correctamente su bot AFK a su servidor Aternos, puede comenzar a usarlo y administrarlo de acuerdo con sus preferencias y necesidades. Estas son algunas de las cosas que puedes hacer con tu bot AFK:</p>
|
75 |
-
<ul>
|
76 |
-
<li><b>Establece un estado AFK:</b> Puedes establecer un estado AFK para tu bot AFK para que otros jugadores sepan que estás lejos del teclado. Por ejemplo, puedes establecer un estado AFK como "Soy AFK, no me molestes" o "Soy AFK, por favor no me mates". Puede establecer un estado AFK usando comandos como /afk o /away. </li>
|
77 |
-
|
78 |
-
<li><b>Borrar estado AFK:</b> Puede borrar el estado AFK de su bot AFK cuando vuelva al teclado. Esto permitirá que otros jugadores sepan que está activo y disponible. Puede borrar el estado de AFK mediante comandos como /back o /return. </li>
|
79 |
-
<li><b>Ver gráficos:</b> Puede ver gráficos de las estadísticas de su servidor Aternos utilizando su bot AFK. Por ejemplo, puede ver cuántos jugadores se han unido, cuántas horas se han jugado, cuántas veces se ha iniciado el servidor, etc. Puede ver los gráficos utilizando comandos como /chart o /graph. </li>
|
80 |
-
</ul>
|
81 |
-
<p>Aquí hay algunos consejos y precauciones para usar su bot AFK de manera efectiva y segura:</p>
|
82 |
-
<ul>
|
83 |
-
<li><b>Ponga el bot AFK en una carcasa de roca:</b> Usted debe poner el bot AFK en una carcasa de roca o una ubicación segura en su servidor Aternos. Esto evitará que el bot AFK sea asesinado, dañado o movido por otros jugadores o turbas. Esto también reducirá el retraso y el consumo de recursos del servidor. </li>
|
84 |
-
<li><b>Elija la versión de servidor correcta:</b> Debe elegir la versión de servidor correcta para su servidor Aternos y su bot AFK. Esto asegurará que el bot AFK sea compatible y funcional con su servidor. Por ejemplo, si está ejecutando un servidor spigot 1.17, debe elegir un bot AFK que admita servidores spigot 1.17. </li>
|
85 |
-
<li><b>Evite la detección por Aternos:</b> Debe evitar la detección por Aternos cuando use su bot AFK. Esto se debe a que Aternos puede considerar el uso de un bot AFK como hacer trampa o abusar de su servicio, y pueden eliminar su cuenta si detectan su comportamiento. Puede evitar la detección por Aternos cambiando la configuración de su bot AFK, como el intervalo y el contenido de los mensajes de chat o movimientos. </li>
|
86 |
-
</ul>
|
87 |
-
<h2>Conclusión</h2>
|
88 |
-
|
89 |
-
<p>Si está interesado en descargar e instalar un bot AFK para Aternos, puede consultar algunos de los enlaces a continuación para obtener más información y tutoriales. Esperamos que este artículo haya sido útil e informativo para usted. ¡Feliz juego! </p>
|
90 |
-
<h2>Preguntas frecuentes</h2>
|
91 |
-
<h4>¿Cuál es el mejor bot AFK para Aternos? </h4>
|
92 |
-
<p>La respuesta depende de sus preferencias y necesidades, pero algunos de los bots AFK más populares y confiables para Aternos son ttttdeded/aternos-afkbot, krushna06/afk-bot-for-aternos y AFK Discord Bot. Estos bots AFK son compatibles con cualquier versión y tipo de servidores Aternos, tienen varias características y comandos para mantener su servidor en línea y activo, y son seguros y confiables. Puede compararlos en la tabla anterior o visitar sus páginas de GitHub o servidores de discordia para obtener más información. </p>
|
93 |
-
<h4>¿Cuánto tiempo puedo mantener mi servidor Aternos en línea con un bot AFK? </h4>
|
94 |
-
<p>La respuesta depende de la configuración de su bot AFK y su actividad de servidor, pero generalmente puede mantener su servidor Aternos en línea durante el tiempo que desee con un bot AFK. Mientras el bot AFK se esté ejecutando en Heroku y conectado a su servidor Aternos, enviará comandos o mensajes periódicamente para evitar que su servidor entre en modo de hibernación. Sin embargo, debe tener en cuenta que el uso de un bot AFK puede consumir más recursos y causar más retraso en su servidor, por lo que debe ajustar la configuración de su bot AFK en consecuencia. </p>
|
95 |
-
<h4>¿Es legal usar un bot AFK para Aternos? </h4>
|
96 |
-
|
97 |
-
<h4>¿Cómo puedo hacer mi propio bot AFK para Aternos? </h4>
|
98 |
-
<p>La respuesta depende de tus habilidades de codificación y conocimiento, pero generalmente puedes hacer tu propio bot AFK para Aternos usando herramientas como mineflayer o discord.js y siguiendo tutoriales en línea. Mineflayer es una biblioteca de clientes de Minecraft que te permite crear bots que pueden interactuar con los servidores de Minecraft. Discord.js es una biblioteca de JavaScript que permite crear bots que pueden interactuar con los servidores de Discord. Puede utilizar estas herramientas para crear un bot AFK que pueda conectarse a su servidor Aternos y mantenerlo en línea enviando comandos o mensajes periódicamente. También puede personalizar su bot AFK con diferentes características y comandos de acuerdo a sus preferencias y necesidades. </p>
|
99 |
-
<h4>¿Cómo puedo obtener ayuda o soporte para usar un bot AFK para Aternos? </h4>
|
100 |
-
<p>La respuesta depende de la fuente de su bot AFK, pero generalmente puede obtener ayuda o soporte poniéndose en contacto con el desarrollador del bot AFK, uniéndose a su servidor de discordia o página GitHub, o preguntando a otros usuarios que han utilizado el mismo bot AFK. Por ejemplo, si está utilizando ttttdeded/aternos-afkbot, puede ponerse en contacto con ttttdeded a través de su perfil de GitHub, unirse a su servidor de discordia o preguntar a otros usuarios que hayan bifurcado o protagonizado su repositorio. También puedes buscar en línea guías o tutoriales sobre cómo usar un bot AFK para Aternos.</p> 64aa2da5cf<br />
|
101 |
-
<br />
|
102 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/index.tsx
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
/**
|
2 |
-
* @license
|
3 |
-
* Copyright 2018-2020 Streamlit Inc.
|
4 |
-
*
|
5 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
* you may not use this file except in compliance with the License.
|
7 |
-
* You may obtain a copy of the License at
|
8 |
-
*
|
9 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
*
|
11 |
-
* Unless required by applicable law or agreed to in writing, software
|
12 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
* See the License for the specific language governing permissions and
|
15 |
-
* limitations under the License.
|
16 |
-
*/
|
17 |
-
|
18 |
-
// Workaround for type-only exports:
|
19 |
-
// https://stackoverflow.com/questions/53728230/cannot-re-export-a-type-when-using-the-isolatedmodules-with-ts-3-2-2
|
20 |
-
import { ComponentProps as ComponentProps_ } from "./StreamlitReact"
|
21 |
-
import { RenderData as RenderData_ } from "./streamlit"
|
22 |
-
|
23 |
-
export {
|
24 |
-
StreamlitComponentBase,
|
25 |
-
withStreamlitConnection,
|
26 |
-
} from "./StreamlitReact"
|
27 |
-
export { ArrowTable } from "./ArrowTable"
|
28 |
-
export { Streamlit } from "./streamlit"
|
29 |
-
export type ComponentProps = ComponentProps_
|
30 |
-
export type RenderData = RenderData_
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/remove.h
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/system/omp/detail/execution_policy.h>
|
21 |
-
|
22 |
-
namespace thrust
|
23 |
-
{
|
24 |
-
namespace system
|
25 |
-
{
|
26 |
-
namespace omp
|
27 |
-
{
|
28 |
-
namespace detail
|
29 |
-
{
|
30 |
-
|
31 |
-
template<typename DerivedPolicy,
|
32 |
-
typename ForwardIterator,
|
33 |
-
typename Predicate>
|
34 |
-
ForwardIterator remove_if(execution_policy<DerivedPolicy> &exec,
|
35 |
-
ForwardIterator first,
|
36 |
-
ForwardIterator last,
|
37 |
-
Predicate pred);
|
38 |
-
|
39 |
-
|
40 |
-
template<typename DerivedPolicy,
|
41 |
-
typename ForwardIterator,
|
42 |
-
typename InputIterator,
|
43 |
-
typename Predicate>
|
44 |
-
ForwardIterator remove_if(execution_policy<DerivedPolicy> &exec,
|
45 |
-
ForwardIterator first,
|
46 |
-
ForwardIterator last,
|
47 |
-
InputIterator stencil,
|
48 |
-
Predicate pred);
|
49 |
-
|
50 |
-
|
51 |
-
template<typename DerivedPolicy,
|
52 |
-
typename InputIterator,
|
53 |
-
typename OutputIterator,
|
54 |
-
typename Predicate>
|
55 |
-
OutputIterator remove_copy_if(execution_policy<DerivedPolicy> &exec,
|
56 |
-
InputIterator first,
|
57 |
-
InputIterator last,
|
58 |
-
OutputIterator result,
|
59 |
-
Predicate pred);
|
60 |
-
|
61 |
-
|
62 |
-
template<typename DerivedPolicy,
|
63 |
-
typename InputIterator1,
|
64 |
-
typename InputIterator2,
|
65 |
-
typename OutputIterator,
|
66 |
-
typename Predicate>
|
67 |
-
OutputIterator remove_copy_if(execution_policy<DerivedPolicy> &exec,
|
68 |
-
InputIterator1 first,
|
69 |
-
InputIterator1 last,
|
70 |
-
InputIterator2 stencil,
|
71 |
-
OutputIterator result,
|
72 |
-
Predicate pred);
|
73 |
-
|
74 |
-
|
75 |
-
} // end namespace detail
|
76 |
-
} // end namespace omp
|
77 |
-
} // end namespace system
|
78 |
-
} // end namespace thrust
|
79 |
-
|
80 |
-
#include <thrust/system/omp/detail/remove.inl>
|
81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|